diff --git a/typescript-sdk/apps/dojo/src/files.json b/typescript-sdk/apps/dojo/src/files.json index 25ffe46c3..54f42f78d 100644 --- a/typescript-sdk/apps/dojo/src/files.json +++ b/typescript-sdk/apps/dojo/src/files.json @@ -1,1195 +1,4 @@ { - "middleware-starter::agentic_chat": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC<AgenticChatProps> = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients, etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤖 Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interact with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend are automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "index.ts", - "content": "import { AbstractAgent, BaseEvent, EventType, RunAgentInput } from \"@ag-ui/client\";\nimport { Observable } from \"rxjs\";\n\nexport class MiddlewareStarterAgent extends AbstractAgent {\n protected run(input: RunAgentInput): Observable<BaseEvent> {\n const messageId = Date.now().toString();\n return new Observable<BaseEvent>((observer) => {\n observer.next({\n type: EventType.RUN_STARTED,\n threadId: input.threadId,\n runId: input.runId,\n } as any);\n\n observer.next({\n type: EventType.TEXT_MESSAGE_START,\n messageId,\n } as any);\n\n observer.next({\n type: EventType.TEXT_MESSAGE_CONTENT,\n messageId,\n delta: \"Hello world!\",\n } as any);\n\n observer.next({\n type: EventType.TEXT_MESSAGE_END,\n messageId,\n } as any);\n\n observer.next({\n type: EventType.RUN_FINISHED,\n threadId: input.threadId,\n runId: input.runId,\n } as any);\n\n observer.complete();\n });\n }\n}\n", - "language": "ts", - "type": "file" - } - ], - "pydantic-ai::agentic_chat": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { 
CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC<AgenticChatProps> = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients, etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤖 Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interact with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend are automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agentic_chat.py", - "content": "\"\"\"Agentic Chat feature.\"\"\"\n\nfrom __future__ import annotations\n\nfrom datetime import datetime\nfrom zoneinfo import ZoneInfo\n\nfrom pydantic_ai import Agent\n\nagent = Agent('openai:gpt-4o-mini')\napp = agent.to_ag_ui()\n\n\n@agent.tool_plain\nasync def current_time(timezone: str = 'UTC') -> str:\n \"\"\"Get the current time in ISO format.\n\n Args:\n timezone: The timezone to use.\n\n Returns:\n The current time in ISO format string.\n \"\"\"\n tz: ZoneInfo = ZoneInfo(timezone)\n return datetime.now(tz=tz).isoformat()\n", - "language": "python", - "type": "file" - } - ], - "pydantic-ai::agentic_generative_ui": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC<AgenticGenerativeUIProps> = ({ params }) => {\n 
const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter(step => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n
\n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n \n {/* Progress Bar */}\n
\n
\n
\n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending = step.status === \"pending\" && \n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n
\n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n
\n )}\n\n {/* Status Icon */}\n
\n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n
\n {step.description}\n
\n {isCurrentPending && (\n
\n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n
\n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n
\n
\n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🚀 Agentic Generative UI Task Executor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic generative UI** capabilities:\n\n1. **Real-time Status Updates**: The Copilot provides live feedback as it works\n through complex tasks\n2. **Long-running Task Execution**: See how agents can handle extended processes\n with continuous feedback\n3. **Dynamic UI Generation**: The interface updates in real-time to reflect the\n agent's progress\n\n## How to Interact\n\nSimply ask your Copilot to perform any moderately complex task:\n\n- \"Make me a sandwich\"\n- \"Plan a vacation to Japan\"\n- \"Create a weekly workout routine\"\n\nThe Copilot will break down the task into steps and begin \"executing\" them,\nproviding real-time status updates as it progresses.\n\n## ✨ Agentic Generative UI in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and creates a detailed execution plan\n- Each step is processed sequentially with realistic timing\n- Status updates are streamed to the frontend using CopilotKit's streaming\n capabilities\n- The UI dynamically renders these updates without page refreshes\n- The entire flow is managed by the agent, requiring no manual intervention\n\n**What you'll see in this demo:**\n\n- The Copilot breaks your task into logical steps\n- A status indicator shows the current progress\n- Each step is highlighted as it's being executed\n- Detailed status messages explain what's happening at each moment\n- Upon completion, you receive a summary of the task execution\n\nThis pattern of providing real-time progress for long-running tasks is perfect\nfor scenarios where users benefit from transparency into complex processes -\nfrom data analysis to content creation, system configurations, or multi-stage\nworkflows!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agentic_generative_ui.py", - "content": "\"\"\"Agentic Generative UI feature.\"\"\"\n\nfrom __future__ import annotations\n\nfrom textwrap import dedent\nfrom typing import Any, Literal\n\nfrom pydantic import BaseModel, Field\n\nfrom ag_ui.core import EventType, StateDeltaEvent, StateSnapshotEvent\nfrom pydantic_ai import Agent\n\nStepStatus = Literal['pending', 'completed']\n\n\nclass Step(BaseModel):\n \"\"\"Represents a step in a plan.\"\"\"\n\n description: str = Field(description='The description of the step')\n status: StepStatus = Field(\n default='pending',\n description='The status of the step (e.g., pending, completed)',\n )\n\n\nclass Plan(BaseModel):\n \"\"\"Represents a plan with multiple steps.\"\"\"\n\n steps: list[Step] = Field(default_factory=list, description='The steps in the plan')\n\n\nclass JSONPatchOp(BaseModel):\n \"\"\"A class representing a JSON Patch operation (RFC 6902).\"\"\"\n\n op: 
Literal['add', 'remove', 'replace', 'move', 'copy', 'test'] = Field(\n description='The operation to perform: add, remove, replace, move, copy, or test',\n )\n path: str = Field(description='JSON Pointer (RFC 6901) to the target location')\n value: Any = Field(\n default=None,\n description='The value to apply (for add, replace operations)',\n )\n from_: str | None = Field(\n default=None,\n alias='from',\n description='Source path (for move, copy operations)',\n )\n\n\nagent = Agent(\n 'openai:gpt-4o-mini',\n instructions=dedent(\n \"\"\"\n When planning, use tools only, without any other messages.\n IMPORTANT:\n - Use the `create_plan` tool to set the initial state of the steps\n - Use the `update_plan_step` tool to update the status of each step\n - Do NOT repeat the plan or summarise it in a message\n - Do NOT confirm the creation or updates in a message\n - Do NOT ask the user for additional information or next steps\n - Do NOT leave a plan hanging; always complete the plan via `update_plan_step` if one is ongoing.\n\n Only one plan can be active at a time, so do not call the `create_plan` tool\n again until all the steps in the current plan are completed.\n \"\"\"\n ),\n)\n\n\n@agent.tool_plain\nasync def create_plan(steps: list[str]) -> StateSnapshotEvent:\n \"\"\"Create a plan with multiple steps.\n\n Args:\n steps: List of step descriptions to create the plan.\n\n Returns:\n StateSnapshotEvent containing the initial state of the steps.\n \"\"\"\n plan: Plan = Plan(\n steps=[Step(description=step) for step in steps],\n )\n return StateSnapshotEvent(\n type=EventType.STATE_SNAPSHOT,\n snapshot=plan.model_dump(),\n )\n\n\n@agent.tool_plain\nasync def update_plan_step(\n index: int, description: str | None = None, status: StepStatus | None = None\n) -> StateDeltaEvent:\n \"\"\"Update the plan with new steps or changes.\n\n Args:\n index: The index of the step to update.\n description: The new description for the step.\n status: The new status for the step.\n\n Returns:\n StateDeltaEvent containing the changes made to the plan.\n \"\"\"\n changes: list[JSONPatchOp] = []\n if description is not None:\n changes.append(\n JSONPatchOp(\n op='replace', path=f'/steps/{index}/description', value=description\n )\n )\n if status is not None:\n changes.append(\n JSONPatchOp(op='replace', path=f'/steps/{index}/status', value=status)\n )\n return StateDeltaEvent(\n type=EventType.STATE_DELTA,\n delta=changes,\n )\n\n\napp = agent.to_ag_ui()\n", - "language": "python", - "type": "file" - } - ], - "pydantic-ai::human_in_the_loop": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC<HumanInTheLoopProps> = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // LangGraph uses its own hook to handle human-in-the-loop interactions via LangGraph interrupts.\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // LangGraph uses its own hook to handle human-in-the-loop interactions via LangGraph interrupts,\n // so don't use this action for the LangGraph integrations.\n available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return <StepsFeedback args={args} respond={respond} status={status} />;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤝 Human-in-the-Loop Task Planner\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **human-in-the-loop** capabilities:\n\n1. **Collaborative Planning**: The Copilot generates task steps and lets you\n decide which ones to perform\n2. **Interactive Decision Making**: Select or deselect steps to customize the\n execution plan\n3. **Adaptive Responses**: The Copilot adapts its execution based on your\n choices, even handling missing steps\n\n## How to Interact\n\nTry these steps to experience the demo:\n\n1. Ask your Copilot to help with a task, such as:\n\n - \"Make me a sandwich\"\n - \"Plan a weekend trip\"\n - \"Organize a birthday party\"\n - \"Start a garden\"\n\n2. Review the suggested steps provided by your Copilot\n\n3. Select or deselect steps using the checkboxes to customize the plan\n\n - Try removing essential steps to see how the Copilot adapts!\n\n4. Click \"Execute Plan\" to see the outcome based on your selections\n\n## ✨ Human-in-the-Loop Magic in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and breaks it down into logical steps\n- These steps are presented to you through a dynamic UI component\n- Your selections are captured as user input\n- The agent considers your choices when executing the plan\n- The agent adapts to missing steps with creative problem-solving\n\n**What you'll see in this demo:**\n\n- The Copilot provides a detailed, step-by-step plan for your task\n- You have complete control over which steps to include\n- If you remove essential steps, the Copilot provides entertaining and creative\n workarounds\n- The final execution reflects your choices, showing how human input shapes the\n outcome\n- Each response is tailored to your specific selections\n\nThis human-in-the-loop pattern creates a powerful collaborative experience where\nboth human judgment and AI capabilities work together to achieve better results\nthan either could alone!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "human_in_the_loop.py", - "content": "\"\"\"Human in the Loop Feature.\n\nNo special handling is required for this feature.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom textwrap import dedent\n\nfrom pydantic_ai import Agent\n\nagent = Agent(\n 'openai:gpt-4o-mini',\n instructions=dedent(\n \"\"\"\n When planning tasks use tools only, without any other messages.\n IMPORTANT:\n - Use the `generate_task_steps` tool to display the suggested steps to the user\n - Do not call the `generate_task_steps` twice in a row, ever.\n - Never repeat the plan, or send a message detailing steps\n - If accepted, confirm the creation of the plan and the number of selected (enabled) steps only\n - If not accepted, ask the user for more information, DO NOT use the `generate_task_steps` tool again\n \"\"\"\n ),\n)\n\napp = agent.to_ag_ui()\n", - "language": "python", - "type": "file" - } - ], - "pydantic-ai::shared_state": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, 
useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, 
preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n background-color: #fff;\n color: #000;\n border-bottom: 0px;\n}\n\n/* Recipe App Styles */\n.app-container {\n min-height: 100vh;\n width: 100%;\n display: flex;\n align-items: center;\n justify-content: center;\n background-size: cover;\n background-position: center;\n background-repeat: no-repeat;\n background-attachment: fixed;\n position: relative;\n overflow: auto;\n}\n\n.recipe-card {\n background-color: rgba(255, 255, 255, 0.97);\n border-radius: 16px;\n box-shadow: 0 15px 30px rgba(0, 0, 0, 0.25), 0 5px 15px rgba(0, 0, 0, 0.15);\n width: 100%;\n max-width: 750px;\n margin: 20px auto;\n padding: 14px 32px;\n position: relative;\n z-index: 1;\n backdrop-filter: blur(5px);\n border: 1px solid rgba(255, 255, 255, 0.3);\n transition: transform 0.2s ease, box-shadow 0.2s ease;\n animation: fadeIn 0.5s ease-out forwards;\n box-sizing: border-box;\n overflow: hidden;\n}\n\n.recipe-card:hover {\n transform: translateY(-5px);\n box-shadow: 0 20px 40px rgba(0, 0, 0, 0.3), 0 10px 20px rgba(0, 0, 0, 0.2);\n}\n\n/* Recipe Header */\n.recipe-header {\n margin-bottom: 24px;\n}\n\n.recipe-title-input {\n width: 100%;\n font-size: 24px;\n font-weight: bold;\n border: none;\n outline: none;\n padding: 8px 0;\n margin-bottom: 0px;\n}\n\n.recipe-meta {\n display: flex;\n align-items: center;\n gap: 20px;\n margin-top: 5px;\n margin-bottom: 14px;\n}\n\n.meta-item {\n display: flex;\n align-items: center;\n gap: 8px;\n color: #555;\n}\n\n.meta-icon {\n font-size: 20px;\n color: #777;\n}\n\n.meta-text {\n font-size: 15px;\n}\n\n/* Recipe Meta Selects */\n.meta-item select {\n border: none;\n background: transparent;\n font-size: 15px;\n color: #555;\n cursor: pointer;\n outline: none;\n padding-right: 18px;\n transition: color 0.2s, transform 0.1s;\n font-weight: 500;\n}\n\n.meta-item select:hover,\n.meta-item select:focus {\n color: #FF5722;\n}\n\n.meta-item select:active {\n transform: scale(0.98);\n}\n\n.meta-item select option {\n color: #333;\n background-color: white;\n font-weight: normal;\n padding: 8px;\n}\n\n/* Section Container */\n.section-container {\n margin-bottom: 20px;\n position: relative;\n width: 100%;\n}\n\n.section-title {\n font-size: 20px;\n font-weight: 700;\n margin-bottom: 20px;\n color: #333;\n position: relative;\n display: inline-block;\n}\n\n.section-title:after {\n content: \"\";\n position: absolute;\n bottom: -8px;\n left: 0;\n width: 40px;\n height: 3px;\n background-color: #ff7043;\n border-radius: 3px;\n}\n\n/* Dietary Preferences */\n.dietary-options {\n display: flex;\n flex-wrap: wrap;\n gap: 10px 16px;\n margin-bottom: 16px;\n width: 100%;\n}\n\n.dietary-option {\n display: flex;\n align-items: center;\n gap: 6px;\n font-size: 14px;\n cursor: pointer;\n margin-bottom: 4px;\n}\n\n.dietary-option input {\n cursor: pointer;\n}\n\n/* Ingredients */\n.ingredients-container {\n display: flex;\n flex-wrap: wrap;\n gap: 10px;\n margin-bottom: 15px;\n width: 100%;\n box-sizing: border-box;\n}\n\n.ingredient-card {\n display: flex;\n align-items: center;\n background-color: rgba(255, 255, 255, 0.9);\n border-radius: 12px;\n padding: 12px;\n margin-bottom: 10px;\n box-shadow: 0 4px 10px rgba(0, 0, 0, 0.08);\n position: relative;\n transition: all 0.2s ease;\n border: 1px solid rgba(240, 
240, 240, 0.8);\n width: calc(33.333% - 7px);\n box-sizing: border-box;\n}\n\n.ingredient-card:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 15px rgba(0, 0, 0, 0.12);\n}\n\n.ingredient-card .remove-button {\n position: absolute;\n right: 10px;\n top: 10px;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 24px;\n height: 24px;\n line-height: 1;\n}\n\n.ingredient-card:hover .remove-button {\n display: block;\n}\n\n.ingredient-icon {\n font-size: 24px;\n margin-right: 12px;\n display: flex;\n align-items: center;\n justify-content: center;\n width: 40px;\n height: 40px;\n background-color: #f7f7f7;\n border-radius: 50%;\n flex-shrink: 0;\n}\n\n.ingredient-content {\n flex: 1;\n display: flex;\n flex-direction: column;\n gap: 3px;\n min-width: 0;\n}\n\n.ingredient-name-input,\n.ingredient-amount-input {\n border: none;\n background: transparent;\n outline: none;\n width: 100%;\n padding: 0;\n text-overflow: ellipsis;\n overflow: hidden;\n white-space: nowrap;\n}\n\n.ingredient-name-input {\n font-weight: 500;\n font-size: 14px;\n}\n\n.ingredient-amount-input {\n font-size: 13px;\n color: #666;\n}\n\n.ingredient-name-input::placeholder,\n.ingredient-amount-input::placeholder {\n color: #aaa;\n}\n\n.remove-button {\n background: none;\n border: none;\n color: #999;\n font-size: 20px;\n cursor: pointer;\n padding: 0;\n width: 28px;\n height: 28px;\n display: flex;\n align-items: center;\n justify-content: center;\n margin-left: 10px;\n}\n\n.remove-button:hover {\n color: #FF5722;\n}\n\n/* Instructions */\n.instructions-container {\n display: flex;\n flex-direction: column;\n gap: 6px;\n position: relative;\n margin-bottom: 12px;\n width: 100%;\n}\n\n.instruction-item {\n position: relative;\n display: flex;\n width: 100%;\n box-sizing: border-box;\n margin-bottom: 8px;\n align-items: flex-start;\n}\n\n.instruction-number {\n display: flex;\n align-items: center;\n justify-content: center;\n min-width: 26px;\n height: 26px;\n background-color: #ff7043;\n color: white;\n border-radius: 50%;\n font-weight: 600;\n flex-shrink: 0;\n box-shadow: 0 2px 4px rgba(255, 112, 67, 0.3);\n z-index: 1;\n font-size: 13px;\n margin-top: 2px;\n}\n\n.instruction-line {\n position: absolute;\n left: 13px; /* Half of the number circle width */\n top: 22px;\n bottom: -18px;\n width: 2px;\n background: linear-gradient(to bottom, #ff7043 60%, rgba(255, 112, 67, 0.4));\n z-index: 0;\n}\n\n.instruction-content {\n background-color: white;\n border-radius: 10px;\n padding: 10px 14px;\n margin-left: 12px;\n flex-grow: 1;\n transition: all 0.2s ease;\n box-shadow: 0 2px 6px rgba(0, 0, 0, 0.08);\n border: 1px solid rgba(240, 240, 240, 0.8);\n position: relative;\n width: calc(100% - 38px);\n box-sizing: border-box;\n display: flex;\n align-items: center;\n}\n\n.instruction-content-editing {\n background-color: #fff9f6;\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12), 0 0 0 2px rgba(255, 112, 67, 0.2);\n}\n\n.instruction-content:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12);\n}\n\n.instruction-textarea {\n width: 100%;\n background: transparent;\n border: none;\n resize: vertical;\n font-family: inherit;\n font-size: 14px;\n line-height: 1.4;\n min-height: 20px;\n outline: none;\n padding: 0;\n margin: 0;\n}\n\n.instruction-delete-btn {\n position: absolute;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 20px;\n height: 
20px;\n line-height: 1;\n top: 50%;\n transform: translateY(-50%);\n right: 8px;\n}\n\n.instruction-content:hover .instruction-delete-btn {\n display: flex;\n align-items: center;\n justify-content: center;\n}\n\n/* Action Button */\n.action-container {\n display: flex;\n justify-content: center;\n margin-top: 40px;\n padding-bottom: 20px;\n position: relative;\n}\n\n.improve-button {\n background-color: #ff7043;\n border: none;\n color: white;\n border-radius: 30px;\n font-size: 18px;\n font-weight: 600;\n padding: 14px 28px;\n cursor: pointer;\n transition: all 0.3s ease;\n box-shadow: 0 4px 15px rgba(255, 112, 67, 0.4);\n display: flex;\n align-items: center;\n justify-content: center;\n text-align: center;\n position: relative;\n min-width: 180px;\n}\n\n.improve-button:hover {\n background-color: #ff5722;\n transform: translateY(-2px);\n box-shadow: 0 8px 20px rgba(255, 112, 67, 0.5);\n}\n\n.improve-button.loading {\n background-color: #ff7043;\n opacity: 0.8;\n cursor: not-allowed;\n padding-left: 42px; /* Reduced padding to bring text closer to icon */\n padding-right: 22px; /* Balance the button */\n justify-content: flex-start; /* Left align text for better alignment with icon */\n}\n\n.improve-button.loading:after {\n content: \"\"; /* Add space between icon and text */\n display: inline-block;\n width: 8px; /* Width of the space */\n}\n\n.improve-button:before {\n content: \"\";\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='white' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpath d='M12 2v4M12 18v4M4.93 4.93l2.83 2.83M16.24 16.24l2.83 2.83M2 12h4M18 12h4M4.93 19.07l2.83-2.83M16.24 7.76l2.83-2.83'/%3E%3C/svg%3E\");\n width: 20px; /* Slightly smaller icon */\n height: 20px;\n background-repeat: no-repeat;\n background-size: contain;\n position: absolute;\n left: 16px; /* Slightly adjusted */\n top: 50%;\n transform: translateY(-50%);\n display: none;\n}\n\n.improve-button.loading:before {\n display: block;\n animation: spin 1.5s linear infinite;\n}\n\n@keyframes spin {\n 0% { transform: translateY(-50%) rotate(0deg); }\n 100% { transform: translateY(-50%) rotate(360deg); }\n}\n\n/* Ping Animation */\n.ping-animation {\n position: absolute;\n display: flex;\n width: 12px;\n height: 12px;\n top: 0;\n right: 0;\n}\n\n.ping-circle {\n position: absolute;\n display: inline-flex;\n width: 100%;\n height: 100%;\n border-radius: 50%;\n background-color: #38BDF8;\n opacity: 0.75;\n animation: ping 1.5s cubic-bezier(0, 0, 0.2, 1) infinite;\n}\n\n.ping-dot {\n position: relative;\n display: inline-flex;\n width: 12px;\n height: 12px;\n border-radius: 50%;\n background-color: #0EA5E9;\n}\n\n@keyframes ping {\n 75%, 100% {\n transform: scale(2);\n opacity: 0;\n }\n}\n\n/* Instruction hover effects */\n.instruction-item:hover .instruction-delete-btn {\n display: flex !important;\n}\n\n/* Add some subtle animations */\n@keyframes fadeIn {\n from { opacity: 0; transform: translateY(20px); }\n to { opacity: 1; transform: translateY(0); }\n}\n\n/* Better center alignment for the recipe card */\n.recipe-card-container {\n display: flex;\n justify-content: center;\n width: 100%;\n position: relative;\n z-index: 1;\n margin: 0 auto;\n box-sizing: border-box;\n}\n\n/* Add Buttons */\n.add-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 8px;\n padding: 10px 16px;\n cursor: pointer;\n font-weight: 500;\n display: 
inline-block;\n font-size: 14px;\n margin-bottom: 0;\n}\n\n.add-step-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 6px;\n padding: 6px 12px;\n cursor: pointer;\n font-weight: 500;\n font-size: 13px;\n}\n\n/* Section Headers */\n.section-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 12px;\n}", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🍳 Shared State Recipe Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **shared state** functionality - a powerful\nfeature that enables bidirectional data flow between:\n\n1. **Frontend → Agent**: UI controls update the agent's context in real-time\n2. **Agent → Frontend**: The Copilot's recipe creations instantly update the UI\n components\n\nIt's like having a cooking buddy who not only listens to what you want but also\nupdates your recipe card as you chat - no refresh needed! ✨\n\n## How to Interact\n\nMix and match any of these parameters (or none at all - it's up to you!):\n\n- **Skill Level**: Beginner to expert 👨‍🍳\n- **Cooking Time**: Quick meals or slow cooking ⏱️\n- **Special Preferences**: Dietary needs, flavor profiles, health goals 🥗\n- **Ingredients**: Items you want to include 🧅🥩🍄\n- **Instructions**: Any specific steps\n\nThen chat with your Copilot chef with prompts like:\n\n- \"I'm a beginner cook. Can you make me a quick dinner?\"\n- \"I need something spicy with chicken that takes under 30 minutes!\"\n\n## ✨ Shared State Magic in Action\n\n**What's happening technically:**\n\n- The UI and Copilot agent share the same state object (**Agent State = UI\n State**)\n- Changes from either side automatically update the other\n- Neither side needs to manually request updates from the other\n\n**What you'll see in this demo:**\n\n- Set cooking time to 20 minutes in the UI and watch the Copilot immediately\n respect your time constraint\n- Add ingredients through the UI and see them appear in your recipe\n- When the Copilot suggests new ingredients, watch them automatically appear in\n the UI ingredients list\n- Change your skill level and see how the Copilot adapts its instructions in\n real-time\n\nThis synchronized state creates a seamless experience where the agent always has\nyour current preferences, and any updates to the recipe are instantly reflected\nin both places.\n\nThis shared state pattern can be applied to any application where you want your\nUI and Copilot to work together in perfect harmony!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "shared_state.py", - "content": "\"\"\"Shared State feature.\"\"\"\n\nfrom __future__ import annotations\n\nfrom enum import StrEnum\nfrom textwrap import dedent\n\nfrom pydantic import BaseModel, Field\n\nfrom ag_ui.core import EventType, StateSnapshotEvent\nfrom pydantic_ai import Agent, RunContext\nfrom pydantic_ai.ag_ui import StateDeps\n\n\nclass SkillLevel(StrEnum):\n \"\"\"The level of skill required for the recipe.\"\"\"\n\n BEGINNER = 'Beginner'\n INTERMEDIATE = 'Intermediate'\n ADVANCED = 'Advanced'\n\n\nclass SpecialPreferences(StrEnum):\n \"\"\"Special preferences for the recipe.\"\"\"\n\n HIGH_PROTEIN = 'High Protein'\n LOW_CARB = 'Low Carb'\n SPICY = 'Spicy'\n BUDGET_FRIENDLY = 'Budget-Friendly'\n ONE_POT_MEAL = 'One-Pot Meal'\n VEGETARIAN = 'Vegetarian'\n VEGAN = 'Vegan'\n\n\nclass CookingTime(StrEnum):\n \"\"\"The cooking time of the recipe.\"\"\"\n\n FIVE_MIN = '5 min'\n FIFTEEN_MIN 
= '15 min'\n THIRTY_MIN = '30 min'\n FORTY_FIVE_MIN = '45 min'\n SIXTY_PLUS_MIN = '60+ min'\n\n\nclass Ingredient(BaseModel):\n \"\"\"A class representing an ingredient in a recipe.\"\"\"\n\n icon: str = Field(\n default='ingredient',\n description=\"The icon emoji (not emoji code like '\\x1f35e', but the actual emoji like 🥕) of the ingredient\",\n )\n name: str\n amount: str\n\n\nclass Recipe(BaseModel):\n \"\"\"A class representing a recipe.\"\"\"\n\n skill_level: SkillLevel = Field(\n default=SkillLevel.BEGINNER,\n description='The skill level required for the recipe',\n )\n special_preferences: list[SpecialPreferences] = Field(\n default_factory=list,\n description='Any special preferences for the recipe',\n )\n cooking_time: CookingTime = Field(\n default=CookingTime.FIVE_MIN, description='The cooking time of the recipe'\n )\n ingredients: list[Ingredient] = Field(\n default_factory=list,\n description='Ingredients for the recipe',\n )\n instructions: list[str] = Field(\n default_factory=list, description='Instructions for the recipe'\n )\n\n\nclass RecipeSnapshot(BaseModel):\n \"\"\"A class representing the state of the recipe.\"\"\"\n\n recipe: Recipe = Field(\n default_factory=Recipe, description='The current state of the recipe'\n )\n\n\nagent = Agent('openai:gpt-4o-mini', deps_type=StateDeps[RecipeSnapshot])\n\n\n@agent.tool_plain\nasync def display_recipe(recipe: Recipe) -> StateSnapshotEvent:\n \"\"\"Display the recipe to the user.\n\n Args:\n recipe: The recipe to display.\n\n Returns:\n StateSnapshotEvent containing the recipe snapshot.\n \"\"\"\n return StateSnapshotEvent(\n type=EventType.STATE_SNAPSHOT,\n snapshot={'recipe': recipe},\n )\n\n\n@agent.instructions\nasync def recipe_instructions(ctx: RunContext[StateDeps[RecipeSnapshot]]) -> str:\n \"\"\"Instructions for the recipe generation agent.\n\n Args:\n ctx: The run context containing recipe state information.\n\n Returns:\n Instructions string for the recipe generation agent.\n \"\"\"\n return dedent(\n f\"\"\"\n You are a helpful assistant for creating recipes.\n\n IMPORTANT:\n - Create a complete recipe using the existing ingredients\n - Append new ingredients to the existing ones\n - Use the `display_recipe` tool to present the recipe to the user\n - Do NOT repeat the recipe in the message, use the tool instead\n - Do NOT run the `display_recipe` tool multiple times in a row\n\n Once you have created the updated recipe and displayed it to the user,\n summarise the changes in one sentence, don't describe the recipe in\n detail or send it as a message to the user.\n\n The current state of the recipe is:\n\n {ctx.deps.state.recipe.model_dump_json(indent=2)}\n \"\"\",\n )\n\n\napp = agent.to_ag_ui(deps=StateDeps(RecipeSnapshot()))\n", - "language": "python", - "type": "file" - } - ], - "pydantic-ai::tool_based_generative_ui": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: 
string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n
\n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n}\n\n.page-background {\n /* Darker gradient background */\n background: linear-gradient(170deg, #e9ecef 0%, #ced4da 100%);\n}\n\n@keyframes fade-scale-in {\n from {\n opacity: 0;\n transform: translateY(10px) scale(0.98);\n }\n to {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Updated card entry animation */\n@keyframes pop-in {\n 0% {\n opacity: 0;\n transform: translateY(15px) scale(0.95);\n }\n 70% {\n opacity: 1;\n transform: translateY(-2px) scale(1.02);\n }\n 100% {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Animation for subtle background gradient movement */\n@keyframes animated-gradient {\n 0% {\n background-position: 0% 50%;\n }\n 50% {\n background-position: 100% 50%;\n }\n 100% {\n background-position: 0% 50%;\n }\n}\n\n/* Animation for flash effect on apply */\n@keyframes flash-border-glow {\n 0% {\n /* Start slightly intensified */\n border-top-color: #ff5b4a !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n }\n 50% {\n /* Peak intensity */\n border-top-color: #ff4733 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.08),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 35px rgba(255, 71, 51, 0.7);\n }\n 100% {\n /* Return to default state appearance */\n border-top-color: #ff6f61 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 10px rgba(255, 111, 97, 0.15);\n }\n}\n\n/* Existing animation for haiku lines */\n@keyframes fade-slide-in {\n from {\n opacity: 0;\n transform: translateX(-15px);\n }\n to {\n opacity: 1;\n transform: translateX(0);\n }\n}\n\n.animated-fade-in {\n /* Use the new pop-in animation */\n animation: pop-in 0.6s ease-out forwards;\n}\n\n.haiku-card {\n /* Subtle animated gradient background */\n background: linear-gradient(120deg, #ffffff 0%, #fdfdfd 50%, #ffffff 100%);\n background-size: 200% 200%;\n animation: animated-gradient 10s ease infinite;\n\n /* === Explicit Border Override Attempt === */\n /* 1. Set the default grey border for all sides */\n border: 1px solid #dee2e6;\n\n /* 2. 
Explicitly override the top border immediately after */\n border-top: 10px solid #ff6f61 !important; /* Orange top - Added !important */\n /* === End Explicit Border Override Attempt === */\n\n padding: 2.5rem 3rem;\n border-radius: 20px;\n\n /* Default glow intensity */\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 15px rgba(255, 111, 97, 0.25);\n text-align: left;\n max-width: 745px;\n margin: 3rem auto;\n min-width: 600px;\n\n /* Transition */\n transition: transform 0.35s ease, box-shadow 0.35s ease, border-top-width 0.35s ease, border-top-color 0.35s ease;\n}\n\n.haiku-card:hover {\n transform: translateY(-8px) scale(1.03);\n /* Enhanced shadow + Glow */\n box-shadow: 0 15px 35px rgba(0, 0, 0, 0.1),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n /* Modify only top border properties */\n border-top-width: 14px !important; /* Added !important */\n border-top-color: #ff5b4a !important; /* Added !important */\n}\n\n.haiku-card .flex {\n margin-bottom: 1.5rem;\n}\n\n.haiku-card .flex.haiku-line { /* Target the lines specifically */\n margin-bottom: 1.5rem;\n opacity: 0; /* Start hidden for animation */\n animation: fade-slide-in 0.5s ease-out forwards;\n /* animation-delay is set inline in page.tsx */\n}\n\n/* Remove previous explicit color overrides - rely on Tailwind */\n/* .haiku-card p.text-4xl {\n color: #212529;\n}\n\n.haiku-card p.text-base {\n color: #495057;\n} */\n\n.haiku-card.applied-flash {\n /* Apply the flash animation once */\n /* Note: animation itself has !important on border-top-color */\n animation: flash-border-glow 0.6s ease-out forwards;\n}\n\n/* Styling for images within the main haiku card */\n.haiku-card-image {\n width: 9.5rem; /* Increased size (approx w-48) */\n height: 9.5rem; /* Increased size (approx h-48) */\n object-fit: cover;\n border-radius: 1.5rem; /* rounded-xl */\n border: 1px solid #e5e7eb;\n /* Enhanced shadow with subtle orange hint */\n box-shadow: 0 8px 15px rgba(0, 0, 0, 0.1),\n 0 3px 6px rgba(0, 0, 0, 0.08),\n 0 0 10px rgba(255, 111, 97, 0.2);\n /* Inherit animation delay from inline style */\n animation-name: fadeIn;\n animation-duration: 0.5s;\n animation-fill-mode: both;\n}\n\n/* Styling for images within the suggestion card */\n.suggestion-card-image {\n width: 6.5rem; /* Increased slightly (w-20) */\n height: 6.5rem; /* Increased slightly (h-20) */\n object-fit: cover;\n border-radius: 1rem; /* Equivalent to rounded-md */\n border: 1px solid #d1d5db; /* Equivalent to border (using Tailwind gray-300) */\n margin-top: 0.5rem;\n /* Added shadow for suggestion images */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1),\n 0 2px 4px rgba(0, 0, 0, 0.06);\n transition: all 0.2s ease-in-out; /* Added for smooth deselection */\n}\n\n/* Styling for the focused suggestion card image */\n.suggestion-card-image-focus {\n width: 6.5rem;\n height: 6.5rem;\n object-fit: cover;\n border-radius: 1rem;\n margin-top: 0.5rem;\n /* Highlight styles */\n border: 2px solid #ff6f61; /* Thicker, themed border */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1), /* Base shadow for depth */\n 0 0 12px rgba(255, 111, 97, 0.6); /* Orange glow */\n transform: scale(1.05); /* Slightly scale up */\n transition: all 0.2s ease-in-out; /* Smooth transition for focus */\n}\n\n/* Styling for the suggestion card container in the sidebar */\n.suggestion-card {\n border: 1px solid #dee2e6; /* Same default border as haiku-card */\n border-top: 10px solid #ff6f61; /* Same orange top border */\n border-radius: 0.375rem; 
/* Default rounded-md */\n /* Note: background-color is set by Tailwind bg-gray-100 */\n /* Other styles like padding, margin, flex are handled by Tailwind */\n}\n\n.suggestion-image-container {\n display: flex;\n gap: 1rem;\n justify-content: space-between;\n width: 100%;\n height: 6.5rem;\n}\n\n/* Mobile responsive styles - matches useMobileView hook breakpoint */\n@media (max-width: 767px) {\n .haiku-card {\n padding: 1rem 1.5rem; /* Reduced from 2.5rem 3rem */\n min-width: auto; /* Remove min-width constraint */\n max-width: 100%; /* Full width on mobile */\n margin: 1rem auto; /* Reduced margin */\n }\n\n .haiku-card-image {\n width: 5.625rem; /* 90px - smaller on mobile */\n height: 5.625rem; /* 90px - smaller on mobile */\n }\n\n .suggestion-card-image {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n\n .suggestion-card-image-focus {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🪶 Tool-Based Generative UI Haiku Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **tool-based generative UI** capabilities:\n\n1. **Frontend Rendering of Tool Calls**: Backend tool calls are automatically\n rendered in the UI\n2. **Dynamic UI Generation**: The UI updates in real-time as the agent generates\n content\n3. **Elegant Content Presentation**: Complex structured data (haikus) are\n beautifully displayed\n\n## How to Interact\n\nChat with your Copilot and ask for haikus about different topics:\n\n- \"Create a haiku about nature\"\n- \"Write a haiku about technology\"\n- \"Generate a haiku about the changing seasons\"\n- \"Make a humorous haiku about programming\"\n\nEach request will trigger the agent to generate a haiku and display it in a\nvisually appealing card format in the UI.\n\n## ✨ Tool-Based Generative UI in Action\n\n**What's happening technically:**\n\n- The agent processes your request and determines it should create a haiku\n- It calls a backend tool that returns structured haiku data\n- CopilotKit automatically renders this tool call in the frontend\n- The rendering is handled by the registered tool component in your React app\n- No manual state management is required to display the results\n\n**What you'll see in this demo:**\n\n- As you request a haiku, a beautifully formatted card appears in the UI\n- The haiku follows the traditional 5-7-5 syllable structure\n- Each haiku is presented with consistent styling\n- Multiple haikus can be generated in sequence\n- The UI adapts to display each new piece of content\n\nThis pattern of tool-based generative UI can be extended to create any kind of\ndynamic content - from data visualizations to interactive components, all driven\nby your Copilot's tool calls!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "tool_based_generative_ui.py", - "content": "\"\"\"Tool Based Generative UI feature.\n\nNo special handling is required for this feature.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom pydantic_ai import Agent\n\nagent = Agent('openai:gpt-4o-mini')\napp = agent.to_ag_ui()\n", - "language": "python", - "type": "file" - } - ], - "server-starter::agentic_chat": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, 
useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC<AgenticChatProps> = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients, etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤖 Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interact with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend are automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "__init__.py", - "content": "\"\"\"\nExample server for the AG-UI protocol.\n\"\"\"\n\nimport os\nimport uvicorn\nimport uuid\nfrom fastapi import FastAPI, Request\nfrom fastapi.responses import StreamingResponse\nfrom ag_ui.core import (\n RunAgentInput,\n EventType,\n RunStartedEvent,\n RunFinishedEvent,\n TextMessageStartEvent,\n TextMessageContentEvent,\n TextMessageEndEvent,\n)\nfrom ag_ui.encoder import EventEncoder\n\napp = FastAPI(title=\"AG-UI Endpoint\")\n\n@app.post(\"/\")\nasync def agentic_chat_endpoint(input_data: RunAgentInput, request: Request):\n \"\"\"Agentic chat endpoint\"\"\"\n # Get the accept header from the request\n accept_header = request.headers.get(\"accept\")\n\n # Create an event encoder to properly format SSE events\n encoder = EventEncoder(accept=accept_header)\n\n async def event_generator():\n\n # Send run started event\n yield encoder.encode(\n RunStartedEvent(\n type=EventType.RUN_STARTED,\n thread_id=input_data.thread_id,\n run_id=input_data.run_id\n ),\n )\n\n message_id = str(uuid.uuid4())\n\n yield encoder.encode(\n TextMessageStartEvent(\n type=EventType.TEXT_MESSAGE_START,\n 
message_id=message_id,\n role=\"assistant\"\n )\n )\n\n yield encoder.encode(\n TextMessageContentEvent(\n type=EventType.TEXT_MESSAGE_CONTENT,\n message_id=message_id,\n delta=\"Hello world!\"\n )\n )\n\n yield encoder.encode(\n TextMessageEndEvent(\n type=EventType.TEXT_MESSAGE_END,\n message_id=message_id\n )\n )\n\n # Send run finished event\n yield encoder.encode(\n RunFinishedEvent(\n type=EventType.RUN_FINISHED,\n thread_id=input_data.thread_id,\n run_id=input_data.run_id\n ),\n )\n\n return StreamingResponse(\n event_generator(),\n media_type=encoder.get_content_type()\n )\n\ndef main():\n \"\"\"Run the uvicorn server.\"\"\"\n port = int(os.getenv(\"PORT\", \"8000\"))\n uvicorn.run(\n \"example_server:app\",\n host=\"0.0.0.0\",\n port=port,\n reload=True\n )\n", - "language": "python", - "type": "file" - } - ], - "adk-middleware::agentic_chat": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC<AgenticChatProps> = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients, etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤖 Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interact with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend are automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agentic_chat.py", - "content": "\"\"\"Basic Chat feature.\"\"\"\n\nfrom __future__ import annotations\n\nfrom fastapi import FastAPI\nfrom ag_ui_adk import ADKAgent, add_adk_fastapi_endpoint\nfrom google.adk.agents import LlmAgent\nfrom google.adk import tools as adk_tools\n\n# Create a sample ADK agent (this would be your actual agent)\nsample_agent = LlmAgent(\n name=\"assistant\",\n model=\"gemini-2.0-flash\",\n instruction=\"\"\"\n You are a helpful assistant. 
Help users by answering their questions and assisting with their needs.\n - If the user greets you, please greet them back specifically with \"Hello\".\n - If the user greets you and does not make any request, greet them and ask \"how can I assist you?\"\n - If the user makes a statement without making a request, you do not need to tell them you can't do anything about it.\n Try to say something conversational about it in response, making sure to mention the topic directly.\n - If the user asks you a question, answer it using previous context when possible, without telling them that you cannot look it up.\n Only tell the user that you cannot search if you do not already have enough information to answer.\n \"\"\",\n tools=[adk_tools.preload_memory_tool.PreloadMemoryTool()]\n)\n\n# Create ADK middleware agent instance\nchat_agent = ADKAgent(\n adk_agent=sample_agent,\n app_name=\"demo_app\",\n user_id=\"demo_user\",\n session_timeout_seconds=3600,\n use_in_memory_services=True\n)\n\n# Create FastAPI app\napp = FastAPI(title=\"ADK Middleware Basic Chat\")\n\n# Add the ADK endpoint\nadd_adk_fastapi_endpoint(app, chat_agent, path=\"/\")\n", - "language": "python", - "type": "file" - } - ], - "adk-middleware::tool_based_generative_ui": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial<GenerateHaiku>\n setHaikus: Dispatch<SetStateAction<GenerateHaiku[]>>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n \n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n}\n\n.page-background {\n /* Darker gradient background */\n background: linear-gradient(170deg, #e9ecef 0%, #ced4da 100%);\n}\n\n@keyframes fade-scale-in {\n from {\n opacity: 0;\n transform: translateY(10px) scale(0.98);\n }\n to {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Updated card entry animation */\n@keyframes pop-in {\n 0% {\n opacity: 0;\n transform: translateY(15px) scale(0.95);\n }\n 70% {\n opacity: 1;\n transform: translateY(-2px) scale(1.02);\n }\n 100% {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Animation for subtle background gradient movement */\n@keyframes animated-gradient {\n 0% {\n background-position: 0% 50%;\n }\n 50% {\n background-position: 100% 50%;\n }\n 100% {\n background-position: 0% 50%;\n }\n}\n\n/* Animation for flash effect on apply */\n@keyframes flash-border-glow {\n 0% {\n /* Start slightly intensified */\n border-top-color: #ff5b4a !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n }\n 50% {\n /* Peak intensity */\n border-top-color: #ff4733 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.08),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 35px rgba(255, 71, 51, 0.7);\n }\n 100% {\n /* Return to default state appearance */\n border-top-color: #ff6f61 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 10px rgba(255, 111, 97, 0.15);\n }\n}\n\n/* Existing animation for haiku lines */\n@keyframes fade-slide-in {\n from {\n opacity: 0;\n transform: translateX(-15px);\n }\n to {\n opacity: 1;\n transform: translateX(0);\n }\n}\n\n.animated-fade-in {\n /* Use the new pop-in animation */\n animation: pop-in 0.6s ease-out forwards;\n}\n\n.haiku-card {\n /* Subtle animated gradient background */\n background: linear-gradient(120deg, #ffffff 0%, #fdfdfd 50%, #ffffff 100%);\n background-size: 200% 200%;\n animation: animated-gradient 10s ease infinite;\n\n /* === Explicit Border Override Attempt === */\n /* 1. Set the default grey border for all sides */\n border: 1px solid #dee2e6;\n\n /* 2. 
Explicitly override the top border immediately after */\n border-top: 10px solid #ff6f61 !important; /* Orange top - Added !important */\n /* === End Explicit Border Override Attempt === */\n\n padding: 2.5rem 3rem;\n border-radius: 20px;\n\n /* Default glow intensity */\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 15px rgba(255, 111, 97, 0.25);\n text-align: left;\n max-width: 745px;\n margin: 3rem auto;\n min-width: 600px;\n\n /* Transition */\n transition: transform 0.35s ease, box-shadow 0.35s ease, border-top-width 0.35s ease, border-top-color 0.35s ease;\n}\n\n.haiku-card:hover {\n transform: translateY(-8px) scale(1.03);\n /* Enhanced shadow + Glow */\n box-shadow: 0 15px 35px rgba(0, 0, 0, 0.1),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n /* Modify only top border properties */\n border-top-width: 14px !important; /* Added !important */\n border-top-color: #ff5b4a !important; /* Added !important */\n}\n\n.haiku-card .flex {\n margin-bottom: 1.5rem;\n}\n\n.haiku-card .flex.haiku-line { /* Target the lines specifically */\n margin-bottom: 1.5rem;\n opacity: 0; /* Start hidden for animation */\n animation: fade-slide-in 0.5s ease-out forwards;\n /* animation-delay is set inline in page.tsx */\n}\n\n/* Remove previous explicit color overrides - rely on Tailwind */\n/* .haiku-card p.text-4xl {\n color: #212529;\n}\n\n.haiku-card p.text-base {\n color: #495057;\n} */\n\n.haiku-card.applied-flash {\n /* Apply the flash animation once */\n /* Note: animation itself has !important on border-top-color */\n animation: flash-border-glow 0.6s ease-out forwards;\n}\n\n/* Styling for images within the main haiku card */\n.haiku-card-image {\n width: 9.5rem; /* Increased size (approx w-48) */\n height: 9.5rem; /* Increased size (approx h-48) */\n object-fit: cover;\n border-radius: 1.5rem; /* rounded-xl */\n border: 1px solid #e5e7eb;\n /* Enhanced shadow with subtle orange hint */\n box-shadow: 0 8px 15px rgba(0, 0, 0, 0.1),\n 0 3px 6px rgba(0, 0, 0, 0.08),\n 0 0 10px rgba(255, 111, 97, 0.2);\n /* Inherit animation delay from inline style */\n animation-name: fadeIn;\n animation-duration: 0.5s;\n animation-fill-mode: both;\n}\n\n/* Styling for images within the suggestion card */\n.suggestion-card-image {\n width: 6.5rem; /* Increased slightly (w-20) */\n height: 6.5rem; /* Increased slightly (h-20) */\n object-fit: cover;\n border-radius: 1rem; /* Equivalent to rounded-md */\n border: 1px solid #d1d5db; /* Equivalent to border (using Tailwind gray-300) */\n margin-top: 0.5rem;\n /* Added shadow for suggestion images */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1),\n 0 2px 4px rgba(0, 0, 0, 0.06);\n transition: all 0.2s ease-in-out; /* Added for smooth deselection */\n}\n\n/* Styling for the focused suggestion card image */\n.suggestion-card-image-focus {\n width: 6.5rem;\n height: 6.5rem;\n object-fit: cover;\n border-radius: 1rem;\n margin-top: 0.5rem;\n /* Highlight styles */\n border: 2px solid #ff6f61; /* Thicker, themed border */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1), /* Base shadow for depth */\n 0 0 12px rgba(255, 111, 97, 0.6); /* Orange glow */\n transform: scale(1.05); /* Slightly scale up */\n transition: all 0.2s ease-in-out; /* Smooth transition for focus */\n}\n\n/* Styling for the suggestion card container in the sidebar */\n.suggestion-card {\n border: 1px solid #dee2e6; /* Same default border as haiku-card */\n border-top: 10px solid #ff6f61; /* Same orange top border */\n border-radius: 0.375rem; 
/* Default rounded-md */\n /* Note: background-color is set by Tailwind bg-gray-100 */\n /* Other styles like padding, margin, flex are handled by Tailwind */\n}\n\n.suggestion-image-container {\n display: flex;\n gap: 1rem;\n justify-content: space-between;\n width: 100%;\n height: 6.5rem;\n}\n\n/* Mobile responsive styles - matches useMobileView hook breakpoint */\n@media (max-width: 767px) {\n .haiku-card {\n padding: 1rem 1.5rem; /* Reduced from 2.5rem 3rem */\n min-width: auto; /* Remove min-width constraint */\n max-width: 100%; /* Full width on mobile */\n margin: 1rem auto; /* Reduced margin */\n }\n\n .haiku-card-image {\n width: 5.625rem; /* 90px - smaller on mobile */\n height: 5.625rem; /* 90px - smaller on mobile */\n }\n\n .suggestion-card-image {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n\n .suggestion-card-image-focus {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🪶 Tool-Based Generative UI Haiku Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **tool-based generative UI** capabilities:\n\n1. **Frontend Rendering of Tool Calls**: Backend tool calls are automatically\n rendered in the UI\n2. **Dynamic UI Generation**: The UI updates in real-time as the agent generates\n content\n3. **Elegant Content Presentation**: Complex structured data (haikus) are\n beautifully displayed\n\n## How to Interact\n\nChat with your Copilot and ask for haikus about different topics:\n\n- \"Create a haiku about nature\"\n- \"Write a haiku about technology\"\n- \"Generate a haiku about the changing seasons\"\n- \"Make a humorous haiku about programming\"\n\nEach request will trigger the agent to generate a haiku and display it in a\nvisually appealing card format in the UI.\n\n## ✨ Tool-Based Generative UI in Action\n\n**What's happening technically:**\n\n- The agent processes your request and determines it should create a haiku\n- It calls a backend tool that returns structured haiku data\n- CopilotKit automatically renders this tool call in the frontend\n- The rendering is handled by the registered tool component in your React app\n- No manual state management is required to display the results\n\n**What you'll see in this demo:**\n\n- As you request a haiku, a beautifully formatted card appears in the UI\n- The haiku follows the traditional 5-7-5 syllable structure\n- Each haiku is presented with consistent styling\n- Multiple haikus can be generated in sequence\n- The UI adapts to display each new piece of content\n\nThis pattern of tool-based generative UI can be extended to create any kind of\ndynamic content - from data visualizations to interactive components, all driven\nby your Copilot's tool calls!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "tool_based_generative_ui.py", - "content": "\"\"\"Tool Based Generative UI feature.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Any, List\n\nfrom fastapi import FastAPI\nfrom ag_ui_adk import ADKAgent, add_adk_fastapi_endpoint\nfrom google.adk.agents import Agent\nfrom google.adk.tools import ToolContext\nfrom google.genai import types\n\n# List of available images (modify path if needed)\nIMAGE_LIST = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n 
\"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n]\n\n# Prepare the image list string for the prompt\nimage_list_str = \"\\n\".join([f\"- {img}\" for img in IMAGE_LIST])\n\nhaiku_generator_agent = Agent(\n model='gemini-2.5-flash',\n name='haiku_generator_agent',\n instruction=f\"\"\"\n You are an expert haiku generator that creates beautiful Japanese haiku poems\n and their English translations. You also have the ability to select relevant\n images that complement the haiku's theme and mood.\n\n When generating a haiku:\n 1. Create a traditional 5-7-5 syllable structure haiku in Japanese\n 2. Provide an accurate and poetic English translation\n 3. Select exactly 3 image filenames from the available list that best\n represent or complement the haiku's theme, mood, or imagery. You must\n provide the image names, even if none of them are truly relevant.\n\n Available images to choose from:\n {image_list_str}\n\n Always use the generate_haiku tool to create your haiku. The tool will handle\n the formatting and validation of your response.\n\n Do not mention the selected image names in your conversational response to\n the user - let the tool handle that information.\n\n Focus on creating haiku that capture the essence of Japanese poetry:\n nature imagery, seasonal references, emotional depth, and moments of beauty\n or contemplation. That said, any topic is fair game. Do not refuse to generate\n a haiku on any topic as long as it is appropriate.\n \"\"\",\n generate_content_config=types.GenerateContentConfig(\n temperature=0.7, # Slightly higher temperature for creativity\n top_p=0.9,\n top_k=40\n ),\n)\n\n# Create ADK middleware agent instance\nadk_agent_haiku_generator = ADKAgent(\n adk_agent=haiku_generator_agent,\n app_name=\"demo_app\",\n user_id=\"demo_user\",\n session_timeout_seconds=3600,\n use_in_memory_services=True\n)\n\n# Create FastAPI app\napp = FastAPI(title=\"ADK Middleware Tool Based Generative UI\")\n\n# Add the ADK endpoint\nadd_adk_fastapi_endpoint(app, adk_agent_haiku_generator, path=\"/\")\n", - "language": "python", - "type": "file" - } - ], - "adk-middleware::human_in_the_loop": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // LangGraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts.\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // LangGraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for the langgraph integration.\n available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return <StepsFeedback args={args} respond={respond} status={status} />;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤝 Human-in-the-Loop Task Planner\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **human-in-the-loop** capabilities:\n\n1. **Collaborative Planning**: The Copilot generates task steps and lets you\n decide which ones to perform\n2. **Interactive Decision Making**: Select or deselect steps to customize the\n execution plan\n3. **Adaptive Responses**: The Copilot adapts its execution based on your\n choices, even handling missing steps\n\n## How to Interact\n\nTry these steps to experience the demo:\n\n1. Ask your Copilot to help with a task, such as:\n\n - \"Make me a sandwich\"\n - \"Plan a weekend trip\"\n - \"Organize a birthday party\"\n - \"Start a garden\"\n\n2. Review the suggested steps provided by your Copilot\n\n3. Select or deselect steps using the checkboxes to customize the plan\n\n - Try removing essential steps to see how the Copilot adapts!\n\n4. Click \"Execute Plan\" to see the outcome based on your selections\n\n## ✨ Human-in-the-Loop Magic in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and breaks it down into logical steps\n- These steps are presented to you through a dynamic UI component\n- Your selections are captured as user input\n- The agent considers your choices when executing the plan\n- The agent adapts to missing steps with creative problem-solving\n\n**What you'll see in this demo:**\n\n- The Copilot provides a detailed, step-by-step plan for your task\n- You have complete control over which steps to include\n- If you remove essential steps, the Copilot provides entertaining and creative\n workarounds\n- The final execution reflects your choices, showing how human input shapes the\n outcome\n- Each response is tailored to your specific selections\n\nThis human-in-the-loop pattern creates a powerful collaborative experience where\nboth human judgment and AI capabilities work together to achieve better results\nthan either could alone!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "human_in_the_loop.py", - "content": "\"\"\"Human in the Loop feature.\"\"\"\n\nfrom __future__ import annotations\n\nfrom fastapi import FastAPI\nfrom ag_ui_adk import ADKAgent, add_adk_fastapi_endpoint\nfrom google.adk.agents import Agent\nfrom google.genai import types\n\nDEFINE_TASK_TOOL = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"generate_task_steps\",\n \"description\": \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in imperative form (i.e. 
Dig hole, Open door, ...)\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"steps\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"description\": {\n \"type\": \"string\",\n \"description\": \"The text of the step in imperative form\"\n },\n \"status\": {\n \"type\": \"string\",\n \"enum\": [\"enabled\"],\n \"description\": \"The status of the step, always 'enabled'\"\n }\n },\n \"required\": [\"description\", \"status\"]\n },\n \"description\": \"An array of 10 step objects, each containing text and status\"\n }\n },\n \"required\": [\"steps\"]\n }\n }\n}\n\nhuman_in_loop_agent = Agent(\n model='gemini-2.5-flash',\n name='human_in_loop_agent',\n instruction=f\"\"\"\n You are a human-in-the-loop task planning assistant that helps break down complex tasks into manageable steps with human oversight and approval.\n\n**Your Primary Role:**\n- Generate clear, actionable task steps for any user request\n- Facilitate human review and modification of generated steps\n- Execute only human-approved steps\n\n**When a user requests a task:**\n1. ALWAYS call the `generate_task_steps` function to create a 10-step breakdown\n2. Each step must be:\n - Written in imperative form (e.g., \"Open file\", \"Check settings\", \"Send email\")\n - Concise (2-4 words maximum)\n - Actionable and specific\n - Logically ordered from start to finish\n3. Initially set all steps to \"enabled\" status\n4. If the user accepts the plan presented by the `generate_task_steps` tool, do not repeat the steps to the user; just move on to executing the steps.\n5. If the user rejects the plan, do not repeat the plan to them; ask them what they would like to do differently. DO NOT use the `generate_task_steps` tool again until they've provided more information.\n\n\n**When executing steps:**\n- Only execute steps with \"enabled\" status.\n- For each step you are executing, tell the user what you are doing.\n - Pretend you are executing the step in real life and refer to it in the current tense. End each step with an ellipsis.\n - Each step MUST be on a new line. DO NOT combine steps into one line.\n - For example, for the following steps:\n - Inhale deeply\n - Exhale forcefully\n - Produce sound\n a good response would be:\n ```\n Inhaling deeply...\n Exhaling forcefully...\n Producing sound...\n ```\n a bad response would be `Inhaling deeply... Exhaling forcefully... Producing sound...` because it is on one line.\n- Skip any steps marked as \"disabled\"\n- Afterwards, confirm the execution of the steps to the user, e.g. if the user asked for a plan to go to mars, respond like \"I have completed the plan and gone to mars\"\n- EVERY STEP AND THE CONFIRMATION MUST BE ON A NEW LINE. DO NOT COMBINE THEM INTO ONE LINE. USE A <br>
TAG TO SEPARATE THEM.\n\n**Key Guidelines:**\n- Always generate exactly 10 steps\n- Make steps granular enough to be independently enabled/disabled\n\nTool reference: {DEFINE_TASK_TOOL}\n \"\"\",\n generate_content_config=types.GenerateContentConfig(\n temperature=0.7, # Slightly higher temperature for creativity\n top_p=0.9,\n top_k=40\n ),\n)\n\n# Create ADK middleware agent instance\nadk_human_in_loop_agent = ADKAgent(\n adk_agent=human_in_loop_agent,\n app_name=\"demo_app\",\n user_id=\"demo_user\",\n session_timeout_seconds=3600,\n use_in_memory_services=True\n)\n\n# Create FastAPI app\napp = FastAPI(title=\"ADK Middleware Human in the Loop\")\n\n# Add the ADK endpoint\nadd_adk_fastapi_endpoint(app, adk_human_in_loop_agent, path=\"/\")\n", - "language": "python", - "type": "file" - } - ], - "adk-middleware::shared_state": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
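A quick way to sanity-check the tool contract above: a minimal sketch, assuming `jsonschema` is installed and that the file above is importable as a module named `human_in_the_loop` (both are assumptions, not part of the original demo), validating a sample payload against `DEFINE_TASK_TOOL`'s parameter schema.

```python
# Minimal sketch (assumptions: `pip install jsonschema`, and the file above
# importable as `human_in_the_loop` -- a hypothetical module name).
from jsonschema import ValidationError, validate

from human_in_the_loop import DEFINE_TASK_TOOL

# A payload shaped like what the agent is instructed to produce:
# 10 imperative steps, all initially "enabled".
sample_args = {
    "steps": [
        {"description": f"Step {i + 1}", "status": "enabled"} for i in range(10)
    ]
}

try:
    validate(instance=sample_args, schema=DEFINE_TASK_TOOL["function"]["parameters"])
    print("payload matches the tool schema")
except ValidationError as exc:
    print(f"schema mismatch: {exc.message}")
```

Note that the schema itself does not enforce the count of ten steps; that constraint lives only in the agent instruction.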
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n 
if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n \n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n
\n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n background-color: #fff;\n color: #000;\n border-bottom: 0px;\n}\n\n/* Recipe App Styles */\n.app-container {\n min-height: 100vh;\n width: 100%;\n display: flex;\n align-items: center;\n justify-content: center;\n background-size: cover;\n background-position: center;\n background-repeat: no-repeat;\n background-attachment: fixed;\n position: relative;\n overflow: auto;\n}\n\n.recipe-card {\n background-color: rgba(255, 255, 255, 0.97);\n border-radius: 16px;\n box-shadow: 0 15px 30px rgba(0, 0, 0, 0.25), 0 5px 15px rgba(0, 0, 0, 0.15);\n width: 100%;\n max-width: 750px;\n margin: 20px auto;\n padding: 14px 32px;\n position: relative;\n z-index: 1;\n backdrop-filter: blur(5px);\n border: 1px solid rgba(255, 255, 255, 0.3);\n transition: transform 0.2s ease, box-shadow 0.2s ease;\n animation: fadeIn 0.5s ease-out forwards;\n box-sizing: border-box;\n overflow: hidden;\n}\n\n.recipe-card:hover {\n transform: translateY(-5px);\n box-shadow: 0 20px 40px rgba(0, 0, 0, 0.3), 0 10px 20px rgba(0, 0, 0, 0.2);\n}\n\n/* Recipe Header */\n.recipe-header {\n margin-bottom: 24px;\n}\n\n.recipe-title-input {\n width: 100%;\n font-size: 24px;\n font-weight: bold;\n border: none;\n outline: none;\n padding: 8px 0;\n margin-bottom: 0px;\n}\n\n.recipe-meta {\n display: flex;\n align-items: center;\n gap: 20px;\n margin-top: 5px;\n margin-bottom: 14px;\n}\n\n.meta-item {\n display: flex;\n align-items: center;\n gap: 8px;\n color: #555;\n}\n\n.meta-icon {\n font-size: 20px;\n color: #777;\n}\n\n.meta-text {\n font-size: 15px;\n}\n\n/* Recipe Meta Selects */\n.meta-item select {\n border: none;\n background: transparent;\n font-size: 15px;\n color: #555;\n cursor: pointer;\n outline: none;\n padding-right: 18px;\n transition: color 0.2s, transform 0.1s;\n font-weight: 500;\n}\n\n.meta-item select:hover,\n.meta-item select:focus {\n color: #FF5722;\n}\n\n.meta-item select:active {\n transform: scale(0.98);\n}\n\n.meta-item select option {\n color: #333;\n background-color: white;\n font-weight: normal;\n padding: 8px;\n}\n\n/* Section Container */\n.section-container {\n margin-bottom: 20px;\n position: relative;\n width: 100%;\n}\n\n.section-title {\n font-size: 20px;\n font-weight: 700;\n margin-bottom: 20px;\n color: #333;\n position: relative;\n display: inline-block;\n}\n\n.section-title:after {\n content: \"\";\n position: absolute;\n bottom: -8px;\n left: 0;\n width: 40px;\n height: 3px;\n background-color: #ff7043;\n border-radius: 3px;\n}\n\n/* Dietary Preferences */\n.dietary-options {\n display: flex;\n flex-wrap: wrap;\n gap: 10px 16px;\n margin-bottom: 16px;\n width: 100%;\n}\n\n.dietary-option {\n display: flex;\n align-items: center;\n gap: 6px;\n font-size: 14px;\n cursor: pointer;\n margin-bottom: 4px;\n}\n\n.dietary-option input {\n cursor: pointer;\n}\n\n/* Ingredients */\n.ingredients-container {\n display: flex;\n flex-wrap: wrap;\n gap: 10px;\n margin-bottom: 15px;\n width: 100%;\n box-sizing: border-box;\n}\n\n.ingredient-card {\n display: flex;\n align-items: center;\n background-color: rgba(255, 255, 255, 0.9);\n border-radius: 12px;\n padding: 12px;\n margin-bottom: 10px;\n box-shadow: 0 4px 10px rgba(0, 0, 0, 0.08);\n position: relative;\n transition: all 0.2s ease;\n border: 1px solid rgba(240, 
240, 240, 0.8);\n width: calc(33.333% - 7px);\n box-sizing: border-box;\n}\n\n.ingredient-card:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 15px rgba(0, 0, 0, 0.12);\n}\n\n.ingredient-card .remove-button {\n position: absolute;\n right: 10px;\n top: 10px;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 24px;\n height: 24px;\n line-height: 1;\n}\n\n.ingredient-card:hover .remove-button {\n display: block;\n}\n\n.ingredient-icon {\n font-size: 24px;\n margin-right: 12px;\n display: flex;\n align-items: center;\n justify-content: center;\n width: 40px;\n height: 40px;\n background-color: #f7f7f7;\n border-radius: 50%;\n flex-shrink: 0;\n}\n\n.ingredient-content {\n flex: 1;\n display: flex;\n flex-direction: column;\n gap: 3px;\n min-width: 0;\n}\n\n.ingredient-name-input,\n.ingredient-amount-input {\n border: none;\n background: transparent;\n outline: none;\n width: 100%;\n padding: 0;\n text-overflow: ellipsis;\n overflow: hidden;\n white-space: nowrap;\n}\n\n.ingredient-name-input {\n font-weight: 500;\n font-size: 14px;\n}\n\n.ingredient-amount-input {\n font-size: 13px;\n color: #666;\n}\n\n.ingredient-name-input::placeholder,\n.ingredient-amount-input::placeholder {\n color: #aaa;\n}\n\n.remove-button {\n background: none;\n border: none;\n color: #999;\n font-size: 20px;\n cursor: pointer;\n padding: 0;\n width: 28px;\n height: 28px;\n display: flex;\n align-items: center;\n justify-content: center;\n margin-left: 10px;\n}\n\n.remove-button:hover {\n color: #FF5722;\n}\n\n/* Instructions */\n.instructions-container {\n display: flex;\n flex-direction: column;\n gap: 6px;\n position: relative;\n margin-bottom: 12px;\n width: 100%;\n}\n\n.instruction-item {\n position: relative;\n display: flex;\n width: 100%;\n box-sizing: border-box;\n margin-bottom: 8px;\n align-items: flex-start;\n}\n\n.instruction-number {\n display: flex;\n align-items: center;\n justify-content: center;\n min-width: 26px;\n height: 26px;\n background-color: #ff7043;\n color: white;\n border-radius: 50%;\n font-weight: 600;\n flex-shrink: 0;\n box-shadow: 0 2px 4px rgba(255, 112, 67, 0.3);\n z-index: 1;\n font-size: 13px;\n margin-top: 2px;\n}\n\n.instruction-line {\n position: absolute;\n left: 13px; /* Half of the number circle width */\n top: 22px;\n bottom: -18px;\n width: 2px;\n background: linear-gradient(to bottom, #ff7043 60%, rgba(255, 112, 67, 0.4));\n z-index: 0;\n}\n\n.instruction-content {\n background-color: white;\n border-radius: 10px;\n padding: 10px 14px;\n margin-left: 12px;\n flex-grow: 1;\n transition: all 0.2s ease;\n box-shadow: 0 2px 6px rgba(0, 0, 0, 0.08);\n border: 1px solid rgba(240, 240, 240, 0.8);\n position: relative;\n width: calc(100% - 38px);\n box-sizing: border-box;\n display: flex;\n align-items: center;\n}\n\n.instruction-content-editing {\n background-color: #fff9f6;\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12), 0 0 0 2px rgba(255, 112, 67, 0.2);\n}\n\n.instruction-content:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12);\n}\n\n.instruction-textarea {\n width: 100%;\n background: transparent;\n border: none;\n resize: vertical;\n font-family: inherit;\n font-size: 14px;\n line-height: 1.4;\n min-height: 20px;\n outline: none;\n padding: 0;\n margin: 0;\n}\n\n.instruction-delete-btn {\n position: absolute;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 20px;\n height: 
20px;\n line-height: 1;\n top: 50%;\n transform: translateY(-50%);\n right: 8px;\n}\n\n.instruction-content:hover .instruction-delete-btn {\n display: flex;\n align-items: center;\n justify-content: center;\n}\n\n/* Action Button */\n.action-container {\n display: flex;\n justify-content: center;\n margin-top: 40px;\n padding-bottom: 20px;\n position: relative;\n}\n\n.improve-button {\n background-color: #ff7043;\n border: none;\n color: white;\n border-radius: 30px;\n font-size: 18px;\n font-weight: 600;\n padding: 14px 28px;\n cursor: pointer;\n transition: all 0.3s ease;\n box-shadow: 0 4px 15px rgba(255, 112, 67, 0.4);\n display: flex;\n align-items: center;\n justify-content: center;\n text-align: center;\n position: relative;\n min-width: 180px;\n}\n\n.improve-button:hover {\n background-color: #ff5722;\n transform: translateY(-2px);\n box-shadow: 0 8px 20px rgba(255, 112, 67, 0.5);\n}\n\n.improve-button.loading {\n background-color: #ff7043;\n opacity: 0.8;\n cursor: not-allowed;\n padding-left: 42px; /* Reduced padding to bring text closer to icon */\n padding-right: 22px; /* Balance the button */\n justify-content: flex-start; /* Left align text for better alignment with icon */\n}\n\n.improve-button.loading:after {\n content: \"\"; /* Add space between icon and text */\n display: inline-block;\n width: 8px; /* Width of the space */\n}\n\n.improve-button:before {\n content: \"\";\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='white' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpath d='M12 2v4M12 18v4M4.93 4.93l2.83 2.83M16.24 16.24l2.83 2.83M2 12h4M18 12h4M4.93 19.07l2.83-2.83M16.24 7.76l2.83-2.83'/%3E%3C/svg%3E\");\n width: 20px; /* Slightly smaller icon */\n height: 20px;\n background-repeat: no-repeat;\n background-size: contain;\n position: absolute;\n left: 16px; /* Slightly adjusted */\n top: 50%;\n transform: translateY(-50%);\n display: none;\n}\n\n.improve-button.loading:before {\n display: block;\n animation: spin 1.5s linear infinite;\n}\n\n@keyframes spin {\n 0% { transform: translateY(-50%) rotate(0deg); }\n 100% { transform: translateY(-50%) rotate(360deg); }\n}\n\n/* Ping Animation */\n.ping-animation {\n position: absolute;\n display: flex;\n width: 12px;\n height: 12px;\n top: 0;\n right: 0;\n}\n\n.ping-circle {\n position: absolute;\n display: inline-flex;\n width: 100%;\n height: 100%;\n border-radius: 50%;\n background-color: #38BDF8;\n opacity: 0.75;\n animation: ping 1.5s cubic-bezier(0, 0, 0.2, 1) infinite;\n}\n\n.ping-dot {\n position: relative;\n display: inline-flex;\n width: 12px;\n height: 12px;\n border-radius: 50%;\n background-color: #0EA5E9;\n}\n\n@keyframes ping {\n 75%, 100% {\n transform: scale(2);\n opacity: 0;\n }\n}\n\n/* Instruction hover effects */\n.instruction-item:hover .instruction-delete-btn {\n display: flex !important;\n}\n\n/* Add some subtle animations */\n@keyframes fadeIn {\n from { opacity: 0; transform: translateY(20px); }\n to { opacity: 1; transform: translateY(0); }\n}\n\n/* Better center alignment for the recipe card */\n.recipe-card-container {\n display: flex;\n justify-content: center;\n width: 100%;\n position: relative;\n z-index: 1;\n margin: 0 auto;\n box-sizing: border-box;\n}\n\n/* Add Buttons */\n.add-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 8px;\n padding: 10px 16px;\n cursor: pointer;\n font-weight: 500;\n display: 
inline-block;\n font-size: 14px;\n margin-bottom: 0;\n}\n\n.add-step-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 6px;\n padding: 6px 12px;\n cursor: pointer;\n font-weight: 500;\n font-size: 13px;\n}\n\n/* Section Headers */\n.section-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 12px;\n}", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🍳 Shared State Recipe Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **shared state** functionality - a powerful\nfeature that enables bidirectional data flow between:\n\n1. **Frontend → Agent**: UI controls update the agent's context in real-time\n2. **Agent → Frontend**: The Copilot's recipe creations instantly update the UI\n components\n\nIt's like having a cooking buddy who not only listens to what you want but also\nupdates your recipe card as you chat - no refresh needed! ✨\n\n## How to Interact\n\nMix and match any of these parameters (or none at all - it's up to you!):\n\n- **Skill Level**: Beginner to expert 👨‍🍳\n- **Cooking Time**: Quick meals or slow cooking ⏱️\n- **Special Preferences**: Dietary needs, flavor profiles, health goals 🥗\n- **Ingredients**: Items you want to include 🧅🥩🍄\n- **Instructions**: Any specific steps\n\nThen chat with your Copilot chef with prompts like:\n\n- \"I'm a beginner cook. Can you make me a quick dinner?\"\n- \"I need something spicy with chicken that takes under 30 minutes!\"\n\n## ✨ Shared State Magic in Action\n\n**What's happening technically:**\n\n- The UI and Copilot agent share the same state object (**Agent State = UI\n State**)\n- Changes from either side automatically update the other\n- Neither side needs to manually request updates from the other\n\n**What you'll see in this demo:**\n\n- Set cooking time to 20 minutes in the UI and watch the Copilot immediately\n respect your time constraint\n- Add ingredients through the UI and see them appear in your recipe\n- When the Copilot suggests new ingredients, watch them automatically appear in\n the UI ingredients list\n- Change your skill level and see how the Copilot adapts its instructions in\n real-time\n\nThis synchronized state creates a seamless experience where the agent always has\nyour current preferences, and any updates to the recipe are instantly reflected\nin both places.\n\nThis shared state pattern can be applied to any application where you want your\nUI and Copilot to work together in perfect harmony!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "shared_state.py", - "content": "\"\"\"Shared State feature.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dotenv import load_dotenv\nload_dotenv()\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nfrom fastapi import FastAPI\nfrom ag_ui_adk import ADKAgent, add_adk_fastapi_endpoint\n\n# ADK imports\nfrom google.adk.agents import LlmAgent\nfrom google.adk.agents.callback_context import CallbackContext\nfrom google.adk.sessions import InMemorySessionService, Session\nfrom google.adk.runners import Runner\nfrom google.adk.events import Event, EventActions\nfrom google.adk.tools import FunctionTool, ToolContext\nfrom google.genai.types import Content, Part , FunctionDeclaration\nfrom google.adk.models import LlmResponse, LlmRequest\nfrom google.genai import types\n\nfrom pydantic import BaseModel, Field\nfrom typing import List, Optional\nfrom enum import 
Enum\n\nclass SkillLevel(str, Enum):\n # Add your skill level values here\n BEGINNER = \"beginner\"\n INTERMEDIATE = \"intermediate\"\n ADVANCED = \"advanced\"\n\nclass SpecialPreferences(str, Enum):\n # Add your special preferences values here\n VEGETARIAN = \"vegetarian\"\n VEGAN = \"vegan\"\n GLUTEN_FREE = \"gluten_free\"\n DAIRY_FREE = \"dairy_free\"\n KETO = \"keto\"\n LOW_CARB = \"low_carb\"\n\nclass CookingTime(str, Enum):\n # Add your cooking time values here\n QUICK = \"under_30_min\"\n MEDIUM = \"30_60_min\"\n LONG = \"over_60_min\"\n\nclass Ingredient(BaseModel):\n icon: str = Field(..., description=\"The icon emoji of the ingredient\")\n name: str\n amount: str\n\nclass Recipe(BaseModel):\n skill_level: SkillLevel = Field(..., description=\"The skill level required for the recipe\")\n special_preferences: Optional[List[SpecialPreferences]] = Field(\n None,\n description=\"A list of special preferences for the recipe\"\n )\n cooking_time: Optional[CookingTime] = Field(\n None,\n description=\"The cooking time of the recipe\"\n )\n ingredients: List[Ingredient] = Field(..., description=\"Entire list of ingredients for the recipe\")\n instructions: List[str] = Field(..., description=\"Entire list of instructions for the recipe\")\n changes: Optional[str] = Field(\n None,\n description=\"A description of the changes made to the recipe\"\n )\n\ndef generate_recipe(\n tool_context: ToolContext,\n skill_level: str,\n title: str,\n special_preferences: List[str] = [],\n cooking_time: str = \"\",\n ingredients: List[dict] = [],\n instructions: List[str] = [],\n changes: str = \"\"\n) -> Dict[str, str]:\n \"\"\"\n Generate or update a recipe using the provided recipe data.\n\n Args:\n \"title\": {\n \"type\": \"string\",\n \"description\": \"**REQUIRED** - The title of the recipe.\"\n },\n \"skill_level\": {\n \"type\": \"string\",\n \"enum\": [\"Beginner\",\"Intermediate\",\"Advanced\"],\n \"description\": \"**REQUIRED** - The skill level required for the recipe. Must be one of the predefined skill levels (Beginner, Intermediate, Advanced).\"\n },\n \"special_preferences\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n \"enum\": [\"High Protein\",\"Low Carb\",\"Spicy\",\"Budget-Friendly\",\"One-Pot Meal\",\"Vegetarian\",\"Vegan\"],\n \"description\": \"**OPTIONAL** - Special dietary preferences for the recipe as comma-separated values. Example: 'High Protein, Low Carb, Gluten Free'. Leave empty array if no special preferences.\"\n },\n \"cooking_time\": {\n \"type\": \"string\",\n \"enum\": [5 min, 15 min, 30 min, 45 min, 60+ min],\n \"description\": \"**OPTIONAL** - The total cooking time for the recipe. Must be one of the predefined time slots (5 min, 15 min, 30 min, 45 min, 60+ min). 
Omit if time is not specified.\"\n },\n \"ingredients\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"icon\": {\"type\": \"string\", \"description\": \"The icon emoji (not emoji code like '\\x1f35e', but the actual emoji like 🥕) of the ingredient\"},\n \"name\": {\"type\": \"string\"},\n \"amount\": {\"type\": \"string\"}\n }\n },\n \"description\": \"Entire list of ingredients for the recipe, including the new ingredients and the ones that are already in the recipe\"\n },\n \"instructions\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n \"description\": \"Entire list of instructions for the recipe, including the new instructions and the ones that are already there\"\n },\n \"changes\": {\n \"type\": \"string\",\n \"description\": \"**OPTIONAL** - A brief description of what changes were made to the recipe compared to the previous version. Example: 'Added more spices for flavor', 'Reduced cooking time', 'Substituted ingredient X for Y'. Omit if this is a new recipe.\"\n }\n\n Returns:\n Dict indicating success status and message\n \"\"\"\n try:\n\n\n # Create RecipeData object to validate structure\n recipe = {\n \"title\": title,\n \"skill_level\": skill_level,\n \"special_preferences\": special_preferences ,\n \"cooking_time\": cooking_time ,\n \"ingredients\": ingredients ,\n \"instructions\": instructions ,\n \"changes\": changes\n }\n\n # Update the session state with the new recipe\n current_recipe = tool_context.state.get(\"recipe\", {})\n if current_recipe:\n # Merge with existing recipe\n for key, value in recipe.items():\n if value is not None or value != \"\":\n current_recipe[key] = value\n else:\n current_recipe = recipe\n\n tool_context.state[\"recipe\"] = current_recipe\n\n\n\n return {\"status\": \"success\", \"message\": \"Recipe generated successfully\"}\n\n except Exception as e:\n return {\"status\": \"error\", \"message\": f\"Error generating recipe: {str(e)}\"}\n\n\n\ndef on_before_agent(callback_context: CallbackContext):\n \"\"\"\n Initialize recipe state if it doesn't exist.\n \"\"\"\n\n if \"recipe\" not in callback_context.state:\n # Initialize with default recipe\n default_recipe = {\n \"title\": \"Make Your Recipe\",\n \"skill_level\": \"Beginner\",\n \"special_preferences\": [],\n \"cooking_time\": '15 min',\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n callback_context.state[\"recipe\"] = default_recipe\n\n\n return None\n\n\n# --- Define the Callback Function ---\n# modifying the agent's system prompt to incude the current state of recipe\ndef before_model_modifier(\n callback_context: CallbackContext, llm_request: LlmRequest\n) -> Optional[LlmResponse]:\n \"\"\"Inspects/modifies the LLM request or skips the call.\"\"\"\n agent_name = callback_context.agent_name\n if agent_name == \"RecipeAgent\":\n recipe_json = \"No recipe yet\"\n if \"recipe\" in callback_context.state and callback_context.state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(callback_context.state[\"recipe\"], indent=2)\n except Exception as e:\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n # --- Modification Example ---\n # Add a prefix to the system instruction\n original_instruction = llm_request.config.system_instruction or types.Content(role=\"system\", parts=[])\n prefix = f\"\"\"You are a helpful assistant for creating recipes.\n This is the current state of the recipe: {recipe_json}\n You can 
improve the recipe by calling the generate_recipe tool.\"\"\"\n # Ensure system_instruction is Content and parts list exists\n if not isinstance(original_instruction, types.Content):\n # Handle case where it might be a string (though config expects Content)\n original_instruction = types.Content(role=\"system\", parts=[types.Part(text=str(original_instruction))])\n if not original_instruction.parts:\n original_instruction.parts.append(types.Part(text=\"\")) # Add an empty part if none exist\n\n # Modify the text of the first part\n modified_text = prefix + (original_instruction.parts[0].text or \"\")\n original_instruction.parts[0].text = modified_text\n llm_request.config.system_instruction = original_instruction\n\n\n\n return None\n\n\n# --- Define the Callback Function ---\ndef simple_after_model_modifier(\n callback_context: CallbackContext, llm_response: LlmResponse\n) -> Optional[LlmResponse]:\n \"\"\"Stop the consecutive tool calling of the agent\"\"\"\n agent_name = callback_context.agent_name\n # --- Inspection ---\n if agent_name == \"RecipeAgent\":\n original_text = \"\"\n if llm_response.content and llm_response.content.parts:\n # Assuming simple text response for this example\n if llm_response.content.role=='model' and llm_response.content.parts[0].text:\n original_text = llm_response.content.parts[0].text\n callback_context._invocation_context.end_invocation = True\n\n elif llm_response.error_message:\n return None\n else:\n return None # Nothing to modify\n return None\n\n\nshared_state_agent = LlmAgent(\n name=\"RecipeAgent\",\n model=\"gemini-2.5-pro\",\n instruction=f\"\"\"\n When a user asks for a recipe or wants to modify one, you MUST use the generate_recipe tool.\n\n IMPORTANT RULES:\n 1. Always use the generate_recipe tool for any recipe-related requests\n 2. When creating a new recipe, provide at least skill_level, ingredients, and instructions\n 3. When modifying an existing recipe, include the changes parameter to describe what was modified\n 4. Be creative and helpful in generating complete, practical recipes\n 5. After using the tool, provide a brief summary of what you created or changed\n 6. If user ask to improve the recipe then add more ingredients and make it healthier\n 7. 
When you see the 'Recipe generated successfully' confirmation message, wish the user well with their cooking by telling them to enjoy their dish.\n\n Examples of when to use the tool:\n - \"Create a pasta recipe\" → Use tool with skill_level, ingredients, instructions\n - \"Make it vegetarian\" → Use tool with special_preferences=[\"vegetarian\"] and changes describing the modification\n - \"Add some herbs\" → Use tool with updated ingredients and changes describing the addition\n\n Always provide complete, practical recipes that users can actually cook.\n \"\"\",\n tools=[generate_recipe],\n before_agent_callback=on_before_agent,\n before_model_callback=before_model_modifier,\n after_model_callback = simple_after_model_modifier\n )\n\n# Create ADK middleware agent instance\nadk_shared_state_agent = ADKAgent(\n adk_agent=shared_state_agent,\n app_name=\"demo_app\",\n user_id=\"demo_user\",\n session_timeout_seconds=3600,\n use_in_memory_services=True\n)\n\n# Create FastAPI app\napp = FastAPI(title=\"ADK Middleware Shared State\")\n\n# Add the ADK endpoint\nadd_adk_fastapi_endpoint(app, adk_shared_state_agent, path=\"/\")\n", - "language": "python", - "type": "file" - } - ], - "server-starter-all-features::agentic_chat": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
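The `Recipe` Pydantic model in `shared_state.py` above mirrors the state object the frontend edits. A minimal sketch, assuming Pydantic v2 and that the file is importable as `shared_state` (a hypothetical module name), of constructing and serializing one such state:

```python
# Minimal sketch (assumptions: Pydantic v2 API, and the file above
# importable as `shared_state` -- a hypothetical module name).
from shared_state import CookingTime, Ingredient, Recipe, SkillLevel

recipe = Recipe(
    skill_level=SkillLevel.BEGINNER,
    cooking_time=CookingTime.QUICK,
    ingredients=[Ingredient(icon="🥕", name="Carrots", amount="3 large, grated")],
    instructions=["Preheat oven to 350°F (175°C)"],
)

# mode="json" turns the str-Enums into plain strings -- roughly the
# JSON-ready shape the tool stores in tool_context.state["recipe"].
print(recipe.model_dump(mode="json"))
```

The optional fields (`special_preferences`, `cooking_time`, `changes`) default to `None`, so only the three required fields must be supplied.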
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤖 Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interacts with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend and automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agentic_chat.py", - "content": "\"\"\"\nAgentic chat endpoint for the AG-UI protocol.\n\"\"\"\n\nimport uuid\nimport asyncio\nimport json\nfrom fastapi import Request\nfrom fastapi.responses import StreamingResponse\nfrom ag_ui.core import (\n RunAgentInput,\n EventType,\n RunStartedEvent,\n RunFinishedEvent,\n TextMessageStartEvent,\n TextMessageContentEvent,\n TextMessageEndEvent,\n ToolCallStartEvent,\n ToolCallArgsEvent,\n ToolCallEndEvent,\n MessagesSnapshotEvent,\n ToolMessage,\n ToolCall,\n AssistantMessage\n)\nfrom ag_ui.core.events import TextMessageChunkEvent\nfrom ag_ui.encoder import EventEncoder\n\nasync def agentic_chat_endpoint(input_data: RunAgentInput, request: Request):\n \"\"\"Agentic chat endpoint\"\"\"\n # Get the accept header from the request\n accept_header = request.headers.get(\"accept\")\n\n # Create an event encoder to properly format SSE events\n encoder = EventEncoder(accept=accept_header)\n\n async def event_generator():\n # Get the last message content for conditional logic\n last_message_content = None\n last_message_role = None\n if input_data.messages and len(input_data.messages) > 0:\n last_message = 
input_data.messages[-1]\n last_message_content = last_message.content\n last_message_role = getattr(last_message, 'role', None)\n\n # Send run started event\n yield encoder.encode(\n RunStartedEvent(\n type=EventType.RUN_STARTED,\n thread_id=input_data.thread_id,\n run_id=input_data.run_id\n ),\n )\n\n # Conditional logic based on last message\n if last_message_role == \"tool\":\n async for event in send_tool_result_message_events():\n yield encoder.encode(event)\n elif last_message_content == \"tool\":\n async for event in send_tool_call_events():\n yield encoder.encode(event)\n elif last_message_content == \"backend_tool\":\n async for event in send_backend_tool_call_events(input_data.messages):\n yield encoder.encode(event)\n else:\n async for event in send_text_message_events():\n yield encoder.encode(event)\n\n # Send run finished event\n yield encoder.encode(\n RunFinishedEvent(\n type=EventType.RUN_FINISHED,\n thread_id=input_data.thread_id,\n run_id=input_data.run_id\n ),\n )\n\n return StreamingResponse(\n event_generator(),\n media_type=encoder.get_content_type()\n )\n\n\nasync def send_text_message_events():\n \"\"\"Send text message events with countdown\"\"\"\n message_id = str(uuid.uuid4())\n\n # Start of message\n yield TextMessageStartEvent(\n type=EventType.TEXT_MESSAGE_START,\n message_id=message_id,\n role=\"assistant\"\n )\n\n # Initial content chunk\n yield TextMessageContentEvent(\n type=EventType.TEXT_MESSAGE_CONTENT,\n message_id=message_id,\n delta=\"counting down: \"\n )\n\n # Countdown from 10 to 1\n for count in range(10, 0, -1):\n yield TextMessageContentEvent(\n type=EventType.TEXT_MESSAGE_CONTENT,\n message_id=message_id,\n delta=f\"{count} \"\n )\n # Sleep for 300ms\n await asyncio.sleep(0.3)\n\n # Final checkmark\n yield TextMessageContentEvent(\n type=EventType.TEXT_MESSAGE_CONTENT,\n message_id=message_id,\n delta=\"✓\"\n )\n\n # End of message\n yield TextMessageEndEvent(\n type=EventType.TEXT_MESSAGE_END,\n message_id=message_id\n )\n\n\nasync def send_tool_result_message_events():\n \"\"\"Send message for tool result\"\"\"\n message_id = str(uuid.uuid4())\n\n # Start of message\n yield TextMessageStartEvent(\n type=EventType.TEXT_MESSAGE_START,\n message_id=message_id,\n role=\"assistant\"\n )\n\n # Content\n yield TextMessageContentEvent(\n type=EventType.TEXT_MESSAGE_CONTENT,\n message_id=message_id,\n delta=\"background changed ✓\"\n )\n\n # End of message\n yield TextMessageEndEvent(\n type=EventType.TEXT_MESSAGE_END,\n message_id=message_id\n )\n\n\nasync def send_tool_call_events():\n \"\"\"Send tool call events\"\"\"\n tool_call_id = str(uuid.uuid4())\n tool_call_name = \"change_background\"\n tool_call_args = {\n \"background\": \"linear-gradient(135deg, #667eea 0%, #764ba2 100%)\"\n }\n\n # Tool call start\n yield ToolCallStartEvent(\n type=EventType.TOOL_CALL_START,\n tool_call_id=tool_call_id,\n tool_call_name=tool_call_name\n )\n\n # Tool call args\n yield ToolCallArgsEvent(\n type=EventType.TOOL_CALL_ARGS,\n tool_call_id=tool_call_id,\n delta=json.dumps(tool_call_args)\n )\n\n # Tool call end\n yield ToolCallEndEvent(\n type=EventType.TOOL_CALL_END,\n tool_call_id=tool_call_id\n )\n\nasync def send_backend_tool_call_events(messages):\n \"\"\"Send backend tool call events\"\"\"\n tool_call_id = str(uuid.uuid4())\n\n new_message = AssistantMessage(\n id=str(uuid.uuid4()),\n role=\"assistant\",\n tool_calls=[\n ToolCall(\n id=tool_call_id,\n type=\"function\",\n function={\n \"name\": \"lookup_weather\",\n \"arguments\": json.dumps({\"city\": 
\"San Francisco\", \"weather\": \"sunny\"})\n }\n )\n ]\n )\n\n result_message = ToolMessage(\n id=str(uuid.uuid4()),\n role=\"tool\",\n content=\"The weather in San Francisco is sunny.\",\n tool_call_id=tool_call_id\n )\n\n all_messages = list(messages) + [new_message, result_message]\n\n # Send messages snapshot event\n yield MessagesSnapshotEvent(\n type=EventType.MESSAGES_SNAPSHOT,\n messages=all_messages\n )\n", - "language": "python", - "type": "file" - } - ], - "server-starter-all-features::human_in_the_loop": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // LangGraph uses its own hook to handle human-in-the-loop interactions via LangGraph interrupts;\n // this hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // LangGraph uses its own hook to handle human-in-the-loop interactions via LangGraph interrupts,\n // so don't use this action for the LangGraph integrations.\n available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return <StepsFeedback args={args} respond={respond} status={status} />;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤝 Human-in-the-Loop Task Planner\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **human-in-the-loop** capabilities:\n\n1. **Collaborative Planning**: The Copilot generates task steps and lets you\n decide which ones to perform\n2. **Interactive Decision Making**: Select or deselect steps to customize the\n execution plan\n3. **Adaptive Responses**: The Copilot adapts its execution based on your\n choices, even handling missing steps\n\n## How to Interact\n\nTry these steps to experience the demo:\n\n1. Ask your Copilot to help with a task, such as:\n\n - \"Make me a sandwich\"\n - \"Plan a weekend trip\"\n - \"Organize a birthday party\"\n - \"Start a garden\"\n\n2. Review the suggested steps provided by your Copilot\n\n3. Select or deselect steps using the checkboxes to customize the plan\n\n - Try removing essential steps to see how the Copilot adapts!\n\n4. Click \"Execute Plan\" to see the outcome based on your selections\n\n## ✨ Human-in-the-Loop Magic in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and breaks it down into logical steps\n- These steps are presented to you through a dynamic UI component\n- Your selections are captured as user input\n- The agent considers your choices when executing the plan\n- The agent adapts to missing steps with creative problem-solving\n\n**What you'll see in this demo:**\n\n- The Copilot provides a detailed, step-by-step plan for your task\n- You have complete control over which steps to include\n- If you remove essential steps, the Copilot provides entertaining and creative\n workarounds\n- The final execution reflects your choices, showing how human input shapes the\n outcome\n- Each response is tailored to your specific selections\n\nThis human-in-the-loop pattern creates a powerful collaborative experience where\nboth human judgment and AI capabilities work together to achieve better results\nthan either could alone!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "human_in_the_loop.py", - "content": "\"\"\"\nHuman in the loop endpoint for the AG-UI protocol.\n\"\"\"\n\nimport uuid\nimport asyncio\nimport json\nfrom fastapi import Request\nfrom fastapi.responses import StreamingResponse\nfrom ag_ui.core import (\n RunAgentInput,\n EventType,\n RunStartedEvent,\n RunFinishedEvent,\n TextMessageStartEvent,\n TextMessageContentEvent,\n TextMessageEndEvent,\n ToolCallStartEvent,\n ToolCallArgsEvent,\n ToolCallEndEvent\n)\nfrom ag_ui.encoder import EventEncoder\n\nasync def human_in_the_loop_endpoint(input_data: RunAgentInput, request: Request):\n \"\"\"Human in the loop endpoint\"\"\"\n # Get the accept header from the request\n accept_header = request.headers.get(\"accept\")\n\n # Create an event encoder to properly format SSE events\n encoder = EventEncoder(accept=accept_header)\n\n async def event_generator():\n # Get the last message for conditional logic\n last_message = None\n if input_data.messages and len(input_data.messages) > 0:\n last_message = 
input_data.messages[-1]\n\n # Send run started event\n yield encoder.encode(\n RunStartedEvent(\n type=EventType.RUN_STARTED,\n thread_id=input_data.thread_id,\n run_id=input_data.run_id\n ),\n )\n\n # Conditional logic based on last message role\n if last_message and getattr(last_message, 'role', None) == \"tool\":\n async for event in send_text_message_events():\n yield encoder.encode(event)\n else:\n async for event in send_tool_call_events():\n yield encoder.encode(event)\n\n # Send run finished event\n yield encoder.encode(\n RunFinishedEvent(\n type=EventType.RUN_FINISHED,\n thread_id=input_data.thread_id,\n run_id=input_data.run_id\n ),\n )\n\n return StreamingResponse(\n event_generator(),\n media_type=encoder.get_content_type()\n )\n\n\nasync def send_tool_call_events():\n \"\"\"Send tool call events that generate task steps incrementally\"\"\"\n tool_call_id = str(uuid.uuid4())\n tool_call_name = \"generate_task_steps\"\n\n # Tool call start\n yield ToolCallStartEvent(\n type=EventType.TOOL_CALL_START,\n tool_call_id=tool_call_id,\n tool_call_name=tool_call_name\n )\n\n # Start building JSON - opening structure\n yield ToolCallArgsEvent(\n type=EventType.TOOL_CALL_ARGS,\n tool_call_id=tool_call_id,\n delta='{\"steps\":['\n )\n\n # Generate 10 steps incrementally\n for i in range(10):\n step_data = {\n \"description\": f\"Step {i + 1}\",\n \"status\": \"enabled\"\n }\n \n # Add comma separator except for the last item\n delta = json.dumps(step_data) + (\",\" if i != 9 else \"\")\n \n yield ToolCallArgsEvent(\n type=EventType.TOOL_CALL_ARGS,\n tool_call_id=tool_call_id,\n delta=delta\n )\n \n # Sleep for 200ms\n await asyncio.sleep(0.2)\n\n # Close JSON structure\n yield ToolCallArgsEvent(\n type=EventType.TOOL_CALL_ARGS,\n tool_call_id=tool_call_id,\n delta=\"]}\"\n )\n\n # Tool call end\n yield ToolCallEndEvent(\n type=EventType.TOOL_CALL_END,\n tool_call_id=tool_call_id\n )\n\n\nasync def send_text_message_events():\n \"\"\"Send text message events with simple response\"\"\"\n message_id = str(uuid.uuid4())\n\n # Start of message\n yield TextMessageStartEvent(\n type=EventType.TEXT_MESSAGE_START,\n message_id=message_id,\n role=\"assistant\"\n )\n\n # Content\n yield TextMessageContentEvent(\n type=EventType.TEXT_MESSAGE_CONTENT,\n message_id=message_id,\n delta=\"Ok! 
I'm working on it.\"\n )\n\n # End of message\n yield TextMessageEndEvent(\n type=EventType.TEXT_MESSAGE_END,\n message_id=message_id\n )\n", - "language": "python", - "type": "file" - } - ], - "server-starter-all-features::agentic_generative_ui": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter(step => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
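Note that the `TOOL_CALL_ARGS` deltas streamed above are fragments of one JSON document, not standalone JSON. A minimal sketch of how a client reassembles them; the `deltas` list below simply mirrors what `send_tool_call_events` yields:

```python
import json

# The same fragments send_tool_call_events streams as TOOL_CALL_ARGS deltas.
deltas = ['{"steps":[']
deltas += [
    json.dumps({"description": f"Step {i + 1}", "status": "enabled"})
    + ("," if i != 9 else "")
    for i in range(10)
]
deltas.append("]}")

# Individual deltas are not valid JSON on their own; only the concatenation
# of all deltas for one tool_call_id parses.
args = json.loads("".join(deltas))
assert len(args["steps"]) == 10
print(args["steps"][0])  # {'description': 'Step 1', 'status': 'enabled'}
```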
\n
\n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n \n {/* Progress Bar */}\n
\n
\n
\n
\n
\n\n {/* Steps */}\n
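{/* The first \"pending\" step counts as in-progress (found via findIndex below); any later pending steps render as upcoming */}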
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending = step.status === \"pending\" && \n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n
\n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n
\n )}\n\n {/* Status Icon */}\n
\n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n
\n {step.description}\n
\n {isCurrentPending && (\n
\n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n
\n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n
\n
\n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🚀 Agentic Generative UI Task Executor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic generative UI** capabilities:\n\n1. **Real-time Status Updates**: The Copilot provides live feedback as it works\n through complex tasks\n2. **Long-running Task Execution**: See how agents can handle extended processes\n with continuous feedback\n3. **Dynamic UI Generation**: The interface updates in real-time to reflect the\n agent's progress\n\n## How to Interact\n\nSimply ask your Copilot to perform any moderately complex task:\n\n- \"Make me a sandwich\"\n- \"Plan a vacation to Japan\"\n- \"Create a weekly workout routine\"\n\nThe Copilot will break down the task into steps and begin \"executing\" them,\nproviding real-time status updates as it progresses.\n\n## ✨ Agentic Generative UI in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and creates a detailed execution plan\n- Each step is processed sequentially with realistic timing\n- Status updates are streamed to the frontend using CopilotKit's streaming\n capabilities\n- The UI dynamically renders these updates without page refreshes\n- The entire flow is managed by the agent, requiring no manual intervention\n\n**What you'll see in this demo:**\n\n- The Copilot breaks your task into logical steps\n- A status indicator shows the current progress\n- Each step is highlighted as it's being executed\n- Detailed status messages explain what's happening at each moment\n- Upon completion, you receive a summary of the task execution\n\nThis pattern of providing real-time progress for long-running tasks is perfect\nfor scenarios where users benefit from transparency into complex processes -\nfrom data analysis to content creation, system configurations, or multi-stage\nworkflows!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agentic_generative_ui.py", - "content": "\"\"\"\nAgentic generative UI endpoint for the AG-UI protocol.\n\"\"\"\n\nimport asyncio\nimport copy\nimport jsonpatch\nfrom fastapi import Request\nfrom fastapi.responses import StreamingResponse\nfrom ag_ui.core import (\n RunAgentInput,\n EventType,\n RunStartedEvent,\n RunFinishedEvent,\n StateSnapshotEvent,\n StateDeltaEvent\n)\nfrom ag_ui.encoder import EventEncoder\n\nasync def agentic_generative_ui_endpoint(input_data: RunAgentInput, request: Request):\n \"\"\"Agentic generative UI endpoint\"\"\"\n # Get the accept header from the request\n accept_header = request.headers.get(\"accept\")\n\n # Create an event encoder to properly format SSE events\n encoder = EventEncoder(accept=accept_header)\n\n async def event_generator():\n # Send run started event\n yield encoder.encode(\n RunStartedEvent(\n type=EventType.RUN_STARTED,\n thread_id=input_data.thread_id,\n 
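# thread_id and run_id are echoed from the incoming RunAgentInput so the client can match this event stream to its run\n                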
run_id=input_data.run_id\n ),\n )\n\n # Send state events\n async for event in send_state_events():\n yield encoder.encode(event)\n\n # Send run finished event\n yield encoder.encode(\n RunFinishedEvent(\n type=EventType.RUN_FINISHED,\n thread_id=input_data.thread_id,\n run_id=input_data.run_id\n ),\n )\n\n return StreamingResponse(\n event_generator(),\n media_type=encoder.get_content_type()\n )\n\n\nasync def send_state_events():\n \"\"\"Send state events with snapshots and deltas\"\"\"\n # Initialize state\n state = {\n \"steps\": [\n {\n \"description\": f\"Step {i + 1}\",\n \"status\": \"pending\"\n }\n for i in range(10)\n ]\n }\n\n # Send initial state snapshot\n yield StateSnapshotEvent(\n type=EventType.STATE_SNAPSHOT,\n snapshot=state\n )\n \n # Sleep for 1 second\n await asyncio.sleep(1.0)\n\n # Create a copy to track changes for JSON patches\n previous_state = copy.deepcopy(state)\n\n # Update each step and send deltas\n for i, step in enumerate(state[\"steps\"]):\n step[\"status\"] = \"completed\"\n \n # Generate JSON patch from previous state to current state\n patch = jsonpatch.make_patch(previous_state, state)\n \n # Send state delta event\n yield StateDeltaEvent(\n type=EventType.STATE_DELTA,\n delta=patch.patch\n )\n \n # Update previous state for next iteration\n previous_state = copy.deepcopy(state)\n \n # Sleep for 1 second\n await asyncio.sleep(1.0)\n\n # Optionally send a final snapshot to the client\n yield StateSnapshotEvent(\n type=EventType.STATE_SNAPSHOT,\n snapshot=state\n )\n", - "language": "python", - "type": "file" - } - ], - "server-starter-all-features::tool_based_generative_ui": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n
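{/* On mobile, the MobileChat component defined below replaces the sidebar with a pull-up chat sheet */}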
\n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n           {\n              if (!isChatOpen) {\n                setChatHeight(defaultChatHeight); // Reset to the default height when opening\n              }\n              setIsChatOpen(!isChatOpen);\n            }}\n          >\n            
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
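{/* handleDragStart from useMobileChat drives resizing; chatHeight and isDragging track the gesture */}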
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
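{/* Match this in-chat card to the haiku in state by its first Japanese line so image clicks update the right entry */}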
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n
\n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
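{/* Only the active haiku (or a lone one) renders here; the Thumbnails rail switches activeIndex */}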
\n
\n            {haikus.map((haiku, index) => (\n              (haikus.length === 1 || index === activeIndex) && (\n\n                \n                  {haiku.japanese.map((line, lineIndex) => (\n                    \n                      

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n
\n
\n    );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n  if (haikus.length === 0 || isMobile) { return null }\n  return (\n    
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n
\n ))}\n
\n )\n\n}", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n}\n\n.page-background {\n /* Darker gradient background */\n background: linear-gradient(170deg, #e9ecef 0%, #ced4da 100%);\n}\n\n@keyframes fade-scale-in {\n from {\n opacity: 0;\n transform: translateY(10px) scale(0.98);\n }\n to {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Updated card entry animation */\n@keyframes pop-in {\n 0% {\n opacity: 0;\n transform: translateY(15px) scale(0.95);\n }\n 70% {\n opacity: 1;\n transform: translateY(-2px) scale(1.02);\n }\n 100% {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Animation for subtle background gradient movement */\n@keyframes animated-gradient {\n 0% {\n background-position: 0% 50%;\n }\n 50% {\n background-position: 100% 50%;\n }\n 100% {\n background-position: 0% 50%;\n }\n}\n\n/* Animation for flash effect on apply */\n@keyframes flash-border-glow {\n 0% {\n /* Start slightly intensified */\n border-top-color: #ff5b4a !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n }\n 50% {\n /* Peak intensity */\n border-top-color: #ff4733 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.08),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 35px rgba(255, 71, 51, 0.7);\n }\n 100% {\n /* Return to default state appearance */\n border-top-color: #ff6f61 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 10px rgba(255, 111, 97, 0.15);\n }\n}\n\n/* Existing animation for haiku lines */\n@keyframes fade-slide-in {\n from {\n opacity: 0;\n transform: translateX(-15px);\n }\n to {\n opacity: 1;\n transform: translateX(0);\n }\n}\n\n.animated-fade-in {\n /* Use the new pop-in animation */\n animation: pop-in 0.6s ease-out forwards;\n}\n\n.haiku-card {\n /* Subtle animated gradient background */\n background: linear-gradient(120deg, #ffffff 0%, #fdfdfd 50%, #ffffff 100%);\n background-size: 200% 200%;\n animation: animated-gradient 10s ease infinite;\n\n /* === Explicit Border Override Attempt === */\n /* 1. Set the default grey border for all sides */\n border: 1px solid #dee2e6;\n\n /* 2. 
Explicitly override the top border immediately after */\n border-top: 10px solid #ff6f61 !important; /* Orange top - Added !important */\n /* === End Explicit Border Override Attempt === */\n\n padding: 2.5rem 3rem;\n border-radius: 20px;\n\n /* Default glow intensity */\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 15px rgba(255, 111, 97, 0.25);\n text-align: left;\n max-width: 745px;\n margin: 3rem auto;\n min-width: 600px;\n\n /* Transition */\n transition: transform 0.35s ease, box-shadow 0.35s ease, border-top-width 0.35s ease, border-top-color 0.35s ease;\n}\n\n.haiku-card:hover {\n transform: translateY(-8px) scale(1.03);\n /* Enhanced shadow + Glow */\n box-shadow: 0 15px 35px rgba(0, 0, 0, 0.1),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n /* Modify only top border properties */\n border-top-width: 14px !important; /* Added !important */\n border-top-color: #ff5b4a !important; /* Added !important */\n}\n\n.haiku-card .flex {\n margin-bottom: 1.5rem;\n}\n\n.haiku-card .flex.haiku-line { /* Target the lines specifically */\n margin-bottom: 1.5rem;\n opacity: 0; /* Start hidden for animation */\n animation: fade-slide-in 0.5s ease-out forwards;\n /* animation-delay is set inline in page.tsx */\n}\n\n/* Remove previous explicit color overrides - rely on Tailwind */\n/* .haiku-card p.text-4xl {\n color: #212529;\n}\n\n.haiku-card p.text-base {\n color: #495057;\n} */\n\n.haiku-card.applied-flash {\n /* Apply the flash animation once */\n /* Note: animation itself has !important on border-top-color */\n animation: flash-border-glow 0.6s ease-out forwards;\n}\n\n/* Styling for images within the main haiku card */\n.haiku-card-image {\n width: 9.5rem; /* Increased size (approx w-48) */\n height: 9.5rem; /* Increased size (approx h-48) */\n object-fit: cover;\n border-radius: 1.5rem; /* rounded-xl */\n border: 1px solid #e5e7eb;\n /* Enhanced shadow with subtle orange hint */\n box-shadow: 0 8px 15px rgba(0, 0, 0, 0.1),\n 0 3px 6px rgba(0, 0, 0, 0.08),\n 0 0 10px rgba(255, 111, 97, 0.2);\n /* Inherit animation delay from inline style */\n animation-name: fadeIn;\n animation-duration: 0.5s;\n animation-fill-mode: both;\n}\n\n/* Styling for images within the suggestion card */\n.suggestion-card-image {\n width: 6.5rem; /* Increased slightly (w-20) */\n height: 6.5rem; /* Increased slightly (h-20) */\n object-fit: cover;\n border-radius: 1rem; /* Equivalent to rounded-md */\n border: 1px solid #d1d5db; /* Equivalent to border (using Tailwind gray-300) */\n margin-top: 0.5rem;\n /* Added shadow for suggestion images */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1),\n 0 2px 4px rgba(0, 0, 0, 0.06);\n transition: all 0.2s ease-in-out; /* Added for smooth deselection */\n}\n\n/* Styling for the focused suggestion card image */\n.suggestion-card-image-focus {\n width: 6.5rem;\n height: 6.5rem;\n object-fit: cover;\n border-radius: 1rem;\n margin-top: 0.5rem;\n /* Highlight styles */\n border: 2px solid #ff6f61; /* Thicker, themed border */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1), /* Base shadow for depth */\n 0 0 12px rgba(255, 111, 97, 0.6); /* Orange glow */\n transform: scale(1.05); /* Slightly scale up */\n transition: all 0.2s ease-in-out; /* Smooth transition for focus */\n}\n\n/* Styling for the suggestion card container in the sidebar */\n.suggestion-card {\n border: 1px solid #dee2e6; /* Same default border as haiku-card */\n border-top: 10px solid #ff6f61; /* Same orange top border */\n border-radius: 0.375rem; 
/* Default rounded-md */\n /* Note: background-color is set by Tailwind bg-gray-100 */\n /* Other styles like padding, margin, flex are handled by Tailwind */\n}\n\n.suggestion-image-container {\n display: flex;\n gap: 1rem;\n justify-content: space-between;\n width: 100%;\n height: 6.5rem;\n}\n\n/* Mobile responsive styles - matches useMobileView hook breakpoint */\n@media (max-width: 767px) {\n .haiku-card {\n padding: 1rem 1.5rem; /* Reduced from 2.5rem 3rem */\n min-width: auto; /* Remove min-width constraint */\n max-width: 100%; /* Full width on mobile */\n margin: 1rem auto; /* Reduced margin */\n }\n\n .haiku-card-image {\n width: 5.625rem; /* 90px - smaller on mobile */\n height: 5.625rem; /* 90px - smaller on mobile */\n }\n\n .suggestion-card-image {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n\n .suggestion-card-image-focus {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🪶 Tool-Based Generative UI Haiku Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **tool-based generative UI** capabilities:\n\n1. **Frontend Rendering of Tool Calls**: Backend tool calls are automatically\n rendered in the UI\n2. **Dynamic UI Generation**: The UI updates in real-time as the agent generates\n content\n3. **Elegant Content Presentation**: Complex structured data (haikus) are\n beautifully displayed\n\n## How to Interact\n\nChat with your Copilot and ask for haikus about different topics:\n\n- \"Create a haiku about nature\"\n- \"Write a haiku about technology\"\n- \"Generate a haiku about the changing seasons\"\n- \"Make a humorous haiku about programming\"\n\nEach request will trigger the agent to generate a haiku and display it in a\nvisually appealing card format in the UI.\n\n## ✨ Tool-Based Generative UI in Action\n\n**What's happening technically:**\n\n- The agent processes your request and determines it should create a haiku\n- It calls a backend tool that returns structured haiku data\n- CopilotKit automatically renders this tool call in the frontend\n- The rendering is handled by the registered tool component in your React app\n- No manual state management is required to display the results\n\n**What you'll see in this demo:**\n\n- As you request a haiku, a beautifully formatted card appears in the UI\n- The haiku follows the traditional 5-7-5 syllable structure\n- Each haiku is presented with consistent styling\n- Multiple haikus can be generated in sequence\n- The UI adapts to display each new piece of content\n\nThis pattern of tool-based generative UI can be extended to create any kind of\ndynamic content - from data visualizations to interactive components, all driven\nby your Copilot's tool calls!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "tool_based_generative_ui.py", - "content": "\"\"\"\nTool-based generative UI endpoint for the AG-UI protocol.\n\"\"\"\n\nimport uuid\nimport json\nfrom fastapi import Request\nfrom fastapi.responses import StreamingResponse\nfrom ag_ui.core import (\n RunAgentInput,\n EventType,\n RunStartedEvent,\n RunFinishedEvent,\n MessagesSnapshotEvent\n)\nfrom ag_ui.encoder import EventEncoder\n\nasync def tool_based_generative_ui_endpoint(input_data: RunAgentInput, request: Request):\n \"\"\"Tool-based generative UI endpoint\"\"\"\n # Get the accept header from the request\n accept_header = 
request.headers.get(\"accept\")\n\n # Create an event encoder to properly format SSE events\n encoder = EventEncoder(accept=accept_header)\n\n async def event_generator():\n # Send run started event\n yield encoder.encode(\n RunStartedEvent(\n type=EventType.RUN_STARTED,\n thread_id=input_data.thread_id,\n run_id=input_data.run_id\n ),\n )\n\n # Check if last message was a tool result\n last_message = None\n if input_data.messages and len(input_data.messages) > 0:\n last_message = input_data.messages[-1]\n\n result_message = None\n\n # Determine what type of message to send\n if last_message and getattr(last_message, 'content', None) == \"thanks\":\n # Send text message for tool result\n message_id = str(uuid.uuid4())\n new_message = {\n \"id\": message_id,\n \"role\": \"assistant\",\n \"content\": \"Haiku created\"\n }\n else:\n # Send tool call message\n tool_call_id = str(uuid.uuid4())\n message_id = str(uuid.uuid4())\n\n # Prepare haiku arguments\n haiku_args = {\n \"japanese\": [\"エーアイの\", \"橋つなぐ道\", \"コパキット\"],\n \"english\": [\n \"From AI's realm\",\n \"A bridge-road linking us—\",\n \"CopilotKit.\"\n ]\n }\n\n # Create new assistant message with tool call\n new_message = {\n \"id\": message_id,\n \"role\": \"assistant\",\n \"tool_calls\": [\n {\n \"id\": tool_call_id,\n \"type\": \"function\",\n \"function\": {\n \"name\": \"generate_haiku\",\n \"arguments\": json.dumps(haiku_args)\n }\n }\n ]\n }\n\n result_message = {\n \"id\": str(uuid.uuid4()),\n \"role\": \"tool\",\n \"tool_call_id\": tool_call_id,\n \"content\": \"Haiku created\"\n }\n\n # Create messages list with input messages plus the new message\n all_messages = list(input_data.messages) + [new_message]\n\n if result_message:\n all_messages.append(result_message)\n\n # Send messages snapshot event\n yield encoder.encode(\n MessagesSnapshotEvent(\n type=EventType.MESSAGES_SNAPSHOT,\n messages=all_messages\n ),\n )\n\n # Send run finished event\n yield encoder.encode(\n RunFinishedEvent(\n type=EventType.RUN_FINISHED,\n thread_id=input_data.thread_id,\n run_id=input_data.run_id\n ),\n )\n\n return StreamingResponse(\n event_generator(),\n media_type=encoder.get_content_type()\n )\n", - "language": "python", - "type": "file" - } - ], - "server-starter-all-features::shared_state": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n               {\n                  if (!isChatOpen) {\n                    setChatHeight(defaultChatHeight); // Reset to the default height when opening\n                  }\n                  setIsChatOpen(!isChatOpen);\n                }}\n              >\n                
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => 
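// Route the toggle through updateRecipe so both the local recipe state and the shared agent state stay in sync\n  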
{\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
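{/* changedKeysRef lists the fields the agent just modified; a Ping badge flags those sections */}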
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n \n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
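{/* appendMessage sends a programmatic user turn; the agent replies by updating the shared recipe state */}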
\n
\n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n background-color: #fff;\n color: #000;\n border-bottom: 0px;\n}\n\n/* Recipe App Styles */\n.app-container {\n min-height: 100vh;\n width: 100%;\n display: flex;\n align-items: center;\n justify-content: center;\n background-size: cover;\n background-position: center;\n background-repeat: no-repeat;\n background-attachment: fixed;\n position: relative;\n overflow: auto;\n}\n\n.recipe-card {\n background-color: rgba(255, 255, 255, 0.97);\n border-radius: 16px;\n box-shadow: 0 15px 30px rgba(0, 0, 0, 0.25), 0 5px 15px rgba(0, 0, 0, 0.15);\n width: 100%;\n max-width: 750px;\n margin: 20px auto;\n padding: 14px 32px;\n position: relative;\n z-index: 1;\n backdrop-filter: blur(5px);\n border: 1px solid rgba(255, 255, 255, 0.3);\n transition: transform 0.2s ease, box-shadow 0.2s ease;\n animation: fadeIn 0.5s ease-out forwards;\n box-sizing: border-box;\n overflow: hidden;\n}\n\n.recipe-card:hover {\n transform: translateY(-5px);\n box-shadow: 0 20px 40px rgba(0, 0, 0, 0.3), 0 10px 20px rgba(0, 0, 0, 0.2);\n}\n\n/* Recipe Header */\n.recipe-header {\n margin-bottom: 24px;\n}\n\n.recipe-title-input {\n width: 100%;\n font-size: 24px;\n font-weight: bold;\n border: none;\n outline: none;\n padding: 8px 0;\n margin-bottom: 0px;\n}\n\n.recipe-meta {\n display: flex;\n align-items: center;\n gap: 20px;\n margin-top: 5px;\n margin-bottom: 14px;\n}\n\n.meta-item {\n display: flex;\n align-items: center;\n gap: 8px;\n color: #555;\n}\n\n.meta-icon {\n font-size: 20px;\n color: #777;\n}\n\n.meta-text {\n font-size: 15px;\n}\n\n/* Recipe Meta Selects */\n.meta-item select {\n border: none;\n background: transparent;\n font-size: 15px;\n color: #555;\n cursor: pointer;\n outline: none;\n padding-right: 18px;\n transition: color 0.2s, transform 0.1s;\n font-weight: 500;\n}\n\n.meta-item select:hover,\n.meta-item select:focus {\n color: #FF5722;\n}\n\n.meta-item select:active {\n transform: scale(0.98);\n}\n\n.meta-item select option {\n color: #333;\n background-color: white;\n font-weight: normal;\n padding: 8px;\n}\n\n/* Section Container */\n.section-container {\n margin-bottom: 20px;\n position: relative;\n width: 100%;\n}\n\n.section-title {\n font-size: 20px;\n font-weight: 700;\n margin-bottom: 20px;\n color: #333;\n position: relative;\n display: inline-block;\n}\n\n.section-title:after {\n content: \"\";\n position: absolute;\n bottom: -8px;\n left: 0;\n width: 40px;\n height: 3px;\n background-color: #ff7043;\n border-radius: 3px;\n}\n\n/* Dietary Preferences */\n.dietary-options {\n display: flex;\n flex-wrap: wrap;\n gap: 10px 16px;\n margin-bottom: 16px;\n width: 100%;\n}\n\n.dietary-option {\n display: flex;\n align-items: center;\n gap: 6px;\n font-size: 14px;\n cursor: pointer;\n margin-bottom: 4px;\n}\n\n.dietary-option input {\n cursor: pointer;\n}\n\n/* Ingredients */\n.ingredients-container {\n display: flex;\n flex-wrap: wrap;\n gap: 10px;\n margin-bottom: 15px;\n width: 100%;\n box-sizing: border-box;\n}\n\n.ingredient-card {\n display: flex;\n align-items: center;\n background-color: rgba(255, 255, 255, 0.9);\n border-radius: 12px;\n padding: 12px;\n margin-bottom: 10px;\n box-shadow: 0 4px 10px rgba(0, 0, 0, 0.08);\n position: relative;\n transition: all 0.2s ease;\n border: 1px solid rgba(240, 
240, 240, 0.8);\n width: calc(33.333% - 7px);\n box-sizing: border-box;\n}\n\n.ingredient-card:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 15px rgba(0, 0, 0, 0.12);\n}\n\n.ingredient-card .remove-button {\n position: absolute;\n right: 10px;\n top: 10px;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 24px;\n height: 24px;\n line-height: 1;\n}\n\n.ingredient-card:hover .remove-button {\n display: block;\n}\n\n.ingredient-icon {\n font-size: 24px;\n margin-right: 12px;\n display: flex;\n align-items: center;\n justify-content: center;\n width: 40px;\n height: 40px;\n background-color: #f7f7f7;\n border-radius: 50%;\n flex-shrink: 0;\n}\n\n.ingredient-content {\n flex: 1;\n display: flex;\n flex-direction: column;\n gap: 3px;\n min-width: 0;\n}\n\n.ingredient-name-input,\n.ingredient-amount-input {\n border: none;\n background: transparent;\n outline: none;\n width: 100%;\n padding: 0;\n text-overflow: ellipsis;\n overflow: hidden;\n white-space: nowrap;\n}\n\n.ingredient-name-input {\n font-weight: 500;\n font-size: 14px;\n}\n\n.ingredient-amount-input {\n font-size: 13px;\n color: #666;\n}\n\n.ingredient-name-input::placeholder,\n.ingredient-amount-input::placeholder {\n color: #aaa;\n}\n\n.remove-button {\n background: none;\n border: none;\n color: #999;\n font-size: 20px;\n cursor: pointer;\n padding: 0;\n width: 28px;\n height: 28px;\n display: flex;\n align-items: center;\n justify-content: center;\n margin-left: 10px;\n}\n\n.remove-button:hover {\n color: #FF5722;\n}\n\n/* Instructions */\n.instructions-container {\n display: flex;\n flex-direction: column;\n gap: 6px;\n position: relative;\n margin-bottom: 12px;\n width: 100%;\n}\n\n.instruction-item {\n position: relative;\n display: flex;\n width: 100%;\n box-sizing: border-box;\n margin-bottom: 8px;\n align-items: flex-start;\n}\n\n.instruction-number {\n display: flex;\n align-items: center;\n justify-content: center;\n min-width: 26px;\n height: 26px;\n background-color: #ff7043;\n color: white;\n border-radius: 50%;\n font-weight: 600;\n flex-shrink: 0;\n box-shadow: 0 2px 4px rgba(255, 112, 67, 0.3);\n z-index: 1;\n font-size: 13px;\n margin-top: 2px;\n}\n\n.instruction-line {\n position: absolute;\n left: 13px; /* Half of the number circle width */\n top: 22px;\n bottom: -18px;\n width: 2px;\n background: linear-gradient(to bottom, #ff7043 60%, rgba(255, 112, 67, 0.4));\n z-index: 0;\n}\n\n.instruction-content {\n background-color: white;\n border-radius: 10px;\n padding: 10px 14px;\n margin-left: 12px;\n flex-grow: 1;\n transition: all 0.2s ease;\n box-shadow: 0 2px 6px rgba(0, 0, 0, 0.08);\n border: 1px solid rgba(240, 240, 240, 0.8);\n position: relative;\n width: calc(100% - 38px);\n box-sizing: border-box;\n display: flex;\n align-items: center;\n}\n\n.instruction-content-editing {\n background-color: #fff9f6;\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12), 0 0 0 2px rgba(255, 112, 67, 0.2);\n}\n\n.instruction-content:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12);\n}\n\n.instruction-textarea {\n width: 100%;\n background: transparent;\n border: none;\n resize: vertical;\n font-family: inherit;\n font-size: 14px;\n line-height: 1.4;\n min-height: 20px;\n outline: none;\n padding: 0;\n margin: 0;\n}\n\n.instruction-delete-btn {\n position: absolute;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 20px;\n height: 
20px;\n line-height: 1;\n top: 50%;\n transform: translateY(-50%);\n right: 8px;\n}\n\n.instruction-content:hover .instruction-delete-btn {\n display: flex;\n align-items: center;\n justify-content: center;\n}\n\n/* Action Button */\n.action-container {\n display: flex;\n justify-content: center;\n margin-top: 40px;\n padding-bottom: 20px;\n position: relative;\n}\n\n.improve-button {\n background-color: #ff7043;\n border: none;\n color: white;\n border-radius: 30px;\n font-size: 18px;\n font-weight: 600;\n padding: 14px 28px;\n cursor: pointer;\n transition: all 0.3s ease;\n box-shadow: 0 4px 15px rgba(255, 112, 67, 0.4);\n display: flex;\n align-items: center;\n justify-content: center;\n text-align: center;\n position: relative;\n min-width: 180px;\n}\n\n.improve-button:hover {\n background-color: #ff5722;\n transform: translateY(-2px);\n box-shadow: 0 8px 20px rgba(255, 112, 67, 0.5);\n}\n\n.improve-button.loading {\n background-color: #ff7043;\n opacity: 0.8;\n cursor: not-allowed;\n padding-left: 42px; /* Reduced padding to bring text closer to icon */\n padding-right: 22px; /* Balance the button */\n justify-content: flex-start; /* Left align text for better alignment with icon */\n}\n\n.improve-button.loading:after {\n content: \"\"; /* Add space between icon and text */\n display: inline-block;\n width: 8px; /* Width of the space */\n}\n\n.improve-button:before {\n content: \"\";\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='white' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpath d='M12 2v4M12 18v4M4.93 4.93l2.83 2.83M16.24 16.24l2.83 2.83M2 12h4M18 12h4M4.93 19.07l2.83-2.83M16.24 7.76l2.83-2.83'/%3E%3C/svg%3E\");\n width: 20px; /* Slightly smaller icon */\n height: 20px;\n background-repeat: no-repeat;\n background-size: contain;\n position: absolute;\n left: 16px; /* Slightly adjusted */\n top: 50%;\n transform: translateY(-50%);\n display: none;\n}\n\n.improve-button.loading:before {\n display: block;\n animation: spin 1.5s linear infinite;\n}\n\n@keyframes spin {\n 0% { transform: translateY(-50%) rotate(0deg); }\n 100% { transform: translateY(-50%) rotate(360deg); }\n}\n\n/* Ping Animation */\n.ping-animation {\n position: absolute;\n display: flex;\n width: 12px;\n height: 12px;\n top: 0;\n right: 0;\n}\n\n.ping-circle {\n position: absolute;\n display: inline-flex;\n width: 100%;\n height: 100%;\n border-radius: 50%;\n background-color: #38BDF8;\n opacity: 0.75;\n animation: ping 1.5s cubic-bezier(0, 0, 0.2, 1) infinite;\n}\n\n.ping-dot {\n position: relative;\n display: inline-flex;\n width: 12px;\n height: 12px;\n border-radius: 50%;\n background-color: #0EA5E9;\n}\n\n@keyframes ping {\n 75%, 100% {\n transform: scale(2);\n opacity: 0;\n }\n}\n\n/* Instruction hover effects */\n.instruction-item:hover .instruction-delete-btn {\n display: flex !important;\n}\n\n/* Add some subtle animations */\n@keyframes fadeIn {\n from { opacity: 0; transform: translateY(20px); }\n to { opacity: 1; transform: translateY(0); }\n}\n\n/* Better center alignment for the recipe card */\n.recipe-card-container {\n display: flex;\n justify-content: center;\n width: 100%;\n position: relative;\n z-index: 1;\n margin: 0 auto;\n box-sizing: border-box;\n}\n\n/* Add Buttons */\n.add-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 8px;\n padding: 10px 16px;\n cursor: pointer;\n font-weight: 500;\n display: 
inline-block;\n font-size: 14px;\n margin-bottom: 0;\n}\n\n.add-step-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 6px;\n padding: 6px 12px;\n cursor: pointer;\n font-weight: 500;\n font-size: 13px;\n}\n\n/* Section Headers */\n.section-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 12px;\n}", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🍳 Shared State Recipe Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **shared state** functionality - a powerful\nfeature that enables bidirectional data flow between:\n\n1. **Frontend → Agent**: UI controls update the agent's context in real-time\n2. **Agent → Frontend**: The Copilot's recipe creations instantly update the UI\n components\n\nIt's like having a cooking buddy who not only listens to what you want but also\nupdates your recipe card as you chat - no refresh needed! ✨\n\n## How to Interact\n\nMix and match any of these parameters (or none at all - it's up to you!):\n\n- **Skill Level**: Beginner to expert 👨‍🍳\n- **Cooking Time**: Quick meals or slow cooking ⏱️\n- **Special Preferences**: Dietary needs, flavor profiles, health goals 🥗\n- **Ingredients**: Items you want to include 🧅🥩🍄\n- **Instructions**: Any specific steps\n\nThen chat with your Copilot chef with prompts like:\n\n- \"I'm a beginner cook. Can you make me a quick dinner?\"\n- \"I need something spicy with chicken that takes under 30 minutes!\"\n\n## ✨ Shared State Magic in Action\n\n**What's happening technically:**\n\n- The UI and Copilot agent share the same state object (**Agent State = UI\n State**)\n- Changes from either side automatically update the other\n- Neither side needs to manually request updates from the other\n\n**What you'll see in this demo:**\n\n- Set cooking time to 20 minutes in the UI and watch the Copilot immediately\n respect your time constraint\n- Add ingredients through the UI and see them appear in your recipe\n- When the Copilot suggests new ingredients, watch them automatically appear in\n the UI ingredients list\n- Change your skill level and see how the Copilot adapts its instructions in\n real-time\n\nThis synchronized state creates a seamless experience where the agent always has\nyour current preferences, and any updates to the recipe are instantly reflected\nin both places.\n\nThis shared state pattern can be applied to any application where you want your\nUI and Copilot to work together in perfect harmony!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "shared_state.py", - "content": "\"\"\"\nShared state endpoint for the AG-UI protocol.\n\"\"\"\n\nfrom fastapi import Request\nfrom fastapi.responses import StreamingResponse\nfrom ag_ui.core import (\n RunAgentInput,\n EventType,\n RunStartedEvent,\n RunFinishedEvent,\n StateSnapshotEvent\n)\nfrom ag_ui.encoder import EventEncoder\n\nasync def shared_state_endpoint(input_data: RunAgentInput, request: Request):\n \"\"\"Shared state endpoint\"\"\"\n # Get the accept header from the request\n accept_header = request.headers.get(\"accept\")\n\n # Create an event encoder to properly format SSE events\n encoder = EventEncoder(accept=accept_header)\n\n async def event_generator():\n # Send run started event\n yield encoder.encode(\n RunStartedEvent(\n type=EventType.RUN_STARTED,\n thread_id=input_data.thread_id,\n run_id=input_data.run_id\n ),\n )\n\n # Send state events\n async for event in 
send_state_events():\n yield encoder.encode(event)\n\n # Send run finished event\n yield encoder.encode(\n RunFinishedEvent(\n type=EventType.RUN_FINISHED,\n thread_id=input_data.thread_id,\n run_id=input_data.run_id\n ),\n )\n\n return StreamingResponse(\n event_generator(),\n media_type=encoder.get_content_type()\n )\n\n\nasync def send_state_events():\n \"\"\"Send state events with recipe data\"\"\"\n # Define the recipe state\n state = {\n \"recipe\": {\n \"skill_level\": \"Advanced\",\n \"special_preferences\": [\"Low Carb\", \"Spicy\"],\n \"cooking_time\": \"15 min\",\n \"ingredients\": [\n {\n \"icon\": \"🍗\",\n \"name\": \"chicken breast\",\n \"amount\": \"1\",\n },\n {\n \"icon\": \"🌶️\",\n \"name\": \"chili powder\",\n \"amount\": \"1 tsp\",\n },\n {\n \"icon\": \"🧂\",\n \"name\": \"Salt\",\n \"amount\": \"a pinch\",\n },\n {\n \"icon\": \"🥬\",\n \"name\": \"Lettuce leaves\",\n \"amount\": \"handful\",\n },\n ],\n \"instructions\": [\n \"Season chicken with chili powder and salt.\",\n \"Sear until fully cooked.\",\n \"Slice and wrap in lettuce.\",\n ]\n }\n }\n\n # Send state snapshot event\n yield StateSnapshotEvent(\n type=EventType.STATE_SNAPSHOT,\n snapshot=state\n )\n", - "language": "python", - "type": "file" - } - ], - "server-starter-all-features::predictive_state_updates": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n const chatTitle = 'AI Document Editor'\n const chatDescription = 'Ask me to create or edit a document'\n const initialLabel = 'Hi 👋 How can I help with your document?'\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n               {\n                  if (!isChatOpen) {\n                    setChatHeight(defaultChatHeight); // Reset to the default height when opening\n                  }\n                  setIsChatOpen(!isChatOpen);\n                }}\n              >\n                
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction({\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n }, [agentState?.document]);\n\n // Action to write the document.\n useCopilotAction({\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n }, [agentState?.document]);\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n    );\n};\n\ninterface ConfirmChangesProps {\n  args: any;\n  respond: any;\n  status: any;\n  onReject: () => void;\n  onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n  const [accepted, setAccepted] = useState<boolean | null>(null);\n  return (\n    
\n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n
\n )}\n
\n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": "/* Basic editor styles */\n.tiptap-container {\n height: 100vh; /* Full viewport height */\n width: 100vw; /* Full viewport width */\n display: flex;\n flex-direction: column;\n}\n\n.tiptap {\n flex: 1; /* Take up remaining space */\n overflow: auto; /* Allow scrolling if content overflows */\n}\n\n.tiptap :first-child {\n margin-top: 0;\n}\n\n/* List styles */\n.tiptap ul,\n.tiptap ol {\n padding: 0 1rem;\n margin: 1.25rem 1rem 1.25rem 0.4rem;\n}\n\n.tiptap ul li p,\n.tiptap ol li p {\n margin-top: 0.25em;\n margin-bottom: 0.25em;\n}\n\n/* Heading styles */\n.tiptap h1,\n.tiptap h2,\n.tiptap h3,\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n line-height: 1.1;\n margin-top: 2.5rem;\n text-wrap: pretty;\n font-weight: bold;\n}\n\n.tiptap h1,\n.tiptap h2,\n.tiptap h3,\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n margin-top: 3.5rem;\n margin-bottom: 1.5rem;\n}\n\n.tiptap p {\n margin-bottom: 1rem;\n}\n\n.tiptap h1 {\n font-size: 1.4rem;\n}\n\n.tiptap h2 {\n font-size: 1.2rem;\n}\n\n.tiptap h3 {\n font-size: 1.1rem;\n}\n\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n font-size: 1rem;\n}\n\n/* Code and preformatted text styles */\n.tiptap code {\n background-color: var(--purple-light);\n border-radius: 0.4rem;\n color: var(--black);\n font-size: 0.85rem;\n padding: 0.25em 0.3em;\n}\n\n.tiptap pre {\n background: var(--black);\n border-radius: 0.5rem;\n color: var(--white);\n font-family: \"JetBrainsMono\", monospace;\n margin: 1.5rem 0;\n padding: 0.75rem 1rem;\n}\n\n.tiptap pre code {\n background: none;\n color: inherit;\n font-size: 0.8rem;\n padding: 0;\n}\n\n.tiptap blockquote {\n border-left: 3px solid var(--gray-3);\n margin: 1.5rem 0;\n padding-left: 1rem;\n}\n\n.tiptap hr {\n border: none;\n border-top: 1px solid var(--gray-2);\n margin: 2rem 0;\n}\n\n.tiptap s {\n background-color: #f9818150;\n padding: 2px;\n font-weight: bold;\n color: rgba(0, 0, 0, 0.7);\n}\n\n.tiptap em {\n background-color: #b2f2bb;\n padding: 2px;\n font-weight: bold;\n font-style: normal;\n}\n\n.copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 📝 Predictive State Updates Document Editor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **predictive state updates** for real-time\ndocument collaboration:\n\n1. **Live Document Editing**: Watch as your Copilot makes changes to a document\n in real-time\n2. **Diff Visualization**: See exactly what's being changed as it happens\n3. 
**Streaming Updates**: Changes are displayed character-by-character as the\n Copilot works\n\n## How to Interact\n\nTry these interactions with the collaborative document editor:\n\n- \"Fix the grammar and typos in this document\"\n- \"Make this text more professional\"\n- \"Add a section about [topic]\"\n- \"Summarize this content in bullet points\"\n- \"Change the tone to be more casual\"\n\nWatch as the Copilot processes your request and edits the document in real-time\nright before your eyes.\n\n## ✨ Predictive State Updates in Action\n\n**What's happening technically:**\n\n- The document state is shared between your UI and the Copilot\n- As the Copilot generates content, changes are streamed to the UI\n- Each modification is visualized with additions and deletions\n- The UI renders these changes progressively, without waiting for completion\n- All edits are tracked and displayed in a visually intuitive way\n\n**What you'll see in this demo:**\n\n- Text changes are highlighted in different colors (green for additions, red for\n deletions)\n- The document updates character-by-character, creating a typing-like effect\n- You can see the Copilot's thought process as it refines the content\n- The final document seamlessly incorporates all changes\n- The experience feels collaborative, as if someone is editing alongside you\n\nThis pattern of real-time collaborative editing with diff visualization is\nperfect for document editors, code review tools, content creation platforms, or\nany application where users benefit from seeing exactly how content is being\ntransformed!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "predictive_state_updates.py", - "content": "\"\"\"\nPredictive state updates endpoint for the AG-UI protocol.\n\"\"\"\n\nimport uuid\nimport asyncio\nimport random\nfrom fastapi import Request\nfrom fastapi.responses import StreamingResponse\nfrom ag_ui.core import (\n RunAgentInput,\n EventType,\n RunStartedEvent,\n RunFinishedEvent,\n TextMessageStartEvent,\n TextMessageContentEvent,\n TextMessageEndEvent,\n ToolCallStartEvent,\n ToolCallArgsEvent,\n ToolCallEndEvent,\n CustomEvent\n)\nfrom ag_ui.encoder import EventEncoder\n\nasync def predictive_state_updates_endpoint(input_data: RunAgentInput, request: Request):\n \"\"\"Predictive state updates endpoint\"\"\"\n # Get the accept header from the request\n accept_header = request.headers.get(\"accept\")\n\n # Create an event encoder to properly format SSE events\n encoder = EventEncoder(accept=accept_header)\n\n async def event_generator():\n # Get the last message for conditional logic\n last_message = None\n if input_data.messages and len(input_data.messages) > 0:\n last_message = input_data.messages[-1]\n\n # Send run started event\n yield encoder.encode(\n RunStartedEvent(\n type=EventType.RUN_STARTED,\n thread_id=input_data.thread_id,\n run_id=input_data.run_id\n ),\n )\n\n # Conditional logic based on last message role\n if last_message and getattr(last_message, 'role', None) == \"tool\":\n async for event in send_text_message_events():\n yield encoder.encode(event)\n else:\n async for event in send_tool_call_events():\n yield encoder.encode(event)\n\n # Send run finished event\n yield encoder.encode(\n RunFinishedEvent(\n type=EventType.RUN_FINISHED,\n thread_id=input_data.thread_id,\n run_id=input_data.run_id\n ),\n )\n\n return StreamingResponse(\n event_generator(),\n media_type=encoder.get_content_type()\n )\n\n\ndef make_story(name: str) -> str:\n \"\"\"Generate a simple dog story\"\"\"\n return 
f\"Once upon a time, there was a dog named {name}. {name} was a very good dog.\"\n\n\n# List of dog names for random selection\ndog_names = [\"Rex\", \"Buddy\", \"Max\", \"Charlie\", \"Buddy\", \"Max\", \"Charlie\"]\n\n\nasync def send_tool_call_events():\n \"\"\"Send tool call events with predictive state and incremental story generation\"\"\"\n tool_call_id = str(uuid.uuid4())\n tool_call_name = \"write_document_local\"\n\n # Generate a random story\n story = make_story(random.choice(dog_names))\n story_chunks = story.split(\" \")\n\n # Send custom predict state event first\n yield CustomEvent(\n type=EventType.CUSTOM,\n name=\"PredictState\",\n value=[\n {\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }\n ]\n )\n\n # First tool call: write_document_local\n yield ToolCallStartEvent(\n type=EventType.TOOL_CALL_START,\n tool_call_id=tool_call_id,\n tool_call_name=tool_call_name\n )\n\n # Start JSON arguments\n yield ToolCallArgsEvent(\n type=EventType.TOOL_CALL_ARGS,\n tool_call_id=tool_call_id,\n delta='{\"document\":\"'\n )\n\n # Send story chunks incrementally\n for chunk in story_chunks:\n yield ToolCallArgsEvent(\n type=EventType.TOOL_CALL_ARGS,\n tool_call_id=tool_call_id,\n delta=chunk + \" \"\n )\n await asyncio.sleep(0.2) # 200ms delay\n\n # Close JSON arguments\n yield ToolCallArgsEvent(\n type=EventType.TOOL_CALL_ARGS,\n tool_call_id=tool_call_id,\n delta='\"}'\n )\n\n # End first tool call\n yield ToolCallEndEvent(\n type=EventType.TOOL_CALL_END,\n tool_call_id=tool_call_id\n )\n\n # Second tool call: confirm_changes\n tool_call_id_2 = str(uuid.uuid4())\n tool_call_name_2 = \"confirm_changes\"\n\n yield ToolCallStartEvent(\n type=EventType.TOOL_CALL_START,\n tool_call_id=tool_call_id_2,\n tool_call_name=tool_call_name_2\n )\n\n yield ToolCallArgsEvent(\n type=EventType.TOOL_CALL_ARGS,\n tool_call_id=tool_call_id_2,\n delta=\"{}\"\n )\n\n yield ToolCallEndEvent(\n type=EventType.TOOL_CALL_END,\n tool_call_id=tool_call_id_2\n )\n\n\nasync def send_text_message_events():\n \"\"\"Send simple text message events\"\"\"\n message_id = str(uuid.uuid4())\n\n # Start of message\n yield TextMessageStartEvent(\n type=EventType.TEXT_MESSAGE_START,\n message_id=message_id,\n role=\"assistant\"\n )\n\n # Content\n yield TextMessageContentEvent(\n type=EventType.TEXT_MESSAGE_CONTENT,\n message_id=message_id,\n delta=\"Ok!\"\n )\n\n # End of message\n yield TextMessageEndEvent(\n type=EventType.TEXT_MESSAGE_END,\n message_id=message_id\n )\n", - "language": "python", - "type": "file" - } - ], - "vercel-ai-sdk::agentic_chat": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. 
Regular colors, linear or radial gradients etc.\",\n    parameters: [\n      {\n        name: \"background\",\n        type: \"string\",\n        description: \"The background. Prefer gradients.\",\n      },\n    ],\n    handler: ({ background }) => {\n      setBackground(background);\n      return {\n        status: \"success\",\n        message: `Background changed to ${background}`,\n      };\n    },\n  });\n\n  return (\n
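      // the wrapper below takes the background chosen by the change_background tool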
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤖 Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interacts with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend and automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "index.ts", - "content": "import {\n AgentConfig,\n AbstractAgent,\n EventType,\n BaseEvent,\n Message,\n AssistantMessage,\n RunAgentInput,\n MessagesSnapshotEvent,\n RunFinishedEvent,\n RunStartedEvent,\n TextMessageChunkEvent,\n ToolCallArgsEvent,\n ToolCallEndEvent,\n ToolCallStartEvent,\n ToolCall,\n ToolMessage,\n} from \"@ag-ui/client\";\nimport { Observable } from \"rxjs\";\nimport {\n CoreMessage,\n LanguageModelV1,\n processDataStream,\n streamText,\n tool as createVercelAISDKTool,\n ToolChoice,\n ToolSet,\n} from \"ai\";\nimport { randomUUID } from \"crypto\";\nimport { z } from \"zod\";\n\ntype ProcessedEvent =\n | MessagesSnapshotEvent\n | RunFinishedEvent\n | RunStartedEvent\n | TextMessageChunkEvent\n | ToolCallArgsEvent\n | ToolCallEndEvent\n | ToolCallStartEvent;\n\ninterface VercelAISDKAgentConfig extends AgentConfig {\n model: LanguageModelV1;\n maxSteps?: number;\n toolChoice?: ToolChoice>;\n}\n\nexport class VercelAISDKAgent extends AbstractAgent {\n model: LanguageModelV1;\n maxSteps: number;\n toolChoice: ToolChoice>;\n constructor({ model, maxSteps, toolChoice, ...rest }: VercelAISDKAgentConfig) {\n super({ ...rest 
});\n this.model = model;\n this.maxSteps = maxSteps ?? 1;\n this.toolChoice = toolChoice ?? \"auto\";\n }\n\n protected run(input: RunAgentInput): Observable {\n const finalMessages: Message[] = input.messages;\n\n return new Observable((subscriber) => {\n subscriber.next({\n type: EventType.RUN_STARTED,\n threadId: input.threadId,\n runId: input.runId,\n } as RunStartedEvent);\n\n const response = streamText({\n model: this.model,\n messages: convertMessagesToVercelAISDKMessages(input.messages),\n tools: convertToolToVerlAISDKTools(input.tools),\n maxSteps: this.maxSteps,\n toolChoice: this.toolChoice,\n });\n\n let messageId = randomUUID();\n let assistantMessage: AssistantMessage = {\n id: messageId,\n role: \"assistant\",\n content: \"\",\n toolCalls: [],\n };\n finalMessages.push(assistantMessage);\n\n processDataStream({\n stream: response.toDataStreamResponse().body!,\n onTextPart: (text) => {\n assistantMessage.content += text;\n const event: TextMessageChunkEvent = {\n type: EventType.TEXT_MESSAGE_CHUNK,\n role: \"assistant\",\n messageId,\n delta: text,\n };\n subscriber.next(event);\n },\n onFinishMessagePart: () => {\n // Emit message snapshot\n const event: MessagesSnapshotEvent = {\n type: EventType.MESSAGES_SNAPSHOT,\n messages: finalMessages,\n };\n subscriber.next(event);\n\n // Emit run finished event\n subscriber.next({\n type: EventType.RUN_FINISHED,\n threadId: input.threadId,\n runId: input.runId,\n } as RunFinishedEvent);\n\n // Complete the observable\n subscriber.complete();\n },\n onToolCallPart(streamPart) {\n let toolCall: ToolCall = {\n id: streamPart.toolCallId,\n type: \"function\",\n function: {\n name: streamPart.toolName,\n arguments: JSON.stringify(streamPart.args),\n },\n };\n assistantMessage.toolCalls!.push(toolCall);\n\n const startEvent: ToolCallStartEvent = {\n type: EventType.TOOL_CALL_START,\n parentMessageId: messageId,\n toolCallId: streamPart.toolCallId,\n toolCallName: streamPart.toolName,\n };\n subscriber.next(startEvent);\n\n const argsEvent: ToolCallArgsEvent = {\n type: EventType.TOOL_CALL_ARGS,\n toolCallId: streamPart.toolCallId,\n delta: JSON.stringify(streamPart.args),\n };\n subscriber.next(argsEvent);\n\n const endEvent: ToolCallEndEvent = {\n type: EventType.TOOL_CALL_END,\n toolCallId: streamPart.toolCallId,\n };\n subscriber.next(endEvent);\n },\n onToolResultPart(streamPart) {\n const toolMessage: ToolMessage = {\n role: \"tool\",\n id: randomUUID(),\n toolCallId: streamPart.toolCallId,\n content: JSON.stringify(streamPart.result),\n };\n finalMessages.push(toolMessage);\n },\n onErrorPart(streamPart) {\n subscriber.error(streamPart);\n },\n }).catch((error) => {\n console.error(\"catch error\", error);\n // Handle error\n subscriber.error(error);\n });\n\n return () => {};\n });\n }\n}\n\nexport function convertMessagesToVercelAISDKMessages(messages: Message[]): CoreMessage[] {\n const result: CoreMessage[] = [];\n\n for (const message of messages) {\n if (message.role === \"assistant\") {\n const parts: any[] = message.content ? [{ type: \"text\", text: message.content }] : [];\n for (const toolCall of message.toolCalls ?? 
[]) {\n parts.push({\n type: \"tool-call\",\n toolCallId: toolCall.id,\n toolName: toolCall.function.name,\n args: JSON.parse(toolCall.function.arguments),\n });\n }\n result.push({\n role: \"assistant\",\n content: parts,\n });\n } else if (message.role === \"user\") {\n result.push({\n role: \"user\",\n content: message.content || \"\",\n });\n } else if (message.role === \"tool\") {\n let toolName = \"unknown\";\n for (const msg of messages) {\n if (msg.role === \"assistant\") {\n for (const toolCall of msg.toolCalls ?? []) {\n if (toolCall.id === message.toolCallId) {\n toolName = toolCall.function.name;\n break;\n }\n }\n }\n }\n result.push({\n role: \"tool\",\n content: [\n {\n type: \"tool-result\",\n toolCallId: message.toolCallId,\n toolName: toolName,\n result: message.content,\n },\n ],\n });\n }\n }\n\n return result;\n}\n\nexport function convertJsonSchemaToZodSchema(jsonSchema: any, required: boolean): z.ZodSchema {\n if (jsonSchema.type === \"object\") {\n const spec: { [key: string]: z.ZodSchema } = {};\n\n if (!jsonSchema.properties || !Object.keys(jsonSchema.properties).length) {\n return !required ? z.object(spec).optional() : z.object(spec);\n }\n\n for (const [key, value] of Object.entries(jsonSchema.properties)) {\n spec[key] = convertJsonSchemaToZodSchema(\n value,\n jsonSchema.required ? jsonSchema.required.includes(key) : false,\n );\n }\n let schema = z.object(spec).describe(jsonSchema.description);\n return required ? schema : schema.optional();\n } else if (jsonSchema.type === \"string\") {\n let schema = z.string().describe(jsonSchema.description);\n return required ? schema : schema.optional();\n } else if (jsonSchema.type === \"number\") {\n let schema = z.number().describe(jsonSchema.description);\n return required ? schema : schema.optional();\n } else if (jsonSchema.type === \"boolean\") {\n let schema = z.boolean().describe(jsonSchema.description);\n return required ? schema : schema.optional();\n } else if (jsonSchema.type === \"array\") {\n let itemSchema = convertJsonSchemaToZodSchema(jsonSchema.items, true);\n let schema = z.array(itemSchema).describe(jsonSchema.description);\n return required ? schema : schema.optional();\n }\n throw new Error(\"Invalid JSON schema\");\n}\n\nexport function convertToolToVerlAISDKTools(tools: RunAgentInput[\"tools\"]): ToolSet {\n return tools.reduce(\n (acc: ToolSet, tool: RunAgentInput[\"tools\"][number]) => ({\n ...acc,\n [tool.name]: createVercelAISDKTool({\n description: tool.description,\n parameters: convertJsonSchemaToZodSchema(tool.parameters, true),\n }),\n }),\n {},\n );\n}\n", - "language": "ts", - "type": "file" - } - ], - "langgraph::agentic_chat": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. 
Regular colors, linear or radial gradients etc.\",\n    parameters: [\n      {\n        name: \"background\",\n        type: \"string\",\n        description: \"The background. Prefer gradients.\",\n      },\n    ],\n    handler: ({ background }) => {\n      setBackground(background);\n      return {\n        status: \"success\",\n        message: `Background changed to ${background}`,\n      };\n    },\n  });\n\n  return (\n
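      // background comes from the state set in the change_background handler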
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤖 Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interacts with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend and automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. 
Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * A simple agentic chat flow using LangGraph instead of CrewAI.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from \"@langchain/langgraph\";\n\nconst AgentStateAnnotation = Annotation.Root({\n tools: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n /**\n * Standard chat node based on the ReAct design pattern. It handles:\n * - The model to use (and binds in CopilotKit actions and the tools defined above)\n * - The system prompt\n * - Getting a response from the model\n * - Handling tool calls\n *\n * For more about the ReAct design pattern, see: \n * https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n */\n \n // 1. Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n \n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // 2. Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n // your_tool_here\n ],\n {\n // 2.1 Disable parallel tool calls to avoid race conditions,\n // enable this for faster performance if you want to manage\n // the complexity of running tool calls in parallel.\n parallel_tool_calls: false,\n }\n );\n\n // 3. 
Define the system message by which the chat model will be run\n const systemMessage = new SystemMessage({\n content: \"You are a helpful assistant.\"\n });\n\n // 4. Run the model to generate a response\n const response = await modelWithTools.invoke([\n systemMessage,\n ...state.messages,\n ], config);\n\n // 6. We've handled all tool calls, so we can end the graph.\n return new Command({\n goto: END,\n update: {\n messages: [response]\n }\n })\n}\n\n// Define a new graph \nconst workflow = new StateGraph(AgentStateAnnotation)\n .addNode(\"chat_node\", chatNode)\n .addEdge(START, \"chat_node\");\n\n// Compile the graph\nexport const agenticChatGraph = workflow.compile();", - "language": "ts", - "type": "file" - } - ], - "langgraph::agentic_generative_ui": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter(step => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
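        // inline progress card driven by the streamed agent state (completedCount and progressPercentage above)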
\n
\n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n \n {/* Progress Bar */}\n
\n
\n
\n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending = step.status === \"pending\" && \n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n
\n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n
\n )}\n\n {/* Status Icon */}\n
\n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n
\n {step.description}\n
\n {isCurrentPending && (\n
\n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n
\n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n
\n
\n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🚀 Agentic Generative UI Task Executor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic generative UI** capabilities:\n\n1. **Real-time Status Updates**: The Copilot provides live feedback as it works\n through complex tasks\n2. **Long-running Task Execution**: See how agents can handle extended processes\n with continuous feedback\n3. **Dynamic UI Generation**: The interface updates in real-time to reflect the\n agent's progress\n\n## How to Interact\n\nSimply ask your Copilot to perform any moderately complex task:\n\n- \"Make me a sandwich\"\n- \"Plan a vacation to Japan\"\n- \"Create a weekly workout routine\"\n\nThe Copilot will break down the task into steps and begin \"executing\" them,\nproviding real-time status updates as it progresses.\n\n## ✨ Agentic Generative UI in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and creates a detailed execution plan\n- Each step is processed sequentially with realistic timing\n- Status updates are streamed to the frontend using CopilotKit's streaming\n capabilities\n- The UI dynamically renders these updates without page refreshes\n- The entire flow is managed by the agent, requiring no manual intervention\n\n**What you'll see in this demo:**\n\n- The Copilot breaks your task into logical steps\n- A status indicator shows the current progress\n- Each step is highlighted as it's being executed\n- Detailed status messages explain what's happening at each moment\n- Upon completion, you receive a summary of the task execution\n\nThis pattern of providing real-time progress for long-running tasks is perfect\nfor scenarios where users benefit from transparency into complex processes -\nfrom data analysis to content creation, system configurations, or multi-stage\nworkflows!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call 
will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. \n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n 
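                # each step was marked completed in the loop above and streamed out via manually_emit_state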
\"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * An example demonstrating agentic generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Annotation, Command, MessagesAnnotation, StateGraph, END } from \"@langchain/langgraph\";\n\n// This tool simulates performing a task on the server.\n// The tool call will be streamed to the frontend as it is being generated.\nconst PERFORM_TASK_TOOL = {\n type: \"function\",\n function: {\n name: \"generate_task_steps_generative_ui\",\n description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in gerund form (i.e. Digging hole, opening door, ...)\",\n parameters: {\n type: \"object\",\n properties: {\n steps: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n description: {\n type: \"string\",\n description: \"The text of the step in gerund form\"\n },\n status: {\n type: \"string\",\n enum: [\"pending\"],\n description: \"The status of the step, always 'pending'\"\n }\n },\n required: [\"description\", \"status\"]\n },\n description: \"An array of 10 step objects, each containing text and status\"\n }\n },\n required: [\"steps\"]\n }\n }\n};\n\nconst AgentStateAnnotation = Annotation.Root({\n steps: Annotation>({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n tools: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig) {\n /**\n * This is the entry point for the flow.\n */\n\n if (!state.steps) {\n state.steps = [];\n }\n\n return {\n steps: state.steps || []\n };\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n /**\n * Standard chat node.\n */\n const systemPrompt = `\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function \\`generate_task_steps_generative_ui\\`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. 
\n Always say you actually did the steps, not merely generated them.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n \n // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"steps\",\n tool: \"generate_task_steps_generative_ui\",\n tool_argument: \"steps\",\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n PERFORM_TASK_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model to generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n const messages = [...state.messages, response];\n\n // Extract any tool calls from the response\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n \n if (toolCall.name === \"generate_task_steps_generative_ui\") {\n const steps = toolCall.args.steps.map((step: any) => ({\n description: step.description,\n status: step.status\n }));\n \n // Add the tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Steps executed.\",\n tool_call_id: toolCall.id\n };\n\n const updatedMessages = [...messages, toolResponse];\n\n // Simulate executing the steps\n for (let i = 0; i < steps.length; i++) {\n // simulate executing the step\n await new Promise(resolve => setTimeout(resolve, 1000));\n steps[i].status = \"completed\";\n // Update the state with the completed step\n state.steps = steps;\n // Emit custom events to update the frontend\n await dispatchCustomEvent(\"manually_emit_state\", state, config);\n }\n \n return new Command({\n goto: \"start_flow\",\n update: {\n messages: updatedMessages,\n steps: state.steps\n }\n });\n }\n }\n\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n .addNode(\"start_flow\", startFlow)\n .addNode(\"chat_node\", chatNode)\n .addEdge(\"__start__\", \"start_flow\")\n .addEdge(\"start_flow\", \"chat_node\")\n .addEdge(\"chat_node\", \"__end__\");\n\n// Compile the graph\nexport const agenticGenerativeUiGraph = workflow.compile();", - "language": "ts", - "type": "file" - } - ], - "langgraph::human_in_the_loop": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
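  // header row: Select Steps title, enabled/total counter, optional status badge, progress bar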
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
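  // a single step row; onToggle flips the step between enabled and disabled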
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
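    {/* purely decorative background accents */}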
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n  // LangGraph uses its own hook to handle human-in-the-loop interactions via LangGraph interrupts.\n  // This hook won't do anything for other integrations.\n  useLangGraphInterrupt({\n    render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n  });\n  useCopilotAction({\n    name: \"generate_task_steps\",\n    description: \"Generates a list of steps for the user to perform\",\n    parameters: [\n      {\n        name: \"steps\",\n        type: \"object[]\",\n        attributes: [\n          {\n            name: \"description\",\n            type: \"string\",\n          },\n          {\n            name: \"status\",\n            type: \"string\",\n            enum: [\"enabled\", \"disabled\", \"executing\"],\n          },\n        ],\n      },\n    ],\n    // LangGraph uses its own hook to handle human-in-the-loop interactions via LangGraph interrupts,\n    // so don't use this action for the LangGraph integration.\n    available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n    renderAndWaitForResponse: ({ args, respond, status }) => {\n      return <StepsFeedback args={args} respond={respond} status={status} />;\n    },\n  });\n\n  return (\n
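      // full-height chat layout; the interrupt and action renderers above appear inline in the conversation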
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n  const { theme } = useTheme();\n  const [localSteps, setLocalSteps] = useState<Step[]>([]);\n  const [accepted, setAccepted] = useState<boolean | null>(null);\n\n  useEffect(() => {\n    if (status === \"executing\" && localSteps.length === 0) {\n      setLocalSteps(args.steps);\n    }\n  }, [status, args.steps, localSteps]);\n\n  if (args.steps === undefined || args.steps.length === 0) {\n    return <></>;\n  }\n\n  const steps = localSteps.length > 0 ? localSteps : args.steps;\n  const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n  const handleStepToggle = (index: number) => {\n    setLocalSteps((prevSteps) =>\n      prevSteps.map((step, i) =>\n        i === index\n          ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n          : step,\n      ),\n    );\n  };\n\n  const handleReject = () => {\n    if (respond) {\n      setAccepted(false);\n      respond({ accepted: false });\n    }\n  };\n\n  const handleConfirm = () => {\n    if (respond) {\n      setAccepted(true);\n      respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\") });\n    }\n  };\n\n  return (\n
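    // step-selection card with Reject/Confirm buttons; respond() sends the decision back to the agent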
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤝 Human-in-the-Loop Task Planner\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **human-in-the-loop** capabilities:\n\n1. **Collaborative Planning**: The Copilot generates task steps and lets you\n decide which ones to perform\n2. **Interactive Decision Making**: Select or deselect steps to customize the\n execution plan\n3. **Adaptive Responses**: The Copilot adapts its execution based on your\n choices, even handling missing steps\n\n## How to Interact\n\nTry these steps to experience the demo:\n\n1. Ask your Copilot to help with a task, such as:\n\n - \"Make me a sandwich\"\n - \"Plan a weekend trip\"\n - \"Organize a birthday party\"\n - \"Start a garden\"\n\n2. Review the suggested steps provided by your Copilot\n\n3. Select or deselect steps using the checkboxes to customize the plan\n\n - Try removing essential steps to see how the Copilot adapts!\n\n4. Click \"Execute Plan\" to see the outcome based on your selections\n\n## ✨ Human-in-the-Loop Magic in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and breaks it down into logical steps\n- These steps are presented to you through a dynamic UI component\n- Your selections are captured as user input\n- The agent considers your choices when executing the plan\n- The agent adapts to missing steps with creative problem-solving\n\n**What you'll see in this demo:**\n\n- The Copilot provides a detailed, step-by-step plan for your task\n- You have complete control over which steps to include\n- If you remove essential steps, the Copilot provides entertaining and creative\n workarounds\n- The final execution reflects your choices, showing how human input shapes the\n outcome\n- Each response is tailored to your specific selections\n\nThis human-in-the-loop pattern creates a powerful collaborative experience where\nboth human judgment and AI capabilities work together to achieve better results\nthan either could alone!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple 
of words per step) that are required for a task.\n The step should be in imperative form (i.e. Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any]\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final 
response\n return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * A LangGraph implementation of the human-in-the-loop agent.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, interrupt, Annotation, MessagesAnnotation, StateGraph, 
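The Python `process_steps_node` above pauses the run with `interrupt` and picks the user's answer back up on resume. A minimal, self-contained sketch of that pause/resume cycle in LangGraph JS (the graph shape, `thread_id`, and step strings here are hypothetical; `interrupt` needs a checkpointer and a thread to resume against):

```ts
import {
  Annotation,
  Command,
  END,
  MemorySaver,
  START,
  StateGraph,
  interrupt,
} from "@langchain/langgraph";

const ReviewState = Annotation.Root({
  steps: Annotation<string[]>({ reducer: (x, y) => y ?? x, default: () => [] }),
});

// Execution stops at interrupt(); the payload surfaces to the client, and the
// value supplied on resume becomes interrupt()'s return value.
async function reviewNode(state: typeof ReviewState.State) {
  const userResponse = interrupt({ steps: state.steps });
  console.log("user said:", userResponse);
  return {};
}

const graph = new StateGraph(ReviewState)
  .addNode("review", reviewNode)
  .addEdge(START, "review")
  .addEdge("review", END)
  .compile({ checkpointer: new MemorySaver() });

async function main() {
  const config = { configurable: { thread_id: "demo-thread" } };
  // The first invoke halts at the interrupt...
  await graph.invoke({ steps: ["Dig hole", "Open door"] }, config);
  // ...and a second invoke with Command({ resume }) continues from that point.
  await graph.invoke(new Command({ resume: "Dig hole enabled, Open door disabled" }), config);
}
main();
```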
END, START } from \"@langchain/langgraph\";\n\nconst DEFINE_TASK_TOOL = {\n type: \"function\",\n function: {\n name: \"plan_execution_steps\",\n description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in imperative form (i.e. Dig hole, Open door, ...)\",\n parameters: {\n type: \"object\",\n properties: {\n steps: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n description: {\n type: \"string\",\n description: \"The text of the step in imperative form\"\n },\n status: {\n type: \"string\",\n enum: [\"enabled\"],\n description: \"The status of the step, always 'enabled'\"\n }\n },\n required: [\"description\", \"status\"]\n },\n description: \"An array of 10 step objects, each containing text and status\"\n }\n },\n required: [\"steps\"]\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n steps: Annotation>({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n tools: Annotation(),\n user_response: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => undefined\n }),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This is the entry point for the flow.\n */\n\n // Initialize steps list if not exists\n if (!state.steps) {\n state.steps = [];\n }\n\n return new Command({\n goto: \"chat_node\",\n update: {\n messages: state.messages,\n steps: state.steps,\n }\n });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * Standard chat node where the agent processes messages and generates responses.\n * If task steps are defined, the user can enable/disable them using interrupts.\n */\n const systemPrompt = `\n You are a helpful assistant that can perform any task.\n You MUST call the \\`plan_execution_steps\\` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n \n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"steps\",\n tool: \"plan_execution_steps\",\n tool_argument: \"steps\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n DEFINE_TASK_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model and generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n \n // Handle tool calls\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n\n if (toolCall.name === \"plan_execution_steps\") {\n // Get the steps from the tool call\n const stepsRaw = toolCall.args.steps || [];\n \n // Set initial status to \"enabled\" for all steps\n const stepsData: Array<{ description: string; status: string }> = [];\n \n // Handle different potential formats of steps data\n if (Array.isArray(stepsRaw)) {\n for (const step of stepsRaw) {\n if (typeof step === 'object' && step.description) {\n 
stepsData.push({\n description: step.description,\n status: \"enabled\"\n });\n } else if (typeof step === 'string') {\n stepsData.push({\n description: step,\n status: \"enabled\"\n });\n }\n }\n }\n \n // If no steps were processed correctly, return to END with the updated messages\n if (stepsData.length === 0) {\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n }\n\n // Update steps in state and emit to frontend\n state.steps = stepsData;\n \n // Add a tool response to satisfy OpenAI's requirements\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Task steps generated.\",\n tool_call_id: toolCall.id\n };\n \n const updatedMessages = [...messages, toolResponse];\n\n // Move to the process_steps_node which will handle the interrupt and final response\n return new Command({\n goto: \"process_steps_node\",\n update: {\n messages: updatedMessages,\n steps: state.steps,\n }\n });\n }\n }\n \n // If no tool calls or not plan_execution_steps, return to END with the updated messages\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n}\n\nasync function processStepsNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This node handles the user interrupt for step customization and generates the final response.\n */\n\n let userResponse: string;\n\n // Check if we already have a user_response in the state\n // This happens when the node restarts after an interrupt\n if (state.user_response) {\n userResponse = state.user_response;\n } else {\n // Use LangGraph interrupt to get user input on steps\n // This will pause execution and wait for user input in the frontend\n userResponse = interrupt({ steps: state.steps });\n // Store the user response in state for when the node restarts\n state.user_response = userResponse;\n }\n \n // Generate the creative completion response\n const finalPrompt = `\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n `;\n \n const finalResponse = await new ChatOpenAI({ model: \"gpt-4o\" }).invoke([\n new SystemMessage({ content: finalPrompt }),\n { role: \"user\", content: userResponse }\n ], config);\n\n // Add the final response to messages\n const messages = [...state.messages, finalResponse];\n \n // Clear the user_response from state to prepare for future interactions\n const newState = { ...state };\n delete newState.user_response;\n \n // Return to END with the updated messages\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\nworkflow.addNode(\"process_steps_node\", processStepsNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"process_steps_node\", END);\n\n// Add conditional edges from chat_node\nworkflow.addConditionalEdges(\n \"chat_node\",\n 
(state: AgentState) => {\n // This would be determined by the Command returned from chat_node\n // For now, we'll assume the logic is handled in the Command's goto property\n return \"continue\";\n },\n {\n \"process_steps_node\": \"process_steps_node\",\n \"continue\": END,\n }\n);\n\n// Compile the graph\nexport const humanInTheLoopGraph = workflow.compile();", - "language": "ts", - "type": "file" - } - ], - "langgraph::predictive_state_updates": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n const chatTitle = 'AI Document Editor'\n const chatDescription = 'Ask me to create or edit a document'\n const initialLabel = 'Hi 👋 How can I help with your document?'\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
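One wiring note on the graph above: `chatNode` already routes itself by returning `Command({ goto: ... })`, so the conditional edge with the hard-coded `"continue"` is effectively inert. A sketch of the Command-native alternative, where the `ends` option declares a self-routing node's possible destinations (node names and state here are illustrative):

```ts
import { Annotation, Command, END, START, StateGraph } from "@langchain/langgraph";

const S = Annotation.Root({
  planned: Annotation<boolean>({ reducer: (x, y) => y ?? x, default: () => false }),
});

const g = new StateGraph(S)
  // A Command-returning node picks its own successor; no conditional edge needed.
  .addNode(
    "chat_node",
    async (state: typeof S.State) =>
      new Command({ goto: state.planned ? "process_steps_node" : END }),
    { ends: ["process_steps_node", END] },
  )
  .addNode("process_steps_node", async () => new Command({ goto: END }), { ends: [END] })
  .addEdge(START, "chat_node")
  .compile();
```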
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n
\n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction({\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n }, [agentState?.document]);\n\n // Action to write the document.\n useCopilotAction({\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n }, [agentState?.document]);\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState<boolean | null>(null);\n return (\n
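`ConfirmChanges` is driven by CopilotKit's `renderAndWaitForResponse`: the agent's tool call renders UI in the chat, and the run stays blocked until `respond` is invoked. A stripped-down sketch (the action name, parameter, and markup are illustrative, not the demo's):

```tsx
import React from "react";
import { useCopilotAction } from "@copilotkit/react-core";

export function useConfirmExample(onDecision: (accepted: boolean) => void) {
  useCopilotAction({
    name: "confirm_example", // hypothetical tool name
    parameters: [{ name: "summary", type: "string", description: "What is being proposed" }],
    renderAndWaitForResponse: ({ args, respond, status }) =>
      status === "executing" ? (
        <div>
          <p>{args.summary}</p>
          {/* respond() resolves the pending tool call and unblocks the agent */}
          <button onClick={() => { onDecision(false); respond?.({ accepted: false }); }}>Reject</button>
          <button onClick={() => { onDecision(true); respond?.({ accepted: true }); }}>Confirm</button>
        </div>
      ) : (
        <></>
      ),
  });
}
```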
\n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n
\n )}\n
\n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": "/* Basic editor styles */\n.tiptap-container {\n height: 100vh; /* Full viewport height */\n width: 100vw; /* Full viewport width */\n display: flex;\n flex-direction: column;\n}\n\n.tiptap {\n flex: 1; /* Take up remaining space */\n overflow: auto; /* Allow scrolling if content overflows */\n}\n\n.tiptap :first-child {\n margin-top: 0;\n}\n\n/* List styles */\n.tiptap ul,\n.tiptap ol {\n padding: 0 1rem;\n margin: 1.25rem 1rem 1.25rem 0.4rem;\n}\n\n.tiptap ul li p,\n.tiptap ol li p {\n margin-top: 0.25em;\n margin-bottom: 0.25em;\n}\n\n/* Heading styles */\n.tiptap h1,\n.tiptap h2,\n.tiptap h3,\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n line-height: 1.1;\n margin-top: 2.5rem;\n text-wrap: pretty;\n font-weight: bold;\n}\n\n.tiptap h1,\n.tiptap h2,\n.tiptap h3,\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n margin-top: 3.5rem;\n margin-bottom: 1.5rem;\n}\n\n.tiptap p {\n margin-bottom: 1rem;\n}\n\n.tiptap h1 {\n font-size: 1.4rem;\n}\n\n.tiptap h2 {\n font-size: 1.2rem;\n}\n\n.tiptap h3 {\n font-size: 1.1rem;\n}\n\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n font-size: 1rem;\n}\n\n/* Code and preformatted text styles */\n.tiptap code {\n background-color: var(--purple-light);\n border-radius: 0.4rem;\n color: var(--black);\n font-size: 0.85rem;\n padding: 0.25em 0.3em;\n}\n\n.tiptap pre {\n background: var(--black);\n border-radius: 0.5rem;\n color: var(--white);\n font-family: \"JetBrainsMono\", monospace;\n margin: 1.5rem 0;\n padding: 0.75rem 1rem;\n}\n\n.tiptap pre code {\n background: none;\n color: inherit;\n font-size: 0.8rem;\n padding: 0;\n}\n\n.tiptap blockquote {\n border-left: 3px solid var(--gray-3);\n margin: 1.5rem 0;\n padding-left: 1rem;\n}\n\n.tiptap hr {\n border: none;\n border-top: 1px solid var(--gray-2);\n margin: 2rem 0;\n}\n\n.tiptap s {\n background-color: #f9818150;\n padding: 2px;\n font-weight: bold;\n color: rgba(0, 0, 0, 0.7);\n}\n\n.tiptap em {\n background-color: #b2f2bb;\n padding: 2px;\n font-weight: bold;\n font-style: normal;\n}\n\n.copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 📝 Predictive State Updates Document Editor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **predictive state updates** for real-time\ndocument collaboration:\n\n1. **Live Document Editing**: Watch as your Copilot makes changes to a document\n in real-time\n2. **Diff Visualization**: See exactly what's being changed as it happens\n3. 
**Streaming Updates**: Changes are displayed character-by-character as the\n Copilot works\n\n## How to Interact\n\nTry these interactions with the collaborative document editor:\n\n- \"Fix the grammar and typos in this document\"\n- \"Make this text more professional\"\n- \"Add a section about [topic]\"\n- \"Summarize this content in bullet points\"\n- \"Change the tone to be more casual\"\n\nWatch as the Copilot processes your request and edits the document in real-time\nright before your eyes.\n\n## ✨ Predictive State Updates in Action\n\n**What's happening technically:**\n\n- The document state is shared between your UI and the Copilot\n- As the Copilot generates content, changes are streamed to the UI\n- Each modification is visualized with additions and deletions\n- The UI renders these changes progressively, without waiting for completion\n- All edits are tracked and displayed in a visually intuitive way\n\n**What you'll see in this demo:**\n\n- Text changes are highlighted in different colors (green for additions, red for\n deletions)\n- The document updates character-by-character, creating a typing-like effect\n- You can see the Copilot's thought process as it refines the content\n- The final document seamlessly incorporates all changes\n- The experience feels collaborative, as if someone is editing alongside you\n\nThis pattern of real-time collaborative editing with diff visualization is\nperfect for document editors, code review tools, content creation platforms, or\nany application where users benefit from seeing exactly how content is being\ntransformed!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. 
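The diff highlighting described above is produced by `diffPartialText` in `page.tsx`, which marks up `diffWords` output; the demo stylesheet styles `em` as an addition (green) and `s` as a deletion (red). A minimal sketch of that marking step, assuming those two wrappers:

```ts
import { diffWords } from "diff";

// Wrap additions in <em> and deletions in <s>; unchanged text passes through.
function markChanges(oldText: string, newText: string): string {
  return diffWords(oldText, newText)
    .map((part) =>
      part.added ? `<em>${part.value}</em>`
        : part.removed ? `<s>${part.value}</s>`
        : part.value,
    )
    .join("");
}

// Logs: "Preheat <em>the </em>oven to 350°F"
console.log(markChanges("Preheat oven to 350°F", "Preheat the oven to 350°F"));
```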
Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n return Command(\n goto=\"chat_node\"\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n \"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n 
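Every agent in this file streams state the same way: a `predict_state` entry in the run config's metadata asks the runtime to mirror a named tool argument into a state key while the call is still being generated. Sketched in TypeScript for clarity (the convention is CopilotKit-side metadata, not a LangChain API):

```ts
import { RunnableConfig } from "@langchain/core/runnables";

// While the model streams the write_document_local call, the partial "document"
// argument is projected onto state.document, which the frontend renders live.
const config: RunnableConfig = {
  recursionLimit: 25,
  metadata: {
    predict_state: [
      { state_key: "document", tool: "write_document_local", tool_argument: "document" },
    ],
  },
};
```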
update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * A demo of predictive state updates using LangGraph.\n */\n\nimport { v4 as uuidv4 } from \"uuid\";\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\nconst WRITE_DOCUMENT_TOOL = {\n type: \"function\",\n function: {\n name: \"write_document_local\",\n description: [\n \"Write a document. Use markdown formatting to format the document.\",\n \"It's good to format the document extensively so it's easy to read.\",\n \"You can use all kinds of markdown.\",\n \"However, do not use italic or strike-through formatting, it's reserved for another purpose.\",\n \"You MUST write the full document, even when changing only a few words.\",\n \"When making edits to the document, try to make them minimal - do not change every word.\",\n \"Keep stories SHORT!\"\n ].join(\" \"),\n parameters: {\n type: \"object\",\n properties: {\n document: {\n type: \"string\",\n description: \"The document to write\"\n },\n },\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n document: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => undefined\n }),\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This is the entry point for the flow.\n */\n return new Command({\n goto: \"chat_node\"\n });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * Standard chat node.\n */\n\n const systemPrompt = `\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 
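The Python agents only attach a `MemorySaver` when `LANGGRAPH_FAST_API` is set, because LangGraph dev/API supplies its own persistence. A hypothetical TypeScript rendering of that switch (the TS agents in this file compile unconditionally; `workflow` stands in for the graph built above):

```ts
import { MemorySaver, StateGraph } from "@langchain/langgraph";

// Placeholder for the StateGraph assembled in the surrounding file.
declare const workflow: StateGraph<any>;

const isFastApi = (process.env.LANGGRAPH_FAST_API ?? "false").toLowerCase() === "true";

// MemorySaver for CopilotKit contexts; no custom checkpointer under LangGraph API/dev.
export const graph = isFastApi
  ? workflow.compile({ checkpointer: new MemorySaver() })
  : workflow.compile();
```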
2 sentences max.\n This is the current state of the document: ----\\n ${state.document || ''}\\n-----\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"document\",\n tool: \"write_document_local\",\n tool_argument: \"document\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n WRITE_DOCUMENT_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model to generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n\n // Extract any tool calls from the response\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n\n if (toolCall.name === \"write_document_local\") {\n // Add the tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Document written.\",\n tool_call_id: toolCall.id\n };\n\n // Add confirmation tool call\n const confirmToolCall = {\n role: \"assistant\" as const,\n content: \"\",\n tool_calls: [{\n id: uuidv4(),\n type: \"function\" as const,\n function: {\n name: \"confirm_changes\",\n arguments: \"{}\"\n }\n }]\n };\n\n const updatedMessages = [...messages, toolResponse, confirmToolCall];\n\n // Return Command to route to end\n return new Command({\n goto: END,\n update: {\n messages: updatedMessages,\n document: toolCall.args.document\n }\n });\n }\n }\n\n // If no tool was called, go to end\n return new Command({\n goto: END,\n update: {\n messages: messages\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const predictiveStateUpdatesGraph = workflow.compile();", - "language": "ts", - "type": "file" - } - ], - "langgraph::shared_state": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = 
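The recipe page that follows hangs everything off `useCoAgent`: one state object is visible to both the UI and the agent, and writes on either side propagate to the other. A pared-down sketch against the demo's `shared_state` agent (the `RecipeState` shape here is trimmed to a single field):

```tsx
import React from "react";
import { useCoAgent } from "@copilotkit/react-core";

type RecipeState = { recipe: { title: string } };

function RecipeTitle() {
  // The same state the agent reads and writes; no manual syncing required.
  const { state, setState } = useCoAgent<RecipeState>({
    name: "shared_state",
    initialState: { recipe: { title: "Make Your Recipe" } },
  });

  return (
    <input
      value={state.recipe?.title ?? ""}
      onChange={(e) => setState({ recipe: { ...state.recipe, title: e.target.value } })}
    />
  );
}
```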
useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, 
preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
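The "Improve with AI" button above shows the programmatic entry point into the chat: `appendMessage` from `useCopilotChat` starts an agent run without the user typing anything. Isolated as a sketch:

```tsx
import React from "react";
import { useCopilotChat } from "@copilotkit/react-core";
import { Role, TextMessage } from "@copilotkit/runtime-client-gql";

function ImproveButton() {
  const { appendMessage, isLoading } = useCopilotChat();
  return (
    <button
      disabled={isLoading}
      // Appending a user message kicks off a run, just like typing in the chat.
      onClick={() =>
        appendMessage(new TextMessage({ content: "Improve the recipe", role: Role.User }))
      }
    >
      {isLoading ? "Please Wait..." : "Improve with AI"}
    </button>
  );
}
```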
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n background-color: #fff;\n color: #000;\n border-bottom: 0px;\n}\n\n/* Recipe App Styles */\n.app-container {\n min-height: 100vh;\n width: 100%;\n display: flex;\n align-items: center;\n justify-content: center;\n background-size: cover;\n background-position: center;\n background-repeat: no-repeat;\n background-attachment: fixed;\n position: relative;\n overflow: auto;\n}\n\n.recipe-card {\n background-color: rgba(255, 255, 255, 0.97);\n border-radius: 16px;\n box-shadow: 0 15px 30px rgba(0, 0, 0, 0.25), 0 5px 15px rgba(0, 0, 0, 0.15);\n width: 100%;\n max-width: 750px;\n margin: 20px auto;\n padding: 14px 32px;\n position: relative;\n z-index: 1;\n backdrop-filter: blur(5px);\n border: 1px solid rgba(255, 255, 255, 0.3);\n transition: transform 0.2s ease, box-shadow 0.2s ease;\n animation: fadeIn 0.5s ease-out forwards;\n box-sizing: border-box;\n overflow: hidden;\n}\n\n.recipe-card:hover {\n transform: translateY(-5px);\n box-shadow: 0 20px 40px rgba(0, 0, 0, 0.3), 0 10px 20px rgba(0, 0, 0, 0.2);\n}\n\n/* Recipe Header */\n.recipe-header {\n margin-bottom: 24px;\n}\n\n.recipe-title-input {\n width: 100%;\n font-size: 24px;\n font-weight: bold;\n border: none;\n outline: none;\n padding: 8px 0;\n margin-bottom: 0px;\n}\n\n.recipe-meta {\n display: flex;\n align-items: center;\n gap: 20px;\n margin-top: 5px;\n margin-bottom: 14px;\n}\n\n.meta-item {\n display: flex;\n align-items: center;\n gap: 8px;\n color: #555;\n}\n\n.meta-icon {\n font-size: 20px;\n color: #777;\n}\n\n.meta-text {\n font-size: 15px;\n}\n\n/* Recipe Meta Selects */\n.meta-item select {\n border: none;\n background: transparent;\n font-size: 15px;\n color: #555;\n cursor: pointer;\n outline: none;\n padding-right: 18px;\n transition: color 0.2s, transform 0.1s;\n font-weight: 500;\n}\n\n.meta-item select:hover,\n.meta-item select:focus {\n color: #FF5722;\n}\n\n.meta-item select:active {\n transform: scale(0.98);\n}\n\n.meta-item select option {\n color: #333;\n background-color: white;\n font-weight: normal;\n padding: 8px;\n}\n\n/* Section Container */\n.section-container {\n margin-bottom: 20px;\n position: relative;\n width: 100%;\n}\n\n.section-title {\n font-size: 20px;\n font-weight: 700;\n margin-bottom: 20px;\n color: #333;\n position: relative;\n display: inline-block;\n}\n\n.section-title:after {\n content: \"\";\n position: absolute;\n bottom: -8px;\n left: 0;\n width: 40px;\n height: 3px;\n background-color: #ff7043;\n border-radius: 3px;\n}\n\n/* Dietary Preferences */\n.dietary-options {\n display: flex;\n flex-wrap: wrap;\n gap: 10px 16px;\n margin-bottom: 16px;\n width: 100%;\n}\n\n.dietary-option {\n display: flex;\n align-items: center;\n gap: 6px;\n font-size: 14px;\n cursor: pointer;\n margin-bottom: 4px;\n}\n\n.dietary-option input {\n cursor: pointer;\n}\n\n/* Ingredients */\n.ingredients-container {\n display: flex;\n flex-wrap: wrap;\n gap: 10px;\n margin-bottom: 15px;\n width: 100%;\n box-sizing: border-box;\n}\n\n.ingredient-card {\n display: flex;\n align-items: center;\n background-color: rgba(255, 255, 255, 0.9);\n border-radius: 12px;\n padding: 12px;\n margin-bottom: 10px;\n box-shadow: 0 4px 10px rgba(0, 0, 0, 0.08);\n position: relative;\n transition: all 0.2s ease;\n border: 1px solid rgba(240, 
240, 240, 0.8);\n width: calc(33.333% - 7px);\n box-sizing: border-box;\n}\n\n.ingredient-card:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 15px rgba(0, 0, 0, 0.12);\n}\n\n.ingredient-card .remove-button {\n position: absolute;\n right: 10px;\n top: 10px;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 24px;\n height: 24px;\n line-height: 1;\n}\n\n.ingredient-card:hover .remove-button {\n display: block;\n}\n\n.ingredient-icon {\n font-size: 24px;\n margin-right: 12px;\n display: flex;\n align-items: center;\n justify-content: center;\n width: 40px;\n height: 40px;\n background-color: #f7f7f7;\n border-radius: 50%;\n flex-shrink: 0;\n}\n\n.ingredient-content {\n flex: 1;\n display: flex;\n flex-direction: column;\n gap: 3px;\n min-width: 0;\n}\n\n.ingredient-name-input,\n.ingredient-amount-input {\n border: none;\n background: transparent;\n outline: none;\n width: 100%;\n padding: 0;\n text-overflow: ellipsis;\n overflow: hidden;\n white-space: nowrap;\n}\n\n.ingredient-name-input {\n font-weight: 500;\n font-size: 14px;\n}\n\n.ingredient-amount-input {\n font-size: 13px;\n color: #666;\n}\n\n.ingredient-name-input::placeholder,\n.ingredient-amount-input::placeholder {\n color: #aaa;\n}\n\n.remove-button {\n background: none;\n border: none;\n color: #999;\n font-size: 20px;\n cursor: pointer;\n padding: 0;\n width: 28px;\n height: 28px;\n display: flex;\n align-items: center;\n justify-content: center;\n margin-left: 10px;\n}\n\n.remove-button:hover {\n color: #FF5722;\n}\n\n/* Instructions */\n.instructions-container {\n display: flex;\n flex-direction: column;\n gap: 6px;\n position: relative;\n margin-bottom: 12px;\n width: 100%;\n}\n\n.instruction-item {\n position: relative;\n display: flex;\n width: 100%;\n box-sizing: border-box;\n margin-bottom: 8px;\n align-items: flex-start;\n}\n\n.instruction-number {\n display: flex;\n align-items: center;\n justify-content: center;\n min-width: 26px;\n height: 26px;\n background-color: #ff7043;\n color: white;\n border-radius: 50%;\n font-weight: 600;\n flex-shrink: 0;\n box-shadow: 0 2px 4px rgba(255, 112, 67, 0.3);\n z-index: 1;\n font-size: 13px;\n margin-top: 2px;\n}\n\n.instruction-line {\n position: absolute;\n left: 13px; /* Half of the number circle width */\n top: 22px;\n bottom: -18px;\n width: 2px;\n background: linear-gradient(to bottom, #ff7043 60%, rgba(255, 112, 67, 0.4));\n z-index: 0;\n}\n\n.instruction-content {\n background-color: white;\n border-radius: 10px;\n padding: 10px 14px;\n margin-left: 12px;\n flex-grow: 1;\n transition: all 0.2s ease;\n box-shadow: 0 2px 6px rgba(0, 0, 0, 0.08);\n border: 1px solid rgba(240, 240, 240, 0.8);\n position: relative;\n width: calc(100% - 38px);\n box-sizing: border-box;\n display: flex;\n align-items: center;\n}\n\n.instruction-content-editing {\n background-color: #fff9f6;\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12), 0 0 0 2px rgba(255, 112, 67, 0.2);\n}\n\n.instruction-content:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12);\n}\n\n.instruction-textarea {\n width: 100%;\n background: transparent;\n border: none;\n resize: vertical;\n font-family: inherit;\n font-size: 14px;\n line-height: 1.4;\n min-height: 20px;\n outline: none;\n padding: 0;\n margin: 0;\n}\n\n.instruction-delete-btn {\n position: absolute;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 20px;\n height: 
20px;\n line-height: 1;\n top: 50%;\n transform: translateY(-50%);\n right: 8px;\n}\n\n.instruction-content:hover .instruction-delete-btn {\n display: flex;\n align-items: center;\n justify-content: center;\n}\n\n/* Action Button */\n.action-container {\n display: flex;\n justify-content: center;\n margin-top: 40px;\n padding-bottom: 20px;\n position: relative;\n}\n\n.improve-button {\n background-color: #ff7043;\n border: none;\n color: white;\n border-radius: 30px;\n font-size: 18px;\n font-weight: 600;\n padding: 14px 28px;\n cursor: pointer;\n transition: all 0.3s ease;\n box-shadow: 0 4px 15px rgba(255, 112, 67, 0.4);\n display: flex;\n align-items: center;\n justify-content: center;\n text-align: center;\n position: relative;\n min-width: 180px;\n}\n\n.improve-button:hover {\n background-color: #ff5722;\n transform: translateY(-2px);\n box-shadow: 0 8px 20px rgba(255, 112, 67, 0.5);\n}\n\n.improve-button.loading {\n background-color: #ff7043;\n opacity: 0.8;\n cursor: not-allowed;\n padding-left: 42px; /* Reduced padding to bring text closer to icon */\n padding-right: 22px; /* Balance the button */\n justify-content: flex-start; /* Left align text for better alignment with icon */\n}\n\n.improve-button.loading:after {\n content: \"\"; /* Add space between icon and text */\n display: inline-block;\n width: 8px; /* Width of the space */\n}\n\n.improve-button:before {\n content: \"\";\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='white' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpath d='M12 2v4M12 18v4M4.93 4.93l2.83 2.83M16.24 16.24l2.83 2.83M2 12h4M18 12h4M4.93 19.07l2.83-2.83M16.24 7.76l2.83-2.83'/%3E%3C/svg%3E\");\n width: 20px; /* Slightly smaller icon */\n height: 20px;\n background-repeat: no-repeat;\n background-size: contain;\n position: absolute;\n left: 16px; /* Slightly adjusted */\n top: 50%;\n transform: translateY(-50%);\n display: none;\n}\n\n.improve-button.loading:before {\n display: block;\n animation: spin 1.5s linear infinite;\n}\n\n@keyframes spin {\n 0% { transform: translateY(-50%) rotate(0deg); }\n 100% { transform: translateY(-50%) rotate(360deg); }\n}\n\n/* Ping Animation */\n.ping-animation {\n position: absolute;\n display: flex;\n width: 12px;\n height: 12px;\n top: 0;\n right: 0;\n}\n\n.ping-circle {\n position: absolute;\n display: inline-flex;\n width: 100%;\n height: 100%;\n border-radius: 50%;\n background-color: #38BDF8;\n opacity: 0.75;\n animation: ping 1.5s cubic-bezier(0, 0, 0.2, 1) infinite;\n}\n\n.ping-dot {\n position: relative;\n display: inline-flex;\n width: 12px;\n height: 12px;\n border-radius: 50%;\n background-color: #0EA5E9;\n}\n\n@keyframes ping {\n 75%, 100% {\n transform: scale(2);\n opacity: 0;\n }\n}\n\n/* Instruction hover effects */\n.instruction-item:hover .instruction-delete-btn {\n display: flex !important;\n}\n\n/* Add some subtle animations */\n@keyframes fadeIn {\n from { opacity: 0; transform: translateY(20px); }\n to { opacity: 1; transform: translateY(0); }\n}\n\n/* Better center alignment for the recipe card */\n.recipe-card-container {\n display: flex;\n justify-content: center;\n width: 100%;\n position: relative;\n z-index: 1;\n margin: 0 auto;\n box-sizing: border-box;\n}\n\n/* Add Buttons */\n.add-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 8px;\n padding: 10px 16px;\n cursor: pointer;\n font-weight: 500;\n display: 
inline-block;\n font-size: 14px;\n margin-bottom: 0;\n}\n\n.add-step-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 6px;\n padding: 6px 12px;\n cursor: pointer;\n font-weight: 500;\n font-size: 13px;\n}\n\n/* Section Headers */\n.section-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 12px;\n}", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🍳 Shared State Recipe Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **shared state** functionality - a powerful\nfeature that enables bidirectional data flow between:\n\n1. **Frontend → Agent**: UI controls update the agent's context in real-time\n2. **Agent → Frontend**: The Copilot's recipe creations instantly update the UI\n components\n\nIt's like having a cooking buddy who not only listens to what you want but also\nupdates your recipe card as you chat - no refresh needed! ✨\n\n## How to Interact\n\nMix and match any of these parameters (or none at all - it's up to you!):\n\n- **Skill Level**: Beginner to expert 👨‍🍳\n- **Cooking Time**: Quick meals or slow cooking ⏱️\n- **Special Preferences**: Dietary needs, flavor profiles, health goals 🥗\n- **Ingredients**: Items you want to include 🧅🥩🍄\n- **Instructions**: Any specific steps\n\nThen chat with your Copilot chef with prompts like:\n\n- \"I'm a beginner cook. Can you make me a quick dinner?\"\n- \"I need something spicy with chicken that takes under 30 minutes!\"\n\n## ✨ Shared State Magic in Action\n\n**What's happening technically:**\n\n- The UI and Copilot agent share the same state object (**Agent State = UI\n State**)\n- Changes from either side automatically update the other\n- Neither side needs to manually request updates from the other\n\n**What you'll see in this demo:**\n\n- Set cooking time to 20 minutes in the UI and watch the Copilot immediately\n respect your time constraint\n- Add ingredients through the UI and see them appear in your recipe\n- When the Copilot suggests new ingredients, watch them automatically appear in\n the UI ingredients list\n- Change your skill level and see how the Copilot adapts its instructions in\n real-time\n\nThis synchronized state creates a seamless experience where the agent always has\nyour current preferences, and any updates to the recipe are instantly reflected\nin both places.\n\nThis shared state pattern can be applied to any application where you want your\nUI and Copilot to work together in perfect harmony!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass 
SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. 
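The Pydantic models above are the Python way of forcing a complete, well-typed recipe out of the model. In the plain-JSON-schema style this file's TypeScript agents use, the same tool would look roughly like this (hypothetical: the shared-state demo ships only the Python agent):

```ts
// Hypothetical TS counterpart of the Pydantic generate_recipe schema.
const GENERATE_RECIPE_TOOL = {
  type: "function",
  function: {
    name: "generate_recipe",
    description:
      "Using the existing ingredients and instructions, finish the recipe. " +
      "ALWAYS provide the entire recipe, not just the changes.",
    parameters: {
      type: "object",
      properties: {
        recipe: {
          type: "object",
          properties: {
            skill_level: { type: "string", enum: ["Beginner", "Intermediate", "Advanced"] },
            special_preferences: { type: "array", items: { type: "string" } },
            cooking_time: { type: "string", enum: ["5 min", "15 min", "30 min", "45 min", "60+ min"] },
            ingredients: {
              type: "array",
              items: {
                type: "object",
                properties: {
                  icon: { type: "string", description: "The actual emoji, like 🥕" },
                  name: { type: "string" },
                  amount: { type: "string" },
                },
                required: ["icon", "name", "amount"],
              },
            },
            instructions: { type: "array", items: { type: "string" } },
            changes: { type: "string", description: "What changed in this revision" },
          },
          required: ["skill_level", "special_preferences", "cooking_time", "ingredients", "instructions"],
        },
      },
      required: ["recipe"],
    },
  },
};
```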
ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any]\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
don't describe the recipe, just say what you did.\n    \"\"\"\n\n    # Define the model\n    model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n    # Define config for the model\n    if config is None:\n        config = RunnableConfig(recursion_limit=25)\n\n    # Use \"predict_state\" metadata to set up streaming for the generate_recipe tool\n    config[\"metadata\"][\"predict_state\"] = [{\n        \"state_key\": \"recipe\",\n        \"tool\": \"generate_recipe\",\n        \"tool_argument\": \"recipe\"\n    }]\n\n    # Bind the tools to the model\n    model_with_tools = model.bind_tools(\n        [\n            *state[\"tools\"],\n            generate_recipe\n        ],\n        # Disable parallel tool calls to avoid race conditions\n        parallel_tool_calls=False,\n    )\n\n    # Run the model and generate a response\n    response = await model_with_tools.ainvoke([\n        SystemMessage(content=system_prompt),\n        *state[\"messages\"],\n    ], config)\n\n    # Update messages with the response\n    messages = state[\"messages\"] + [response]\n\n    # Handle tool calls\n    if hasattr(response, \"tool_calls\") and response.tool_calls:\n        # Handle dicts or object (backward compatibility)\n        tool_call = (response.tool_calls[0]\n                     if isinstance(response.tool_calls[0], dict)\n                     else vars(response.tool_calls[0]))\n\n        # Check if args is already a dict or needs to be parsed\n        tool_call_args = (tool_call[\"args\"]\n                          if isinstance(tool_call[\"args\"], dict)\n                          else json.loads(tool_call[\"args\"]))\n\n        if tool_call[\"name\"] == \"generate_recipe\":\n            # Update recipe state with tool_call_args\n            recipe_data = tool_call_args[\"recipe\"]\n\n            # If we have an existing recipe, update it\n            if \"recipe\" in state and state[\"recipe\"] is not None:\n                recipe = state[\"recipe\"]\n                for key, value in recipe_data.items():\n                    if value is not None:  # Only update fields that were provided\n                        recipe[key] = value\n            else:\n                # Create a new recipe\n                recipe = {\n                    \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n                    \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n                    \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n                    \"ingredients\": recipe_data.get(\"ingredients\", []),\n                    \"instructions\": recipe_data.get(\"instructions\", [])\n                }\n\n            # Add tool response to messages\n            tool_response = {\n                \"role\": \"tool\",\n                \"content\": \"Recipe generated.\",\n                \"tool_call_id\": tool_call[\"id\"]\n            }\n\n            messages = messages + [tool_response]\n\n            # Explicitly emit the updated state to ensure it's shared with frontend\n            state[\"recipe\"] = recipe\n            await adispatch_custom_event(\n                \"manually_emit_intermediate_state\",\n                state,\n                config=config,\n            )\n\n            # Return command with updated recipe\n            return Command(\n                goto=\"start_node\",\n                update={\n                    \"messages\": messages,\n                    \"recipe\": recipe\n                }\n            )\n\n    return Command(\n        goto=END,\n        update={\n            \"messages\": messages,\n            \"recipe\": state[\"recipe\"]\n        }\n    )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n    # For CopilotKit and other contexts, use MemorySaver\n    from langgraph.checkpoint.memory import MemorySaver\n    memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * A demo of shared state between the agent and CopilotKit using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\"\n}\n\nenum SpecialPreferences {\n HIGH_PROTEIN = \"High Protein\",\n LOW_CARB = \"Low Carb\",\n SPICY = \"Spicy\",\n BUDGET_FRIENDLY = \"Budget-Friendly\",\n ONE_POT_MEAL = \"One-Pot Meal\",\n VEGETARIAN = \"Vegetarian\",\n VEGAN = \"Vegan\"\n}\n\nenum CookingTime {\n FIVE_MIN = \"5 min\",\n FIFTEEN_MIN = \"15 min\",\n THIRTY_MIN = \"30 min\",\n FORTY_FIVE_MIN = \"45 min\",\n SIXTY_PLUS_MIN = \"60+ min\"\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n skill_level: SkillLevel;\n special_preferences: SpecialPreferences[];\n cooking_time: CookingTime;\n ingredients: Ingredient[];\n instructions: string[];\n changes?: string;\n}\n\nconst GENERATE_RECIPE_TOOL = {\n type: \"function\",\n function: {\n name: \"generate_recipe\",\n description: \"Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it. Make sure the recipe is complete. ALWAYS provide the entire recipe, not just the changes.\",\n parameters: {\n type: \"object\",\n properties: {\n recipe: {\n type: \"object\",\n properties: {\n skill_level: {\n type: \"string\",\n enum: Object.values(SkillLevel),\n description: \"The skill level required for the recipe\"\n },\n special_preferences: {\n type: \"array\",\n items: {\n type: \"string\",\n enum: Object.values(SpecialPreferences)\n },\n description: \"A list of special preferences for the recipe\"\n },\n cooking_time: {\n type: \"string\",\n enum: Object.values(CookingTime),\n description: \"The cooking time of the recipe\"\n },\n ingredients: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n icon: { type: \"string\", description: \"The icon emoji (not emoji code like '\\\\u1f35e', but the actual emoji like 🥕) of the ingredient\" },\n name: { type: \"string\" },\n amount: { type: \"string\" }\n }\n },\n description: \"Entire list of ingredients for the recipe, including the new ingredients and the ones that are already in the recipe\"\n },\n instructions: {\n type: \"array\",\n items: { type: \"string\" },\n description: \"Entire list of instructions for the recipe, including the new instructions and the ones that are already there\"\n },\n changes: {\n type: \"string\",\n description: \"A description of the changes made to the recipe\"\n }\n },\n }\n },\n required: [\"recipe\"]\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n recipe: Annotation(),\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This is the entry point for the flow.\n */\n\n // Initialize recipe if not exists\n if 
(!state.recipe) {\n    state.recipe = {\n      skill_level: SkillLevel.BEGINNER,\n      special_preferences: [],\n      cooking_time: CookingTime.FIFTEEN_MIN,\n      ingredients: [{ icon: \"🍴\", name: \"Sample Ingredient\", amount: \"1 unit\" }],\n      instructions: [\"First step instruction\"]\n    };\n    // Emit the initial state to ensure it's properly shared with the frontend\n    await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n  }\n  \n  return new Command({\n    goto: \"chat_node\",\n    update: {\n      messages: state.messages,\n      recipe: state.recipe\n    }\n  });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n  /**\n   * Standard chat node.\n   */\n  // Create a safer serialization of the recipe\n  let recipeJson = \"No recipe yet\";\n  if (state.recipe) {\n    try {\n      recipeJson = JSON.stringify(state.recipe, null, 2);\n    } catch (e) {\n      recipeJson = `Error serializing recipe: ${e}`;\n    }\n  }\n\n  const systemPrompt = `You are a helpful assistant for creating recipes. \n  This is the current state of the recipe: ${recipeJson}\n  You can improve the recipe by calling the generate_recipe tool.\n  \n  IMPORTANT:\n  1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n  2. For ingredients, append new ingredients to the existing ones.\n  3. For instructions, append new steps to the existing ones.\n  4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n  5. 'instructions' is always an array of strings\n\n  If you have just created or modified the recipe, just answer in one sentence what you did. don't describe the recipe, just say what you did.\n  `;\n\n  // Define the model\n  const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n  \n  // Define config for the model\n  if (!config) {\n    config = { recursionLimit: 25 };\n  }\n\n  // Use \"predict_state\" metadata to set up streaming for the generate_recipe tool\n  if (!config.metadata) config.metadata = {};\n  config.metadata.predict_state = [{\n    state_key: \"recipe\",\n    tool: \"generate_recipe\",\n    tool_argument: \"recipe\"\n  }];\n\n  // Bind the tools to the model\n  const modelWithTools = model.bindTools(\n    [\n      ...state.tools,\n      GENERATE_RECIPE_TOOL\n    ],\n    {\n      // Disable parallel tool calls to avoid race conditions\n      parallel_tool_calls: false,\n    }\n  );\n\n  // Run the model and generate a response\n  const response = await modelWithTools.invoke([\n    new SystemMessage({ content: systemPrompt }),\n    ...state.messages,\n  ], config);\n\n  // Update messages with the response\n  const messages = [...state.messages, response];\n  \n  // Handle tool calls\n  if (response.tool_calls && response.tool_calls.length > 0) {\n    const toolCall = response.tool_calls[0];\n    \n    if (toolCall.name === \"generate_recipe\") {\n      // Update recipe state with tool_call_args\n      const recipeData = toolCall.args.recipe;\n      let recipe: Recipe;\n      // If we have an existing recipe, update it\n      if (state.recipe) {\n        recipe = { ...state.recipe };\n        for (const [key, value] of Object.entries(recipeData)) {\n          if (value !== null && value !== undefined) { // Only update fields that were provided\n            (recipe as any)[key] = value;\n          }\n        }\n      } else {\n        // Create a new recipe\n        recipe = {\n          skill_level: recipeData.skill_level || SkillLevel.BEGINNER,\n          special_preferences: recipeData.special_preferences || [],\n          cooking_time: recipeData.cooking_time || CookingTime.FIFTEEN_MIN,\n          ingredients: recipeData.ingredients || [],\n          instructions: recipeData.instructions || []\n        };\n      }\n      \n      // Add tool response to messages\n      const 
toolResponse = {\n role: \"tool\" as const,\n content: \"Recipe generated.\",\n tool_call_id: toolCall.id\n };\n \n const updatedMessages = [...messages, toolResponse];\n \n // Explicitly emit the updated state to ensure it's shared with frontend\n state.recipe = recipe;\n await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n \n // Return command with updated recipe\n return new Command({\n goto: \"start_flow\",\n update: {\n messages: updatedMessages,\n recipe: recipe\n }\n });\n }\n }\n\n return new Command({\n goto: END,\n update: {\n messages: messages,\n recipe: state.recipe\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const sharedStateGraph = workflow.compile();", - "language": "ts", - "type": "file" - } - ], - "langgraph::tool_based_generative_ui": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n \n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
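Both recipe agents above stream the `recipe` object to the browser through the `predict_state` metadata and the `manually_emit_intermediate_state` event, and they read back whatever the UI writes. The sketch below shows the frontend half of that loop with the `useCoAgent` hook these demos import from `@copilotkit/react-core`; the agent name `shared_state` and the `RecipePreview` component are illustrative assumptions and would need to match the agent actually registered for this demo.

```tsx
"use client";
import { useCoAgent } from "@copilotkit/react-core";

// Mirrors the `recipe` key the Python/TS agents above read and write.
interface RecipeAgentState {
  recipe?: {
    skill_level: string;
    cooking_time: string;
    ingredients: { icon: string; name: string; amount: string }[];
    instructions: string[];
  };
}

export function RecipePreview() {
  // "shared_state" is an assumed agent name for this sketch.
  const { state, setState } = useCoAgent<RecipeAgentState>({
    name: "shared_state",
    initialState: {},
  });

  return (
    <div>
      {/* Agent -> UI: ingredients streamed from the graph render here. */}
      <ul>
        {state.recipe?.ingredients?.map((ing) => (
          <li key={ing.name}>
            {ing.icon} {ing.name} ({ing.amount})
          </li>
        ))}
      </ul>
      {/* UI -> Agent: state written here is visible to the graph on its next run. */}
      <button
        onClick={() =>
          state.recipe &&
          setState({ ...state, recipe: { ...state.recipe, cooking_time: "30 min" } })
        }
      >
        Limit to 30 minutes
      </button>
    </div>
  );
}
```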
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n}\n\n.page-background {\n /* Darker gradient background */\n background: linear-gradient(170deg, #e9ecef 0%, #ced4da 100%);\n}\n\n@keyframes fade-scale-in {\n from {\n opacity: 0;\n transform: translateY(10px) scale(0.98);\n }\n to {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Updated card entry animation */\n@keyframes pop-in {\n 0% {\n opacity: 0;\n transform: translateY(15px) scale(0.95);\n }\n 70% {\n opacity: 1;\n transform: translateY(-2px) scale(1.02);\n }\n 100% {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Animation for subtle background gradient movement */\n@keyframes animated-gradient {\n 0% {\n background-position: 0% 50%;\n }\n 50% {\n background-position: 100% 50%;\n }\n 100% {\n background-position: 0% 50%;\n }\n}\n\n/* Animation for flash effect on apply */\n@keyframes flash-border-glow {\n 0% {\n /* Start slightly intensified */\n border-top-color: #ff5b4a !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n }\n 50% {\n /* Peak intensity */\n border-top-color: #ff4733 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.08),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 35px rgba(255, 71, 51, 0.7);\n }\n 100% {\n /* Return to default state appearance */\n border-top-color: #ff6f61 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 10px rgba(255, 111, 97, 0.15);\n }\n}\n\n/* Existing animation for haiku lines */\n@keyframes fade-slide-in {\n from {\n opacity: 0;\n transform: translateX(-15px);\n }\n to {\n opacity: 1;\n transform: translateX(0);\n }\n}\n\n.animated-fade-in {\n /* Use the new pop-in animation */\n animation: pop-in 0.6s ease-out forwards;\n}\n\n.haiku-card {\n /* Subtle animated gradient background */\n background: linear-gradient(120deg, #ffffff 0%, #fdfdfd 50%, #ffffff 100%);\n background-size: 200% 200%;\n animation: animated-gradient 10s ease infinite;\n\n /* === Explicit Border Override Attempt === */\n /* 1. Set the default grey border for all sides */\n border: 1px solid #dee2e6;\n\n /* 2. 
Explicitly override the top border immediately after */\n border-top: 10px solid #ff6f61 !important; /* Orange top - Added !important */\n /* === End Explicit Border Override Attempt === */\n\n padding: 2.5rem 3rem;\n border-radius: 20px;\n\n /* Default glow intensity */\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 15px rgba(255, 111, 97, 0.25);\n text-align: left;\n max-width: 745px;\n margin: 3rem auto;\n min-width: 600px;\n\n /* Transition */\n transition: transform 0.35s ease, box-shadow 0.35s ease, border-top-width 0.35s ease, border-top-color 0.35s ease;\n}\n\n.haiku-card:hover {\n transform: translateY(-8px) scale(1.03);\n /* Enhanced shadow + Glow */\n box-shadow: 0 15px 35px rgba(0, 0, 0, 0.1),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n /* Modify only top border properties */\n border-top-width: 14px !important; /* Added !important */\n border-top-color: #ff5b4a !important; /* Added !important */\n}\n\n.haiku-card .flex {\n margin-bottom: 1.5rem;\n}\n\n.haiku-card .flex.haiku-line { /* Target the lines specifically */\n margin-bottom: 1.5rem;\n opacity: 0; /* Start hidden for animation */\n animation: fade-slide-in 0.5s ease-out forwards;\n /* animation-delay is set inline in page.tsx */\n}\n\n/* Remove previous explicit color overrides - rely on Tailwind */\n/* .haiku-card p.text-4xl {\n color: #212529;\n}\n\n.haiku-card p.text-base {\n color: #495057;\n} */\n\n.haiku-card.applied-flash {\n /* Apply the flash animation once */\n /* Note: animation itself has !important on border-top-color */\n animation: flash-border-glow 0.6s ease-out forwards;\n}\n\n/* Styling for images within the main haiku card */\n.haiku-card-image {\n width: 9.5rem; /* Increased size (approx w-48) */\n height: 9.5rem; /* Increased size (approx h-48) */\n object-fit: cover;\n border-radius: 1.5rem; /* rounded-xl */\n border: 1px solid #e5e7eb;\n /* Enhanced shadow with subtle orange hint */\n box-shadow: 0 8px 15px rgba(0, 0, 0, 0.1),\n 0 3px 6px rgba(0, 0, 0, 0.08),\n 0 0 10px rgba(255, 111, 97, 0.2);\n /* Inherit animation delay from inline style */\n animation-name: fadeIn;\n animation-duration: 0.5s;\n animation-fill-mode: both;\n}\n\n/* Styling for images within the suggestion card */\n.suggestion-card-image {\n width: 6.5rem; /* Increased slightly (w-20) */\n height: 6.5rem; /* Increased slightly (h-20) */\n object-fit: cover;\n border-radius: 1rem; /* Equivalent to rounded-md */\n border: 1px solid #d1d5db; /* Equivalent to border (using Tailwind gray-300) */\n margin-top: 0.5rem;\n /* Added shadow for suggestion images */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1),\n 0 2px 4px rgba(0, 0, 0, 0.06);\n transition: all 0.2s ease-in-out; /* Added for smooth deselection */\n}\n\n/* Styling for the focused suggestion card image */\n.suggestion-card-image-focus {\n width: 6.5rem;\n height: 6.5rem;\n object-fit: cover;\n border-radius: 1rem;\n margin-top: 0.5rem;\n /* Highlight styles */\n border: 2px solid #ff6f61; /* Thicker, themed border */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1), /* Base shadow for depth */\n 0 0 12px rgba(255, 111, 97, 0.6); /* Orange glow */\n transform: scale(1.05); /* Slightly scale up */\n transition: all 0.2s ease-in-out; /* Smooth transition for focus */\n}\n\n/* Styling for the suggestion card container in the sidebar */\n.suggestion-card {\n border: 1px solid #dee2e6; /* Same default border as haiku-card */\n border-top: 10px solid #ff6f61; /* Same orange top border */\n border-radius: 0.375rem; 
/* Default rounded-md */\n /* Note: background-color is set by Tailwind bg-gray-100 */\n /* Other styles like padding, margin, flex are handled by Tailwind */\n}\n\n.suggestion-image-container {\n display: flex;\n gap: 1rem;\n justify-content: space-between;\n width: 100%;\n height: 6.5rem;\n}\n\n/* Mobile responsive styles - matches useMobileView hook breakpoint */\n@media (max-width: 767px) {\n .haiku-card {\n padding: 1rem 1.5rem; /* Reduced from 2.5rem 3rem */\n min-width: auto; /* Remove min-width constraint */\n max-width: 100%; /* Full width on mobile */\n margin: 1rem auto; /* Reduced margin */\n }\n\n .haiku-card-image {\n width: 5.625rem; /* 90px - smaller on mobile */\n height: 5.625rem; /* 90px - smaller on mobile */\n }\n\n .suggestion-card-image {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n\n .suggestion-card-image-focus {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🪶 Tool-Based Generative UI Haiku Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **tool-based generative UI** capabilities:\n\n1. **Frontend Rendering of Tool Calls**: Backend tool calls are automatically\n rendered in the UI\n2. **Dynamic UI Generation**: The UI updates in real-time as the agent generates\n content\n3. **Elegant Content Presentation**: Complex structured data (haikus) are\n beautifully displayed\n\n## How to Interact\n\nChat with your Copilot and ask for haikus about different topics:\n\n- \"Create a haiku about nature\"\n- \"Write a haiku about technology\"\n- \"Generate a haiku about the changing seasons\"\n- \"Make a humorous haiku about programming\"\n\nEach request will trigger the agent to generate a haiku and display it in a\nvisually appealing card format in the UI.\n\n## ✨ Tool-Based Generative UI in Action\n\n**What's happening technically:**\n\n- The agent processes your request and determines it should create a haiku\n- It calls a backend tool that returns structured haiku data\n- CopilotKit automatically renders this tool call in the frontend\n- The rendering is handled by the registered tool component in your React app\n- No manual state management is required to display the results\n\n**What you'll see in this demo:**\n\n- As you request a haiku, a beautifully formatted card appears in the UI\n- The haiku follows the traditional 5-7-5 syllable structure\n- Each haiku is presented with consistent styling\n- Multiple haikus can be generated in sequence\n- The UI adapts to display each new piece of content\n\nThis pattern of tool-based generative UI can be extended to create any kind of\ndynamic content - from data visualizations to interactive components, all driven\nby your Copilot's tool calls!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: 
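The haiku README above says backend tool calls are "rendered by the registered tool component" in the React app. A minimal sketch of that registration, assuming CopilotKit's render-only form of `useCopilotAction` (a `render` with no `handler`); `HaikuToolRenderer` is a hypothetical component, and the `args` fields mirror the `generate_haiku` parameters used in this demo's `page.tsx`.

```tsx
"use client";
import { useCopilotAction } from "@copilotkit/react-core";

// Hypothetical render-only registration: no handler runs in the browser;
// this component only renders the agent's generate_haiku tool call.
export function HaikuToolRenderer() {
  useCopilotAction({
    name: "generate_haiku",
    render: ({ args }: { args: Partial<{ japanese: string[]; english: string[] }> }) => {
      // Arguments stream in while the call is being generated,
      // so every field must be treated as possibly undefined.
      const japanese = args.japanese ?? [];
      const english = args.english ?? [];
      return (
        <div className="haiku-card">
          {japanese.map((line, i) => (
            <p key={i}>
              {line} <em>{english[i] ?? ""}</em>
            </p>
          ))}
        </div>
      );
    },
  });
  return null;
}
```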
List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * An example demonstrating tool-based generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\n\nexport const AgentStateAnnotation = Annotation.Root({\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n const modelWithTools = model.bindTools(\n [\n ...state.tools || []\n ],\n { parallel_tool_calls: false }\n );\n\n const systemMessage = new SystemMessage({\n content: 'Help the user with writing Haikus. 
If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.'\n });\n\n const response = await modelWithTools.invoke([\n systemMessage,\n ...state.messages,\n ], config);\n\n return new Command({\n goto: END,\n update: {\n messages: [response]\n }\n });\n}\n\nconst workflow = new StateGraph(AgentStateAnnotation);\nworkflow.addNode(\"chat_node\", chatNode);\n\nworkflow.addEdge(START, \"chat_node\");\n\nexport const toolBasedGenerativeUiGraph = workflow.compile();", - "language": "ts", - "type": "file" - } - ], - "langgraph::agentic_chat_reasoning": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { ChevronDown } from \"lucide-react\";\nimport { Button } from \"@/components/ui/button\";\nimport {\n DropdownMenu,\n DropdownMenuContent,\n DropdownMenuItem,\n DropdownMenuLabel,\n DropdownMenuSeparator,\n DropdownMenuTrigger,\n} from \"@/components/ui/dropdown-menu\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n model: string;\n}\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"agentic_chat_reasoning\",\n initialState: {\n model: \"OpenAI\",\n },\n });\n\n // Initialize model if not set\n const selectedModel = agentState?.model || \"OpenAI\";\n\n const handleModelChange = (model: string) => {\n setAgentState({ model });\n };\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n },\n });\n\n return (\n
\n {/* Reasoning Model Dropdown */}\n
\n
\n
\n \n Reasoning Model:\n \n \n \n \n \n \n Select Model\n \n handleModelChange(\"OpenAI\")}>\n OpenAI\n \n handleModelChange(\"Anthropic\")}>\n Anthropic\n \n handleModelChange(\"Gemini\")}>\n Gemini\n \n \n \n
\n
\n
\n\n {/* Chat Container */}\n
\n
\n \n
\n
\n
\n    );\n};\n\nexport default AgenticChat;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n  border-bottom-left-radius: 0.75rem;\n  border-bottom-right-radius: 0.75rem;\n  border-top-left-radius: 0.75rem;\n  border-top-right-radius: 0.75rem;\n  border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n  background-color: #fff !important;\n}\n ", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤖 Agentic Chat with Reasoning\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interact with your UI\n   by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend are automatically\n   discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass AgentState(MessagesState):\n    \"\"\"\n    State of our graph.\n    \"\"\"\n    tools: List[Any]\n    model: str\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n    \"\"\"\n    Standard chat node based on the ReAct design pattern. 
It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n\n # 1. Define the model\n model = ChatOpenAI(model=\"o3\")\n if state[\"model\"] == \"Anthropic\":\n model = ChatAnthropic(\n model=\"claude-sonnet-4-20250514\",\n thinking={\"type\": \"enabled\", \"budget_tokens\": 2000}\n )\n elif state[\"model\"] == \"Gemini\":\n model = ChatGoogleGenerativeAI(model=\"gemini-2.5-pro\", thinking_budget=1024)\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()", - "language": "python", - "type": "file" - }, - {} - ], - "langgraph::subgraphs": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SubgraphsProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\n// Travel planning data types\ninterface Flight {\n airline: string;\n arrival: string;\n departure: string;\n duration: string;\n price: string;\n}\n\ninterface Hotel {\n location: string;\n name: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n description: string;\n location: string;\n type: string;\n}\n\ninterface Itinerary {\n hotel?: Hotel;\n flight?: Flight;\n experiences?: Experience[];\n}\n\ntype AvailableAgents = 'flights' | 'hotels' | 'experiences' | 'supervisor'\n\ninterface TravelAgentState {\n experiences: Experience[],\n flights: Flight[],\n hotels: Hotel[],\n itinerary: Itinerary\n planning_step: string\n active_agent: AvailableAgents\n}\n\nconst INITIAL_STATE: TravelAgentState = {\n 
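The reasoning agent above switches chat models on the shared `model` key. Sketched below is a TypeScript analogue, assuming the JS integrations accept the corresponding options: the model IDs are copied from `agent.py`, the Anthropic `thinking` block mirrors the Python call, and Gemini's thinking budget is omitted because its JS parameter name is not shown in this file.

```ts
import { ChatOpenAI } from "@langchain/openai";
import { ChatAnthropic } from "@langchain/anthropic";
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

// Pick a reasoning-capable model from the shared `model` state key,
// mirroring the branch in agent.py above.
function pickReasoningModel(model: string) {
  switch (model) {
    case "Anthropic":
      // `thinking` mirrors the Python kwargs; assumed to pass through as-is.
      return new ChatAnthropic({
        model: "claude-sonnet-4-20250514",
        thinking: { type: "enabled", budget_tokens: 2000 },
      });
    case "Gemini":
      // agent.py also sets thinking_budget=1024; the JS option name is
      // not shown in this file, so it is left out of the sketch.
      return new ChatGoogleGenerativeAI({ model: "gemini-2.5-pro" });
    default:
      // "OpenAI" falls through to o3, as in the Python agent.
      return new ChatOpenAI({ model: "o3" });
  }
}
```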
itinerary: {},\n experiences: [],\n flights: [],\n hotels: [],\n planning_step: \"start\",\n active_agent: 'supervisor'\n};\n\ninterface InterruptEvent {\n message: string;\n options: TAgent extends 'flights' ? Flight[] : TAgent extends 'hotels' ? Hotel[] : never,\n recommendation: TAgent extends 'flights' ? Flight : TAgent extends 'hotels' ? Hotel : never,\n agent: TAgent\n}\n\nfunction InterruptHumanInTheLoop({\n event,\n resolve,\n}: {\n event: { value: InterruptEvent };\n resolve: (value: string) => void;\n}) {\n const { message, options, agent, recommendation } = event.value;\n\n // Format agent name with emoji\n const formatAgentName = (agent: string) => {\n switch (agent) {\n case 'flights': return 'Flights Agent';\n case 'hotels': return 'Hotels Agent';\n case 'experiences': return 'Experiences Agent';\n default: return `${agent} Agent`;\n }\n };\n\n const handleOptionSelect = (option: any) => {\n resolve(JSON.stringify(option));\n };\n\n return (\n
\n

{formatAgentName(agent)}: {message}

\n\n
\n {options.map((opt, idx) => {\n if ('airline' in opt) {\n const isRecommended = (recommendation as Flight).airline === opt.airline;\n // Flight options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.airline}\n {opt.price}\n
\n
\n {opt.departure} → {opt.arrival}\n
\n
\n {opt.duration}\n
\n \n );\n }\n const isRecommended = (recommendation as Hotel).name === opt.name;\n\n // Hotel options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.name}\n {opt.rating}\n
\n
\n 📍 {opt.location}\n
\n
\n {opt.price_per_night}\n
\n \n );\n })}\n
\n
\n )\n}\n\nexport default function Subgraphs({ params }: SubgraphsProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight);\n\n const chatTitle = 'Travel Planning Assistant';\n const chatDescription = 'Plan your perfect trip with AI specialists';\n const initialLabel = 'Hi! ✈️ Ready to plan an amazing trip? Try saying \"Plan a trip to Paris\" or \"Find me flights to Tokyo\"';\n\n return (\n \n
\n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight);\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nfunction TravelPlanner() {\n const { isMobile } = useMobileView();\n const { state: agentState, nodeName } = useCoAgent({\n name: \"subgraphs\",\n initialState: INITIAL_STATE,\n config: {\n streamSubgraphs: true,\n }\n });\n\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n\n // Current itinerary strip\n const ItineraryStrip = () => {\n const selectedFlight = agentState?.itinerary?.flight;\n const selectedHotel = agentState?.itinerary?.hotel;\n const hasExperiences = agentState?.experiences?.length > 0;\n\n return (\n
\n
Current Itinerary:
\n
\n
\n 📍\n Amsterdam → San Francisco\n
\n {selectedFlight && (\n
\n ✈️\n {selectedFlight.airline} - {selectedFlight.price}\n
\n )}\n {selectedHotel && (\n
\n 🏨\n {selectedHotel.name}\n
\n )}\n {hasExperiences && (\n
\n 🎯\n {agentState.experiences.length} experiences planned\n
\n )}\n
\n
\n );\n };\n\n // Compact agent status\n const AgentStatus = () => {\n let activeAgent = 'supervisor';\n if (nodeName?.includes('flights_agent')) {\n activeAgent = 'flights';\n }\n if (nodeName?.includes('hotels_agent')) {\n activeAgent = 'hotels';\n }\n if (nodeName?.includes('experiences_agent')) {\n activeAgent = 'experiences';\n }\n return (\n
\n
Active Agent:
\n
\n
\n 👨‍💼\n Supervisor\n
\n
\n ✈️\n Flights\n
\n
\n 🏨\n Hotels\n
\n
\n 🎯\n Experiences\n
\n
\n
\n )\n };\n\n // Travel details component\n const TravelDetails = () => (\n
\n
\n

✈️ Flight Options

\n
\n {agentState?.flights?.length > 0 ? (\n agentState.flights.map((flight, index) => (\n
\n {flight.airline}:\n {flight.departure} → {flight.arrival} ({flight.duration}) - {flight.price}\n
\n ))\n ) : (\n

No flights found yet

\n )}\n {agentState?.itinerary?.flight && (\n
\n Selected: {agentState.itinerary.flight.airline} - {agentState.itinerary.flight.price}\n
\n )}\n
\n
\n\n
\n

🏨 Hotel Options

\n
\n {agentState?.hotels?.length > 0 ? (\n agentState.hotels.map((hotel, index) => (\n
\n {hotel.name}:\n {hotel.location} - {hotel.price_per_night} ({hotel.rating})\n
\n ))\n ) : (\n

No hotels found yet

\n )}\n {agentState?.itinerary?.hotel && (\n
\n Selected: {agentState.itinerary.hotel.name} - {agentState.itinerary.hotel.price_per_night}\n
\n )}\n
\n
\n\n
\n

🎯 Experiences

\n
\n {agentState?.experiences?.length > 0 ? (\n agentState.experiences.map((experience, index) => (\n
\n
{experience.name}
\n
{experience.type}
\n
{experience.description}
\n
Location: {experience.location}
\n
\n ))\n ) : (\n

No experiences planned yet

\n )}\n
\n
\n
\n );\n\n return (\n
\n \n \n \n
\n );\n}", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": "/* Travel Planning Subgraphs Demo Styles */\n/* Essential styles that cannot be achieved with Tailwind classes */\n\n/* Main container with CopilotSidebar layout */\n.travel-planner-container {\n min-height: 100vh;\n padding: 2rem;\n background: linear-gradient(135deg, #f0f9ff 0%, #e0f2fe 100%);\n}\n\n/* Travel content area styles */\n.travel-content {\n max-width: 1200px;\n margin: 0 auto;\n padding: 0 1rem;\n display: flex;\n flex-direction: column;\n gap: 1rem;\n}\n\n/* Itinerary strip */\n.itinerary-strip {\n background: white;\n border-radius: 0.5rem;\n padding: 1rem;\n border: 1px solid #e5e7eb;\n box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);\n}\n\n.itinerary-label {\n font-size: 0.875rem;\n font-weight: 600;\n color: #6b7280;\n margin-bottom: 0.5rem;\n}\n\n.itinerary-items {\n display: flex;\n flex-wrap: wrap;\n gap: 1rem;\n}\n\n.itinerary-item {\n display: flex;\n align-items: center;\n gap: 0.5rem;\n padding: 0.5rem 0.75rem;\n background: #f9fafb;\n border-radius: 0.375rem;\n font-size: 0.875rem;\n}\n\n.item-icon {\n font-size: 1rem;\n}\n\n/* Agent status */\n.agent-status {\n background: white;\n border-radius: 0.5rem;\n padding: 1rem;\n border: 1px solid #e5e7eb;\n box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);\n}\n\n.status-label {\n font-size: 0.875rem;\n font-weight: 600;\n color: #6b7280;\n margin-bottom: 0.5rem;\n}\n\n.agent-indicators {\n display: flex;\n gap: 0.75rem;\n}\n\n.agent-indicator {\n display: flex;\n align-items: center;\n gap: 0.5rem;\n padding: 0.5rem 0.75rem;\n border-radius: 0.375rem;\n font-size: 0.875rem;\n background: #f9fafb;\n border: 1px solid #e5e7eb;\n transition: all 0.2s ease;\n}\n\n.agent-indicator.active {\n background: #dbeafe;\n border-color: #3b82f6;\n color: #1d4ed8;\n box-shadow: 0 0 0 2px rgba(59, 130, 246, 0.1);\n}\n\n/* Travel details sections */\n.travel-details {\n background: white;\n border-radius: 0.5rem;\n padding: 1rem;\n border: 1px solid #e5e7eb;\n box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);\n display: grid;\n gap: 1rem;\n}\n\n.details-section h4 {\n font-size: 1rem;\n font-weight: 600;\n color: #1f2937;\n margin-bottom: 0.5rem;\n display: flex;\n align-items: center;\n gap: 0.5rem;\n}\n\n.detail-items {\n display: flex;\n flex-direction: column;\n gap: 0.5rem;\n}\n\n.detail-item {\n padding: 0.5rem;\n background: #f9fafb;\n border-radius: 0.25rem;\n font-size: 0.875rem;\n display: flex;\n justify-content: space-between;\n}\n\n.detail-item strong {\n color: #6b7280;\n font-weight: 500;\n}\n\n.detail-tips {\n padding: 0.5rem;\n background: #eff6ff;\n border-radius: 0.25rem;\n font-size: 0.75rem;\n color: #1d4ed8;\n}\n\n.activity-item {\n padding: 0.75rem;\n background: #f0f9ff;\n border-radius: 0.25rem;\n border-left: 2px solid #0ea5e9;\n}\n\n.activity-name {\n font-weight: 600;\n color: #1f2937;\n font-size: 0.875rem;\n margin-bottom: 0.25rem;\n}\n\n.activity-category {\n font-size: 0.75rem;\n color: #0ea5e9;\n margin-bottom: 0.25rem;\n}\n\n.activity-description {\n color: #4b5563;\n font-size: 0.75rem;\n margin-bottom: 0.25rem;\n}\n\n.activity-meta {\n font-size: 0.75rem;\n color: #6b7280;\n}\n\n.no-activities {\n text-align: center;\n color: #9ca3af;\n font-style: italic;\n padding: 1rem;\n font-size: 0.875rem;\n}\n\n/* Interrupt UI for Chat Sidebar (Generative UI) */\n.interrupt-container {\n display: flex;\n flex-direction: column;\n gap: 1rem;\n max-width: 100%;\n padding-top: 34px;\n}\n\n.interrupt-header {\n margin-bottom: 
0.5rem;\n}\n\n.agent-name {\n font-size: 0.875rem;\n font-weight: 600;\n color: #1f2937;\n margin: 0 0 0.25rem 0;\n}\n\n.agent-message {\n font-size: 0.75rem;\n color: #6b7280;\n margin: 0;\n line-height: 1.4;\n}\n\n.interrupt-options {\n padding: 0.75rem;\n display: flex;\n flex-direction: column;\n gap: 0.5rem;\n max-height: 300px;\n overflow-y: auto;\n}\n\n.option-card {\n display: flex;\n flex-direction: column;\n gap: 0.25rem;\n padding: 0.75rem;\n background: #f9fafb;\n border: 1px solid #e5e7eb;\n border-radius: 0.5rem;\n cursor: pointer;\n transition: all 0.2s ease;\n text-align: left;\n position: relative;\n min-height: auto;\n}\n\n.option-card:hover {\n background: #f3f4f6;\n border-color: #d1d5db;\n}\n\n.option-card:active {\n background: #e5e7eb;\n}\n\n.option-card.recommended {\n background: #eff6ff;\n border-color: #3b82f6;\n box-shadow: 0 0 0 1px rgba(59, 130, 246, 0.1);\n}\n\n.option-card.recommended:hover {\n background: #dbeafe;\n}\n\n.recommendation-badge {\n position: absolute;\n top: -2px;\n right: -2px;\n background: #3b82f6;\n color: white;\n font-size: 0.625rem;\n padding: 0.125rem 0.375rem;\n border-radius: 0.75rem;\n font-weight: 500;\n}\n\n.option-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 0.125rem;\n}\n\n.airline-name, .hotel-name {\n font-weight: 600;\n font-size: 0.8rem;\n color: #1f2937;\n}\n\n.price, .rating {\n font-weight: 600;\n font-size: 0.75rem;\n color: #059669;\n}\n\n.route-info, .location-info {\n font-size: 0.7rem;\n color: #6b7280;\n margin-bottom: 0.125rem;\n}\n\n.duration-info, .price-info {\n font-size: 0.7rem;\n color: #9ca3af;\n}\n\n/* Mobile responsive adjustments */\n@media (max-width: 768px) {\n .travel-planner-container {\n padding: 0.5rem;\n padding-bottom: 120px; /* Space for mobile chat */\n }\n \n .travel-content {\n padding: 0;\n gap: 0.75rem;\n }\n \n .itinerary-items {\n flex-direction: column;\n gap: 0.5rem;\n }\n \n .agent-indicators {\n flex-direction: column;\n gap: 0.5rem;\n }\n \n .agent-indicator {\n padding: 0.75rem;\n }\n \n .travel-details {\n padding: 0.75rem;\n }\n\n .interrupt-container {\n padding: 0.5rem;\n }\n\n .option-card {\n padding: 0.625rem;\n }\n\n .interrupt-options {\n max-height: 250px;\n }\n}", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# LangGraph Subgraphs Demo: Travel Planning Assistant ✈️\n\nThis demo showcases **LangGraph subgraphs** through an interactive travel planning assistant. Watch as specialized AI agents collaborate to plan your perfect trip!\n\n## What are LangGraph Subgraphs? 🤖\n\n**Subgraphs** are the key to building modular, scalable AI systems in LangGraph. 
A subgraph is essentially \"a graph that is used as a node in another graph\" - enabling powerful encapsulation and reusability.\nFor more info, check out the [LangGraph docs](https://langchain-ai.github.io/langgraph/concepts/subgraphs/).\n\n### Key Concepts\n\n- **Encapsulation**: Each subgraph handles a specific domain with its own expertise\n- **Modularity**: Subgraphs can be developed, tested, and maintained independently \n- **Reusability**: The same subgraph can be used across multiple parent graphs\n- **State Communication**: Subgraphs can share state or use different schemas with transformations\n\n## Demo Architecture 🗺️\n\nThis travel planner demonstrates **supervisor-coordinated subgraphs** with **human-in-the-loop** decision making:\n\n### Parent Graph: Travel Supervisor\n- **Role**: Coordinates the travel planning process and routes to specialized agents\n- **State Management**: Maintains a shared itinerary object across all subgraphs\n- **Intelligence**: Determines what's needed and when each agent should be called\n\n### Subgraph 1: ✈️ Flights Agent\n- **Specialization**: Finding and booking flight options\n- **Process**: Presents flight options from Amsterdam to San Francisco with recommendations\n- **Interaction**: Uses interrupts to let users choose their preferred flight\n- **Data**: Static flight options (KLM, United) with pricing and duration\n\n### Subgraph 2: 🏨 Hotels Agent \n- **Specialization**: Finding and booking accommodation\n- **Process**: Shows hotel options in San Francisco with different price points\n- **Interaction**: Uses interrupts for user to select their preferred hotel\n- **Data**: Static hotel options (Hotel Zephyr, Ritz-Carlton, Hotel Zoe)\n\n### Subgraph 3: 🎯 Experiences Agent\n- **Specialization**: Curating restaurants and activities\n- **Process**: AI-powered recommendations based on selected flights and hotels\n- **Features**: Combines 2 restaurants and 2 activities with location-aware suggestions\n- **Data**: Static experiences (Pier 39, Golden Gate Bridge, Swan Oyster Depot, Tartine Bakery)\n\n## How It Works 🔄\n\n1. **User Request**: \"Help me plan a trip to San Francisco\"\n2. **Supervisor Analysis**: Determines what travel components are needed\n3. **Sequential Routing**: Routes to each agent in logical order:\n - First: Flights Agent (get transportation sorted)\n - Then: Hotels Agent (book accommodation) \n - Finally: Experiences Agent (plan activities)\n4. **Human Decisions**: Each agent presents options and waits for user choice via interrupts\n5. **State Building**: Selected choices are stored in the shared itinerary object\n6. **Completion**: All agents report back to supervisor for final coordination\n\n## State Communication Patterns 📊\n\n### Shared State Schema\nAll subgraph agents share and contribute to a common state object. When any agent updates the shared state, these changes are immediately reflected in the frontend through real-time syncing. 
This ensures that:\n\n- **Flight selections** from the Flights Agent are visible to subsequent agents\n- **Hotel choices** influence the Experiences Agent's recommendations \n- **All updates** are synchronized with the frontend UI in real-time\n- **State persistence** maintains the travel itinerary throughout the workflow\n\n### Human-in-the-Loop Pattern\nTwo of the specialist agents use **interrupts** to pause execution and gather user preferences:\n\n- **Flights Agent**: Presents options → interrupt → waits for selection → continues\n- **Hotels Agent**: Shows hotels → interrupt → waits for choice → continues\n\n## Try These Examples! 💡\n\n### Getting Started\n- \"Help me plan a trip to San Francisco\"\n- \"I want to visit San Francisco from Amsterdam\"\n- \"Plan my travel itinerary\"\n\n### During the Process\nWhen the Flights Agent presents options:\n- Choose between KLM ($650, 11h 30m) or United ($720, 12h 15m)\n\nWhen the Hotels Agent shows accommodations:\n- Select from Hotel Zephyr, The Ritz-Carlton, or Hotel Zoe\n\nThe Experiences Agent will then provide tailored recommendations based on your choices!\n\n## Frontend Capabilities 👁️\n\n- **Human-in-the-loop with interrupts** from subgraphs for user decision making\n- **Subgraphs detection and streaming** to show which agent is currently active\n- **Real-time state updates** as the shared itinerary is built across agents\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\nThe supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n\"\"\"\n\nfrom typing import Dict, List, Any, Optional, Annotated, Union\nfrom dataclasses import dataclass\nimport json\nimport os\nfrom pydantic import BaseModel, Field\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\n\n# OpenAI imports\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage, AIMessage\n\ndef create_interrupt(message: str, options: List[Any], recommendation: Any, agent: str):\n return interrupt({\n \"message\": message,\n \"options\": options,\n \"recommendation\": recommendation,\n \"agent\": agent,\n })\n\n# State schema for travel planning\n@dataclass\nclass Flight:\n airline: str\n departure: str\n arrival: str\n price: str\n duration: str\n\n@dataclass\nclass Hotel:\n name: str\n location: str\n price_per_night: str\n rating: str\n\n@dataclass\nclass Experience:\n name: str\n type: str # \"restaurant\" or \"activity\"\n description: str\n location: str\n\ndef merge_itinerary(left: Union[dict, None] = None, right: Union[dict, None] = None) -> dict:\n \"\"\"Custom reducer to merge itinerary updates.\"\"\"\n if not left:\n left = {}\n if not right:\n right = {}\n\n return {**left, **right}\n\nclass TravelAgentState(MessagesState):\n \"\"\"Shared state for the travel agent system\"\"\"\n # Travel request details\n origin: str = \"\"\n destination: str = \"\"\n\n # Results from each agent\n flights: List[Flight] = None\n hotels: List[Hotel] = None\n experiences: List[Experience] = None\n\n itinerary: Annotated[dict, merge_itinerary] = None\n\n # Tools available to all agents\n tools: List[Any] = None\n\n # Supervisor routing\n next_agent: Optional[str] = None\n\n# Static data for demonstration\nSTATIC_FLIGHTS 
= [\n Flight(\"KLM\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$650\", \"11h 30m\"),\n Flight(\"United\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$720\", \"12h 15m\")\n]\n\nSTATIC_HOTELS = [\n Hotel(\"Hotel Zephyr\", \"Fisherman's Wharf\", \"$280/night\", \"4.2 stars\"),\n Hotel(\"The Ritz-Carlton\", \"Nob Hill\", \"$550/night\", \"4.8 stars\"),\n Hotel(\"Hotel Zoe\", \"Union Square\", \"$320/night\", \"4.4 stars\")\n]\n\nSTATIC_EXPERIENCES = [\n Experience(\"Pier 39\", \"activity\", \"Iconic waterfront destination with shops and sea lions\", \"Fisherman's Wharf\"),\n Experience(\"Golden Gate Bridge\", \"activity\", \"World-famous suspension bridge with stunning views\", \"Golden Gate\"),\n Experience(\"Swan Oyster Depot\", \"restaurant\", \"Historic seafood counter serving fresh oysters\", \"Polk Street\"),\n Experience(\"Tartine Bakery\", \"restaurant\", \"Artisanal bakery famous for bread and pastries\", \"Mission District\")\n]\n\n# Flights finder subgraph\nasync def flights_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds flight options\"\"\"\n\n # Simulate flight search with static data\n flights = STATIC_FLIGHTS\n\n selected_flight = state.get('itinerary', {}).get('flight', None)\n if not selected_flight:\n selected_flight = create_interrupt(\n message=f\"\"\"\n Found {len(flights)} flight options from {state.get('origin', 'Amsterdam')} to {state.get('destination', 'San Francisco')}.\n I recommend choosing the flight by {flights[0].airline} since it's known to be on time and cheaper.\n \"\"\",\n options=flights,\n recommendation=flights[0],\n agent=\"flights\"\n )\n\n if isinstance(selected_flight, str):\n selected_flight = json.loads(selected_flight)\n return Command(\n goto=END,\n update={\n \"flights\": flights,\n \"itinerary\": {\n \"flight\": selected_flight\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Flights Agent: Great. I'll book you the {selected_flight[\"airline\"]} flight from {selected_flight[\"departure\"]} to {selected_flight[\"arrival\"]}.\"\n }]\n }\n )\n\n# Hotels finder subgraph\nasync def hotels_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds hotel options\"\"\"\n\n # Simulate hotel search with static data\n hotels = STATIC_HOTELS\n selected_hotel = state.get('itinerary', {}).get('hotel', None)\n if not selected_hotel:\n selected_hotel = create_interrupt(\n message=f\"\"\"\n Found {len(hotels)} accommodation options in {state.get('destination', 'San Francisco')}.\n I recommend choosing the {hotels[2].name} since it strikes the balance between rating, price, and location.\n \"\"\",\n options=hotels,\n recommendation=hotels[2],\n agent=\"hotels\"\n )\n\n if isinstance(selected_hotel, str):\n selected_hotel = json.loads(selected_hotel)\n return Command(\n goto=END,\n update={\n \"hotels\": hotels,\n \"itinerary\": {\n \"hotel\": selected_hotel\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Hotels Agent: Excellent choice! 
You'll like {selected_hotel[\"name\"]}.\"\n }]\n }\n )\n\n# Experiences finder subgraph\nasync def experiences_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds restaurant and activity recommendations\"\"\"\n\n # Filter experiences (2 restaurants, 2 activities)\n restaurants = [exp for exp in STATIC_EXPERIENCES if exp.type == \"restaurant\"][:2]\n activities = [exp for exp in STATIC_EXPERIENCES if exp.type == \"activity\"][:2]\n experiences = restaurants + activities\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n itinerary = state.get(\"itinerary\", {})\n\n system_prompt = f\"\"\"\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now, is to let the user know of your findings.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flight chosen: {itinerary.get(\"hotel\", None)}\n - Hotel chosen: {itinerary.get(\"hotel\", None)}\n - activities found: {activities}\n - restaurants found: {restaurants}\n \"\"\"\n\n # Get supervisor decision\n response = await model.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"experiences\": experiences,\n \"messages\": state[\"messages\"] + [response]\n }\n )\n\nclass SupervisorResponseFormatter(BaseModel):\n \"\"\"Always use this tool to structure your response to the user.\"\"\"\n answer: str = Field(description=\"The answer to the user\")\n next_agent: str | None = Field(description=\"The agent to go to. Not required if you do not want to route to another agent.\")\n\n# Supervisor agent\nasync def supervisor_agent(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Main supervisor that coordinates all subgraphs\"\"\"\n\n itinerary = state.get(\"itinerary\", {})\n\n # Check what's already completed\n has_flights = itinerary.get(\"flight\", None) is not None\n has_hotels = itinerary.get(\"hotel\", None) is not None\n has_experiences = state.get(\"experiences\", None) is not None\n\n system_prompt = f\"\"\"\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flights found: {has_flights}\n - Hotels found: {has_hotels}\n - Experiences found: {has_experiences}\n - Itinerary (Things that the user has already confirmed selection on): {json.dumps(itinerary, indent=2)}\n \n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options \n - experiences_agent: Finds restaurant and activity recommendations\n - {END}: Mark task as complete when all information is gathered\n \n You must route to the appropriate agent based on what's missing. 
Once all agents have completed their tasks, route to '{END}'.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Bind the routing tool\n model_with_tools = model.bind_tools(\n [SupervisorResponseFormatter],\n parallel_tool_calls=False,\n )\n\n # Get supervisor decision\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls for routing\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n if isinstance(tool_call, dict):\n tool_call_args = tool_call[\"args\"]\n else:\n tool_call_args = tool_call.args\n\n # next_agent is optional in the tool schema, so use .get to avoid a KeyError\n next_agent = tool_call_args.get(\"next_agent\")\n\n # Add tool response\n tool_response = {\n \"role\": \"tool\",\n \"content\": f\"Routing to {next_agent} and providing the answer\",\n \"tool_call_id\": tool_call.id if hasattr(tool_call, 'id') else tool_call[\"id\"]\n }\n\n messages = messages + [tool_response, AIMessage(content=tool_call_args[\"answer\"])]\n\n if next_agent is not None:\n return Command(goto=next_agent)\n\n # Fallback if no tool call\n return Command(\n goto=END,\n update={\"messages\": messages}\n )\n\n# Create subgraphs\nflights_graph = StateGraph(TravelAgentState)\nflights_graph.add_node(\"flights_agent_chat_node\", flights_finder)\nflights_graph.set_entry_point(\"flights_agent_chat_node\")\nflights_graph.add_edge(START, \"flights_agent_chat_node\")\nflights_graph.add_edge(\"flights_agent_chat_node\", END)\nflights_subgraph = flights_graph.compile()\n\nhotels_graph = StateGraph(TravelAgentState)\nhotels_graph.add_node(\"hotels_agent_chat_node\", hotels_finder)\nhotels_graph.set_entry_point(\"hotels_agent_chat_node\")\nhotels_graph.add_edge(START, \"hotels_agent_chat_node\")\nhotels_graph.add_edge(\"hotels_agent_chat_node\", END)\nhotels_subgraph = hotels_graph.compile()\n\nexperiences_graph = StateGraph(TravelAgentState)\nexperiences_graph.add_node(\"experiences_agent_chat_node\", experiences_finder)\nexperiences_graph.set_entry_point(\"experiences_agent_chat_node\")\nexperiences_graph.add_edge(START, \"experiences_agent_chat_node\")\nexperiences_graph.add_edge(\"experiences_agent_chat_node\", END)\nexperiences_subgraph = experiences_graph.compile()\n\n# Main supervisor workflow\nworkflow = StateGraph(TravelAgentState)\n\n# Add supervisor and subgraphs as nodes\nworkflow.add_node(\"supervisor\", supervisor_agent)\nworkflow.add_node(\"flights_agent\", flights_subgraph)\nworkflow.add_node(\"hotels_agent\", hotels_subgraph)\nworkflow.add_node(\"experiences_agent\", experiences_subgraph)\n\n# Set entry point\nworkflow.set_entry_point(\"supervisor\")\nworkflow.add_edge(START, \"supervisor\")\n\n# Add edges back to supervisor after each subgraph\nworkflow.add_edge(\"flights_agent\", \"supervisor\")\nworkflow.add_edge(\"hotels_agent\", \"supervisor\")\nworkflow.add_edge(\"experiences_agent\", \"supervisor\")\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, 
don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * A travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\n * The supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage, AIMessage, ToolMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { \n Annotation, \n MessagesAnnotation, \n StateGraph, \n Command, \n START, \n END, \n interrupt \n} from \"@langchain/langgraph\";\n\n// Travel data interfaces\ninterface Flight {\n airline: string;\n departure: string;\n arrival: string;\n price: string;\n duration: string;\n}\n\ninterface Hotel {\n name: string;\n location: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n type: \"restaurant\" | \"activity\";\n description: string;\n location: string;\n}\n\ninterface Itinerary {\n flight?: Flight;\n hotel?: Hotel;\n}\n\n// Custom reducer to merge itinerary updates\nfunction mergeItinerary(left: Itinerary | null, right?: Itinerary | null): Itinerary {\n if (!left) left = {};\n if (!right) right = {};\n return { ...left, ...right };\n}\n\n// State annotation for travel agent system\nexport const TravelAgentStateAnnotation = Annotation.Root({\n origin: Annotation(),\n destination: Annotation(),\n flights: Annotation(),\n hotels: Annotation(),\n experiences: Annotation(),\n\n // Itinerary with custom merger\n itinerary: Annotation({\n reducer: mergeItinerary,\n default: () => null\n }),\n\n // Tools available to all agents\n tools: Annotation({\n reducer: (x, y) => y ?? 
x,\n default: () => []\n }),\n\n // Supervisor routing\n next_agent: Annotation(),\n ...MessagesAnnotation.spec,\n});\n\nexport type TravelAgentState = typeof TravelAgentStateAnnotation.State;\n\n// Static data for demonstration\nconst STATIC_FLIGHTS: Flight[] = [\n { airline: \"KLM\", departure: \"Amsterdam (AMS)\", arrival: \"San Francisco (SFO)\", price: \"$650\", duration: \"11h 30m\" },\n { airline: \"United\", departure: \"Amsterdam (AMS)\", arrival: \"San Francisco (SFO)\", price: \"$720\", duration: \"12h 15m\" }\n];\n\nconst STATIC_HOTELS: Hotel[] = [\n { name: \"Hotel Zephyr\", location: \"Fisherman's Wharf\", price_per_night: \"$280/night\", rating: \"4.2 stars\" },\n { name: \"The Ritz-Carlton\", location: \"Nob Hill\", price_per_night: \"$550/night\", rating: \"4.8 stars\" },\n { name: \"Hotel Zoe\", location: \"Union Square\", price_per_night: \"$320/night\", rating: \"4.4 stars\" }\n];\n\nconst STATIC_EXPERIENCES: Experience[] = [\n { name: \"Pier 39\", type: \"activity\", description: \"Iconic waterfront destination with shops and sea lions\", location: \"Fisherman's Wharf\" },\n { name: \"Golden Gate Bridge\", type: \"activity\", description: \"World-famous suspension bridge with stunning views\", location: \"Golden Gate\" },\n { name: \"Swan Oyster Depot\", type: \"restaurant\", description: \"Historic seafood counter serving fresh oysters\", location: \"Polk Street\" },\n { name: \"Tartine Bakery\", type: \"restaurant\", description: \"Artisanal bakery famous for bread and pastries\", location: \"Mission District\" }\n];\n\nfunction createInterrupt(message: string, options: any[], recommendation: any, agent: string) {\n return interrupt({\n message,\n options,\n recommendation,\n agent,\n });\n}\n\n// Flights finder subgraph\nasync function flightsFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Simulate flight search with static data\n const flights = STATIC_FLIGHTS;\n\n const selectedFlight = state.itinerary?.flight;\n \n let flightChoice: Flight;\n const message = `Found ${flights.length} flight options from ${state.origin || 'Amsterdam'} to ${state.destination || 'San Francisco'}.\\n` +\n `I recommend choosing the flight by ${flights[0].airline} since it's known to be on time and cheaper.`\n if (!selectedFlight) {\n const interruptResult = createInterrupt(\n message,\n flights,\n flights[0],\n \"flights\"\n );\n \n // Parse the interrupt result if it's a string\n flightChoice = typeof interruptResult === 'string' ? JSON.parse(interruptResult) : interruptResult;\n } else {\n flightChoice = selectedFlight;\n }\n\n return new Command({\n goto: END,\n update: {\n flights: flights,\n itinerary: {\n flight: flightChoice\n },\n // Return all \"messages\" that the agent was sending\n messages: [\n ...state.messages,\n new AIMessage({\n content: message,\n }),\n new AIMessage({\n content: `Flights Agent: Great. 
I'll book you the ${flightChoice.airline} flight from ${flightChoice.departure} to ${flightChoice.arrival}.`,\n }),\n ]\n }\n });\n}\n\n// Hotels finder subgraph\nasync function hotelsFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Simulate hotel search with static data\n const hotels = STATIC_HOTELS;\n const selectedHotel = state.itinerary?.hotel;\n \n let hotelChoice: Hotel;\n const message = `Found ${hotels.length} accommodation options in ${state.destination || 'San Francisco'}.\\n\n I recommend choosing the ${hotels[2].name} since it strikes the balance between rating, price, and location.`\n if (!selectedHotel) {\n const interruptResult = createInterrupt(\n message,\n hotels,\n hotels[2],\n \"hotels\"\n );\n \n // Parse the interrupt result if it's a string\n hotelChoice = typeof interruptResult === 'string' ? JSON.parse(interruptResult) : interruptResult;\n } else {\n hotelChoice = selectedHotel;\n }\n\n return new Command({\n goto: END,\n update: {\n hotels: hotels,\n itinerary: {\n hotel: hotelChoice\n },\n // Return all \"messages\" that the agent was sending\n messages: [\n ...state.messages,\n new AIMessage({\n content: message,\n }),\n new AIMessage({\n content: `Hotels Agent: Excellent choice! You'll like ${hotelChoice.name}.`\n }),\n ]\n }\n });\n}\n\n// Experiences finder subgraph\nasync function experiencesFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Filter experiences (2 restaurants, 2 activities)\n const restaurants = STATIC_EXPERIENCES.filter(exp => exp.type === \"restaurant\").slice(0, 2);\n const activities = STATIC_EXPERIENCES.filter(exp => exp.type === \"activity\").slice(0, 2);\n const experiences = [...restaurants, ...activities];\n\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n const itinerary = state.itinerary || {};\n\n const systemPrompt = `\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now, is to let the user know of your findings.\n \n Current status:\n - Origin: ${state.origin || 'Amsterdam'}\n - Destination: ${state.destination || 'San Francisco'}\n - Flight chosen: ${JSON.stringify(itinerary.flight) || 'None'}\n - Hotel chosen: ${JSON.stringify(itinerary.hotel) || 'None'}\n - Activities found: ${JSON.stringify(activities)}\n - Restaurants found: ${JSON.stringify(restaurants)}\n `;\n\n // Get experiences response\n const response = await model.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n return new Command({\n goto: END,\n update: {\n experiences: experiences,\n messages: [...state.messages, response]\n }\n });\n}\n\n// Supervisor response tool\nconst SUPERVISOR_RESPONSE_TOOL = {\n type: \"function\" as const,\n function: {\n name: \"supervisor_response\",\n description: \"Always use this tool to structure your response to the user.\",\n parameters: {\n type: \"object\",\n properties: {\n answer: {\n type: \"string\",\n description: \"The answer to the user\"\n },\n next_agent: {\n type: \"string\",\n enum: [\"flights_agent\", \"hotels_agent\", \"experiences_agent\", \"complete\"],\n description: \"The agent to go to. 
Not required if you do not want to route to another agent.\"\n }\n },\n required: [\"answer\"]\n }\n }\n};\n\n// Supervisor agent\nasync function supervisorAgent(state: TravelAgentState, config?: RunnableConfig): Promise {\n const itinerary = state.itinerary || {};\n\n // Check what's already completed\n const hasFlights = itinerary.flight !== undefined;\n const hasHotels = itinerary.hotel !== undefined;\n const hasExperiences = state.experiences !== null;\n\n const systemPrompt = `\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n \n Current status:\n - Origin: ${state.origin || 'Amsterdam'}\n - Destination: ${state.destination || 'San Francisco'}\n - Flights found: ${hasFlights}\n - Hotels found: ${hasHotels}\n - Experiences found: ${hasExperiences}\n - Itinerary (Things that the user has already confirmed selection on): ${JSON.stringify(itinerary, null, 2)}\n \n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options \n - experiences_agent: Finds restaurant and activity recommendations\n - complete: Mark task as complete when all information is gathered\n \n You must route to the appropriate agent based on what's missing. Once all agents have completed their tasks, route to 'complete'.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Bind the routing tool\n const modelWithTools = model.bindTools(\n [SUPERVISOR_RESPONSE_TOOL],\n {\n parallel_tool_calls: false,\n }\n );\n\n // Get supervisor decision\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n let messages = [...state.messages, response];\n\n // Handle tool calls for routing\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n const toolCallArgs = toolCall.args;\n const nextAgent = toolCallArgs.next_agent;\n\n const toolResponse = new ToolMessage({\n tool_call_id: toolCall.id!,\n content: `Routing to ${nextAgent} and providing the answer`,\n });\n\n messages = [\n ...messages, \n toolResponse, \n new AIMessage({ content: toolCallArgs.answer })\n ];\n\n if (nextAgent && nextAgent !== \"complete\") {\n return new Command({ goto: nextAgent });\n }\n }\n\n // Fallback if no tool call or complete\n return new Command({\n goto: END,\n update: { messages }\n });\n}\n\n// Create subgraphs\nconst flightsGraph = new StateGraph(TravelAgentStateAnnotation);\nflightsGraph.addNode(\"flights_agent_chat_node\", flightsFinder);\nflightsGraph.setEntryPoint(\"flights_agent_chat_node\");\nflightsGraph.addEdge(START, \"flights_agent_chat_node\");\nflightsGraph.addEdge(\"flights_agent_chat_node\", END);\nconst flightsSubgraph = flightsGraph.compile();\n\nconst hotelsGraph = new StateGraph(TravelAgentStateAnnotation);\nhotelsGraph.addNode(\"hotels_agent_chat_node\", hotelsFinder);\nhotelsGraph.setEntryPoint(\"hotels_agent_chat_node\");\nhotelsGraph.addEdge(START, \"hotels_agent_chat_node\");\nhotelsGraph.addEdge(\"hotels_agent_chat_node\", END);\nconst hotelsSubgraph = hotelsGraph.compile();\n\nconst experiencesGraph = new StateGraph(TravelAgentStateAnnotation);\nexperiencesGraph.addNode(\"experiences_agent_chat_node\", experiencesFinder);\nexperiencesGraph.setEntryPoint(\"experiences_agent_chat_node\");\nexperiencesGraph.addEdge(START, 
\"experiences_agent_chat_node\");\nexperiencesGraph.addEdge(\"experiences_agent_chat_node\", END);\nconst experiencesSubgraph = experiencesGraph.compile();\n\n// Main supervisor workflow\nconst workflow = new StateGraph(TravelAgentStateAnnotation);\n\n// Add supervisor and subgraphs as nodes\nworkflow.addNode(\"supervisor\", supervisorAgent, { ends: ['flights_agent', 'hotels_agent', 'experiences_agent', END] });\nworkflow.addNode(\"flights_agent\", flightsSubgraph);\nworkflow.addNode(\"hotels_agent\", hotelsSubgraph);\nworkflow.addNode(\"experiences_agent\", experiencesSubgraph);\n\n// Set entry point\nworkflow.setEntryPoint(\"supervisor\");\nworkflow.addEdge(START, \"supervisor\");\n\n// Add edges back to supervisor after each subgraph\nworkflow.addEdge(\"flights_agent\", \"supervisor\");\nworkflow.addEdge(\"hotels_agent\", \"supervisor\");\nworkflow.addEdge(\"experiences_agent\", \"supervisor\");\n\n// Compile the graph\nexport const subGraphsAgentGraph = workflow.compile();\n", - "language": "ts", - "type": "file" - } - ], - "langgraph-fastapi::agentic_chat": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤖 Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interact with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend are automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. 
Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - } - ], - "langgraph-fastapi::agentic_generative_ui": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter(step => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n
\n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n \n {/* Progress Bar */}\n
\n
\n
\n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending = step.status === \"pending\" && \n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n
\n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n
\n )}\n\n {/* Status Icon */}\n
\n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n
\n {step.description}\n
\n {isCurrentPending && (\n
\n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n
\n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n
\n
\n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🚀 Agentic Generative UI Task Executor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic generative UI** capabilities:\n\n1. **Real-time Status Updates**: The Copilot provides live feedback as it works\n through complex tasks\n2. **Long-running Task Execution**: See how agents can handle extended processes\n with continuous feedback\n3. **Dynamic UI Generation**: The interface updates in real-time to reflect the\n agent's progress\n\n## How to Interact\n\nSimply ask your Copilot to perform any moderately complex task:\n\n- \"Make me a sandwich\"\n- \"Plan a vacation to Japan\"\n- \"Create a weekly workout routine\"\n\nThe Copilot will break down the task into steps and begin \"executing\" them,\nproviding real-time status updates as it progresses.\n\n## ✨ Agentic Generative UI in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and creates a detailed execution plan\n- Each step is processed sequentially with realistic timing\n- Status updates are streamed to the frontend using CopilotKit's streaming\n capabilities\n- The UI dynamically renders these updates without page refreshes\n- The entire flow is managed by the agent, requiring no manual intervention\n\n**What you'll see in this demo:**\n\n- The Copilot breaks your task into logical steps\n- A status indicator shows the current progress\n- Each step is highlighted as it's being executed\n- Detailed status messages explain what's happening at each moment\n- Upon completion, you receive a summary of the task execution\n\nThis pattern of providing real-time progress for long-running tasks is perfect\nfor scenarios where users benefit from transparency into complex processes -\nfrom data analysis to content creation, system configurations, or multi-stage\nworkflows!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call 
will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. \n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the generate_task_steps_generative_ui tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Simulate executing the steps one by one, streaming each status change to the frontend\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n 
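# all steps above are now marked completed; persist the final list in graph state\n 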
\"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - } - ], - "langgraph-fastapi::human_in_the_loop": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // LangGraph uses its own hook to handle human-in-the-loop interactions via LangGraph interrupts.\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // LangGraph uses its own hook to handle human-in-the-loop interactions via LangGraph interrupts,\n // so don't use this action for the LangGraph integrations.\n available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return <StepsFeedback args={args} respond={respond} status={status} />;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤝 Human-in-the-Loop Task Planner\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **human-in-the-loop** capabilities:\n\n1. **Collaborative Planning**: The Copilot generates task steps and lets you\n decide which ones to perform\n2. **Interactive Decision Making**: Select or deselect steps to customize the\n execution plan\n3. **Adaptive Responses**: The Copilot adapts its execution based on your\n choices, even handling missing steps\n\n## How to Interact\n\nTry these steps to experience the demo:\n\n1. Ask your Copilot to help with a task, such as:\n\n - \"Make me a sandwich\"\n - \"Plan a weekend trip\"\n - \"Organize a birthday party\"\n - \"Start a garden\"\n\n2. Review the suggested steps provided by your Copilot\n\n3. Select or deselect steps using the checkboxes to customize the plan\n\n - Try removing essential steps to see how the Copilot adapts!\n\n4. Click \"Execute Plan\" to see the outcome based on your selections\n\n## ✨ Human-in-the-Loop Magic in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and breaks it down into logical steps\n- These steps are presented to you through a dynamic UI component\n- Your selections are captured as user input\n- The agent considers your choices when executing the plan\n- The agent adapts to missing steps with creative problem-solving\n\n**What you'll see in this demo:**\n\n- The Copilot provides a detailed, step-by-step plan for your task\n- You have complete control over which steps to include\n- If you remove essential steps, the Copilot provides entertaining and creative\n workarounds\n- The final execution reflects your choices, showing how human input shapes the\n outcome\n- Each response is tailored to your specific selections\n\nThis human-in-the-loop pattern creates a powerful collaborative experience where\nboth human judgment and AI capabilities work together to achieve better results\nthan either could alone!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple 
of words per step) that are required for a task.\n The step should be in imperative form (i.e. Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any]\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure the steps are based on the user's query.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the plan_execution_steps tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final 
response\n return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - } - ], - "langgraph-fastapi::predictive_state_updates": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport 
StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n const chatTitle = 'AI Document Editor'\n const chatDescription = 'Ask me to create or edit a document'\n const initialLabel = 'Hi 👋 How can I help with your document?'\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
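The `process_steps_node` above pauses the run with LangGraph's `interrupt` and picks up again once the frontend supplies an answer. A minimal, self-contained sketch of that pause/resume cycle follows; the `ask_steps` node name, thread id, and resume string are illustrative stand-ins, and the real agent routes with `Command(goto=...)` rather than returning plain dict updates.

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, MessagesState, StateGraph
from langgraph.types import Command, interrupt


class State(MessagesState):
    steps: list
    user_response: str


def ask_steps(state: State):
    # interrupt() suspends the run and surfaces this payload to the client;
    # on resume it returns the value passed via Command(resume=...).
    answer = interrupt({"steps": state["steps"]})
    return {"user_response": answer}


builder = StateGraph(State)
builder.add_node("ask_steps", ask_steps)
builder.add_edge(START, "ask_steps")
builder.add_edge("ask_steps", END)

# An interrupt needs a checkpointer so the paused thread can be restored.
graph = builder.compile(checkpointer=MemorySaver())

config = {"configurable": {"thread_id": "demo-thread"}}
graph.invoke({"messages": [], "steps": ["dice onions", "simmer sauce"]}, config)  # pauses at interrupt()
result = graph.invoke(Command(resume="skip the simmering, please"), config)
print(result["user_response"])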
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n
\n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction({\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n }, [agentState?.document]);\n\n // Action to write the document.\n useCopilotAction({\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n }, [agentState?.document]);\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n
\n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n
\n )}\n
\n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": "/* Basic editor styles */\n.tiptap-container {\n height: 100vh; /* Full viewport height */\n width: 100vw; /* Full viewport width */\n display: flex;\n flex-direction: column;\n}\n\n.tiptap {\n flex: 1; /* Take up remaining space */\n overflow: auto; /* Allow scrolling if content overflows */\n}\n\n.tiptap :first-child {\n margin-top: 0;\n}\n\n/* List styles */\n.tiptap ul,\n.tiptap ol {\n padding: 0 1rem;\n margin: 1.25rem 1rem 1.25rem 0.4rem;\n}\n\n.tiptap ul li p,\n.tiptap ol li p {\n margin-top: 0.25em;\n margin-bottom: 0.25em;\n}\n\n/* Heading styles */\n.tiptap h1,\n.tiptap h2,\n.tiptap h3,\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n line-height: 1.1;\n margin-top: 2.5rem;\n text-wrap: pretty;\n font-weight: bold;\n}\n\n.tiptap h1,\n.tiptap h2,\n.tiptap h3,\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n margin-top: 3.5rem;\n margin-bottom: 1.5rem;\n}\n\n.tiptap p {\n margin-bottom: 1rem;\n}\n\n.tiptap h1 {\n font-size: 1.4rem;\n}\n\n.tiptap h2 {\n font-size: 1.2rem;\n}\n\n.tiptap h3 {\n font-size: 1.1rem;\n}\n\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n font-size: 1rem;\n}\n\n/* Code and preformatted text styles */\n.tiptap code {\n background-color: var(--purple-light);\n border-radius: 0.4rem;\n color: var(--black);\n font-size: 0.85rem;\n padding: 0.25em 0.3em;\n}\n\n.tiptap pre {\n background: var(--black);\n border-radius: 0.5rem;\n color: var(--white);\n font-family: \"JetBrainsMono\", monospace;\n margin: 1.5rem 0;\n padding: 0.75rem 1rem;\n}\n\n.tiptap pre code {\n background: none;\n color: inherit;\n font-size: 0.8rem;\n padding: 0;\n}\n\n.tiptap blockquote {\n border-left: 3px solid var(--gray-3);\n margin: 1.5rem 0;\n padding-left: 1rem;\n}\n\n.tiptap hr {\n border: none;\n border-top: 1px solid var(--gray-2);\n margin: 2rem 0;\n}\n\n.tiptap s {\n background-color: #f9818150;\n padding: 2px;\n font-weight: bold;\n color: rgba(0, 0, 0, 0.7);\n}\n\n.tiptap em {\n background-color: #b2f2bb;\n padding: 2px;\n font-weight: bold;\n font-style: normal;\n}\n\n.copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 📝 Predictive State Updates Document Editor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **predictive state updates** for real-time\ndocument collaboration:\n\n1. **Live Document Editing**: Watch as your Copilot makes changes to a document\n in real-time\n2. **Diff Visualization**: See exactly what's being changed as it happens\n3. 
**Streaming Updates**: Changes are displayed character-by-character as the\n Copilot works\n\n## How to Interact\n\nTry these interactions with the collaborative document editor:\n\n- \"Fix the grammar and typos in this document\"\n- \"Make this text more professional\"\n- \"Add a section about [topic]\"\n- \"Summarize this content in bullet points\"\n- \"Change the tone to be more casual\"\n\nWatch as the Copilot processes your request and edits the document in real-time\nright before your eyes.\n\n## ✨ Predictive State Updates in Action\n\n**What's happening technically:**\n\n- The document state is shared between your UI and the Copilot\n- As the Copilot generates content, changes are streamed to the UI\n- Each modification is visualized with additions and deletions\n- The UI renders these changes progressively, without waiting for completion\n- All edits are tracked and displayed in a visually intuitive way\n\n**What you'll see in this demo:**\n\n- Text changes are highlighted in different colors (green for additions, red for\n deletions)\n- The document updates character-by-character, creating a typing-like effect\n- You can see the Copilot's thought process as it refines the content\n- The final document seamlessly incorporates all changes\n- The experience feels collaborative, as if someone is editing alongside you\n\nThis pattern of real-time collaborative editing with diff visualization is\nperfect for document editors, code review tools, content creation platforms, or\nany application where users benefit from seeing exactly how content is being\ntransformed!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. 
Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n return Command(\n goto=\"chat_node\"\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n \"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n 
update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", - "language": "python", - "type": "file" - } - ], - "langgraph-fastapi::shared_state": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
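Both LangGraph agents in this file stream a tool argument into shared state the same way: before invoking the model, they tag the call's config with `predict_state` metadata, which the CopilotKit runtime uses to mirror the streamed argument into a state key. Isolated from the surrounding node, the wiring looks roughly like this; the names follow the predictive-state agent above, and the `setdefault` guard is our addition so the sketch works with a bare config.

from langchain_core.messages import SystemMessage
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def write_document_local(document: str):
    """Write the full document in markdown."""
    return document


async def call_model(messages: list, config: RunnableConfig):
    # Mirror the streamed "document" argument of write_document_local
    # into the shared "document" state key while the tool call is emitted.
    config.setdefault("metadata", {})["predict_state"] = [{
        "state_key": "document",
        "tool": "write_document_local",
        "tool_argument": "document",
    }]
    model = ChatOpenAI(model="gpt-4o").bind_tools(
        [write_document_local],
        parallel_tool_calls=False,  # one tool call at a time, as in the demo
    )
    return await model.ainvoke(
        [SystemMessage(content="Use write_document_local for all edits."), *messages],
        config,
    )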
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, 
preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n background-color: #fff;\n color: #000;\n border-bottom: 0px;\n}\n\n/* Recipe App Styles */\n.app-container {\n min-height: 100vh;\n width: 100%;\n display: flex;\n align-items: center;\n justify-content: center;\n background-size: cover;\n background-position: center;\n background-repeat: no-repeat;\n background-attachment: fixed;\n position: relative;\n overflow: auto;\n}\n\n.recipe-card {\n background-color: rgba(255, 255, 255, 0.97);\n border-radius: 16px;\n box-shadow: 0 15px 30px rgba(0, 0, 0, 0.25), 0 5px 15px rgba(0, 0, 0, 0.15);\n width: 100%;\n max-width: 750px;\n margin: 20px auto;\n padding: 14px 32px;\n position: relative;\n z-index: 1;\n backdrop-filter: blur(5px);\n border: 1px solid rgba(255, 255, 255, 0.3);\n transition: transform 0.2s ease, box-shadow 0.2s ease;\n animation: fadeIn 0.5s ease-out forwards;\n box-sizing: border-box;\n overflow: hidden;\n}\n\n.recipe-card:hover {\n transform: translateY(-5px);\n box-shadow: 0 20px 40px rgba(0, 0, 0, 0.3), 0 10px 20px rgba(0, 0, 0, 0.2);\n}\n\n/* Recipe Header */\n.recipe-header {\n margin-bottom: 24px;\n}\n\n.recipe-title-input {\n width: 100%;\n font-size: 24px;\n font-weight: bold;\n border: none;\n outline: none;\n padding: 8px 0;\n margin-bottom: 0px;\n}\n\n.recipe-meta {\n display: flex;\n align-items: center;\n gap: 20px;\n margin-top: 5px;\n margin-bottom: 14px;\n}\n\n.meta-item {\n display: flex;\n align-items: center;\n gap: 8px;\n color: #555;\n}\n\n.meta-icon {\n font-size: 20px;\n color: #777;\n}\n\n.meta-text {\n font-size: 15px;\n}\n\n/* Recipe Meta Selects */\n.meta-item select {\n border: none;\n background: transparent;\n font-size: 15px;\n color: #555;\n cursor: pointer;\n outline: none;\n padding-right: 18px;\n transition: color 0.2s, transform 0.1s;\n font-weight: 500;\n}\n\n.meta-item select:hover,\n.meta-item select:focus {\n color: #FF5722;\n}\n\n.meta-item select:active {\n transform: scale(0.98);\n}\n\n.meta-item select option {\n color: #333;\n background-color: white;\n font-weight: normal;\n padding: 8px;\n}\n\n/* Section Container */\n.section-container {\n margin-bottom: 20px;\n position: relative;\n width: 100%;\n}\n\n.section-title {\n font-size: 20px;\n font-weight: 700;\n margin-bottom: 20px;\n color: #333;\n position: relative;\n display: inline-block;\n}\n\n.section-title:after {\n content: \"\";\n position: absolute;\n bottom: -8px;\n left: 0;\n width: 40px;\n height: 3px;\n background-color: #ff7043;\n border-radius: 3px;\n}\n\n/* Dietary Preferences */\n.dietary-options {\n display: flex;\n flex-wrap: wrap;\n gap: 10px 16px;\n margin-bottom: 16px;\n width: 100%;\n}\n\n.dietary-option {\n display: flex;\n align-items: center;\n gap: 6px;\n font-size: 14px;\n cursor: pointer;\n margin-bottom: 4px;\n}\n\n.dietary-option input {\n cursor: pointer;\n}\n\n/* Ingredients */\n.ingredients-container {\n display: flex;\n flex-wrap: wrap;\n gap: 10px;\n margin-bottom: 15px;\n width: 100%;\n box-sizing: border-box;\n}\n\n.ingredient-card {\n display: flex;\n align-items: center;\n background-color: rgba(255, 255, 255, 0.9);\n border-radius: 12px;\n padding: 12px;\n margin-bottom: 10px;\n box-shadow: 0 4px 10px rgba(0, 0, 0, 0.08);\n position: relative;\n transition: all 0.2s ease;\n border: 1px solid rgba(240, 
240, 240, 0.8);\n width: calc(33.333% - 7px);\n box-sizing: border-box;\n}\n\n.ingredient-card:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 15px rgba(0, 0, 0, 0.12);\n}\n\n.ingredient-card .remove-button {\n position: absolute;\n right: 10px;\n top: 10px;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 24px;\n height: 24px;\n line-height: 1;\n}\n\n.ingredient-card:hover .remove-button {\n display: block;\n}\n\n.ingredient-icon {\n font-size: 24px;\n margin-right: 12px;\n display: flex;\n align-items: center;\n justify-content: center;\n width: 40px;\n height: 40px;\n background-color: #f7f7f7;\n border-radius: 50%;\n flex-shrink: 0;\n}\n\n.ingredient-content {\n flex: 1;\n display: flex;\n flex-direction: column;\n gap: 3px;\n min-width: 0;\n}\n\n.ingredient-name-input,\n.ingredient-amount-input {\n border: none;\n background: transparent;\n outline: none;\n width: 100%;\n padding: 0;\n text-overflow: ellipsis;\n overflow: hidden;\n white-space: nowrap;\n}\n\n.ingredient-name-input {\n font-weight: 500;\n font-size: 14px;\n}\n\n.ingredient-amount-input {\n font-size: 13px;\n color: #666;\n}\n\n.ingredient-name-input::placeholder,\n.ingredient-amount-input::placeholder {\n color: #aaa;\n}\n\n.remove-button {\n background: none;\n border: none;\n color: #999;\n font-size: 20px;\n cursor: pointer;\n padding: 0;\n width: 28px;\n height: 28px;\n display: flex;\n align-items: center;\n justify-content: center;\n margin-left: 10px;\n}\n\n.remove-button:hover {\n color: #FF5722;\n}\n\n/* Instructions */\n.instructions-container {\n display: flex;\n flex-direction: column;\n gap: 6px;\n position: relative;\n margin-bottom: 12px;\n width: 100%;\n}\n\n.instruction-item {\n position: relative;\n display: flex;\n width: 100%;\n box-sizing: border-box;\n margin-bottom: 8px;\n align-items: flex-start;\n}\n\n.instruction-number {\n display: flex;\n align-items: center;\n justify-content: center;\n min-width: 26px;\n height: 26px;\n background-color: #ff7043;\n color: white;\n border-radius: 50%;\n font-weight: 600;\n flex-shrink: 0;\n box-shadow: 0 2px 4px rgba(255, 112, 67, 0.3);\n z-index: 1;\n font-size: 13px;\n margin-top: 2px;\n}\n\n.instruction-line {\n position: absolute;\n left: 13px; /* Half of the number circle width */\n top: 22px;\n bottom: -18px;\n width: 2px;\n background: linear-gradient(to bottom, #ff7043 60%, rgba(255, 112, 67, 0.4));\n z-index: 0;\n}\n\n.instruction-content {\n background-color: white;\n border-radius: 10px;\n padding: 10px 14px;\n margin-left: 12px;\n flex-grow: 1;\n transition: all 0.2s ease;\n box-shadow: 0 2px 6px rgba(0, 0, 0, 0.08);\n border: 1px solid rgba(240, 240, 240, 0.8);\n position: relative;\n width: calc(100% - 38px);\n box-sizing: border-box;\n display: flex;\n align-items: center;\n}\n\n.instruction-content-editing {\n background-color: #fff9f6;\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12), 0 0 0 2px rgba(255, 112, 67, 0.2);\n}\n\n.instruction-content:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12);\n}\n\n.instruction-textarea {\n width: 100%;\n background: transparent;\n border: none;\n resize: vertical;\n font-family: inherit;\n font-size: 14px;\n line-height: 1.4;\n min-height: 20px;\n outline: none;\n padding: 0;\n margin: 0;\n}\n\n.instruction-delete-btn {\n position: absolute;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 20px;\n height: 
20px;\n line-height: 1;\n top: 50%;\n transform: translateY(-50%);\n right: 8px;\n}\n\n.instruction-content:hover .instruction-delete-btn {\n display: flex;\n align-items: center;\n justify-content: center;\n}\n\n/* Action Button */\n.action-container {\n display: flex;\n justify-content: center;\n margin-top: 40px;\n padding-bottom: 20px;\n position: relative;\n}\n\n.improve-button {\n background-color: #ff7043;\n border: none;\n color: white;\n border-radius: 30px;\n font-size: 18px;\n font-weight: 600;\n padding: 14px 28px;\n cursor: pointer;\n transition: all 0.3s ease;\n box-shadow: 0 4px 15px rgba(255, 112, 67, 0.4);\n display: flex;\n align-items: center;\n justify-content: center;\n text-align: center;\n position: relative;\n min-width: 180px;\n}\n\n.improve-button:hover {\n background-color: #ff5722;\n transform: translateY(-2px);\n box-shadow: 0 8px 20px rgba(255, 112, 67, 0.5);\n}\n\n.improve-button.loading {\n background-color: #ff7043;\n opacity: 0.8;\n cursor: not-allowed;\n padding-left: 42px; /* Reduced padding to bring text closer to icon */\n padding-right: 22px; /* Balance the button */\n justify-content: flex-start; /* Left align text for better alignment with icon */\n}\n\n.improve-button.loading:after {\n content: \"\"; /* Add space between icon and text */\n display: inline-block;\n width: 8px; /* Width of the space */\n}\n\n.improve-button:before {\n content: \"\";\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='white' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpath d='M12 2v4M12 18v4M4.93 4.93l2.83 2.83M16.24 16.24l2.83 2.83M2 12h4M18 12h4M4.93 19.07l2.83-2.83M16.24 7.76l2.83-2.83'/%3E%3C/svg%3E\");\n width: 20px; /* Slightly smaller icon */\n height: 20px;\n background-repeat: no-repeat;\n background-size: contain;\n position: absolute;\n left: 16px; /* Slightly adjusted */\n top: 50%;\n transform: translateY(-50%);\n display: none;\n}\n\n.improve-button.loading:before {\n display: block;\n animation: spin 1.5s linear infinite;\n}\n\n@keyframes spin {\n 0% { transform: translateY(-50%) rotate(0deg); }\n 100% { transform: translateY(-50%) rotate(360deg); }\n}\n\n/* Ping Animation */\n.ping-animation {\n position: absolute;\n display: flex;\n width: 12px;\n height: 12px;\n top: 0;\n right: 0;\n}\n\n.ping-circle {\n position: absolute;\n display: inline-flex;\n width: 100%;\n height: 100%;\n border-radius: 50%;\n background-color: #38BDF8;\n opacity: 0.75;\n animation: ping 1.5s cubic-bezier(0, 0, 0.2, 1) infinite;\n}\n\n.ping-dot {\n position: relative;\n display: inline-flex;\n width: 12px;\n height: 12px;\n border-radius: 50%;\n background-color: #0EA5E9;\n}\n\n@keyframes ping {\n 75%, 100% {\n transform: scale(2);\n opacity: 0;\n }\n}\n\n/* Instruction hover effects */\n.instruction-item:hover .instruction-delete-btn {\n display: flex !important;\n}\n\n/* Add some subtle animations */\n@keyframes fadeIn {\n from { opacity: 0; transform: translateY(20px); }\n to { opacity: 1; transform: translateY(0); }\n}\n\n/* Better center alignment for the recipe card */\n.recipe-card-container {\n display: flex;\n justify-content: center;\n width: 100%;\n position: relative;\n z-index: 1;\n margin: 0 auto;\n box-sizing: border-box;\n}\n\n/* Add Buttons */\n.add-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 8px;\n padding: 10px 16px;\n cursor: pointer;\n font-weight: 500;\n display: 
inline-block;\n font-size: 14px;\n margin-bottom: 0;\n}\n\n.add-step-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 6px;\n padding: 6px 12px;\n cursor: pointer;\n font-weight: 500;\n font-size: 13px;\n}\n\n/* Section Headers */\n.section-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 12px;\n}", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🍳 Shared State Recipe Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **shared state** functionality - a powerful\nfeature that enables bidirectional data flow between:\n\n1. **Frontend → Agent**: UI controls update the agent's context in real-time\n2. **Agent → Frontend**: The Copilot's recipe creations instantly update the UI\n components\n\nIt's like having a cooking buddy who not only listens to what you want but also\nupdates your recipe card as you chat - no refresh needed! ✨\n\n## How to Interact\n\nMix and match any of these parameters (or none at all - it's up to you!):\n\n- **Skill Level**: Beginner to expert 👨‍🍳\n- **Cooking Time**: Quick meals or slow cooking ⏱️\n- **Special Preferences**: Dietary needs, flavor profiles, health goals 🥗\n- **Ingredients**: Items you want to include 🧅🥩🍄\n- **Instructions**: Any specific steps\n\nThen chat with your Copilot chef with prompts like:\n\n- \"I'm a beginner cook. Can you make me a quick dinner?\"\n- \"I need something spicy with chicken that takes under 30 minutes!\"\n\n## ✨ Shared State Magic in Action\n\n**What's happening technically:**\n\n- The UI and Copilot agent share the same state object (**Agent State = UI\n State**)\n- Changes from either side automatically update the other\n- Neither side needs to manually request updates from the other\n\n**What you'll see in this demo:**\n\n- Set cooking time to 20 minutes in the UI and watch the Copilot immediately\n respect your time constraint\n- Add ingredients through the UI and see them appear in your recipe\n- When the Copilot suggests new ingredients, watch them automatically appear in\n the UI ingredients list\n- Change your skill level and see how the Copilot adapts its instructions in\n real-time\n\nThis synchronized state creates a seamless experience where the agent always has\nyour current preferences, and any updates to the recipe are instantly reflected\nin both places.\n\nThis shared state pattern can be applied to any application where you want your\nUI and Copilot to work together in perfect harmony!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass 
SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any]\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
Don't describe the recipe; just say what you did.\n    \"\"\"\n\n    # Define the model\n    model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n    # Define config for the model\n    if config is None:\n        config = RunnableConfig(recursion_limit=25)\n\n    # Use \"predict_state\" metadata to set up streaming for the generate_recipe tool\n    config[\"metadata\"][\"predict_state\"] = [{\n        \"state_key\": \"recipe\",\n        \"tool\": \"generate_recipe\",\n        \"tool_argument\": \"recipe\"\n    }]\n\n    # Bind the tools to the model\n    model_with_tools = model.bind_tools(\n        [\n            *state[\"tools\"],\n            generate_recipe\n        ],\n        # Disable parallel tool calls to avoid race conditions\n        parallel_tool_calls=False,\n    )\n\n    # Run the model and generate a response\n    response = await model_with_tools.ainvoke([\n        SystemMessage(content=system_prompt),\n        *state[\"messages\"],\n    ], config)\n\n    # Update messages with the response\n    messages = state[\"messages\"] + [response]\n\n    # Handle tool calls\n    if hasattr(response, \"tool_calls\") and response.tool_calls:\n        # Handle dict or object form (backward compatibility)\n        tool_call = (response.tool_calls[0]\n                     if isinstance(response.tool_calls[0], dict)\n                     else vars(response.tool_calls[0]))\n\n        # Check if args is already a dict or needs to be parsed\n        tool_call_args = (tool_call[\"args\"]\n                          if isinstance(tool_call[\"args\"], dict)\n                          else json.loads(tool_call[\"args\"]))\n\n        if tool_call[\"name\"] == \"generate_recipe\":\n            # Update recipe state with tool_call_args\n            recipe_data = tool_call_args[\"recipe\"]\n\n            # If we have an existing recipe, update it\n            if \"recipe\" in state and state[\"recipe\"] is not None:\n                recipe = state[\"recipe\"]\n                for key, value in recipe_data.items():\n                    if value is not None:  # Only update fields that were provided\n                        recipe[key] = value\n            else:\n                # Create a new recipe\n                recipe = {\n                    \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n                    \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n                    \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n                    \"ingredients\": recipe_data.get(\"ingredients\", []),\n                    \"instructions\": recipe_data.get(\"instructions\", [])\n                }\n\n            # Add tool response to messages\n            tool_response = {\n                \"role\": \"tool\",\n                \"content\": \"Recipe generated.\",\n                \"tool_call_id\": tool_call[\"id\"]\n            }\n\n            messages = messages + [tool_response]\n\n            # Explicitly emit the updated state to ensure it's shared with frontend\n            state[\"recipe\"] = recipe\n            await adispatch_custom_event(\n                \"manually_emit_intermediate_state\",\n                state,\n                config=config,\n            )\n\n            # Return command with updated recipe\n            return Command(\n                goto=\"start_node\",\n                update={\n                    \"messages\": messages,\n                    \"recipe\": recipe\n                }\n            )\n\n    return Command(\n        goto=END,\n        update={\n            \"messages\": messages,\n            \"recipe\": state[\"recipe\"]\n        }\n    )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n    # For CopilotKit and other contexts, use MemorySaver\n    from langgraph.checkpoint.memory import MemorySaver\n    memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - } - ], - "langgraph-fastapi::tool_based_generative_ui": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n \n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
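The shared-state agent just above goes out of its way to accept `response.tool_calls[0]` either as a dict or as an object, and its args either as a dict or as a JSON string. That normalization can be factored into a single helper; `normalize_tool_call` is our name for it, not the demo's.

import json
from typing import Any, Dict


def normalize_tool_call(raw: Any) -> Dict[str, Any]:
    """Return a tool call as {"id", "name", "args"} with args as a dict."""
    call = raw if isinstance(raw, dict) else vars(raw)
    args = call["args"]
    if not isinstance(args, dict):
        args = json.loads(args)  # older formats ship args as a JSON string
    return {"id": call["id"], "name": call["name"], "args": args}


# Inside a node, mirroring the demo's chat_node:
#   if getattr(response, "tool_calls", None):
#       call = normalize_tool_call(response.tool_calls[0])
#       if call["name"] == "generate_recipe":
#           recipe_data = call["args"]["recipe"]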
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n}\n\n.page-background {\n /* Darker gradient background */\n background: linear-gradient(170deg, #e9ecef 0%, #ced4da 100%);\n}\n\n@keyframes fade-scale-in {\n from {\n opacity: 0;\n transform: translateY(10px) scale(0.98);\n }\n to {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Updated card entry animation */\n@keyframes pop-in {\n 0% {\n opacity: 0;\n transform: translateY(15px) scale(0.95);\n }\n 70% {\n opacity: 1;\n transform: translateY(-2px) scale(1.02);\n }\n 100% {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Animation for subtle background gradient movement */\n@keyframes animated-gradient {\n 0% {\n background-position: 0% 50%;\n }\n 50% {\n background-position: 100% 50%;\n }\n 100% {\n background-position: 0% 50%;\n }\n}\n\n/* Animation for flash effect on apply */\n@keyframes flash-border-glow {\n 0% {\n /* Start slightly intensified */\n border-top-color: #ff5b4a !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n }\n 50% {\n /* Peak intensity */\n border-top-color: #ff4733 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.08),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 35px rgba(255, 71, 51, 0.7);\n }\n 100% {\n /* Return to default state appearance */\n border-top-color: #ff6f61 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 10px rgba(255, 111, 97, 0.15);\n }\n}\n\n/* Existing animation for haiku lines */\n@keyframes fade-slide-in {\n from {\n opacity: 0;\n transform: translateX(-15px);\n }\n to {\n opacity: 1;\n transform: translateX(0);\n }\n}\n\n.animated-fade-in {\n /* Use the new pop-in animation */\n animation: pop-in 0.6s ease-out forwards;\n}\n\n.haiku-card {\n /* Subtle animated gradient background */\n background: linear-gradient(120deg, #ffffff 0%, #fdfdfd 50%, #ffffff 100%);\n background-size: 200% 200%;\n animation: animated-gradient 10s ease infinite;\n\n /* === Explicit Border Override Attempt === */\n /* 1. Set the default grey border for all sides */\n border: 1px solid #dee2e6;\n\n /* 2. 
Explicitly override the top border immediately after */\n border-top: 10px solid #ff6f61 !important; /* Orange top - Added !important */\n /* === End Explicit Border Override Attempt === */\n\n padding: 2.5rem 3rem;\n border-radius: 20px;\n\n /* Default glow intensity */\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 15px rgba(255, 111, 97, 0.25);\n text-align: left;\n max-width: 745px;\n margin: 3rem auto;\n min-width: 600px;\n\n /* Transition */\n transition: transform 0.35s ease, box-shadow 0.35s ease, border-top-width 0.35s ease, border-top-color 0.35s ease;\n}\n\n.haiku-card:hover {\n transform: translateY(-8px) scale(1.03);\n /* Enhanced shadow + Glow */\n box-shadow: 0 15px 35px rgba(0, 0, 0, 0.1),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n /* Modify only top border properties */\n border-top-width: 14px !important; /* Added !important */\n border-top-color: #ff5b4a !important; /* Added !important */\n}\n\n.haiku-card .flex {\n margin-bottom: 1.5rem;\n}\n\n.haiku-card .flex.haiku-line { /* Target the lines specifically */\n margin-bottom: 1.5rem;\n opacity: 0; /* Start hidden for animation */\n animation: fade-slide-in 0.5s ease-out forwards;\n /* animation-delay is set inline in page.tsx */\n}\n\n/* Remove previous explicit color overrides - rely on Tailwind */\n/* .haiku-card p.text-4xl {\n color: #212529;\n}\n\n.haiku-card p.text-base {\n color: #495057;\n} */\n\n.haiku-card.applied-flash {\n /* Apply the flash animation once */\n /* Note: animation itself has !important on border-top-color */\n animation: flash-border-glow 0.6s ease-out forwards;\n}\n\n/* Styling for images within the main haiku card */\n.haiku-card-image {\n width: 9.5rem; /* Increased size (approx w-48) */\n height: 9.5rem; /* Increased size (approx h-48) */\n object-fit: cover;\n border-radius: 1.5rem; /* rounded-xl */\n border: 1px solid #e5e7eb;\n /* Enhanced shadow with subtle orange hint */\n box-shadow: 0 8px 15px rgba(0, 0, 0, 0.1),\n 0 3px 6px rgba(0, 0, 0, 0.08),\n 0 0 10px rgba(255, 111, 97, 0.2);\n /* Inherit animation delay from inline style */\n animation-name: fadeIn;\n animation-duration: 0.5s;\n animation-fill-mode: both;\n}\n\n/* Styling for images within the suggestion card */\n.suggestion-card-image {\n width: 6.5rem; /* Increased slightly (w-20) */\n height: 6.5rem; /* Increased slightly (h-20) */\n object-fit: cover;\n border-radius: 1rem; /* Equivalent to rounded-md */\n border: 1px solid #d1d5db; /* Equivalent to border (using Tailwind gray-300) */\n margin-top: 0.5rem;\n /* Added shadow for suggestion images */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1),\n 0 2px 4px rgba(0, 0, 0, 0.06);\n transition: all 0.2s ease-in-out; /* Added for smooth deselection */\n}\n\n/* Styling for the focused suggestion card image */\n.suggestion-card-image-focus {\n width: 6.5rem;\n height: 6.5rem;\n object-fit: cover;\n border-radius: 1rem;\n margin-top: 0.5rem;\n /* Highlight styles */\n border: 2px solid #ff6f61; /* Thicker, themed border */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1), /* Base shadow for depth */\n 0 0 12px rgba(255, 111, 97, 0.6); /* Orange glow */\n transform: scale(1.05); /* Slightly scale up */\n transition: all 0.2s ease-in-out; /* Smooth transition for focus */\n}\n\n/* Styling for the suggestion card container in the sidebar */\n.suggestion-card {\n border: 1px solid #dee2e6; /* Same default border as haiku-card */\n border-top: 10px solid #ff6f61; /* Same orange top border */\n border-radius: 0.375rem; 
/* Default rounded-md */\n /* Note: background-color is set by Tailwind bg-gray-100 */\n /* Other styles like padding, margin, flex are handled by Tailwind */\n}\n\n.suggestion-image-container {\n display: flex;\n gap: 1rem;\n justify-content: space-between;\n width: 100%;\n height: 6.5rem;\n}\n\n/* Mobile responsive styles - matches useMobileView hook breakpoint */\n@media (max-width: 767px) {\n .haiku-card {\n padding: 1rem 1.5rem; /* Reduced from 2.5rem 3rem */\n min-width: auto; /* Remove min-width constraint */\n max-width: 100%; /* Full width on mobile */\n margin: 1rem auto; /* Reduced margin */\n }\n\n .haiku-card-image {\n width: 5.625rem; /* 90px - smaller on mobile */\n height: 5.625rem; /* 90px - smaller on mobile */\n }\n\n .suggestion-card-image {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n\n .suggestion-card-image-focus {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🪶 Tool-Based Generative UI Haiku Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **tool-based generative UI** capabilities:\n\n1. **Frontend Rendering of Tool Calls**: Backend tool calls are automatically\n rendered in the UI\n2. **Dynamic UI Generation**: The UI updates in real-time as the agent generates\n content\n3. **Elegant Content Presentation**: Complex structured data (haikus) are\n beautifully displayed\n\n## How to Interact\n\nChat with your Copilot and ask for haikus about different topics:\n\n- \"Create a haiku about nature\"\n- \"Write a haiku about technology\"\n- \"Generate a haiku about the changing seasons\"\n- \"Make a humorous haiku about programming\"\n\nEach request will trigger the agent to generate a haiku and display it in a\nvisually appealing card format in the UI.\n\n## ✨ Tool-Based Generative UI in Action\n\n**What's happening technically:**\n\n- The agent processes your request and determines it should create a haiku\n- It calls a backend tool that returns structured haiku data\n- CopilotKit automatically renders this tool call in the frontend\n- The rendering is handled by the registered tool component in your React app\n- No manual state management is required to display the results\n\n**What you'll see in this demo:**\n\n- As you request a haiku, a beautifully formatted card appears in the UI\n- The haiku follows the traditional 5-7-5 syllable structure\n- Each haiku is presented with consistent styling\n- Multiple haikus can be generated in sequence\n- The UI adapts to display each new piece of content\n\nThis pattern of tool-based generative UI can be extended to create any kind of\ndynamic content - from data visualizations to interactive components, all driven\nby your Copilot's tool calls!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: 
List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - } - ], - "langgraph-fastapi::agentic_chat_reasoning": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { ChevronDown } from \"lucide-react\";\nimport { Button } from \"@/components/ui/button\";\nimport {\n DropdownMenu,\n DropdownMenuContent,\n DropdownMenuItem,\n DropdownMenuLabel,\n DropdownMenuSeparator,\n DropdownMenuTrigger,\n} from \"@/components/ui/dropdown-menu\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n model: string;\n}\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"agentic_chat_reasoning\",\n initialState: {\n model: \"OpenAI\",\n },\n });\n\n // Initialize model if not set\n const selectedModel = agentState?.model || \"OpenAI\";\n\n const handleModelChange = (model: string) => {\n setAgentState({ model });\n };\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. 
Regular colors, linear or radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n },\n });\n\n return (\n
\n {/* Reasoning Model Dropdown */}\n
\n
\n
\n \n Reasoning Model:\n \n \n \n \n \n \n Select Model\n \n handleModelChange(\"OpenAI\")}>\n OpenAI\n \n handleModelChange(\"Anthropic\")}>\n Anthropic\n \n handleModelChange(\"Gemini\")}>\n Gemini\n \n \n \n
\n
\n
\n\n {/* Chat Container */}\n
\n
\n \n
\n
\n
\n );\n};\n\nexport default AgenticChat;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤖 Agentic Chat with Reasoning\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interact with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend are automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any]\n model: str\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. 
It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n\n # 1. Define the model\n model = ChatOpenAI(model=\"o3\")\n if state[\"model\"] == \"Anthropic\":\n model = ChatAnthropic(\n model=\"claude-sonnet-4-20250514\",\n thinking={\"type\": \"enabled\", \"budget_tokens\": 2000}\n )\n elif state[\"model\"] == \"Gemini\":\n model = ChatGoogleGenerativeAI(model=\"gemini-2.5-pro\", thinking_budget=1024)\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()", - "language": "python", - "type": "file" - } - ], - "langgraph-fastapi::subgraphs": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SubgraphsProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\n// Travel planning data types\ninterface Flight {\n airline: string;\n arrival: string;\n departure: string;\n duration: string;\n price: string;\n}\n\ninterface Hotel {\n location: string;\n name: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n description: string;\n location: string;\n type: string;\n}\n\ninterface Itinerary {\n hotel?: Hotel;\n flight?: Flight;\n experiences?: Experience[];\n}\n\ntype AvailableAgents = 'flights' | 'hotels' | 'experiences' | 'supervisor'\n\ninterface TravelAgentState {\n experiences: Experience[],\n flights: Flight[],\n hotels: Hotel[],\n itinerary: Itinerary\n planning_step: string\n active_agent: AvailableAgents\n}\n\nconst INITIAL_STATE: TravelAgentState = {\n 
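The reasoning agent above picks its model from the `model` field in shared state: OpenAI's `o3` by default, Anthropic or Gemini on request. A rough TypeScript sketch of the same switch, assuming recent `@langchain/openai` and `@langchain/anthropic` releases (the `thinking` option mirrors the Python example and should be verified against your installed version; the Gemini branch is omitted here):

```typescript
import { ChatOpenAI } from "@langchain/openai";
import { ChatAnthropic } from "@langchain/anthropic";

// Pick a chat model from the provider name carried in agent state.
function selectReasoningModel(provider: string) {
  if (provider === "Anthropic") {
    // `thinking` mirrors the Python agent's extended-thinking budget;
    // verify the option against your @langchain/anthropic version.
    return new ChatAnthropic({
      model: "claude-sonnet-4-20250514",
      thinking: { type: "enabled", budget_tokens: 2000 },
    });
  }
  // Default mirrors the Python agent's OpenAI reasoning model.
  return new ChatOpenAI({ model: "o3" });
}
```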
itinerary: {},\n experiences: [],\n flights: [],\n hotels: [],\n planning_step: \"start\",\n active_agent: 'supervisor'\n};\n\ninterface InterruptEvent {\n message: string;\n options: TAgent extends 'flights' ? Flight[] : TAgent extends 'hotels' ? Hotel[] : never,\n recommendation: TAgent extends 'flights' ? Flight : TAgent extends 'hotels' ? Hotel : never,\n agent: TAgent\n}\n\nfunction InterruptHumanInTheLoop({\n event,\n resolve,\n}: {\n event: { value: InterruptEvent };\n resolve: (value: string) => void;\n}) {\n const { message, options, agent, recommendation } = event.value;\n\n // Format agent name with emoji\n const formatAgentName = (agent: string) => {\n switch (agent) {\n case 'flights': return 'Flights Agent';\n case 'hotels': return 'Hotels Agent';\n case 'experiences': return 'Experiences Agent';\n default: return `${agent} Agent`;\n }\n };\n\n const handleOptionSelect = (option: any) => {\n resolve(JSON.stringify(option));\n };\n\n return (\n
\n

{formatAgentName(agent)}: {message}

\n\n
\n {options.map((opt, idx) => {\n if ('airline' in opt) {\n const isRecommended = (recommendation as Flight).airline === opt.airline;\n // Flight options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.airline}\n {opt.price}\n
\n
\n {opt.departure} → {opt.arrival}\n
\n
\n {opt.duration}\n
\n \n );\n }\n const isRecommended = (recommendation as Hotel).name === opt.name;\n\n // Hotel options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.name}\n {opt.rating}\n
\n
\n 📍 {opt.location}\n
\n
\n {opt.price_per_night}\n
\n \n );\n })}\n
\n
\n )\n}\n\nexport default function Subgraphs({ params }: SubgraphsProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight);\n\n const chatTitle = 'Travel Planning Assistant';\n const chatDescription = 'Plan your perfect trip with AI specialists';\n const initialLabel = 'Hi! ✈️ Ready to plan an amazing trip? Try saying \"Plan a trip to Paris\" or \"Find me flights to Tokyo\"';\n\n return (\n \n
\n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight);\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nfunction TravelPlanner() {\n const { isMobile } = useMobileView();\n const { state: agentState, nodeName } = useCoAgent({\n name: \"subgraphs\",\n initialState: INITIAL_STATE,\n config: {\n streamSubgraphs: true,\n }\n });\n\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n\n // Current itinerary strip\n const ItineraryStrip = () => {\n const selectedFlight = agentState?.itinerary?.flight;\n const selectedHotel = agentState?.itinerary?.hotel;\n const hasExperiences = agentState?.experiences?.length > 0;\n\n return (\n
\n
Current Itinerary:
\n
\n
\n 📍\n Amsterdam → San Francisco\n
\n {selectedFlight && (\n
\n ✈️\n {selectedFlight.airline} - {selectedFlight.price}\n
\n )}\n {selectedHotel && (\n
\n 🏨\n {selectedHotel.name}\n
\n )}\n {hasExperiences && (\n
\n 🎯\n {agentState.experiences.length} experiences planned\n
\n )}\n
\n
\n );\n };\n\n // Compact agent status\n const AgentStatus = () => {\n let activeAgent = 'supervisor';\n if (nodeName?.includes('flights_agent')) {\n activeAgent = 'flights';\n }\n if (nodeName?.includes('hotels_agent')) {\n activeAgent = 'hotels';\n }\n if (nodeName?.includes('experiences_agent')) {\n activeAgent = 'experiences';\n }\n return (\n
\n
Active Agent:
\n
\n
\n 👨‍💼\n Supervisor\n
\n
\n ✈️\n Flights\n
\n
\n 🏨\n Hotels\n
\n
\n 🎯\n Experiences\n
\n
\n
\n )\n };\n\n // Travel details component\n const TravelDetails = () => (\n
\n
\n

✈️ Flight Options

\n
\n {agentState?.flights?.length > 0 ? (\n agentState.flights.map((flight, index) => (\n
\n {flight.airline}:\n {flight.departure} → {flight.arrival} ({flight.duration}) - {flight.price}\n
\n ))\n ) : (\n

No flights found yet

\n )}\n {agentState?.itinerary?.flight && (\n
\n Selected: {agentState.itinerary.flight.airline} - {agentState.itinerary.flight.price}\n
\n )}\n
\n
\n\n
\n

🏨 Hotel Options

\n
\n {agentState?.hotels?.length > 0 ? (\n agentState.hotels.map((hotel, index) => (\n
\n {hotel.name}:\n {hotel.location} - {hotel.price_per_night} ({hotel.rating})\n
\n ))\n ) : (\n

No hotels found yet

\n )}\n {agentState?.itinerary?.hotel && (\n
\n Selected: {agentState.itinerary.hotel.name} - {agentState.itinerary.hotel.price_per_night}\n
\n )}\n
\n
\n\n
\n

🎯 Experiences

\n
\n {agentState?.experiences?.length > 0 ? (\n agentState.experiences.map((experience, index) => (\n
\n
{experience.name}
\n
{experience.type}
\n
{experience.description}
\n
Location: {experience.location}
\n
\n ))\n ) : (\n

No experiences planned yet

\n )}\n
\n
\n
\n );\n\n return (\n
\n \n \n \n
\n );\n}", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": "/* Travel Planning Subgraphs Demo Styles */\n/* Essential styles that cannot be achieved with Tailwind classes */\n\n/* Main container with CopilotSidebar layout */\n.travel-planner-container {\n min-height: 100vh;\n padding: 2rem;\n background: linear-gradient(135deg, #f0f9ff 0%, #e0f2fe 100%);\n}\n\n/* Travel content area styles */\n.travel-content {\n max-width: 1200px;\n margin: 0 auto;\n padding: 0 1rem;\n display: flex;\n flex-direction: column;\n gap: 1rem;\n}\n\n/* Itinerary strip */\n.itinerary-strip {\n background: white;\n border-radius: 0.5rem;\n padding: 1rem;\n border: 1px solid #e5e7eb;\n box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);\n}\n\n.itinerary-label {\n font-size: 0.875rem;\n font-weight: 600;\n color: #6b7280;\n margin-bottom: 0.5rem;\n}\n\n.itinerary-items {\n display: flex;\n flex-wrap: wrap;\n gap: 1rem;\n}\n\n.itinerary-item {\n display: flex;\n align-items: center;\n gap: 0.5rem;\n padding: 0.5rem 0.75rem;\n background: #f9fafb;\n border-radius: 0.375rem;\n font-size: 0.875rem;\n}\n\n.item-icon {\n font-size: 1rem;\n}\n\n/* Agent status */\n.agent-status {\n background: white;\n border-radius: 0.5rem;\n padding: 1rem;\n border: 1px solid #e5e7eb;\n box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);\n}\n\n.status-label {\n font-size: 0.875rem;\n font-weight: 600;\n color: #6b7280;\n margin-bottom: 0.5rem;\n}\n\n.agent-indicators {\n display: flex;\n gap: 0.75rem;\n}\n\n.agent-indicator {\n display: flex;\n align-items: center;\n gap: 0.5rem;\n padding: 0.5rem 0.75rem;\n border-radius: 0.375rem;\n font-size: 0.875rem;\n background: #f9fafb;\n border: 1px solid #e5e7eb;\n transition: all 0.2s ease;\n}\n\n.agent-indicator.active {\n background: #dbeafe;\n border-color: #3b82f6;\n color: #1d4ed8;\n box-shadow: 0 0 0 2px rgba(59, 130, 246, 0.1);\n}\n\n/* Travel details sections */\n.travel-details {\n background: white;\n border-radius: 0.5rem;\n padding: 1rem;\n border: 1px solid #e5e7eb;\n box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);\n display: grid;\n gap: 1rem;\n}\n\n.details-section h4 {\n font-size: 1rem;\n font-weight: 600;\n color: #1f2937;\n margin-bottom: 0.5rem;\n display: flex;\n align-items: center;\n gap: 0.5rem;\n}\n\n.detail-items {\n display: flex;\n flex-direction: column;\n gap: 0.5rem;\n}\n\n.detail-item {\n padding: 0.5rem;\n background: #f9fafb;\n border-radius: 0.25rem;\n font-size: 0.875rem;\n display: flex;\n justify-content: space-between;\n}\n\n.detail-item strong {\n color: #6b7280;\n font-weight: 500;\n}\n\n.detail-tips {\n padding: 0.5rem;\n background: #eff6ff;\n border-radius: 0.25rem;\n font-size: 0.75rem;\n color: #1d4ed8;\n}\n\n.activity-item {\n padding: 0.75rem;\n background: #f0f9ff;\n border-radius: 0.25rem;\n border-left: 2px solid #0ea5e9;\n}\n\n.activity-name {\n font-weight: 600;\n color: #1f2937;\n font-size: 0.875rem;\n margin-bottom: 0.25rem;\n}\n\n.activity-category {\n font-size: 0.75rem;\n color: #0ea5e9;\n margin-bottom: 0.25rem;\n}\n\n.activity-description {\n color: #4b5563;\n font-size: 0.75rem;\n margin-bottom: 0.25rem;\n}\n\n.activity-meta {\n font-size: 0.75rem;\n color: #6b7280;\n}\n\n.no-activities {\n text-align: center;\n color: #9ca3af;\n font-style: italic;\n padding: 1rem;\n font-size: 0.875rem;\n}\n\n/* Interrupt UI for Chat Sidebar (Generative UI) */\n.interrupt-container {\n display: flex;\n flex-direction: column;\n gap: 1rem;\n max-width: 100%;\n padding-top: 34px;\n}\n\n.interrupt-header {\n margin-bottom: 
0.5rem;\n}\n\n.agent-name {\n font-size: 0.875rem;\n font-weight: 600;\n color: #1f2937;\n margin: 0 0 0.25rem 0;\n}\n\n.agent-message {\n font-size: 0.75rem;\n color: #6b7280;\n margin: 0;\n line-height: 1.4;\n}\n\n.interrupt-options {\n padding: 0.75rem;\n display: flex;\n flex-direction: column;\n gap: 0.5rem;\n max-height: 300px;\n overflow-y: auto;\n}\n\n.option-card {\n display: flex;\n flex-direction: column;\n gap: 0.25rem;\n padding: 0.75rem;\n background: #f9fafb;\n border: 1px solid #e5e7eb;\n border-radius: 0.5rem;\n cursor: pointer;\n transition: all 0.2s ease;\n text-align: left;\n position: relative;\n min-height: auto;\n}\n\n.option-card:hover {\n background: #f3f4f6;\n border-color: #d1d5db;\n}\n\n.option-card:active {\n background: #e5e7eb;\n}\n\n.option-card.recommended {\n background: #eff6ff;\n border-color: #3b82f6;\n box-shadow: 0 0 0 1px rgba(59, 130, 246, 0.1);\n}\n\n.option-card.recommended:hover {\n background: #dbeafe;\n}\n\n.recommendation-badge {\n position: absolute;\n top: -2px;\n right: -2px;\n background: #3b82f6;\n color: white;\n font-size: 0.625rem;\n padding: 0.125rem 0.375rem;\n border-radius: 0.75rem;\n font-weight: 500;\n}\n\n.option-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 0.125rem;\n}\n\n.airline-name, .hotel-name {\n font-weight: 600;\n font-size: 0.8rem;\n color: #1f2937;\n}\n\n.price, .rating {\n font-weight: 600;\n font-size: 0.75rem;\n color: #059669;\n}\n\n.route-info, .location-info {\n font-size: 0.7rem;\n color: #6b7280;\n margin-bottom: 0.125rem;\n}\n\n.duration-info, .price-info {\n font-size: 0.7rem;\n color: #9ca3af;\n}\n\n/* Mobile responsive adjustments */\n@media (max-width: 768px) {\n .travel-planner-container {\n padding: 0.5rem;\n padding-bottom: 120px; /* Space for mobile chat */\n }\n \n .travel-content {\n padding: 0;\n gap: 0.75rem;\n }\n \n .itinerary-items {\n flex-direction: column;\n gap: 0.5rem;\n }\n \n .agent-indicators {\n flex-direction: column;\n gap: 0.5rem;\n }\n \n .agent-indicator {\n padding: 0.75rem;\n }\n \n .travel-details {\n padding: 0.75rem;\n }\n\n .interrupt-container {\n padding: 0.5rem;\n }\n\n .option-card {\n padding: 0.625rem;\n }\n\n .interrupt-options {\n max-height: 250px;\n }\n}", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# LangGraph Subgraphs Demo: Travel Planning Assistant ✈️\n\nThis demo showcases **LangGraph subgraphs** through an interactive travel planning assistant. Watch as specialized AI agents collaborate to plan your perfect trip!\n\n## What are LangGraph Subgraphs? 🤖\n\n**Subgraphs** are the key to building modular, scalable AI systems in LangGraph. 
A subgraph is essentially \"a graph that is used as a node in another graph\" - enabling powerful encapsulation and reusability.\nFor more info, check out the [LangGraph docs](https://langchain-ai.github.io/langgraph/concepts/subgraphs/).\n\n### Key Concepts\n\n- **Encapsulation**: Each subgraph handles a specific domain with its own expertise\n- **Modularity**: Subgraphs can be developed, tested, and maintained independently \n- **Reusability**: The same subgraph can be used across multiple parent graphs\n- **State Communication**: Subgraphs can share state or use different schemas with transformations\n\n## Demo Architecture 🗺️\n\nThis travel planner demonstrates **supervisor-coordinated subgraphs** with **human-in-the-loop** decision making:\n\n### Parent Graph: Travel Supervisor\n- **Role**: Coordinates the travel planning process and routes to specialized agents\n- **State Management**: Maintains a shared itinerary object across all subgraphs\n- **Intelligence**: Determines what's needed and when each agent should be called\n\n### Subgraph 1: ✈️ Flights Agent\n- **Specialization**: Finding and booking flight options\n- **Process**: Presents flight options from Amsterdam to San Francisco with recommendations\n- **Interaction**: Uses interrupts to let users choose their preferred flight\n- **Data**: Static flight options (KLM, United) with pricing and duration\n\n### Subgraph 2: 🏨 Hotels Agent \n- **Specialization**: Finding and booking accommodation\n- **Process**: Shows hotel options in San Francisco with different price points\n- **Interaction**: Uses interrupts for user to select their preferred hotel\n- **Data**: Static hotel options (Hotel Zephyr, Ritz-Carlton, Hotel Zoe)\n\n### Subgraph 3: 🎯 Experiences Agent\n- **Specialization**: Curating restaurants and activities\n- **Process**: AI-powered recommendations based on selected flights and hotels\n- **Features**: Combines 2 restaurants and 2 activities with location-aware suggestions\n- **Data**: Static experiences (Pier 39, Golden Gate Bridge, Swan Oyster Depot, Tartine Bakery)\n\n## How It Works 🔄\n\n1. **User Request**: \"Help me plan a trip to San Francisco\"\n2. **Supervisor Analysis**: Determines what travel components are needed\n3. **Sequential Routing**: Routes to each agent in logical order:\n - First: Flights Agent (get transportation sorted)\n - Then: Hotels Agent (book accommodation) \n - Finally: Experiences Agent (plan activities)\n4. **Human Decisions**: Each agent presents options and waits for user choice via interrupts\n5. **State Building**: Selected choices are stored in the shared itinerary object\n6. **Completion**: All agents report back to supervisor for final coordination\n\n## State Communication Patterns 📊\n\n### Shared State Schema\nAll subgraph agents share and contribute to a common state object. When any agent updates the shared state, these changes are immediately reflected in the frontend through real-time syncing. 
This ensures that:\n\n- **Flight selections** from the Flights Agent are visible to subsequent agents\n- **Hotel choices** influence the Experiences Agent's recommendations \n- **All updates** are synchronized with the frontend UI in real-time\n- **State persistence** maintains the travel itinerary throughout the workflow\n\n### Human-in-the-Loop Pattern\nTwo of the specialist agents use **interrupts** to pause execution and gather user preferences:\n\n- **Flights Agent**: Presents options → interrupt → waits for selection → continues\n- **Hotels Agent**: Shows hotels → interrupt → waits for choice → continues\n\n## Try These Examples! 💡\n\n### Getting Started\n- \"Help me plan a trip to San Francisco\"\n- \"I want to visit San Francisco from Amsterdam\"\n- \"Plan my travel itinerary\"\n\n### During the Process\nWhen the Flights Agent presents options:\n- Choose between KLM ($650, 11h 30m) or United ($720, 12h 15m)\n\nWhen the Hotels Agent shows accommodations:\n- Select from Hotel Zephyr, The Ritz-Carlton, or Hotel Zoe\n\nThe Experiences Agent will then provide tailored recommendations based on your choices!\n\n## Frontend Capabilities 👁️\n\n- **Human-in-the-loop with interrupts** from subgraphs for user decision making\n- **Subgraphs detection and streaming** to show which agent is currently active\n- **Real-time state updates** as the shared itinerary is built across agents\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\nThe supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n\"\"\"\n\nfrom typing import Dict, List, Any, Optional, Annotated, Union\nfrom dataclasses import dataclass\nimport json\nimport os\nfrom pydantic import BaseModel, Field\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\n\n# OpenAI imports\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage, AIMessage\n\ndef create_interrupt(message: str, options: List[Any], recommendation: Any, agent: str):\n return interrupt({\n \"message\": message,\n \"options\": options,\n \"recommendation\": recommendation,\n \"agent\": agent,\n })\n\n# State schema for travel planning\n@dataclass\nclass Flight:\n airline: str\n departure: str\n arrival: str\n price: str\n duration: str\n\n@dataclass\nclass Hotel:\n name: str\n location: str\n price_per_night: str\n rating: str\n\n@dataclass\nclass Experience:\n name: str\n type: str # \"restaurant\" or \"activity\"\n description: str\n location: str\n\ndef merge_itinerary(left: Union[dict, None] = None, right: Union[dict, None] = None) -> dict:\n \"\"\"Custom reducer to merge itinerary updates.\"\"\"\n if not left:\n left = {}\n if not right:\n right = {}\n\n return {**left, **right}\n\nclass TravelAgentState(MessagesState):\n \"\"\"Shared state for the travel agent system\"\"\"\n # Travel request details\n origin: str = \"\"\n destination: str = \"\"\n\n # Results from each agent\n flights: List[Flight] = None\n hotels: List[Hotel] = None\n experiences: List[Experience] = None\n\n itinerary: Annotated[dict, merge_itinerary] = None\n\n # Tools available to all agents\n tools: List[Any] = None\n\n # Supervisor routing\n next_agent: Optional[str] = None\n\n# Static data for demonstration\nSTATIC_FLIGHTS 
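The `create_interrupt` helper above is what pauses each specialist subgraph until the user picks an option in the UI. `@langchain/langgraph` exposes the same `interrupt` primitive, so a TypeScript counterpart could look roughly like this (a sketch under the assumption that the frontend resumes the interrupt with a JSON-serialized option, as the demo's `resolve(JSON.stringify(option))` does):

```typescript
import { interrupt } from "@langchain/langgraph";

// Pause the graph and surface options to the user; the return value is
// whatever the frontend passes to resolve(...), here a JSON string.
function createInterrupt<T>(
  message: string,
  options: T[],
  recommendation: T,
  agent: string,
): string {
  return interrupt({ message, options, recommendation, agent });
}
```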
= [\n Flight(\"KLM\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$650\", \"11h 30m\"),\n Flight(\"United\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$720\", \"12h 15m\")\n]\n\nSTATIC_HOTELS = [\n Hotel(\"Hotel Zephyr\", \"Fisherman's Wharf\", \"$280/night\", \"4.2 stars\"),\n Hotel(\"The Ritz-Carlton\", \"Nob Hill\", \"$550/night\", \"4.8 stars\"),\n Hotel(\"Hotel Zoe\", \"Union Square\", \"$320/night\", \"4.4 stars\")\n]\n\nSTATIC_EXPERIENCES = [\n Experience(\"Pier 39\", \"activity\", \"Iconic waterfront destination with shops and sea lions\", \"Fisherman's Wharf\"),\n Experience(\"Golden Gate Bridge\", \"activity\", \"World-famous suspension bridge with stunning views\", \"Golden Gate\"),\n Experience(\"Swan Oyster Depot\", \"restaurant\", \"Historic seafood counter serving fresh oysters\", \"Polk Street\"),\n Experience(\"Tartine Bakery\", \"restaurant\", \"Artisanal bakery famous for bread and pastries\", \"Mission District\")\n]\n\n# Flights finder subgraph\nasync def flights_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds flight options\"\"\"\n\n # Simulate flight search with static data\n flights = STATIC_FLIGHTS\n\n selected_flight = state.get('itinerary', {}).get('flight', None)\n if not selected_flight:\n selected_flight = create_interrupt(\n message=f\"\"\"\n Found {len(flights)} flight options from {state.get('origin', 'Amsterdam')} to {state.get('destination', 'San Francisco')}.\n I recommend choosing the flight by {flights[0].airline} since it's known to be on time and cheaper.\n \"\"\",\n options=flights,\n recommendation=flights[0],\n agent=\"flights\"\n )\n\n if isinstance(selected_flight, str):\n selected_flight = json.loads(selected_flight)\n return Command(\n goto=END,\n update={\n \"flights\": flights,\n \"itinerary\": {\n \"flight\": selected_flight\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Flights Agent: Great. I'll book you the {selected_flight[\"airline\"]} flight from {selected_flight[\"departure\"]} to {selected_flight[\"arrival\"]}.\"\n }]\n }\n )\n\n# Hotels finder subgraph\nasync def hotels_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds hotel options\"\"\"\n\n # Simulate hotel search with static data\n hotels = STATIC_HOTELS\n selected_hotel = state.get('itinerary', {}).get('hotel', None)\n if not selected_hotel:\n selected_hotel = create_interrupt(\n message=f\"\"\"\n Found {len(hotels)} accommodation options in {state.get('destination', 'San Francisco')}.\n I recommend choosing the {hotels[2].name} since it strikes the balance between rating, price, and location.\n \"\"\",\n options=hotels,\n recommendation=hotels[2],\n agent=\"hotels\"\n )\n\n if isinstance(selected_hotel, str):\n selected_hotel = json.loads(selected_hotel)\n return Command(\n goto=END,\n update={\n \"hotels\": hotels,\n \"itinerary\": {\n \"hotel\": selected_hotel\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Hotels Agent: Excellent choice! 
You'll like {selected_hotel[\"name\"]}.\"\n }]\n }\n )\n\n# Experiences finder subgraph\nasync def experiences_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds restaurant and activity recommendations\"\"\"\n\n # Filter experiences (2 restaurants, 2 activities)\n restaurants = [exp for exp in STATIC_EXPERIENCES if exp.type == \"restaurant\"][:2]\n activities = [exp for exp in STATIC_EXPERIENCES if exp.type == \"activity\"][:2]\n experiences = restaurants + activities\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n itinerary = state.get(\"itinerary\", {})\n\n system_prompt = f\"\"\"\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now is let the user know of your findings.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flight chosen: {itinerary.get(\"flight\", None)}\n - Hotel chosen: {itinerary.get(\"hotel\", None)}\n - activities found: {activities}\n - restaurants found: {restaurants}\n \"\"\"\n\n # Get the experiences agent's response\n response = await model.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"experiences\": experiences,\n \"messages\": state[\"messages\"] + [response]\n }\n )\n\nclass SupervisorResponseFormatter(BaseModel):\n \"\"\"Always use this tool to structure your response to the user.\"\"\"\n answer: str = Field(description=\"The answer to the user\")\n next_agent: str | None = Field(description=\"The agent to go to. Not required if you do not want to route to another agent.\")\n\n# Supervisor agent\nasync def supervisor_agent(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Main supervisor that coordinates all subgraphs\"\"\"\n\n itinerary = state.get(\"itinerary\", {})\n\n # Check what's already completed\n has_flights = itinerary.get(\"flight\", None) is not None\n has_hotels = itinerary.get(\"hotel\", None) is not None\n has_experiences = state.get(\"experiences\", None) is not None\n\n system_prompt = f\"\"\"\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flights found: {has_flights}\n - Hotels found: {has_hotels}\n - Experiences found: {has_experiences}\n - Itinerary (Things that the user has already confirmed selection on): {json.dumps(itinerary, indent=2)}\n \n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options \n - experiences_agent: Finds restaurant and activity recommendations\n - {END}: Mark task as complete when all information is gathered\n \n You must route to the appropriate agent based on what's missing. 
Once all agents have completed their tasks, route to {END}.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Bind the routing tool\n model_with_tools = model.bind_tools(\n [SupervisorResponseFormatter],\n parallel_tool_calls=False,\n )\n\n # Get supervisor decision\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls for routing\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n if isinstance(tool_call, dict):\n tool_call_args = tool_call[\"args\"]\n else:\n tool_call_args = tool_call.args\n\n next_agent = tool_call_args[\"next_agent\"]\n\n # Add tool response\n tool_response = {\n \"role\": \"tool\",\n \"content\": f\"Routing to {next_agent} and providing the answer\",\n \"tool_call_id\": tool_call.id if hasattr(tool_call, 'id') else tool_call[\"id\"]\n }\n\n messages = messages + [tool_response, AIMessage(content=tool_call_args[\"answer\"])]\n\n if next_agent is not None:\n return Command(goto=next_agent)\n\n # Fallback if no tool call\n return Command(\n goto=END,\n update={\"messages\": messages}\n )\n\n# Create subgraphs\nflights_graph = StateGraph(TravelAgentState)\nflights_graph.add_node(\"flights_agent_chat_node\", flights_finder)\nflights_graph.set_entry_point(\"flights_agent_chat_node\")\nflights_graph.add_edge(START, \"flights_agent_chat_node\")\nflights_graph.add_edge(\"flights_agent_chat_node\", END)\nflights_subgraph = flights_graph.compile()\n\nhotels_graph = StateGraph(TravelAgentState)\nhotels_graph.add_node(\"hotels_agent_chat_node\", hotels_finder)\nhotels_graph.set_entry_point(\"hotels_agent_chat_node\")\nhotels_graph.add_edge(START, \"hotels_agent_chat_node\")\nhotels_graph.add_edge(\"hotels_agent_chat_node\", END)\nhotels_subgraph = hotels_graph.compile()\n\nexperiences_graph = StateGraph(TravelAgentState)\nexperiences_graph.add_node(\"experiences_agent_chat_node\", experiences_finder)\nexperiences_graph.set_entry_point(\"experiences_agent_chat_node\")\nexperiences_graph.add_edge(START, \"experiences_agent_chat_node\")\nexperiences_graph.add_edge(\"experiences_agent_chat_node\", END)\nexperiences_subgraph = experiences_graph.compile()\n\n# Main supervisor workflow\nworkflow = StateGraph(TravelAgentState)\n\n# Add supervisor and subgraphs as nodes\nworkflow.add_node(\"supervisor\", supervisor_agent)\nworkflow.add_node(\"flights_agent\", flights_subgraph)\nworkflow.add_node(\"hotels_agent\", hotels_subgraph)\nworkflow.add_node(\"experiences_agent\", experiences_subgraph)\n\n# Set entry point\nworkflow.set_entry_point(\"supervisor\")\nworkflow.add_edge(START, \"supervisor\")\n\n# Add edges back to supervisor after each subgraph\nworkflow.add_edge(\"flights_agent\", \"supervisor\")\nworkflow.add_edge(\"hotels_agent\", \"supervisor\")\nworkflow.add_edge(\"experiences_agent\", \"supervisor\")\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, 
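The wiring just shown is the heart of the subgraphs pattern: each specialist is compiled into its own graph, and that compiled graph is then added as an ordinary node of the parent graph, with an edge back to the supervisor. A condensed TypeScript sketch of the same shape (illustrative names and stand-in nodes; the real demo routes from the supervisor dynamically via `Command` and ends the run there, so the fixed supervisor edge below is for the sketch only):

```typescript
import { Annotation, MessagesAnnotation, StateGraph, START, END } from "@langchain/langgraph";

const TravelState = Annotation.Root({ ...MessagesAnnotation.spec });

// Stand-in nodes; the real ones call models, tools, and interrupt().
const flightsFinder = async () => ({ messages: [] });
const supervisorAgent = async () => ({ messages: [] });

// A subgraph is just a compiled graph...
const flightsSubgraph = new StateGraph(TravelState)
  .addNode("flights_agent_chat_node", flightsFinder)
  .addEdge(START, "flights_agent_chat_node")
  .addEdge("flights_agent_chat_node", END)
  .compile();

// ...used as a node in the parent graph, with an edge back to the
// supervisor so control returns after the specialist finishes.
const workflow = new StateGraph(TravelState)
  .addNode("supervisor", supervisorAgent)
  .addNode("flights_agent", flightsSubgraph)
  .addEdge(START, "supervisor")
  .addEdge("supervisor", "flights_agent") // fixed edge for the sketch only
  .addEdge("flights_agent", "supervisor")
  .compile();
```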
don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - } - ], - "langgraph-typescript::agentic_chat": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤖 Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interact with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend are automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. 
Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * A simple agentic chat flow using LangGraph instead of CrewAI.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Annotation, MessagesAnnotation, StateGraph, Command, START, END } from \"@langchain/langgraph\";\n\nconst AgentStateAnnotation = Annotation.Root({\n tools: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n /**\n * Standard chat node based on the ReAct design pattern. It handles:\n * - The model to use (and binds in CopilotKit actions and the tools defined above)\n * - The system prompt\n * - Getting a response from the model\n * - Handling tool calls\n *\n * For more about the ReAct design pattern, see: \n * https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n */\n \n // 1. Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n \n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // 2. Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n // your_tool_here\n ],\n {\n // 2.1 Disable parallel tool calls to avoid race conditions,\n // enable this for faster performance if you want to manage\n // the complexity of running tool calls in parallel.\n parallel_tool_calls: false,\n }\n );\n\n // 3. 
Define the system message by which the chat model will be run\n const systemMessage = new SystemMessage({\n content: \"You are a helpful assistant.\"\n });\n\n // 4. Run the model to generate a response\n const response = await modelWithTools.invoke([\n systemMessage,\n ...state.messages,\n ], config);\n\n // 6. We've handled all tool calls, so we can end the graph.\n return new Command({\n goto: END,\n update: {\n messages: [response]\n }\n })\n}\n\n// Define a new graph \nconst workflow = new StateGraph(AgentStateAnnotation)\n .addNode(\"chat_node\", chatNode)\n .addEdge(START, \"chat_node\");\n\n// Compile the graph\nexport const agenticChatGraph = workflow.compile();", - "language": "ts", - "type": "file" - } - ], - "langgraph-typescript::agentic_generative_ui": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter(step => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
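With `agenticChatGraph` compiled, a quick way to exercise it outside the dojo is a direct `invoke` call. A hypothetical smoke test, assuming it runs in the same module scope and that `OPENAI_API_KEY` is set in the environment:

```typescript
import { HumanMessage } from "@langchain/core/messages";

// One user turn through the compiled graph; `tools` is empty here
// because CopilotKit normally injects the frontend actions at runtime.
const result = await agenticChatGraph.invoke({
  messages: [new HumanMessage("Hello!")],
  tools: [],
});
console.log(result.messages.at(-1)?.content);
```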
\n
\n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n \n {/* Progress Bar */}\n
\n
\n
\n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending = step.status === \"pending\" && \n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n
\n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n
\n )}\n\n {/* Status Icon */}\n
\n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n
\n {step.description}\n
\n {isCurrentPending && (\n
\n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n
\n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n
\n
\n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🚀 Agentic Generative UI Task Executor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic generative UI** capabilities:\n\n1. **Real-time Status Updates**: The Copilot provides live feedback as it works\n through complex tasks\n2. **Long-running Task Execution**: See how agents can handle extended processes\n with continuous feedback\n3. **Dynamic UI Generation**: The interface updates in real-time to reflect the\n agent's progress\n\n## How to Interact\n\nSimply ask your Copilot to perform any moderately complex task:\n\n- \"Make me a sandwich\"\n- \"Plan a vacation to Japan\"\n- \"Create a weekly workout routine\"\n\nThe Copilot will break down the task into steps and begin \"executing\" them,\nproviding real-time status updates as it progresses.\n\n## ✨ Agentic Generative UI in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and creates a detailed execution plan\n- Each step is processed sequentially with realistic timing\n- Status updates are streamed to the frontend using CopilotKit's streaming\n capabilities\n- The UI dynamically renders these updates without page refreshes\n- The entire flow is managed by the agent, requiring no manual intervention\n\n**What you'll see in this demo:**\n\n- The Copilot breaks your task into logical steps\n- A status indicator shows the current progress\n- Each step is highlighted as it's being executed\n- Detailed status messages explain what's happening at each moment\n- Upon completion, you receive a summary of the task execution\n\nThis pattern of providing real-time progress for long-running tasks is perfect\nfor scenarios where users benefit from transparency into complex processes -\nfrom data analysis to content creation, system configurations, or multi-stage\nworkflows!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call 
will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. \n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n 
\"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * An example demonstrating agentic generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Annotation, Command, MessagesAnnotation, StateGraph, END } from \"@langchain/langgraph\";\n\n// This tool simulates performing a task on the server.\n// The tool call will be streamed to the frontend as it is being generated.\nconst PERFORM_TASK_TOOL = {\n type: \"function\",\n function: {\n name: \"generate_task_steps_generative_ui\",\n description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in gerund form (i.e. Digging hole, opening door, ...)\",\n parameters: {\n type: \"object\",\n properties: {\n steps: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n description: {\n type: \"string\",\n description: \"The text of the step in gerund form\"\n },\n status: {\n type: \"string\",\n enum: [\"pending\"],\n description: \"The status of the step, always 'pending'\"\n }\n },\n required: [\"description\", \"status\"]\n },\n description: \"An array of 10 step objects, each containing text and status\"\n }\n },\n required: [\"steps\"]\n }\n }\n};\n\nconst AgentStateAnnotation = Annotation.Root({\n steps: Annotation>({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n tools: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n ...MessagesAnnotation.spec,\n});\n\ntype AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig) {\n /**\n * This is the entry point for the flow.\n */\n\n if (!state.steps) {\n state.steps = [];\n }\n\n return {\n steps: state.steps || []\n };\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig) {\n /**\n * Standard chat node.\n */\n const systemPrompt = `\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function \\`generate_task_steps_generative_ui\\`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. 
\n Always say you actually did the steps, not merely generated them.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n \n // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"steps\",\n tool: \"generate_task_steps_generative_ui\",\n tool_argument: \"steps\",\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n PERFORM_TASK_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model to generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n const messages = [...state.messages, response];\n\n // Extract any tool calls from the response\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n \n if (toolCall.name === \"generate_task_steps_generative_ui\") {\n const steps = toolCall.args.steps.map((step: any) => ({\n description: step.description,\n status: step.status\n }));\n \n // Add the tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Steps executed.\",\n tool_call_id: toolCall.id\n };\n\n const updatedMessages = [...messages, toolResponse];\n\n // Simulate executing the steps\n for (let i = 0; i < steps.length; i++) {\n // simulate executing the step\n await new Promise(resolve => setTimeout(resolve, 1000));\n steps[i].status = \"completed\";\n // Update the state with the completed step\n state.steps = steps;\n // Emit custom events to update the frontend\n await dispatchCustomEvent(\"manually_emit_state\", state, config);\n }\n \n return new Command({\n goto: \"start_flow\",\n update: {\n messages: updatedMessages,\n steps: state.steps\n }\n });\n }\n }\n\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation)\n .addNode(\"start_flow\", startFlow)\n .addNode(\"chat_node\", chatNode)\n .addEdge(\"__start__\", \"start_flow\")\n .addEdge(\"start_flow\", \"chat_node\")\n .addEdge(\"chat_node\", \"__end__\");\n\n// Compile the graph\nexport const agenticGenerativeUiGraph = workflow.compile();", - "language": "ts", - "type": "file" - } - ], - "langgraph-typescript::human_in_the_loop": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
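The agent.ts above simulates execution by completing one step per second and emitting intermediate state after each change. A condensed sketch of just that emit loop, assuming the same `manually_emit_state` event name the demo's runtime listens for:

```ts
import { dispatchCustomEvent } from "@langchain/core/callbacks/dispatch";
import { RunnableConfig } from "@langchain/core/runnables";

interface Step {
  description: string;
  status: string;
}

// Condensed from chatNode above: complete each step with a delay and push the
// intermediate state to the frontend after every change. The demo emits its
// full agent state; passing only { steps } here is a simplification.
async function simulateSteps(steps: Step[], config?: RunnableConfig): Promise<void> {
  for (const step of steps) {
    await new Promise((resolve) => setTimeout(resolve, 1000));
    step.status = "completed";
    await dispatchCustomEvent("manually_emit_state", { steps }, config);
  }
}
```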
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // LangGraph uses its own hook to handle human-in-the-loop interactions via LangGraph interrupts;\n // this hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // LangGraph uses its own hook to handle human-in-the-loop interactions via LangGraph interrupts,\n // so don't use this action for the LangGraph integrations.\n available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return ;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤝 Human-in-the-Loop Task Planner\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **human-in-the-loop** capabilities:\n\n1. **Collaborative Planning**: The Copilot generates task steps and lets you\n decide which ones to perform\n2. **Interactive Decision Making**: Select or deselect steps to customize the\n execution plan\n3. **Adaptive Responses**: The Copilot adapts its execution based on your\n choices, even handling missing steps\n\n## How to Interact\n\nTry these steps to experience the demo:\n\n1. Ask your Copilot to help with a task, such as:\n\n - \"Make me a sandwich\"\n - \"Plan a weekend trip\"\n - \"Organize a birthday party\"\n - \"Start a garden\"\n\n2. Review the suggested steps provided by your Copilot\n\n3. Select or deselect steps using the checkboxes to customize the plan\n\n - Try removing essential steps to see how the Copilot adapts!\n\n4. Click \"Execute Plan\" to see the outcome based on your selections\n\n## ✨ Human-in-the-Loop Magic in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and breaks it down into logical steps\n- These steps are presented to you through a dynamic UI component\n- Your selections are captured as user input\n- The agent considers your choices when executing the plan\n- The agent adapts to missing steps with creative problem-solving\n\n**What you'll see in this demo:**\n\n- The Copilot provides a detailed, step-by-step plan for your task\n- You have complete control over which steps to include\n- If you remove essential steps, the Copilot provides entertaining and creative\n workarounds\n- The final execution reflects your choices, showing how human input shapes the\n outcome\n- Each response is tailored to your specific selections\n\nThis human-in-the-loop pattern creates a powerful collaborative experience where\nboth human judgment and AI capabilities work together to achieve better results\nthan either could alone!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple 
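The page.tsx earlier in this entry resolves LangGraph interrupts on the client with `useLangGraphInterrupt`. A stripped-down sketch of that round-trip (the `StepApproval` component is hypothetical; the real demo renders the full step-selection UI shown above):

```tsx
import React from "react";
import { useLangGraphInterrupt } from "@copilotkit/react-core";

// Hypothetical minimal renderer: when the graph interrupts with a list of
// steps, show a button; calling resolve(...) resumes the paused graph run.
export function StepApproval() {
  useLangGraphInterrupt({
    render: ({ event, resolve }) => (
      <button onClick={() => resolve("The user accepted all steps")}>
        Approve {event.value?.steps?.length ?? 0} steps
      </button>
    ),
  });
  return null;
}
```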
of words per step) that are required for a task.\n The step should be in imperative form (i.e. Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any]\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final 
response\n return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * A LangGraph implementation of the human-in-the-loop agent.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, interrupt, Annotation, MessagesAnnotation, StateGraph, 
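On the server side, both the Python and TypeScript agents pause with `interrupt(...)` and receive the user's selection as the return value once the frontend resolves. A minimal TypeScript sketch of that pause/resume pattern, assuming the same `Step` shape used throughout this entry:

```ts
import { interrupt } from "@langchain/langgraph";

interface Step {
  description: string;
  status: string;
}

// Sketch of the pattern in process_steps_node: execution stops at
// interrupt(...) until the client resolves; the resolved value comes back
// as the return value when the node re-runs.
function askUserAboutSteps(steps: Step[]): string {
  const userResponse = interrupt({ steps });
  return userResponse as string;
}
```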
END, START } from \"@langchain/langgraph\";\n\nconst DEFINE_TASK_TOOL = {\n type: \"function\",\n function: {\n name: \"plan_execution_steps\",\n description: \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in imperative form (i.e. Dig hole, Open door, ...)\",\n parameters: {\n type: \"object\",\n properties: {\n steps: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n description: {\n type: \"string\",\n description: \"The text of the step in imperative form\"\n },\n status: {\n type: \"string\",\n enum: [\"enabled\"],\n description: \"The status of the step, always 'enabled'\"\n }\n },\n required: [\"description\", \"status\"]\n },\n description: \"An array of 10 step objects, each containing text and status\"\n }\n },\n required: [\"steps\"]\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n steps: Annotation>({\n reducer: (x, y) => y ?? x,\n default: () => []\n }),\n tools: Annotation(),\n user_response: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => undefined\n }),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This is the entry point for the flow.\n */\n\n // Initialize steps list if not exists\n if (!state.steps) {\n state.steps = [];\n }\n\n return new Command({\n goto: \"chat_node\",\n update: {\n messages: state.messages,\n steps: state.steps,\n }\n });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * Standard chat node where the agent processes messages and generates responses.\n * If task steps are defined, the user can enable/disable them using interrupts.\n */\n const systemPrompt = `\n You are a helpful assistant that can perform any task.\n You MUST call the \\`plan_execution_steps\\` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n \n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"steps\",\n tool: \"plan_execution_steps\",\n tool_argument: \"steps\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n DEFINE_TASK_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model and generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n \n // Handle tool calls\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n\n if (toolCall.name === \"plan_execution_steps\") {\n // Get the steps from the tool call\n const stepsRaw = toolCall.args.steps || [];\n \n // Set initial status to \"enabled\" for all steps\n const stepsData: Array<{ description: string; status: string }> = [];\n \n // Handle different potential formats of steps data\n if (Array.isArray(stepsRaw)) {\n for (const step of stepsRaw) {\n if (typeof step === 'object' && step.description) {\n 
stepsData.push({\n description: step.description,\n status: \"enabled\"\n });\n } else if (typeof step === 'string') {\n stepsData.push({\n description: step,\n status: \"enabled\"\n });\n }\n }\n }\n \n // If no steps were processed correctly, return to END with the updated messages\n if (stepsData.length === 0) {\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n }\n\n // Update steps in state and emit to frontend\n state.steps = stepsData;\n \n // Add a tool response to satisfy OpenAI's requirements\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Task steps generated.\",\n tool_call_id: toolCall.id\n };\n \n const updatedMessages = [...messages, toolResponse];\n\n // Move to the process_steps_node which will handle the interrupt and final response\n return new Command({\n goto: \"process_steps_node\",\n update: {\n messages: updatedMessages,\n steps: state.steps,\n }\n });\n }\n }\n \n // If no tool calls or not plan_execution_steps, return to END with the updated messages\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n}\n\nasync function processStepsNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This node handles the user interrupt for step customization and generates the final response.\n */\n\n let userResponse: string;\n\n // Check if we already have a user_response in the state\n // This happens when the node restarts after an interrupt\n if (state.user_response) {\n userResponse = state.user_response;\n } else {\n // Use LangGraph interrupt to get user input on steps\n // This will pause execution and wait for user input in the frontend\n userResponse = interrupt({ steps: state.steps });\n // Store the user response in state for when the node restarts\n state.user_response = userResponse;\n }\n \n // Generate the creative completion response\n const finalPrompt = `\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n `;\n \n const finalResponse = await new ChatOpenAI({ model: \"gpt-4o\" }).invoke([\n new SystemMessage({ content: finalPrompt }),\n { role: \"user\", content: userResponse }\n ], config);\n\n // Add the final response to messages\n const messages = [...state.messages, finalResponse];\n \n // Clear the user_response from state to prepare for future interactions\n const newState = { ...state };\n delete newState.user_response;\n \n // Return to END with the updated messages\n return new Command({\n goto: END,\n update: {\n messages: messages,\n steps: state.steps,\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\nworkflow.addNode(\"process_steps_node\", processStepsNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"process_steps_node\", END);\n\n// Add conditional edges from chat_node\nworkflow.addConditionalEdges(\n \"chat_node\",\n 
(state: AgentState) => {\n // This would be determined by the Command returned from chat_node\n // For now, we'll assume the logic is handled in the Command's goto property\n return \"continue\";\n },\n {\n \"process_steps_node\": \"process_steps_node\",\n \"continue\": END,\n }\n);\n\n// Compile the graph\nexport const humanInTheLoopGraph = workflow.compile();", - "language": "ts", - "type": "file" - } - ], - "langgraph-typescript::predictive_state_updates": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n const chatTitle = 'AI Document Editor'\n const chatDescription = 'Ask me to create or edit a document'\n const initialLabel = 'Hi 👋 How can I help with your document?'\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
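Note that the conditional-edge callback at the end of the human-in-the-loop agent.ts always returns "continue"; the actual routing happens through the `Command.goto` values the nodes return. In recent `@langchain/langgraph` releases, a node's possible Command destinations can instead be declared with the `ends` option on `addNode`. A sketch only, reusing the node functions and `AgentStateAnnotation` defined in that file, and assuming the installed langgraph version supports `ends`:

```ts
import { StateGraph, START, END } from "@langchain/langgraph";

// Sketch: declare where each Command-returning node may route, instead of
// wiring a placeholder conditional edge.
const commandRoutedWorkflow = new StateGraph(AgentStateAnnotation)
  .addNode("start_flow", startFlow, { ends: ["chat_node"] })
  .addNode("chat_node", chatNode, { ends: ["process_steps_node", END] })
  .addNode("process_steps_node", processStepsNode, { ends: [END] })
  .addEdge(START, "start_flow");

export const altHumanInTheLoopGraph = commandRoutedWorkflow.compile();
```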
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n
\n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction({\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n }, [agentState?.document]);\n\n // Action to write the document.\n useCopilotAction({\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n }, [agentState?.document]);\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n
\n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n
\n )}\n
\n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": "/* Basic editor styles */\n.tiptap-container {\n height: 100vh; /* Full viewport height */\n width: 100vw; /* Full viewport width */\n display: flex;\n flex-direction: column;\n}\n\n.tiptap {\n flex: 1; /* Take up remaining space */\n overflow: auto; /* Allow scrolling if content overflows */\n}\n\n.tiptap :first-child {\n margin-top: 0;\n}\n\n/* List styles */\n.tiptap ul,\n.tiptap ol {\n padding: 0 1rem;\n margin: 1.25rem 1rem 1.25rem 0.4rem;\n}\n\n.tiptap ul li p,\n.tiptap ol li p {\n margin-top: 0.25em;\n margin-bottom: 0.25em;\n}\n\n/* Heading styles */\n.tiptap h1,\n.tiptap h2,\n.tiptap h3,\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n line-height: 1.1;\n margin-top: 2.5rem;\n text-wrap: pretty;\n font-weight: bold;\n}\n\n.tiptap h1,\n.tiptap h2,\n.tiptap h3,\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n margin-top: 3.5rem;\n margin-bottom: 1.5rem;\n}\n\n.tiptap p {\n margin-bottom: 1rem;\n}\n\n.tiptap h1 {\n font-size: 1.4rem;\n}\n\n.tiptap h2 {\n font-size: 1.2rem;\n}\n\n.tiptap h3 {\n font-size: 1.1rem;\n}\n\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n font-size: 1rem;\n}\n\n/* Code and preformatted text styles */\n.tiptap code {\n background-color: var(--purple-light);\n border-radius: 0.4rem;\n color: var(--black);\n font-size: 0.85rem;\n padding: 0.25em 0.3em;\n}\n\n.tiptap pre {\n background: var(--black);\n border-radius: 0.5rem;\n color: var(--white);\n font-family: \"JetBrainsMono\", monospace;\n margin: 1.5rem 0;\n padding: 0.75rem 1rem;\n}\n\n.tiptap pre code {\n background: none;\n color: inherit;\n font-size: 0.8rem;\n padding: 0;\n}\n\n.tiptap blockquote {\n border-left: 3px solid var(--gray-3);\n margin: 1.5rem 0;\n padding-left: 1rem;\n}\n\n.tiptap hr {\n border: none;\n border-top: 1px solid var(--gray-2);\n margin: 2rem 0;\n}\n\n.tiptap s {\n background-color: #f9818150;\n padding: 2px;\n font-weight: bold;\n color: rgba(0, 0, 0, 0.7);\n}\n\n.tiptap em {\n background-color: #b2f2bb;\n padding: 2px;\n font-weight: bold;\n font-style: normal;\n}\n\n.copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 📝 Predictive State Updates Document Editor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **predictive state updates** for real-time\ndocument collaboration:\n\n1. **Live Document Editing**: Watch as your Copilot makes changes to a document\n in real-time\n2. **Diff Visualization**: See exactly what's being changed as it happens\n3. 
**Streaming Updates**: Changes are displayed character-by-character as the\n Copilot works\n\n## How to Interact\n\nTry these interactions with the collaborative document editor:\n\n- \"Fix the grammar and typos in this document\"\n- \"Make this text more professional\"\n- \"Add a section about [topic]\"\n- \"Summarize this content in bullet points\"\n- \"Change the tone to be more casual\"\n\nWatch as the Copilot processes your request and edits the document in real-time\nright before your eyes.\n\n## ✨ Predictive State Updates in Action\n\n**What's happening technically:**\n\n- The document state is shared between your UI and the Copilot\n- As the Copilot generates content, changes are streamed to the UI\n- Each modification is visualized with additions and deletions\n- The UI renders these changes progressively, without waiting for completion\n- All edits are tracked and displayed in a visually intuitive way\n\n**What you'll see in this demo:**\n\n- Text changes are highlighted in different colors (green for additions, red for\n deletions)\n- The document updates character-by-character, creating a typing-like effect\n- You can see the Copilot's thought process as it refines the content\n- The final document seamlessly incorporates all changes\n- The experience feels collaborative, as if someone is editing alongside you\n\nThis pattern of real-time collaborative editing with diff visualization is\nperfect for document editors, code review tools, content creation platforms, or\nany application where users benefit from seeing exactly how content is being\ntransformed!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. 
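Both language versions of this agent attach the same `predict_state` metadata so the runtime can mirror a streaming tool argument into shared state while the tool call is still being generated. A condensed TypeScript sketch of that configuration step, with the `withPredictState` helper introduced here for illustration:

```ts
import { RunnableConfig } from "@langchain/core/runnables";

// Condensed from chatNode: ask the runtime to treat the streaming `document`
// argument of write_document_local as the agent's `document` state key.
function withPredictState(config: RunnableConfig = {}): RunnableConfig {
  return {
    ...config,
    metadata: {
      ...config.metadata,
      predict_state: [
        {
          state_key: "document",
          tool: "write_document_local",
          tool_argument: "document",
        },
      ],
    },
  };
}
```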
Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n return Command(\n goto=\"chat_node\"\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n \"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n 
update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * A demo of predictive state updates using LangGraph.\n */\n\nimport { v4 as uuidv4 } from \"uuid\";\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\nconst WRITE_DOCUMENT_TOOL = {\n type: \"function\",\n function: {\n name: \"write_document_local\",\n description: [\n \"Write a document. Use markdown formatting to format the document.\",\n \"It's good to format the document extensively so it's easy to read.\",\n \"You can use all kinds of markdown.\",\n \"However, do not use italic or strike-through formatting, it's reserved for another purpose.\",\n \"You MUST write the full document, even when changing only a few words.\",\n \"When making edits to the document, try to make them minimal - do not change every word.\",\n \"Keep stories SHORT!\"\n ].join(\" \"),\n parameters: {\n type: \"object\",\n properties: {\n document: {\n type: \"string\",\n description: \"The document to write\"\n },\n },\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n document: Annotation({\n reducer: (x, y) => y ?? x,\n default: () => undefined\n }),\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This is the entry point for the flow.\n */\n return new Command({\n goto: \"chat_node\"\n });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * Standard chat node.\n */\n\n const systemPrompt = `\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 
2 sentences max.\n This is the current state of the document: ----\\n ${state.document || ''}\\n-----\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n // Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"document\",\n tool: \"write_document_local\",\n tool_argument: \"document\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n WRITE_DOCUMENT_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model to generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n\n // Extract any tool calls from the response\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n\n if (toolCall.name === \"write_document_local\") {\n // Add the tool response to messages\n const toolResponse = {\n role: \"tool\" as const,\n content: \"Document written.\",\n tool_call_id: toolCall.id\n };\n\n // Add confirmation tool call\n const confirmToolCall = {\n role: \"assistant\" as const,\n content: \"\",\n tool_calls: [{\n id: uuidv4(),\n type: \"function\" as const,\n function: {\n name: \"confirm_changes\",\n arguments: \"{}\"\n }\n }]\n };\n\n const updatedMessages = [...messages, toolResponse, confirmToolCall];\n\n // Return Command to route to end\n return new Command({\n goto: END,\n update: {\n messages: updatedMessages,\n document: toolCall.args.document\n }\n });\n }\n }\n\n // If no tool was called, go to end\n return new Command({\n goto: END,\n update: {\n messages: messages\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const predictiveStateUpdatesGraph = workflow.compile();", - "language": "ts", - "type": "file" - } - ], - "langgraph-typescript::shared_state": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n 
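The shared-state entry that begins here keeps UI state and agent state synchronized in both directions. A minimal sketch of that two-way binding with `useCoAgent` (the `"shared_state"` agent name matches the demo; the `TitleEditor` component is illustrative only):

```tsx
import React from "react";
import { useCoAgent } from "@copilotkit/react-core";

interface RecipeState {
  recipe: { title: string };
}

// Illustrative component: typing writes through to the agent's shared state,
// and any agent-side update to `recipe.title` flows back into the input.
export function TitleEditor() {
  const { state, setState } = useCoAgent<RecipeState>({
    name: "shared_state",
    initialState: { recipe: { title: "" } },
  });

  return (
    <input
      value={state.recipe.title}
      onChange={(e) =>
        setState({ ...state, recipe: { ...state.recipe, title: e.target.value } })
      }
    />
  );
}
```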
} = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n
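{/* Descriptive note (added): isMobile switches between the pull-up sheet above and the desktop sidebar-style chat rendered in the else branch. */}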
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, 
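// Descriptive note (added): updateRecipe mirrors each change into both the shared agent state (setAgentState) and the local recipe copy, keeping UI and agent in sync.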
preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
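{/* Descriptive note (added): changedKeysRef, computed above by diffing agent state against the local recipe, drives the Ping highlights on the sections below. */}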
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
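{/* Descriptive note (added): Controlled ingredient card; name and amount edits flow through updateIngredient into the shared recipe state. */}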
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
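{/* Descriptive note (added): Numbered, editable step; editingInstructionIndex tracks which textarea currently has focus. */}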
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
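{/* Descriptive note (added): appendMessage from useCopilotChat sends a user message programmatically, triggering the same agent run as typed chat input. */}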
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n background-color: #fff;\n color: #000;\n border-bottom: 0px;\n}\n\n/* Recipe App Styles */\n.app-container {\n min-height: 100vh;\n width: 100%;\n display: flex;\n align-items: center;\n justify-content: center;\n background-size: cover;\n background-position: center;\n background-repeat: no-repeat;\n background-attachment: fixed;\n position: relative;\n overflow: auto;\n}\n\n.recipe-card {\n background-color: rgba(255, 255, 255, 0.97);\n border-radius: 16px;\n box-shadow: 0 15px 30px rgba(0, 0, 0, 0.25), 0 5px 15px rgba(0, 0, 0, 0.15);\n width: 100%;\n max-width: 750px;\n margin: 20px auto;\n padding: 14px 32px;\n position: relative;\n z-index: 1;\n backdrop-filter: blur(5px);\n border: 1px solid rgba(255, 255, 255, 0.3);\n transition: transform 0.2s ease, box-shadow 0.2s ease;\n animation: fadeIn 0.5s ease-out forwards;\n box-sizing: border-box;\n overflow: hidden;\n}\n\n.recipe-card:hover {\n transform: translateY(-5px);\n box-shadow: 0 20px 40px rgba(0, 0, 0, 0.3), 0 10px 20px rgba(0, 0, 0, 0.2);\n}\n\n/* Recipe Header */\n.recipe-header {\n margin-bottom: 24px;\n}\n\n.recipe-title-input {\n width: 100%;\n font-size: 24px;\n font-weight: bold;\n border: none;\n outline: none;\n padding: 8px 0;\n margin-bottom: 0px;\n}\n\n.recipe-meta {\n display: flex;\n align-items: center;\n gap: 20px;\n margin-top: 5px;\n margin-bottom: 14px;\n}\n\n.meta-item {\n display: flex;\n align-items: center;\n gap: 8px;\n color: #555;\n}\n\n.meta-icon {\n font-size: 20px;\n color: #777;\n}\n\n.meta-text {\n font-size: 15px;\n}\n\n/* Recipe Meta Selects */\n.meta-item select {\n border: none;\n background: transparent;\n font-size: 15px;\n color: #555;\n cursor: pointer;\n outline: none;\n padding-right: 18px;\n transition: color 0.2s, transform 0.1s;\n font-weight: 500;\n}\n\n.meta-item select:hover,\n.meta-item select:focus {\n color: #FF5722;\n}\n\n.meta-item select:active {\n transform: scale(0.98);\n}\n\n.meta-item select option {\n color: #333;\n background-color: white;\n font-weight: normal;\n padding: 8px;\n}\n\n/* Section Container */\n.section-container {\n margin-bottom: 20px;\n position: relative;\n width: 100%;\n}\n\n.section-title {\n font-size: 20px;\n font-weight: 700;\n margin-bottom: 20px;\n color: #333;\n position: relative;\n display: inline-block;\n}\n\n.section-title:after {\n content: \"\";\n position: absolute;\n bottom: -8px;\n left: 0;\n width: 40px;\n height: 3px;\n background-color: #ff7043;\n border-radius: 3px;\n}\n\n/* Dietary Preferences */\n.dietary-options {\n display: flex;\n flex-wrap: wrap;\n gap: 10px 16px;\n margin-bottom: 16px;\n width: 100%;\n}\n\n.dietary-option {\n display: flex;\n align-items: center;\n gap: 6px;\n font-size: 14px;\n cursor: pointer;\n margin-bottom: 4px;\n}\n\n.dietary-option input {\n cursor: pointer;\n}\n\n/* Ingredients */\n.ingredients-container {\n display: flex;\n flex-wrap: wrap;\n gap: 10px;\n margin-bottom: 15px;\n width: 100%;\n box-sizing: border-box;\n}\n\n.ingredient-card {\n display: flex;\n align-items: center;\n background-color: rgba(255, 255, 255, 0.9);\n border-radius: 12px;\n padding: 12px;\n margin-bottom: 10px;\n box-shadow: 0 4px 10px rgba(0, 0, 0, 0.08);\n position: relative;\n transition: all 0.2s ease;\n border: 1px solid rgba(240, 
240, 240, 0.8);\n width: calc(33.333% - 7px);\n box-sizing: border-box;\n}\n\n.ingredient-card:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 15px rgba(0, 0, 0, 0.12);\n}\n\n.ingredient-card .remove-button {\n position: absolute;\n right: 10px;\n top: 10px;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 24px;\n height: 24px;\n line-height: 1;\n}\n\n.ingredient-card:hover .remove-button {\n display: block;\n}\n\n.ingredient-icon {\n font-size: 24px;\n margin-right: 12px;\n display: flex;\n align-items: center;\n justify-content: center;\n width: 40px;\n height: 40px;\n background-color: #f7f7f7;\n border-radius: 50%;\n flex-shrink: 0;\n}\n\n.ingredient-content {\n flex: 1;\n display: flex;\n flex-direction: column;\n gap: 3px;\n min-width: 0;\n}\n\n.ingredient-name-input,\n.ingredient-amount-input {\n border: none;\n background: transparent;\n outline: none;\n width: 100%;\n padding: 0;\n text-overflow: ellipsis;\n overflow: hidden;\n white-space: nowrap;\n}\n\n.ingredient-name-input {\n font-weight: 500;\n font-size: 14px;\n}\n\n.ingredient-amount-input {\n font-size: 13px;\n color: #666;\n}\n\n.ingredient-name-input::placeholder,\n.ingredient-amount-input::placeholder {\n color: #aaa;\n}\n\n.remove-button {\n background: none;\n border: none;\n color: #999;\n font-size: 20px;\n cursor: pointer;\n padding: 0;\n width: 28px;\n height: 28px;\n display: flex;\n align-items: center;\n justify-content: center;\n margin-left: 10px;\n}\n\n.remove-button:hover {\n color: #FF5722;\n}\n\n/* Instructions */\n.instructions-container {\n display: flex;\n flex-direction: column;\n gap: 6px;\n position: relative;\n margin-bottom: 12px;\n width: 100%;\n}\n\n.instruction-item {\n position: relative;\n display: flex;\n width: 100%;\n box-sizing: border-box;\n margin-bottom: 8px;\n align-items: flex-start;\n}\n\n.instruction-number {\n display: flex;\n align-items: center;\n justify-content: center;\n min-width: 26px;\n height: 26px;\n background-color: #ff7043;\n color: white;\n border-radius: 50%;\n font-weight: 600;\n flex-shrink: 0;\n box-shadow: 0 2px 4px rgba(255, 112, 67, 0.3);\n z-index: 1;\n font-size: 13px;\n margin-top: 2px;\n}\n\n.instruction-line {\n position: absolute;\n left: 13px; /* Half of the number circle width */\n top: 22px;\n bottom: -18px;\n width: 2px;\n background: linear-gradient(to bottom, #ff7043 60%, rgba(255, 112, 67, 0.4));\n z-index: 0;\n}\n\n.instruction-content {\n background-color: white;\n border-radius: 10px;\n padding: 10px 14px;\n margin-left: 12px;\n flex-grow: 1;\n transition: all 0.2s ease;\n box-shadow: 0 2px 6px rgba(0, 0, 0, 0.08);\n border: 1px solid rgba(240, 240, 240, 0.8);\n position: relative;\n width: calc(100% - 38px);\n box-sizing: border-box;\n display: flex;\n align-items: center;\n}\n\n.instruction-content-editing {\n background-color: #fff9f6;\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12), 0 0 0 2px rgba(255, 112, 67, 0.2);\n}\n\n.instruction-content:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12);\n}\n\n.instruction-textarea {\n width: 100%;\n background: transparent;\n border: none;\n resize: vertical;\n font-family: inherit;\n font-size: 14px;\n line-height: 1.4;\n min-height: 20px;\n outline: none;\n padding: 0;\n margin: 0;\n}\n\n.instruction-delete-btn {\n position: absolute;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 20px;\n height: 
20px;\n line-height: 1;\n top: 50%;\n transform: translateY(-50%);\n right: 8px;\n}\n\n.instruction-content:hover .instruction-delete-btn {\n display: flex;\n align-items: center;\n justify-content: center;\n}\n\n/* Action Button */\n.action-container {\n display: flex;\n justify-content: center;\n margin-top: 40px;\n padding-bottom: 20px;\n position: relative;\n}\n\n.improve-button {\n background-color: #ff7043;\n border: none;\n color: white;\n border-radius: 30px;\n font-size: 18px;\n font-weight: 600;\n padding: 14px 28px;\n cursor: pointer;\n transition: all 0.3s ease;\n box-shadow: 0 4px 15px rgba(255, 112, 67, 0.4);\n display: flex;\n align-items: center;\n justify-content: center;\n text-align: center;\n position: relative;\n min-width: 180px;\n}\n\n.improve-button:hover {\n background-color: #ff5722;\n transform: translateY(-2px);\n box-shadow: 0 8px 20px rgba(255, 112, 67, 0.5);\n}\n\n.improve-button.loading {\n background-color: #ff7043;\n opacity: 0.8;\n cursor: not-allowed;\n padding-left: 42px; /* Reduced padding to bring text closer to icon */\n padding-right: 22px; /* Balance the button */\n justify-content: flex-start; /* Left align text for better alignment with icon */\n}\n\n.improve-button.loading:after {\n content: \"\"; /* Add space between icon and text */\n display: inline-block;\n width: 8px; /* Width of the space */\n}\n\n.improve-button:before {\n content: \"\";\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='white' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpath d='M12 2v4M12 18v4M4.93 4.93l2.83 2.83M16.24 16.24l2.83 2.83M2 12h4M18 12h4M4.93 19.07l2.83-2.83M16.24 7.76l2.83-2.83'/%3E%3C/svg%3E\");\n width: 20px; /* Slightly smaller icon */\n height: 20px;\n background-repeat: no-repeat;\n background-size: contain;\n position: absolute;\n left: 16px; /* Slightly adjusted */\n top: 50%;\n transform: translateY(-50%);\n display: none;\n}\n\n.improve-button.loading:before {\n display: block;\n animation: spin 1.5s linear infinite;\n}\n\n@keyframes spin {\n 0% { transform: translateY(-50%) rotate(0deg); }\n 100% { transform: translateY(-50%) rotate(360deg); }\n}\n\n/* Ping Animation */\n.ping-animation {\n position: absolute;\n display: flex;\n width: 12px;\n height: 12px;\n top: 0;\n right: 0;\n}\n\n.ping-circle {\n position: absolute;\n display: inline-flex;\n width: 100%;\n height: 100%;\n border-radius: 50%;\n background-color: #38BDF8;\n opacity: 0.75;\n animation: ping 1.5s cubic-bezier(0, 0, 0.2, 1) infinite;\n}\n\n.ping-dot {\n position: relative;\n display: inline-flex;\n width: 12px;\n height: 12px;\n border-radius: 50%;\n background-color: #0EA5E9;\n}\n\n@keyframes ping {\n 75%, 100% {\n transform: scale(2);\n opacity: 0;\n }\n}\n\n/* Instruction hover effects */\n.instruction-item:hover .instruction-delete-btn {\n display: flex !important;\n}\n\n/* Add some subtle animations */\n@keyframes fadeIn {\n from { opacity: 0; transform: translateY(20px); }\n to { opacity: 1; transform: translateY(0); }\n}\n\n/* Better center alignment for the recipe card */\n.recipe-card-container {\n display: flex;\n justify-content: center;\n width: 100%;\n position: relative;\n z-index: 1;\n margin: 0 auto;\n box-sizing: border-box;\n}\n\n/* Add Buttons */\n.add-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 8px;\n padding: 10px 16px;\n cursor: pointer;\n font-weight: 500;\n display: 
inline-block;\n font-size: 14px;\n margin-bottom: 0;\n}\n\n.add-step-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 6px;\n padding: 6px 12px;\n cursor: pointer;\n font-weight: 500;\n font-size: 13px;\n}\n\n/* Section Headers */\n.section-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 12px;\n}", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🍳 Shared State Recipe Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **shared state** functionality - a powerful\nfeature that enables bidirectional data flow between:\n\n1. **Frontend → Agent**: UI controls update the agent's context in real-time\n2. **Agent → Frontend**: The Copilot's recipe creations instantly update the UI\n components\n\nIt's like having a cooking buddy who not only listens to what you want but also\nupdates your recipe card as you chat - no refresh needed! ✨\n\n## How to Interact\n\nMix and match any of these parameters (or none at all - it's up to you!):\n\n- **Skill Level**: Beginner to expert 👨‍🍳\n- **Cooking Time**: Quick meals or slow cooking ⏱️\n- **Special Preferences**: Dietary needs, flavor profiles, health goals 🥗\n- **Ingredients**: Items you want to include 🧅🥩🍄\n- **Instructions**: Any specific steps\n\nThen chat with your Copilot chef with prompts like:\n\n- \"I'm a beginner cook. Can you make me a quick dinner?\"\n- \"I need something spicy with chicken that takes under 30 minutes!\"\n\n## ✨ Shared State Magic in Action\n\n**What's happening technically:**\n\n- The UI and Copilot agent share the same state object (**Agent State = UI\n State**)\n- Changes from either side automatically update the other\n- Neither side needs to manually request updates from the other\n\n**What you'll see in this demo:**\n\n- Set cooking time to 20 minutes in the UI and watch the Copilot immediately\n respect your time constraint\n- Add ingredients through the UI and see them appear in your recipe\n- When the Copilot suggests new ingredients, watch them automatically appear in\n the UI ingredients list\n- Change your skill level and see how the Copilot adapts its instructions in\n real-time\n\nThis synchronized state creates a seamless experience where the agent always has\nyour current preferences, and any updates to the recipe are instantly reflected\nin both places.\n\nThis shared state pattern can be applied to any application where you want your\nUI and Copilot to work together in perfect harmony!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass 
SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any]\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
don't describe the recipe, just say what you did.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the generate_recipe tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"recipe\",\n \"tool\": \"generate_recipe\",\n \"tool_argument\": \"recipe\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_recipe\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n # Check if args is already a dict or needs to be parsed\n tool_call_args = (tool_call[\"args\"]\n if isinstance(tool_call[\"args\"], dict)\n else json.loads(tool_call[\"args\"]))\n\n if tool_call[\"name\"] == \"generate_recipe\":\n # Update recipe state with tool_call_args\n recipe_data = tool_call_args[\"recipe\"]\n\n # If we have an existing recipe, update it\n if \"recipe\" in state and state[\"recipe\"] is not None:\n recipe = state[\"recipe\"]\n for key, value in recipe_data.items():\n if value is not None: # Only update fields that were provided\n recipe[key] = value\n else:\n # Create a new recipe\n recipe = {\n \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n \"ingredients\": recipe_data.get(\"ingredients\", []),\n \"instructions\": recipe_data.get(\"instructions\", [])\n }\n\n # Add tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Recipe generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Explicitly emit the updated state to ensure it's shared with frontend\n state[\"recipe\"] = recipe\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n # Return command with updated recipe\n return Command(\n goto=\"start_node\",\n update={\n \"messages\": messages,\n \"recipe\": recipe\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"recipe\": state[\"recipe\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * A demo of shared state between the agent and CopilotKit using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { dispatchCustomEvent } from \"@langchain/core/callbacks/dispatch\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\"\n}\n\nenum SpecialPreferences {\n HIGH_PROTEIN = \"High Protein\",\n LOW_CARB = \"Low Carb\",\n SPICY = \"Spicy\",\n BUDGET_FRIENDLY = \"Budget-Friendly\",\n ONE_POT_MEAL = \"One-Pot Meal\",\n VEGETARIAN = \"Vegetarian\",\n VEGAN = \"Vegan\"\n}\n\nenum CookingTime {\n FIVE_MIN = \"5 min\",\n FIFTEEN_MIN = \"15 min\",\n THIRTY_MIN = \"30 min\",\n FORTY_FIVE_MIN = \"45 min\",\n SIXTY_PLUS_MIN = \"60+ min\"\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n skill_level: SkillLevel;\n special_preferences: SpecialPreferences[];\n cooking_time: CookingTime;\n ingredients: Ingredient[];\n instructions: string[];\n changes?: string;\n}\n\nconst GENERATE_RECIPE_TOOL = {\n type: \"function\",\n function: {\n name: \"generate_recipe\",\n description: \"Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it. Make sure the recipe is complete. ALWAYS provide the entire recipe, not just the changes.\",\n parameters: {\n type: \"object\",\n properties: {\n recipe: {\n type: \"object\",\n properties: {\n skill_level: {\n type: \"string\",\n enum: Object.values(SkillLevel),\n description: \"The skill level required for the recipe\"\n },\n special_preferences: {\n type: \"array\",\n items: {\n type: \"string\",\n enum: Object.values(SpecialPreferences)\n },\n description: \"A list of special preferences for the recipe\"\n },\n cooking_time: {\n type: \"string\",\n enum: Object.values(CookingTime),\n description: \"The cooking time of the recipe\"\n },\n ingredients: {\n type: \"array\",\n items: {\n type: \"object\",\n properties: {\n icon: { type: \"string\", description: \"The icon emoji (not emoji code like '\\\\u1f35e', but the actual emoji like 🥕) of the ingredient\" },\n name: { type: \"string\" },\n amount: { type: \"string\" }\n }\n },\n description: \"Entire list of ingredients for the recipe, including the new ingredients and the ones that are already in the recipe\"\n },\n instructions: {\n type: \"array\",\n items: { type: \"string\" },\n description: \"Entire list of instructions for the recipe, including the new instructions and the ones that are already there\"\n },\n changes: {\n type: \"string\",\n description: \"A description of the changes made to the recipe\"\n }\n },\n }\n },\n required: [\"recipe\"]\n }\n }\n};\n\nexport const AgentStateAnnotation = Annotation.Root({\n recipe: Annotation(),\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function startFlow(state: AgentState, config?: RunnableConfig): Promise {\n /**\n * This is the entry point for the flow.\n */\n\n // Initialize recipe if not exists\n if 
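// Descriptive note (added): startFlow seeds a default recipe when none exists and emits it as intermediate state so the frontend syncs before chat begins.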
(!state.recipe) {\n state.recipe = {\n skill_level: SkillLevel.BEGINNER,\n special_preferences: [],\n cooking_time: CookingTime.FIFTEEN_MIN,\n ingredients: [{ icon: \"🍴\", name: \"Sample Ingredient\", amount: \"1 unit\" }],\n instructions: [\"First step instruction\"]\n };\n // Emit the initial state to ensure it's properly shared with the frontend\n await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n }\n \n return new Command({\n goto: \"chat_node\",\n update: {\n messages: state.messages,\n recipe: state.recipe\n }\n });\n}\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise<Command> {\n /**\n * Standard chat node.\n */\n // Create a safer serialization of the recipe\n let recipeJson = \"No recipe yet\";\n if (state.recipe) {\n try {\n recipeJson = JSON.stringify(state.recipe, null, 2);\n } catch (e) {\n recipeJson = `Error serializing recipe: ${e}`;\n }\n }\n\n const systemPrompt = `You are a helpful assistant for creating recipes. \n This is the current state of the recipe: ${recipeJson}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. don't describe the recipe, just say what you did.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o-mini\" });\n \n // Define config for the model\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Use \"predict_state\" metadata to set up streaming for the generate_recipe tool\n if (!config.metadata) config.metadata = {};\n config.metadata.predict_state = [{\n state_key: \"recipe\",\n tool: \"generate_recipe\",\n tool_argument: \"recipe\"\n }];\n\n // Bind the tools to the model\n const modelWithTools = model.bindTools(\n [\n ...state.tools,\n GENERATE_RECIPE_TOOL\n ],\n {\n // Disable parallel tool calls to avoid race conditions\n parallel_tool_calls: false,\n }\n );\n\n // Run the model and generate a response\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n // Update messages with the response\n const messages = [...state.messages, response];\n \n // Handle tool calls\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n \n if (toolCall.name === \"generate_recipe\") {\n // Update recipe state with tool_call_args\n const recipeData = toolCall.args.recipe;\n let recipe: Recipe;\n // If we have an existing recipe, update it\n if (state.recipe) {\n recipe = { ...state.recipe };\n for (const [key, value] of Object.entries(recipeData)) {\n if (value !== null && value !== undefined) { // Only update fields that were provided\n (recipe as any)[key] = value;\n }\n }\n } else {\n // Create a new recipe\n recipe = {\n skill_level: recipeData.skill_level || SkillLevel.BEGINNER,\n special_preferences: recipeData.special_preferences || [],\n cooking_time: recipeData.cooking_time || CookingTime.FIFTEEN_MIN,\n ingredients: recipeData.ingredients || [],\n instructions: recipeData.instructions || []\n };\n }\n \n // Add tool response to messages\n const 
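// Descriptive note (added): a synthetic tool result is appended so the model sees the generate_recipe call as completed before the next turn.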
toolResponse = {\n role: \"tool\" as const,\n content: \"Recipe generated.\",\n tool_call_id: toolCall.id\n };\n \n const updatedMessages = [...messages, toolResponse];\n \n // Explicitly emit the updated state to ensure it's shared with frontend\n state.recipe = recipe;\n await dispatchCustomEvent(\"manually_emit_intermediate_state\", state, config);\n \n // Return command with updated recipe\n return new Command({\n goto: \"start_flow\",\n update: {\n messages: updatedMessages,\n recipe: recipe\n }\n });\n }\n }\n\n return new Command({\n goto: END,\n update: {\n messages: messages,\n recipe: state.recipe\n }\n });\n}\n\n// Define the graph\nconst workflow = new StateGraph(AgentStateAnnotation);\n\n// Add nodes\nworkflow.addNode(\"start_flow\", startFlow);\nworkflow.addNode(\"chat_node\", chatNode);\n\n// Add edges\nworkflow.setEntryPoint(\"start_flow\");\nworkflow.addEdge(START, \"start_flow\");\nworkflow.addEdge(\"start_flow\", \"chat_node\");\nworkflow.addEdge(\"chat_node\", END);\n\n// Compile the graph\nexport const sharedStateGraph = workflow.compile();", - "language": "ts", - "type": "file" - } - ], - "langgraph-typescript::tool_based_generative_ui": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n \n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
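{/* Descriptive note (added): In-chat generative UI; this card is rendered by the generate_haiku action's render prop while its arguments stream in. */}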
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
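{/* Descriptive note (added): The card finds its haiku in page state by matching the first Japanese line, so image selections persist outside the chat. */}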
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
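{/* Descriptive note (added): generate_haiku is registered above via useCopilotAction (followUp: false); its handler validates the three image names before prepending the new haiku. */}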
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n}\n\n.page-background {\n /* Darker gradient background */\n background: linear-gradient(170deg, #e9ecef 0%, #ced4da 100%);\n}\n\n@keyframes fade-scale-in {\n from {\n opacity: 0;\n transform: translateY(10px) scale(0.98);\n }\n to {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Updated card entry animation */\n@keyframes pop-in {\n 0% {\n opacity: 0;\n transform: translateY(15px) scale(0.95);\n }\n 70% {\n opacity: 1;\n transform: translateY(-2px) scale(1.02);\n }\n 100% {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Animation for subtle background gradient movement */\n@keyframes animated-gradient {\n 0% {\n background-position: 0% 50%;\n }\n 50% {\n background-position: 100% 50%;\n }\n 100% {\n background-position: 0% 50%;\n }\n}\n\n/* Animation for flash effect on apply */\n@keyframes flash-border-glow {\n 0% {\n /* Start slightly intensified */\n border-top-color: #ff5b4a !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n }\n 50% {\n /* Peak intensity */\n border-top-color: #ff4733 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.08),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 35px rgba(255, 71, 51, 0.7);\n }\n 100% {\n /* Return to default state appearance */\n border-top-color: #ff6f61 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 10px rgba(255, 111, 97, 0.15);\n }\n}\n\n/* Existing animation for haiku lines */\n@keyframes fade-slide-in {\n from {\n opacity: 0;\n transform: translateX(-15px);\n }\n to {\n opacity: 1;\n transform: translateX(0);\n }\n}\n\n.animated-fade-in {\n /* Use the new pop-in animation */\n animation: pop-in 0.6s ease-out forwards;\n}\n\n.haiku-card {\n /* Subtle animated gradient background */\n background: linear-gradient(120deg, #ffffff 0%, #fdfdfd 50%, #ffffff 100%);\n background-size: 200% 200%;\n animation: animated-gradient 10s ease infinite;\n\n /* === Explicit Border Override Attempt === */\n /* 1. Set the default grey border for all sides */\n border: 1px solid #dee2e6;\n\n /* 2. 
Explicitly override the top border immediately after */\n border-top: 10px solid #ff6f61 !important; /* Orange top - Added !important */\n /* === End Explicit Border Override Attempt === */\n\n padding: 2.5rem 3rem;\n border-radius: 20px;\n\n /* Default glow intensity */\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 15px rgba(255, 111, 97, 0.25);\n text-align: left;\n max-width: 745px;\n margin: 3rem auto;\n min-width: 600px;\n\n /* Transition */\n transition: transform 0.35s ease, box-shadow 0.35s ease, border-top-width 0.35s ease, border-top-color 0.35s ease;\n}\n\n.haiku-card:hover {\n transform: translateY(-8px) scale(1.03);\n /* Enhanced shadow + Glow */\n box-shadow: 0 15px 35px rgba(0, 0, 0, 0.1),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n /* Modify only top border properties */\n border-top-width: 14px !important; /* Added !important */\n border-top-color: #ff5b4a !important; /* Added !important */\n}\n\n.haiku-card .flex {\n margin-bottom: 1.5rem;\n}\n\n.haiku-card .flex.haiku-line { /* Target the lines specifically */\n margin-bottom: 1.5rem;\n opacity: 0; /* Start hidden for animation */\n animation: fade-slide-in 0.5s ease-out forwards;\n /* animation-delay is set inline in page.tsx */\n}\n\n/* Remove previous explicit color overrides - rely on Tailwind */\n/* .haiku-card p.text-4xl {\n color: #212529;\n}\n\n.haiku-card p.text-base {\n color: #495057;\n} */\n\n.haiku-card.applied-flash {\n /* Apply the flash animation once */\n /* Note: animation itself has !important on border-top-color */\n animation: flash-border-glow 0.6s ease-out forwards;\n}\n\n/* Styling for images within the main haiku card */\n.haiku-card-image {\n width: 9.5rem; /* Increased size (approx w-48) */\n height: 9.5rem; /* Increased size (approx h-48) */\n object-fit: cover;\n border-radius: 1.5rem; /* rounded-xl */\n border: 1px solid #e5e7eb;\n /* Enhanced shadow with subtle orange hint */\n box-shadow: 0 8px 15px rgba(0, 0, 0, 0.1),\n 0 3px 6px rgba(0, 0, 0, 0.08),\n 0 0 10px rgba(255, 111, 97, 0.2);\n /* Inherit animation delay from inline style */\n animation-name: fadeIn;\n animation-duration: 0.5s;\n animation-fill-mode: both;\n}\n\n/* Styling for images within the suggestion card */\n.suggestion-card-image {\n width: 6.5rem; /* Increased slightly (w-20) */\n height: 6.5rem; /* Increased slightly (h-20) */\n object-fit: cover;\n border-radius: 1rem; /* Equivalent to rounded-md */\n border: 1px solid #d1d5db; /* Equivalent to border (using Tailwind gray-300) */\n margin-top: 0.5rem;\n /* Added shadow for suggestion images */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1),\n 0 2px 4px rgba(0, 0, 0, 0.06);\n transition: all 0.2s ease-in-out; /* Added for smooth deselection */\n}\n\n/* Styling for the focused suggestion card image */\n.suggestion-card-image-focus {\n width: 6.5rem;\n height: 6.5rem;\n object-fit: cover;\n border-radius: 1rem;\n margin-top: 0.5rem;\n /* Highlight styles */\n border: 2px solid #ff6f61; /* Thicker, themed border */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1), /* Base shadow for depth */\n 0 0 12px rgba(255, 111, 97, 0.6); /* Orange glow */\n transform: scale(1.05); /* Slightly scale up */\n transition: all 0.2s ease-in-out; /* Smooth transition for focus */\n}\n\n/* Styling for the suggestion card container in the sidebar */\n.suggestion-card {\n border: 1px solid #dee2e6; /* Same default border as haiku-card */\n border-top: 10px solid #ff6f61; /* Same orange top border */\n border-radius: 0.375rem; 
/* Default rounded-md */\n /* Note: background-color is set by Tailwind bg-gray-100 */\n /* Other styles like padding, margin, flex are handled by Tailwind */\n}\n\n.suggestion-image-container {\n display: flex;\n gap: 1rem;\n justify-content: space-between;\n width: 100%;\n height: 6.5rem;\n}\n\n/* Mobile responsive styles - matches useMobileView hook breakpoint */\n@media (max-width: 767px) {\n .haiku-card {\n padding: 1rem 1.5rem; /* Reduced from 2.5rem 3rem */\n min-width: auto; /* Remove min-width constraint */\n max-width: 100%; /* Full width on mobile */\n margin: 1rem auto; /* Reduced margin */\n }\n\n .haiku-card-image {\n width: 5.625rem; /* 90px - smaller on mobile */\n height: 5.625rem; /* 90px - smaller on mobile */\n }\n\n .suggestion-card-image {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n\n .suggestion-card-image-focus {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🪶 Tool-Based Generative UI Haiku Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **tool-based generative UI** capabilities:\n\n1. **Frontend Rendering of Tool Calls**: Backend tool calls are automatically\n rendered in the UI\n2. **Dynamic UI Generation**: The UI updates in real-time as the agent generates\n content\n3. **Elegant Content Presentation**: Complex structured data (haikus) are\n beautifully displayed\n\n## How to Interact\n\nChat with your Copilot and ask for haikus about different topics:\n\n- \"Create a haiku about nature\"\n- \"Write a haiku about technology\"\n- \"Generate a haiku about the changing seasons\"\n- \"Make a humorous haiku about programming\"\n\nEach request will trigger the agent to generate a haiku and display it in a\nvisually appealing card format in the UI.\n\n## ✨ Tool-Based Generative UI in Action\n\n**What's happening technically:**\n\n- The agent processes your request and determines it should create a haiku\n- It calls a backend tool that returns structured haiku data\n- CopilotKit automatically renders this tool call in the frontend\n- The rendering is handled by the registered tool component in your React app\n- No manual state management is required to display the results\n\n**What you'll see in this demo:**\n\n- As you request a haiku, a beautifully formatted card appears in the UI\n- The haiku follows the traditional 5-7-5 syllable structure\n- Each haiku is presented with consistent styling\n- Multiple haikus can be generated in sequence\n- The UI adapts to display each new piece of content\n\nThis pattern of tool-based generative UI can be extended to create any kind of\ndynamic content - from data visualizations to interactive components, all driven\nby your Copilot's tool calls!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: 
List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * An example demonstrating tool-based generative UI using LangGraph.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { Command, Annotation, MessagesAnnotation, StateGraph, END, START } from \"@langchain/langgraph\";\n\n\nexport const AgentStateAnnotation = Annotation.Root({\n tools: Annotation(),\n ...MessagesAnnotation.spec,\n});\nexport type AgentState = typeof AgentStateAnnotation.State;\n\nasync function chatNode(state: AgentState, config?: RunnableConfig): Promise {\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n const modelWithTools = model.bindTools(\n [\n ...state.tools || []\n ],\n { parallel_tool_calls: false }\n );\n\n const systemMessage = new SystemMessage({\n content: 'Help the user with writing Haikus. 
If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.'\n });\n\n const response = await modelWithTools.invoke([\n systemMessage,\n ...state.messages,\n ], config);\n\n return new Command({\n goto: END,\n update: {\n messages: [response]\n }\n });\n}\n\nconst workflow = new StateGraph(AgentStateAnnotation);\nworkflow.addNode(\"chat_node\", chatNode);\n\nworkflow.addEdge(START, \"chat_node\");\n\nexport const toolBasedGenerativeUiGraph = workflow.compile();", - "language": "ts", - "type": "file" - } - ], - "langgraph-typescript::subgraphs": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SubgraphsProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\n// Travel planning data types\ninterface Flight {\n airline: string;\n arrival: string;\n departure: string;\n duration: string;\n price: string;\n}\n\ninterface Hotel {\n location: string;\n name: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n description: string;\n location: string;\n type: string;\n}\n\ninterface Itinerary {\n hotel?: Hotel;\n flight?: Flight;\n experiences?: Experience[];\n}\n\ntype AvailableAgents = 'flights' | 'hotels' | 'experiences' | 'supervisor'\n\ninterface TravelAgentState {\n experiences: Experience[],\n flights: Flight[],\n hotels: Hotel[],\n itinerary: Itinerary\n planning_step: string\n active_agent: AvailableAgents\n}\n\nconst INITIAL_STATE: TravelAgentState = {\n itinerary: {},\n experiences: [],\n flights: [],\n hotels: [],\n planning_step: \"start\",\n active_agent: 'supervisor'\n};\n\ninterface InterruptEvent {\n message: string;\n options: TAgent extends 'flights' ? Flight[] : TAgent extends 'hotels' ? Hotel[] : never,\n recommendation: TAgent extends 'flights' ? Flight : TAgent extends 'hotels' ? Hotel : never,\n agent: TAgent\n}\n\nfunction InterruptHumanInTheLoop({\n event,\n resolve,\n}: {\n event: { value: InterruptEvent };\n resolve: (value: string) => void;\n}) {\n const { message, options, agent, recommendation } = event.value;\n\n // Format agent name with emoji\n const formatAgentName = (agent: string) => {\n switch (agent) {\n case 'flights': return 'Flights Agent';\n case 'hotels': return 'Hotels Agent';\n case 'experiences': return 'Experiences Agent';\n default: return `${agent} Agent`;\n }\n };\n\n const handleOptionSelect = (option: any) => {\n resolve(JSON.stringify(option));\n };\n\n return (\n
\n

{formatAgentName(agent)}: {message}

\n\n
\n {options.map((opt, idx) => {\n if ('airline' in opt) {\n const isRecommended = (recommendation as Flight).airline === opt.airline;\n // Flight options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.airline}\n {opt.price}\n
\n
\n {opt.departure} → {opt.arrival}\n
\n
\n {opt.duration}\n
\n \n );\n }\n const isRecommended = (recommendation as Hotel).name === opt.name;\n\n // Hotel options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.name}\n {opt.rating}\n
\n
\n 📍 {opt.location}\n
\n
\n {opt.price_per_night}\n
\n \n );\n })}\n
\n
\n )\n}\n\nexport default function Subgraphs({ params }: SubgraphsProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight);\n\n const chatTitle = 'Travel Planning Assistant';\n const chatDescription = 'Plan your perfect trip with AI specialists';\n const initialLabel = 'Hi! ✈️ Ready to plan an amazing trip? Try saying \"Plan a trip to Paris\" or \"Find me flights to Tokyo\"';\n\n return (\n \n
\n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight);\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nfunction TravelPlanner() {\n const { isMobile } = useMobileView();\n const { state: agentState, nodeName } = useCoAgent({\n name: \"subgraphs\",\n initialState: INITIAL_STATE,\n config: {\n streamSubgraphs: true,\n }\n });\n\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n\n // Current itinerary strip\n const ItineraryStrip = () => {\n const selectedFlight = agentState?.itinerary?.flight;\n const selectedHotel = agentState?.itinerary?.hotel;\n const hasExperiences = agentState?.experiences?.length > 0;\n\n return (\n
\n
Current Itinerary:
\n
\n
\n 📍\n Amsterdam → San Francisco\n
\n {selectedFlight && (\n
\n ✈️\n {selectedFlight.airline} - {selectedFlight.price}\n
\n )}\n {selectedHotel && (\n
\n 🏨\n {selectedHotel.name}\n
\n )}\n {hasExperiences && (\n
\n 🎯\n {agentState.experiences.length} experiences planned\n
\n )}\n
\n
\n );\n };\n\n // Compact agent status\n const AgentStatus = () => {\n let activeAgent = 'supervisor';\n if (nodeName?.includes('flights_agent')) {\n activeAgent = 'flights';\n }\n if (nodeName?.includes('hotels_agent')) {\n activeAgent = 'hotels';\n }\n if (nodeName?.includes('experiences_agent')) {\n activeAgent = 'experiences';\n }\n return (\n
\n
Active Agent:
\n
\n
\n 👨‍💼\n Supervisor\n
\n
\n ✈️\n Flights\n
\n
\n 🏨\n Hotels\n
\n
\n 🎯\n Experiences\n
\n
\n
\n )\n };\n\n // Travel details component\n const TravelDetails = () => (\n
\n
\n

✈️ Flight Options

\n
\n {agentState?.flights?.length > 0 ? (\n agentState.flights.map((flight, index) => (\n
\n {flight.airline}:\n {flight.departure} → {flight.arrival} ({flight.duration}) - {flight.price}\n
\n ))\n ) : (\n

No flights found yet

\n )}\n {agentState?.itinerary?.flight && (\n
\n Selected: {agentState.itinerary.flight.airline} - {agentState.itinerary.flight.price}\n
\n )}\n
\n
\n\n
\n

🏨 Hotel Options

\n
\n {agentState?.hotels?.length > 0 ? (\n agentState.hotels.map((hotel, index) => (\n
\n {hotel.name}:\n {hotel.location} - {hotel.price_per_night} ({hotel.rating})\n
\n ))\n ) : (\n

No hotels found yet

\n )}\n {agentState?.itinerary?.hotel && (\n
\n Selected: {agentState.itinerary.hotel.name} - {agentState.itinerary.hotel.price_per_night}\n
\n )}\n
\n
\n\n
\n

🎯 Experiences

\n
\n {agentState?.experiences?.length > 0 ? (\n agentState.experiences.map((experience, index) => (\n
\n
{experience.name}
\n
{experience.type}
\n
{experience.description}
\n
Location: {experience.location}
\n
\n ))\n ) : (\n

No experiences planned yet

\n )}\n
\n
\n
\n );\n\n return (\n
\n \n \n \n
\n );\n}", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": "/* Travel Planning Subgraphs Demo Styles */\n/* Essential styles that cannot be achieved with Tailwind classes */\n\n/* Main container with CopilotSidebar layout */\n.travel-planner-container {\n min-height: 100vh;\n padding: 2rem;\n background: linear-gradient(135deg, #f0f9ff 0%, #e0f2fe 100%);\n}\n\n/* Travel content area styles */\n.travel-content {\n max-width: 1200px;\n margin: 0 auto;\n padding: 0 1rem;\n display: flex;\n flex-direction: column;\n gap: 1rem;\n}\n\n/* Itinerary strip */\n.itinerary-strip {\n background: white;\n border-radius: 0.5rem;\n padding: 1rem;\n border: 1px solid #e5e7eb;\n box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);\n}\n\n.itinerary-label {\n font-size: 0.875rem;\n font-weight: 600;\n color: #6b7280;\n margin-bottom: 0.5rem;\n}\n\n.itinerary-items {\n display: flex;\n flex-wrap: wrap;\n gap: 1rem;\n}\n\n.itinerary-item {\n display: flex;\n align-items: center;\n gap: 0.5rem;\n padding: 0.5rem 0.75rem;\n background: #f9fafb;\n border-radius: 0.375rem;\n font-size: 0.875rem;\n}\n\n.item-icon {\n font-size: 1rem;\n}\n\n/* Agent status */\n.agent-status {\n background: white;\n border-radius: 0.5rem;\n padding: 1rem;\n border: 1px solid #e5e7eb;\n box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);\n}\n\n.status-label {\n font-size: 0.875rem;\n font-weight: 600;\n color: #6b7280;\n margin-bottom: 0.5rem;\n}\n\n.agent-indicators {\n display: flex;\n gap: 0.75rem;\n}\n\n.agent-indicator {\n display: flex;\n align-items: center;\n gap: 0.5rem;\n padding: 0.5rem 0.75rem;\n border-radius: 0.375rem;\n font-size: 0.875rem;\n background: #f9fafb;\n border: 1px solid #e5e7eb;\n transition: all 0.2s ease;\n}\n\n.agent-indicator.active {\n background: #dbeafe;\n border-color: #3b82f6;\n color: #1d4ed8;\n box-shadow: 0 0 0 2px rgba(59, 130, 246, 0.1);\n}\n\n/* Travel details sections */\n.travel-details {\n background: white;\n border-radius: 0.5rem;\n padding: 1rem;\n border: 1px solid #e5e7eb;\n box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);\n display: grid;\n gap: 1rem;\n}\n\n.details-section h4 {\n font-size: 1rem;\n font-weight: 600;\n color: #1f2937;\n margin-bottom: 0.5rem;\n display: flex;\n align-items: center;\n gap: 0.5rem;\n}\n\n.detail-items {\n display: flex;\n flex-direction: column;\n gap: 0.5rem;\n}\n\n.detail-item {\n padding: 0.5rem;\n background: #f9fafb;\n border-radius: 0.25rem;\n font-size: 0.875rem;\n display: flex;\n justify-content: space-between;\n}\n\n.detail-item strong {\n color: #6b7280;\n font-weight: 500;\n}\n\n.detail-tips {\n padding: 0.5rem;\n background: #eff6ff;\n border-radius: 0.25rem;\n font-size: 0.75rem;\n color: #1d4ed8;\n}\n\n.activity-item {\n padding: 0.75rem;\n background: #f0f9ff;\n border-radius: 0.25rem;\n border-left: 2px solid #0ea5e9;\n}\n\n.activity-name {\n font-weight: 600;\n color: #1f2937;\n font-size: 0.875rem;\n margin-bottom: 0.25rem;\n}\n\n.activity-category {\n font-size: 0.75rem;\n color: #0ea5e9;\n margin-bottom: 0.25rem;\n}\n\n.activity-description {\n color: #4b5563;\n font-size: 0.75rem;\n margin-bottom: 0.25rem;\n}\n\n.activity-meta {\n font-size: 0.75rem;\n color: #6b7280;\n}\n\n.no-activities {\n text-align: center;\n color: #9ca3af;\n font-style: italic;\n padding: 1rem;\n font-size: 0.875rem;\n}\n\n/* Interrupt UI for Chat Sidebar (Generative UI) */\n.interrupt-container {\n display: flex;\n flex-direction: column;\n gap: 1rem;\n max-width: 100%;\n padding-top: 34px;\n}\n\n.interrupt-header {\n margin-bottom: 
0.5rem;\n}\n\n.agent-name {\n font-size: 0.875rem;\n font-weight: 600;\n color: #1f2937;\n margin: 0 0 0.25rem 0;\n}\n\n.agent-message {\n font-size: 0.75rem;\n color: #6b7280;\n margin: 0;\n line-height: 1.4;\n}\n\n.interrupt-options {\n padding: 0.75rem;\n display: flex;\n flex-direction: column;\n gap: 0.5rem;\n max-height: 300px;\n overflow-y: auto;\n}\n\n.option-card {\n display: flex;\n flex-direction: column;\n gap: 0.25rem;\n padding: 0.75rem;\n background: #f9fafb;\n border: 1px solid #e5e7eb;\n border-radius: 0.5rem;\n cursor: pointer;\n transition: all 0.2s ease;\n text-align: left;\n position: relative;\n min-height: auto;\n}\n\n.option-card:hover {\n background: #f3f4f6;\n border-color: #d1d5db;\n}\n\n.option-card:active {\n background: #e5e7eb;\n}\n\n.option-card.recommended {\n background: #eff6ff;\n border-color: #3b82f6;\n box-shadow: 0 0 0 1px rgba(59, 130, 246, 0.1);\n}\n\n.option-card.recommended:hover {\n background: #dbeafe;\n}\n\n.recommendation-badge {\n position: absolute;\n top: -2px;\n right: -2px;\n background: #3b82f6;\n color: white;\n font-size: 0.625rem;\n padding: 0.125rem 0.375rem;\n border-radius: 0.75rem;\n font-weight: 500;\n}\n\n.option-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 0.125rem;\n}\n\n.airline-name, .hotel-name {\n font-weight: 600;\n font-size: 0.8rem;\n color: #1f2937;\n}\n\n.price, .rating {\n font-weight: 600;\n font-size: 0.75rem;\n color: #059669;\n}\n\n.route-info, .location-info {\n font-size: 0.7rem;\n color: #6b7280;\n margin-bottom: 0.125rem;\n}\n\n.duration-info, .price-info {\n font-size: 0.7rem;\n color: #9ca3af;\n}\n\n/* Mobile responsive adjustments */\n@media (max-width: 768px) {\n .travel-planner-container {\n padding: 0.5rem;\n padding-bottom: 120px; /* Space for mobile chat */\n }\n \n .travel-content {\n padding: 0;\n gap: 0.75rem;\n }\n \n .itinerary-items {\n flex-direction: column;\n gap: 0.5rem;\n }\n \n .agent-indicators {\n flex-direction: column;\n gap: 0.5rem;\n }\n \n .agent-indicator {\n padding: 0.75rem;\n }\n \n .travel-details {\n padding: 0.75rem;\n }\n\n .interrupt-container {\n padding: 0.5rem;\n }\n\n .option-card {\n padding: 0.625rem;\n }\n\n .interrupt-options {\n max-height: 250px;\n }\n}", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# LangGraph Subgraphs Demo: Travel Planning Assistant ✈️\n\nThis demo showcases **LangGraph subgraphs** through an interactive travel planning assistant. Watch as specialized AI agents collaborate to plan your perfect trip!\n\n## What are LangGraph Subgraphs? 🤖\n\n**Subgraphs** are the key to building modular, scalable AI systems in LangGraph. 
A subgraph is essentially \"a graph that is used as a node in another graph\" - enabling powerful encapsulation and reusability.\nFor more info, check out the [LangGraph docs](https://langchain-ai.github.io/langgraph/concepts/subgraphs/).\n\n### Key Concepts\n\n- **Encapsulation**: Each subgraph handles a specific domain with its own expertise\n- **Modularity**: Subgraphs can be developed, tested, and maintained independently \n- **Reusability**: The same subgraph can be used across multiple parent graphs\n- **State Communication**: Subgraphs can share state or use different schemas with transformations\n\n## Demo Architecture 🗺️\n\nThis travel planner demonstrates **supervisor-coordinated subgraphs** with **human-in-the-loop** decision making:\n\n### Parent Graph: Travel Supervisor\n- **Role**: Coordinates the travel planning process and routes to specialized agents\n- **State Management**: Maintains a shared itinerary object across all subgraphs\n- **Intelligence**: Determines what's needed and when each agent should be called\n\n### Subgraph 1: ✈️ Flights Agent\n- **Specialization**: Finding and booking flight options\n- **Process**: Presents flight options from Amsterdam to San Francisco with recommendations\n- **Interaction**: Uses interrupts to let users choose their preferred flight\n- **Data**: Static flight options (KLM, United) with pricing and duration\n\n### Subgraph 2: 🏨 Hotels Agent \n- **Specialization**: Finding and booking accommodation\n- **Process**: Shows hotel options in San Francisco with different price points\n- **Interaction**: Uses interrupts for user to select their preferred hotel\n- **Data**: Static hotel options (Hotel Zephyr, Ritz-Carlton, Hotel Zoe)\n\n### Subgraph 3: 🎯 Experiences Agent\n- **Specialization**: Curating restaurants and activities\n- **Process**: AI-powered recommendations based on selected flights and hotels\n- **Features**: Combines 2 restaurants and 2 activities with location-aware suggestions\n- **Data**: Static experiences (Pier 39, Golden Gate Bridge, Swan Oyster Depot, Tartine Bakery)\n\n## How It Works 🔄\n\n1. **User Request**: \"Help me plan a trip to San Francisco\"\n2. **Supervisor Analysis**: Determines what travel components are needed\n3. **Sequential Routing**: Routes to each agent in logical order:\n - First: Flights Agent (get transportation sorted)\n - Then: Hotels Agent (book accommodation) \n - Finally: Experiences Agent (plan activities)\n4. **Human Decisions**: Each agent presents options and waits for user choice via interrupts\n5. **State Building**: Selected choices are stored in the shared itinerary object\n6. **Completion**: All agents report back to supervisor for final coordination\n\n## State Communication Patterns 📊\n\n### Shared State Schema\nAll subgraph agents share and contribute to a common state object. When any agent updates the shared state, these changes are immediately reflected in the frontend through real-time syncing. 
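\n\nA rough sketch of that shared schema (mirroring this demo's `agent.py`; `merge_itinerary` is the demo's own reducer defined there, and the field types are simplified here):\n\n```python\nfrom typing import Annotated, Any, List\nfrom langgraph.graph import MessagesState\n\nclass TravelAgentState(MessagesState):\n    # one itinerary shared by every subgraph; the custom reducer\n    # merges partial updates instead of overwriting the whole dict\n    itinerary: Annotated[dict, merge_itinerary]\n    flights: List[Any]\n    hotels: List[Any]\n    experiences: List[Any]\n```\n\nBecause updates merge into a single object, partial results from different subgraphs compose instead of clobbering each other. 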
This ensures that:\n\n- **Flight selections** from the Flights Agent are visible to subsequent agents\n- **Hotel choices** influence the Experiences Agent's recommendations \n- **All updates** are synchronized with the frontend UI in real-time\n- **State persistence** maintains the travel itinerary throughout the workflow\n\n### Human-in-the-Loop Pattern\nTwo of the specialist agents use **interrupts** to pause execution and gather user preferences:\n\n- **Flights Agent**: Presents options → interrupt → waits for selection → continues\n- **Hotels Agent**: Shows hotels → interrupt → waits for choice → continues\n\n## Try These Examples! 💡\n\n### Getting Started\n- \"Help me plan a trip to San Francisco\"\n- \"I want to visit San Francisco from Amsterdam\"\n- \"Plan my travel itinerary\"\n\n### During the Process\nWhen the Flights Agent presents options:\n- Choose between KLM ($650, 11h 30m) or United ($720, 12h 15m)\n\nWhen the Hotels Agent shows accommodations:\n- Select from Hotel Zephyr, The Ritz-Carlton, or Hotel Zoe\n\nThe Experiences Agent will then provide tailored recommendations based on your choices!\n\n## Frontend Capabilities 👁️\n\n- **Human-in-the-loop with interrupts** from subgraphs for user decision making\n- **Subgraphs detection and streaming** to show which agent is currently active\n- **Real-time state updates** as the shared itinerary is built across agents\n", "language": "markdown", "type": "file" }, { "name": "agent.py", "content": "\"\"\"\nA travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\nThe supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n\"\"\"\n\nfrom typing import Dict, List, Any, Optional, Annotated, Union\nfrom dataclasses import dataclass\nimport json\nimport os\nfrom pydantic import BaseModel, Field\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\n\n# OpenAI imports\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage, AIMessage\n\ndef create_interrupt(message: str, options: List[Any], recommendation: Any, agent: str):\n return interrupt({\n \"message\": message,\n \"options\": options,\n \"recommendation\": recommendation,\n \"agent\": agent,\n })\n\n# State schema for travel planning\n@dataclass\nclass Flight:\n airline: str\n departure: str\n arrival: str\n price: str\n duration: str\n\n@dataclass\nclass Hotel:\n name: str\n location: str\n price_per_night: str\n rating: str\n\n@dataclass\nclass Experience:\n name: str\n type: str # \"restaurant\" or \"activity\"\n description: str\n location: str\n\ndef merge_itinerary(left: Union[dict, None] = None, right: Union[dict, None] = None) -> dict:\n \"\"\"Custom reducer to merge itinerary updates.\"\"\"\n if not left:\n left = {}\n if not right:\n right = {}\n\n return {**left, **right}\n\nclass TravelAgentState(MessagesState):\n \"\"\"Shared state for the travel agent system\"\"\"\n # Travel request details\n origin: str = \"\"\n destination: str = \"\"\n\n # Results from each agent\n flights: List[Flight] = None\n hotels: List[Hotel] = None\n experiences: List[Experience] = None\n\n itinerary: Annotated[dict, merge_itinerary] = None\n\n # Tools available to all agents\n tools: List[Any] = None\n\n # Supervisor routing\n next_agent: Optional[str] = None\n
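\n# Illustrative example (commentary only, not executed): the custom reducer\n# lets each subgraph contribute just its slice of the shared itinerary:\n#\n#   merge_itinerary({\"flight\": {...}}, {\"hotel\": {...}})\n#   == {\"flight\": {...}, \"hotel\": {...}}\n#\n# On a key collision the right-hand update wins, since right is spread last\n# in {**left, **right}.\n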
\n# Static data for demonstration\nSTATIC_FLIGHTS = [\n Flight(\"KLM\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$650\", \"11h 30m\"),\n Flight(\"United\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$720\", \"12h 15m\")\n]\n\nSTATIC_HOTELS = [\n Hotel(\"Hotel Zephyr\", \"Fisherman's Wharf\", \"$280/night\", \"4.2 stars\"),\n Hotel(\"The Ritz-Carlton\", \"Nob Hill\", \"$550/night\", \"4.8 stars\"),\n Hotel(\"Hotel Zoe\", \"Union Square\", \"$320/night\", \"4.4 stars\")\n]\n\nSTATIC_EXPERIENCES = [\n Experience(\"Pier 39\", \"activity\", \"Iconic waterfront destination with shops and sea lions\", \"Fisherman's Wharf\"),\n Experience(\"Golden Gate Bridge\", \"activity\", \"World-famous suspension bridge with stunning views\", \"Golden Gate\"),\n Experience(\"Swan Oyster Depot\", \"restaurant\", \"Historic seafood counter serving fresh oysters\", \"Polk Street\"),\n Experience(\"Tartine Bakery\", \"restaurant\", \"Artisanal bakery famous for bread and pastries\", \"Mission District\")\n]\n\n# Flights finder subgraph\nasync def flights_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds flight options\"\"\"\n\n # Simulate flight search with static data\n flights = STATIC_FLIGHTS\n\n selected_flight = state.get('itinerary', {}).get('flight', None)\n if not selected_flight:\n selected_flight = create_interrupt(\n message=f\"\"\"\n Found {len(flights)} flight options from {state.get('origin', 'Amsterdam')} to {state.get('destination', 'San Francisco')}.\n I recommend choosing the flight by {flights[0].airline} since it's known to be on time and cheaper.\n \"\"\",\n options=flights,\n recommendation=flights[0],\n agent=\"flights\"\n )\n\n if isinstance(selected_flight, str):\n selected_flight = json.loads(selected_flight)\n return Command(\n goto=END,\n update={\n \"flights\": flights,\n \"itinerary\": {\n \"flight\": selected_flight\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Flights Agent: Great. I'll book you the {selected_flight['airline']} flight from {selected_flight['departure']} to {selected_flight['arrival']}.\"\n }]\n }\n )\n
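\n# Illustrative note (commentary only): create_interrupt() pauses this run and\n# surfaces its payload to the frontend as a LangGraph interrupt, roughly:\n#\n#   {\"message\": \"...\", \"options\": [...], \"recommendation\": Flight(...), \"agent\": \"flights\"}\n#\n# When the user picks an option, the graph resumes and interrupt() returns the\n# resume value. This demo's UI resolves with JSON.stringify(option), which is\n# why the json.loads() calls here parse string results.\n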
\n# Hotels finder subgraph\nasync def hotels_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds hotel options\"\"\"\n\n # Simulate hotel search with static data\n hotels = STATIC_HOTELS\n selected_hotel = state.get('itinerary', {}).get('hotel', None)\n if not selected_hotel:\n selected_hotel = create_interrupt(\n message=f\"\"\"\n Found {len(hotels)} accommodation options in {state.get('destination', 'San Francisco')}.\n I recommend choosing the {hotels[2].name} since it strikes the balance between rating, price, and location.\n \"\"\",\n options=hotels,\n recommendation=hotels[2],\n agent=\"hotels\"\n )\n\n if isinstance(selected_hotel, str):\n selected_hotel = json.loads(selected_hotel)\n return Command(\n goto=END,\n update={\n \"hotels\": hotels,\n \"itinerary\": {\n \"hotel\": selected_hotel\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Hotels Agent: Excellent choice! You'll like {selected_hotel['name']}.\"\n }]\n }\n )\n\n# Experiences finder subgraph\nasync def experiences_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds restaurant and activity recommendations\"\"\"\n\n # Filter experiences (2 restaurants, 2 activities)\n restaurants = [exp for exp in STATIC_EXPERIENCES if exp.type == \"restaurant\"][:2]\n activities = [exp for exp in STATIC_EXPERIENCES if exp.type == \"activity\"][:2]\n experiences = restaurants + activities\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n itinerary = state.get(\"itinerary\", {})\n\n system_prompt = f\"\"\"\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now is to let the user know of your findings.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flight chosen: {itinerary.get(\"flight\", None)}\n - Hotel chosen: {itinerary.get(\"hotel\", None)}\n - activities found: {activities}\n - restaurants found: {restaurants}\n \"\"\"\n\n # Get experiences response\n response = await model.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"experiences\": experiences,\n \"messages\": state[\"messages\"] + [response]\n }\n )\n\nclass SupervisorResponseFormatter(BaseModel):\n \"\"\"Always use this tool to structure your response to the user.\"\"\"\n answer: str = Field(description=\"The answer to the user\")\n next_agent: str | None = Field(description=\"The agent to go to. Not required if you do not want to route to another agent.\")\n
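\n# Illustrative note (commentary only): binding SupervisorResponseFormatter as a\n# tool steers the model into a structured reply whose args look roughly like:\n#\n#   {\"answer\": \"On it - checking flights first.\", \"next_agent\": \"flights_agent\"}\n#\n# supervisor_agent below reads next_agent from those args to route the graph.\n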
\n# Supervisor agent\nasync def supervisor_agent(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Main supervisor that coordinates all subgraphs\"\"\"\n\n itinerary = state.get(\"itinerary\", {})\n\n # Check what's already completed\n has_flights = itinerary.get(\"flight\", None) is not None\n has_hotels = itinerary.get(\"hotel\", None) is not None\n has_experiences = state.get(\"experiences\", None) is not None\n\n system_prompt = f\"\"\"\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flights found: {has_flights}\n - Hotels found: {has_hotels}\n - Experiences found: {has_experiences}\n - Itinerary (Things that the user has already confirmed selection on): {json.dumps(itinerary, indent=2)}\n \n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options \n - experiences_agent: Finds restaurant and activity recommendations\n - complete: Mark task as complete when all information is gathered\n \n You must route to the appropriate agent based on what's missing. Once all agents have completed their tasks, route to 'complete'.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Bind the routing tool\n model_with_tools = model.bind_tools(\n [SupervisorResponseFormatter],\n parallel_tool_calls=False,\n )\n\n # Get supervisor decision\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls for routing\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n if isinstance(tool_call, dict):\n tool_call_args = tool_call[\"args\"]\n else:\n tool_call_args = tool_call.args\n\n # next_agent is optional in the schema, so avoid a KeyError when absent\n next_agent = tool_call_args.get(\"next_agent\")\n\n # Add tool response\n tool_response = {\n \"role\": \"tool\",\n \"content\": f\"Routing to {next_agent} and providing the answer\",\n \"tool_call_id\": tool_call.id if hasattr(tool_call, 'id') else tool_call[\"id\"]\n }\n\n messages = messages + [tool_response, AIMessage(content=tool_call_args[\"answer\"])]\n\n # 'complete' is not a graph node, so let it fall through to the END return\n if next_agent is not None and next_agent != \"complete\":\n return Command(goto=next_agent)\n\n # Fallback if no tool call or task complete\n return Command(\n goto=END,\n update={\"messages\": messages}\n )\n\n# Create subgraphs\nflights_graph = StateGraph(TravelAgentState)\nflights_graph.add_node(\"flights_agent_chat_node\", flights_finder)\nflights_graph.set_entry_point(\"flights_agent_chat_node\")\nflights_graph.add_edge(START, \"flights_agent_chat_node\")\nflights_graph.add_edge(\"flights_agent_chat_node\", END)\nflights_subgraph = flights_graph.compile()\n\nhotels_graph = StateGraph(TravelAgentState)\nhotels_graph.add_node(\"hotels_agent_chat_node\", hotels_finder)\nhotels_graph.set_entry_point(\"hotels_agent_chat_node\")\nhotels_graph.add_edge(START, \"hotels_agent_chat_node\")\nhotels_graph.add_edge(\"hotels_agent_chat_node\", END)\nhotels_subgraph = hotels_graph.compile()\n\nexperiences_graph = StateGraph(TravelAgentState)\nexperiences_graph.add_node(\"experiences_agent_chat_node\", experiences_finder)\nexperiences_graph.set_entry_point(\"experiences_agent_chat_node\")\nexperiences_graph.add_edge(START, \"experiences_agent_chat_node\")\nexperiences_graph.add_edge(\"experiences_agent_chat_node\", END)\nexperiences_subgraph = experiences_graph.compile()\n\n# Main supervisor workflow\nworkflow = StateGraph(TravelAgentState)\n\n# Add supervisor and subgraphs as nodes\nworkflow.add_node(\"supervisor\", supervisor_agent)\nworkflow.add_node(\"flights_agent\", flights_subgraph)\nworkflow.add_node(\"hotels_agent\", hotels_subgraph)\nworkflow.add_node(\"experiences_agent\", experiences_subgraph)\n\n# Set entry point\nworkflow.set_entry_point(\"supervisor\")\nworkflow.add_edge(START, \"supervisor\")\n\n# Add edges back to supervisor after each subgraph\nworkflow.add_edge(\"flights_agent\", \"supervisor\")\nworkflow.add_edge(\"hotels_agent\", \"supervisor\")\nworkflow.add_edge(\"experiences_agent\", \"supervisor\")\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, 
don't use a custom checkpointer\n graph = workflow.compile()\n", - "language": "python", - "type": "file" - }, - { - "name": "agent.ts", - "content": "/**\n * A travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\n * The supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n */\n\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { SystemMessage, AIMessage, ToolMessage } from \"@langchain/core/messages\";\nimport { RunnableConfig } from \"@langchain/core/runnables\";\nimport { \n Annotation, \n MessagesAnnotation, \n StateGraph, \n Command, \n START, \n END, \n interrupt \n} from \"@langchain/langgraph\";\n\n// Travel data interfaces\ninterface Flight {\n airline: string;\n departure: string;\n arrival: string;\n price: string;\n duration: string;\n}\n\ninterface Hotel {\n name: string;\n location: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n type: \"restaurant\" | \"activity\";\n description: string;\n location: string;\n}\n\ninterface Itinerary {\n flight?: Flight;\n hotel?: Hotel;\n}\n\n// Custom reducer to merge itinerary updates\nfunction mergeItinerary(left: Itinerary | null, right?: Itinerary | null): Itinerary {\n if (!left) left = {};\n if (!right) right = {};\n return { ...left, ...right };\n}\n\n// State annotation for travel agent system\nexport const TravelAgentStateAnnotation = Annotation.Root({\n origin: Annotation(),\n destination: Annotation(),\n flights: Annotation(),\n hotels: Annotation(),\n experiences: Annotation(),\n\n // Itinerary with custom merger\n itinerary: Annotation({\n reducer: mergeItinerary,\n default: () => null\n }),\n\n // Tools available to all agents\n tools: Annotation({\n reducer: (x, y) => y ?? 
x,\n default: () => []\n }),\n\n // Supervisor routing\n next_agent: Annotation(),\n ...MessagesAnnotation.spec,\n});\n\nexport type TravelAgentState = typeof TravelAgentStateAnnotation.State;\n\n// Static data for demonstration\nconst STATIC_FLIGHTS: Flight[] = [\n { airline: \"KLM\", departure: \"Amsterdam (AMS)\", arrival: \"San Francisco (SFO)\", price: \"$650\", duration: \"11h 30m\" },\n { airline: \"United\", departure: \"Amsterdam (AMS)\", arrival: \"San Francisco (SFO)\", price: \"$720\", duration: \"12h 15m\" }\n];\n\nconst STATIC_HOTELS: Hotel[] = [\n { name: \"Hotel Zephyr\", location: \"Fisherman's Wharf\", price_per_night: \"$280/night\", rating: \"4.2 stars\" },\n { name: \"The Ritz-Carlton\", location: \"Nob Hill\", price_per_night: \"$550/night\", rating: \"4.8 stars\" },\n { name: \"Hotel Zoe\", location: \"Union Square\", price_per_night: \"$320/night\", rating: \"4.4 stars\" }\n];\n\nconst STATIC_EXPERIENCES: Experience[] = [\n { name: \"Pier 39\", type: \"activity\", description: \"Iconic waterfront destination with shops and sea lions\", location: \"Fisherman's Wharf\" },\n { name: \"Golden Gate Bridge\", type: \"activity\", description: \"World-famous suspension bridge with stunning views\", location: \"Golden Gate\" },\n { name: \"Swan Oyster Depot\", type: \"restaurant\", description: \"Historic seafood counter serving fresh oysters\", location: \"Polk Street\" },\n { name: \"Tartine Bakery\", type: \"restaurant\", description: \"Artisanal bakery famous for bread and pastries\", location: \"Mission District\" }\n];\n\nfunction createInterrupt(message: string, options: any[], recommendation: any, agent: string) {\n return interrupt({\n message,\n options,\n recommendation,\n agent,\n });\n}\n\n// Flights finder subgraph\nasync function flightsFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Simulate flight search with static data\n const flights = STATIC_FLIGHTS;\n\n const selectedFlight = state.itinerary?.flight;\n \n let flightChoice: Flight;\n const message = `Found ${flights.length} flight options from ${state.origin || 'Amsterdam'} to ${state.destination || 'San Francisco'}.\\n` +\n `I recommend choosing the flight by ${flights[0].airline} since it's known to be on time and cheaper.`\n if (!selectedFlight) {\n const interruptResult = createInterrupt(\n message,\n flights,\n flights[0],\n \"flights\"\n );\n \n // Parse the interrupt result if it's a string\n flightChoice = typeof interruptResult === 'string' ? JSON.parse(interruptResult) : interruptResult;\n } else {\n flightChoice = selectedFlight;\n }\n\n return new Command({\n goto: END,\n update: {\n flights: flights,\n itinerary: {\n flight: flightChoice\n },\n // Return all \"messages\" that the agent was sending\n messages: [\n ...state.messages,\n new AIMessage({\n content: message,\n }),\n new AIMessage({\n content: `Flights Agent: Great. 
I'll book you the ${flightChoice.airline} flight from ${flightChoice.departure} to ${flightChoice.arrival}.`,\n }),\n ]\n }\n });\n}\n\n// Hotels finder subgraph\nasync function hotelsFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Simulate hotel search with static data\n const hotels = STATIC_HOTELS;\n const selectedHotel = state.itinerary?.hotel;\n \n let hotelChoice: Hotel;\n const message = `Found ${hotels.length} accommodation options in ${state.destination || 'San Francisco'}.\\n\n I recommend choosing the ${hotels[2].name} since it strikes the balance between rating, price, and location.`\n if (!selectedHotel) {\n const interruptResult = createInterrupt(\n message,\n hotels,\n hotels[2],\n \"hotels\"\n );\n \n // Parse the interrupt result if it's a string\n hotelChoice = typeof interruptResult === 'string' ? JSON.parse(interruptResult) : interruptResult;\n } else {\n hotelChoice = selectedHotel;\n }\n\n return new Command({\n goto: END,\n update: {\n hotels: hotels,\n itinerary: {\n hotel: hotelChoice\n },\n // Return all \"messages\" that the agent was sending\n messages: [\n ...state.messages,\n new AIMessage({\n content: message,\n }),\n new AIMessage({\n content: `Hotels Agent: Excellent choice! You'll like ${hotelChoice.name}.`\n }),\n ]\n }\n });\n}\n\n// Experiences finder subgraph\nasync function experiencesFinder(state: TravelAgentState, config?: RunnableConfig): Promise {\n // Filter experiences (2 restaurants, 2 activities)\n const restaurants = STATIC_EXPERIENCES.filter(exp => exp.type === \"restaurant\").slice(0, 2);\n const activities = STATIC_EXPERIENCES.filter(exp => exp.type === \"activity\").slice(0, 2);\n const experiences = [...restaurants, ...activities];\n\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n const itinerary = state.itinerary || {};\n\n const systemPrompt = `\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now, is to let the user know of your findings.\n \n Current status:\n - Origin: ${state.origin || 'Amsterdam'}\n - Destination: ${state.destination || 'San Francisco'}\n - Flight chosen: ${JSON.stringify(itinerary.flight) || 'None'}\n - Hotel chosen: ${JSON.stringify(itinerary.hotel) || 'None'}\n - Activities found: ${JSON.stringify(activities)}\n - Restaurants found: ${JSON.stringify(restaurants)}\n `;\n\n // Get experiences response\n const response = await model.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n return new Command({\n goto: END,\n update: {\n experiences: experiences,\n messages: [...state.messages, response]\n }\n });\n}\n\n// Supervisor response tool\nconst SUPERVISOR_RESPONSE_TOOL = {\n type: \"function\" as const,\n function: {\n name: \"supervisor_response\",\n description: \"Always use this tool to structure your response to the user.\",\n parameters: {\n type: \"object\",\n properties: {\n answer: {\n type: \"string\",\n description: \"The answer to the user\"\n },\n next_agent: {\n type: \"string\",\n enum: [\"flights_agent\", \"hotels_agent\", \"experiences_agent\", \"complete\"],\n description: \"The agent to go to. 
Not required if you do not want to route to another agent.\"\n }\n },\n required: [\"answer\"]\n }\n }\n};\n\n// Supervisor agent\nasync function supervisorAgent(state: TravelAgentState, config?: RunnableConfig): Promise {\n const itinerary = state.itinerary || {};\n\n // Check what's already completed\n const hasFlights = itinerary.flight !== undefined;\n const hasHotels = itinerary.hotel !== undefined;\n const hasExperiences = state.experiences !== null;\n\n const systemPrompt = `\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n \n Current status:\n - Origin: ${state.origin || 'Amsterdam'}\n - Destination: ${state.destination || 'San Francisco'}\n - Flights found: ${hasFlights}\n - Hotels found: ${hasHotels}\n - Experiences found: ${hasExperiences}\n - Itinerary (Things that the user has already confirmed selection on): ${JSON.stringify(itinerary, null, 2)}\n \n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options \n - experiences_agent: Finds restaurant and activity recommendations\n - complete: Mark task as complete when all information is gathered\n \n You must route to the appropriate agent based on what's missing. Once all agents have completed their tasks, route to 'complete'.\n `;\n\n // Define the model\n const model = new ChatOpenAI({ model: \"gpt-4o\" });\n\n if (!config) {\n config = { recursionLimit: 25 };\n }\n\n // Bind the routing tool\n const modelWithTools = model.bindTools(\n [SUPERVISOR_RESPONSE_TOOL],\n {\n parallel_tool_calls: false,\n }\n );\n\n // Get supervisor decision\n const response = await modelWithTools.invoke([\n new SystemMessage({ content: systemPrompt }),\n ...state.messages,\n ], config);\n\n let messages = [...state.messages, response];\n\n // Handle tool calls for routing\n if (response.tool_calls && response.tool_calls.length > 0) {\n const toolCall = response.tool_calls[0];\n const toolCallArgs = toolCall.args;\n const nextAgent = toolCallArgs.next_agent;\n\n const toolResponse = new ToolMessage({\n tool_call_id: toolCall.id!,\n content: `Routing to ${nextAgent} and providing the answer`,\n });\n\n messages = [\n ...messages, \n toolResponse, \n new AIMessage({ content: toolCallArgs.answer })\n ];\n\n if (nextAgent && nextAgent !== \"complete\") {\n return new Command({ goto: nextAgent });\n }\n }\n\n // Fallback if no tool call or complete\n return new Command({\n goto: END,\n update: { messages }\n });\n}\n\n// Create subgraphs\nconst flightsGraph = new StateGraph(TravelAgentStateAnnotation);\nflightsGraph.addNode(\"flights_agent_chat_node\", flightsFinder);\nflightsGraph.setEntryPoint(\"flights_agent_chat_node\");\nflightsGraph.addEdge(START, \"flights_agent_chat_node\");\nflightsGraph.addEdge(\"flights_agent_chat_node\", END);\nconst flightsSubgraph = flightsGraph.compile();\n\nconst hotelsGraph = new StateGraph(TravelAgentStateAnnotation);\nhotelsGraph.addNode(\"hotels_agent_chat_node\", hotelsFinder);\nhotelsGraph.setEntryPoint(\"hotels_agent_chat_node\");\nhotelsGraph.addEdge(START, \"hotels_agent_chat_node\");\nhotelsGraph.addEdge(\"hotels_agent_chat_node\", END);\nconst hotelsSubgraph = hotelsGraph.compile();\n\nconst experiencesGraph = new StateGraph(TravelAgentStateAnnotation);\nexperiencesGraph.addNode(\"experiences_agent_chat_node\", experiencesFinder);\nexperiencesGraph.setEntryPoint(\"experiences_agent_chat_node\");\nexperiencesGraph.addEdge(START, 
\"experiences_agent_chat_node\");\nexperiencesGraph.addEdge(\"experiences_agent_chat_node\", END);\nconst experiencesSubgraph = experiencesGraph.compile();\n\n// Main supervisor workflow\nconst workflow = new StateGraph(TravelAgentStateAnnotation);\n\n// Add supervisor and subgraphs as nodes\nworkflow.addNode(\"supervisor\", supervisorAgent, { ends: ['flights_agent', 'hotels_agent', 'experiences_agent', END] });\nworkflow.addNode(\"flights_agent\", flightsSubgraph);\nworkflow.addNode(\"hotels_agent\", hotelsSubgraph);\nworkflow.addNode(\"experiences_agent\", experiencesSubgraph);\n\n// Set entry point\nworkflow.setEntryPoint(\"supervisor\");\nworkflow.addEdge(START, \"supervisor\");\n\n// Add edges back to supervisor after each subgraph\nworkflow.addEdge(\"flights_agent\", \"supervisor\");\nworkflow.addEdge(\"hotels_agent\", \"supervisor\");\nworkflow.addEdge(\"experiences_agent\", \"supervisor\");\n\n// Compile the graph\nexport const subGraphsAgentGraph = workflow.compile();\n", - "language": "ts", - "type": "file" - } - ], - "agno::agentic_chat": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", "language": "typescript", "type": "file" }, { "name": "style.css", "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", "language": "css", "type": "file" }, { "name": "README.mdx", "content": "# 🤖 Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interact with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend are automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", "language": "markdown", "type": "file" } ], "agno::tool_based_generative_ui": [ { "name": "page.tsx", "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial<GenerateHaiku>\n setHaikus: Dispatch<SetStateAction<GenerateHaiku[]>>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const 
chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n \n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n}\n\n.page-background {\n /* Darker gradient background */\n background: linear-gradient(170deg, #e9ecef 0%, #ced4da 100%);\n}\n\n@keyframes fade-scale-in {\n from {\n opacity: 0;\n transform: translateY(10px) scale(0.98);\n }\n to {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Updated card entry animation */\n@keyframes pop-in {\n 0% {\n opacity: 0;\n transform: translateY(15px) scale(0.95);\n }\n 70% {\n opacity: 1;\n transform: translateY(-2px) scale(1.02);\n }\n 100% {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Animation for subtle background gradient movement */\n@keyframes animated-gradient {\n 0% {\n background-position: 0% 50%;\n }\n 50% {\n background-position: 100% 50%;\n }\n 100% {\n background-position: 0% 50%;\n }\n}\n\n/* Animation for flash effect on apply */\n@keyframes flash-border-glow {\n 0% {\n /* Start slightly intensified */\n border-top-color: #ff5b4a !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n }\n 50% {\n /* Peak intensity */\n border-top-color: #ff4733 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.08),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 35px rgba(255, 71, 51, 0.7);\n }\n 100% {\n /* Return to default state appearance */\n border-top-color: #ff6f61 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 10px rgba(255, 111, 97, 0.15);\n }\n}\n\n/* Existing animation for haiku lines */\n@keyframes fade-slide-in {\n from {\n opacity: 0;\n transform: translateX(-15px);\n }\n to {\n opacity: 1;\n transform: translateX(0);\n }\n}\n\n.animated-fade-in {\n /* Use the new pop-in animation */\n animation: pop-in 0.6s ease-out forwards;\n}\n\n.haiku-card {\n /* Subtle animated gradient background */\n background: linear-gradient(120deg, #ffffff 0%, #fdfdfd 50%, #ffffff 100%);\n background-size: 200% 200%;\n animation: animated-gradient 10s ease infinite;\n\n /* === Explicit Border Override Attempt === */\n /* 1. Set the default grey border for all sides */\n border: 1px solid #dee2e6;\n\n /* 2. 
Explicitly override the top border immediately after */\n border-top: 10px solid #ff6f61 !important; /* Orange top - Added !important */\n /* === End Explicit Border Override Attempt === */\n\n padding: 2.5rem 3rem;\n border-radius: 20px;\n\n /* Default glow intensity */\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 15px rgba(255, 111, 97, 0.25);\n text-align: left;\n max-width: 745px;\n margin: 3rem auto;\n min-width: 600px;\n\n /* Transition */\n transition: transform 0.35s ease, box-shadow 0.35s ease, border-top-width 0.35s ease, border-top-color 0.35s ease;\n}\n\n.haiku-card:hover {\n transform: translateY(-8px) scale(1.03);\n /* Enhanced shadow + Glow */\n box-shadow: 0 15px 35px rgba(0, 0, 0, 0.1),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n /* Modify only top border properties */\n border-top-width: 14px !important; /* Added !important */\n border-top-color: #ff5b4a !important; /* Added !important */\n}\n\n.haiku-card .flex {\n margin-bottom: 1.5rem;\n}\n\n.haiku-card .flex.haiku-line { /* Target the lines specifically */\n margin-bottom: 1.5rem;\n opacity: 0; /* Start hidden for animation */\n animation: fade-slide-in 0.5s ease-out forwards;\n /* animation-delay is set inline in page.tsx */\n}\n\n/* Remove previous explicit color overrides - rely on Tailwind */\n/* .haiku-card p.text-4xl {\n color: #212529;\n}\n\n.haiku-card p.text-base {\n color: #495057;\n} */\n\n.haiku-card.applied-flash {\n /* Apply the flash animation once */\n /* Note: animation itself has !important on border-top-color */\n animation: flash-border-glow 0.6s ease-out forwards;\n}\n\n/* Styling for images within the main haiku card */\n.haiku-card-image {\n width: 9.5rem; /* Increased size (approx w-48) */\n height: 9.5rem; /* Increased size (approx h-48) */\n object-fit: cover;\n border-radius: 1.5rem; /* rounded-xl */\n border: 1px solid #e5e7eb;\n /* Enhanced shadow with subtle orange hint */\n box-shadow: 0 8px 15px rgba(0, 0, 0, 0.1),\n 0 3px 6px rgba(0, 0, 0, 0.08),\n 0 0 10px rgba(255, 111, 97, 0.2);\n /* Inherit animation delay from inline style */\n animation-name: fadeIn;\n animation-duration: 0.5s;\n animation-fill-mode: both;\n}\n\n/* Styling for images within the suggestion card */\n.suggestion-card-image {\n width: 6.5rem; /* Increased slightly (w-20) */\n height: 6.5rem; /* Increased slightly (h-20) */\n object-fit: cover;\n border-radius: 1rem; /* Equivalent to rounded-md */\n border: 1px solid #d1d5db; /* Equivalent to border (using Tailwind gray-300) */\n margin-top: 0.5rem;\n /* Added shadow for suggestion images */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1),\n 0 2px 4px rgba(0, 0, 0, 0.06);\n transition: all 0.2s ease-in-out; /* Added for smooth deselection */\n}\n\n/* Styling for the focused suggestion card image */\n.suggestion-card-image-focus {\n width: 6.5rem;\n height: 6.5rem;\n object-fit: cover;\n border-radius: 1rem;\n margin-top: 0.5rem;\n /* Highlight styles */\n border: 2px solid #ff6f61; /* Thicker, themed border */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1), /* Base shadow for depth */\n 0 0 12px rgba(255, 111, 97, 0.6); /* Orange glow */\n transform: scale(1.05); /* Slightly scale up */\n transition: all 0.2s ease-in-out; /* Smooth transition for focus */\n}\n\n/* Styling for the suggestion card container in the sidebar */\n.suggestion-card {\n border: 1px solid #dee2e6; /* Same default border as haiku-card */\n border-top: 10px solid #ff6f61; /* Same orange top border */\n border-radius: 0.375rem; 
/* Default rounded-md */\n /* Note: background-color is set by Tailwind bg-gray-100 */\n /* Other styles like padding, margin, flex are handled by Tailwind */\n}\n\n.suggestion-image-container {\n display: flex;\n gap: 1rem;\n justify-content: space-between;\n width: 100%;\n height: 6.5rem;\n}\n\n/* Mobile responsive styles - matches useMobileView hook breakpoint */\n@media (max-width: 767px) {\n .haiku-card {\n padding: 1rem 1.5rem; /* Reduced from 2.5rem 3rem */\n min-width: auto; /* Remove min-width constraint */\n max-width: 100%; /* Full width on mobile */\n margin: 1rem auto; /* Reduced margin */\n }\n\n .haiku-card-image {\n width: 5.625rem; /* 90px - smaller on mobile */\n height: 5.625rem; /* 90px - smaller on mobile */\n }\n\n .suggestion-card-image {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n\n .suggestion-card-image-focus {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🪶 Tool-Based Generative UI Haiku Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **tool-based generative UI** capabilities:\n\n1. **Frontend Rendering of Tool Calls**: Backend tool calls are automatically\n rendered in the UI\n2. **Dynamic UI Generation**: The UI updates in real-time as the agent generates\n content\n3. **Elegant Content Presentation**: Complex structured data (haikus) are\n beautifully displayed\n\n## How to Interact\n\nChat with your Copilot and ask for haikus about different topics:\n\n- \"Create a haiku about nature\"\n- \"Write a haiku about technology\"\n- \"Generate a haiku about the changing seasons\"\n- \"Make a humorous haiku about programming\"\n\nEach request will trigger the agent to generate a haiku and display it in a\nvisually appealing card format in the UI.\n\n## ✨ Tool-Based Generative UI in Action\n\n**What's happening technically:**\n\n- The agent processes your request and determines it should create a haiku\n- It calls a backend tool that returns structured haiku data\n- CopilotKit automatically renders this tool call in the frontend\n- The rendering is handled by the registered tool component in your React app\n- No manual state management is required to display the results\n\n**What you'll see in this demo:**\n\n- As you request a haiku, a beautifully formatted card appears in the UI\n- The haiku follows the traditional 5-7-5 syllable structure\n- Each haiku is presented with consistent styling\n- Multiple haikus can be generated in sequence\n- The UI adapts to display each new piece of content\n\nThis pattern of tool-based generative UI can be extended to create any kind of\ndynamic content - from data visualizations to interactive components, all driven\nby your Copilot's tool calls!\n", - "language": "markdown", - "type": "file" - } - ], "llama-index::agentic_chat": [ { "name": "page.tsx", @@ -1263,7 +72,7 @@ }, { "name": "agentic_generative_ui.py", - "content": "import asyncio\nimport copy\nimport jsonpatch\nfrom pydantic import BaseModel\n\nfrom llama_index.core.workflow import Context\nfrom llama_index.llms.openai import OpenAI\nfrom llama_index.protocols.ag_ui.router import get_ag_ui_workflow_router\nfrom llama_index.protocols.ag_ui.events import StateDeltaWorkflowEvent, StateSnapshotWorkflowEvent\n\nclass Step(BaseModel):\n description: str\n\nclass Task(BaseModel):\n steps: list[Step]\n\n# Genrative UI demo\nasync def run_task(\n ctx: 
Context, task: Task,\n) -> str:\n \"\"\"Execute any list of steps needed to complete a task. Useful for anything the user wants to do.\"\"\"\n state = await ctx.get(\"state\", default={})\n task = Task.model_validate(task)\n\n state = {\n \"steps\": [\n {\n \"description\": step.description,\n \"status\": \"pending\"\n }\n for step in task.steps\n ]\n }\n\n # Send initial state snapshot\n ctx.write_event_to_stream(\n StateSnapshotWorkflowEvent(\n snapshot=state\n )\n )\n\n # Sleep for 1 second\n await asyncio.sleep(1.0)\n\n # Create a copy to track changes for JSON patches\n previous_state = copy.deepcopy(state)\n\n # Update each step and send deltas\n for i, step in enumerate(state[\"steps\"]):\n step[\"status\"] = \"completed\"\n \n # Generate JSON patch from previous state to current state\n patch = jsonpatch.make_patch(previous_state, state)\n \n # Send state delta event\n ctx.write_event_to_stream(\n StateDeltaWorkflowEvent(\n delta=patch.patch\n )\n )\n \n # Update previous state for next iteration\n previous_state = copy.deepcopy(state)\n \n # Sleep for 1 second\n await asyncio.sleep(1.0)\n\n # Optionally send a final snapshot to the client\n ctx.write_event_to_stream(\n StateSnapshotWorkflowEvent(\n snapshot=state\n )\n )\n\n return \"Done!\"\n\n\nagentic_generative_ui_router = get_ag_ui_workflow_router(\n llm=OpenAI(model=\"gpt-4.1\"),\n frontend_tools=[run_task],\n initial_state={},\n system_prompt=(\n \"You are a helpful assistant that can help the user with their task. \"\n \"If the user asks you to do any task, use the run_task tool to do it. \"\n \"Use your best judgement to describe the steps.\"\n )\n)\n", + "content": "import asyncio\nimport copy\nimport jsonpatch\nfrom pydantic import BaseModel\n\nfrom llama_index.core.workflow import Context\nfrom llama_index.llms.openai import OpenAI\nfrom llama_index.protocols.ag_ui.router import get_ag_ui_workflow_router\nfrom llama_index.protocols.ag_ui.events import StateDeltaWorkflowEvent, StateSnapshotWorkflowEvent\n\nclass Step(BaseModel):\n description: str\n\nclass Task(BaseModel):\n steps: list[Step]\n\n# Generative UI demo\nasync def run_task(\n ctx: Context, task: Task,\n) -> str:\n \"\"\"Execute any list of steps needed to complete a task. 
Useful for anything the user wants to do.\"\"\"\n\n async with ctx.store.edit_state() as global_state:\n state = global_state.get(\"state\", {})\n task = Task.model_validate(task)\n\n state = {\n \"steps\": [\n {\n \"description\": step.description,\n \"status\": \"pending\"\n }\n for step in task.steps\n ]\n }\n\n # Send initial state snapshot\n ctx.write_event_to_stream(\n StateSnapshotWorkflowEvent(\n snapshot=state\n )\n )\n\n # Sleep for 1 second\n await asyncio.sleep(1.0)\n\n # Create a copy to track changes for JSON patches\n previous_state = copy.deepcopy(state)\n\n # Update each step and send deltas\n for i, step in enumerate(state[\"steps\"]):\n step[\"status\"] = \"completed\"\n \n # Generate JSON patch from previous state to current state\n patch = jsonpatch.make_patch(previous_state, state)\n \n # Send state delta event\n ctx.write_event_to_stream(\n StateDeltaWorkflowEvent(\n delta=patch.patch\n )\n )\n \n # Update previous state for next iteration\n previous_state = copy.deepcopy(state)\n \n # Sleep for 1 second\n await asyncio.sleep(1.0)\n\n # Optionally send a final snapshot to the client\n ctx.write_event_to_stream(\n StateSnapshotWorkflowEvent(\n snapshot=state\n )\n )\n\n global_state[\"state\"] = state\n\n return \"Task Done!\"\n\n\nagentic_generative_ui_router = get_ag_ui_workflow_router(\n llm=OpenAI(model=\"gpt-4.1\"),\n backend_tools=[run_task],\n initial_state={},\n system_prompt=(\n \"You are a helpful assistant that can help the user with their task. \"\n \"If the user asks you to do any task, use the run_task tool to do it. \"\n \"Use your best judgement to describe the steps.\"\n )\n)\n", "language": "python", "type": "file" } @@ -1289,163 +98,7 @@ }, { "name": "shared_state.py", - "content": "from typing import Literal, List\nfrom pydantic import BaseModel\n\nfrom llama_index.core.workflow import Context\nfrom llama_index.llms.openai import OpenAI\nfrom llama_index.protocols.ag_ui.events import StateSnapshotWorkflowEvent\nfrom llama_index.protocols.ag_ui.router import get_ag_ui_workflow_router\n\n\nclass Ingredient(BaseModel):\n icon: str\n name: str\n amount: str\n\nclass Recipe(BaseModel):\n skill_level: str\n special_preferences: List[str]\n cooking_time: str\n ingredients: List[Ingredient]\n instructions: List[str]\n\n\nasync def update_recipe(ctx: Context, recipe: Recipe) -> str:\n \"\"\"Useful for recording a recipe to shared state.\"\"\"\n recipe = Recipe.model_validate(recipe)\n\n state = await ctx.get(\"state\")\n if state is None:\n state = {}\n\n state[\"recipe\"] = recipe.model_dump()\n\n ctx.write_event_to_stream(\n StateSnapshotWorkflowEvent(\n snapshot=state\n )\n )\n\n await ctx.set(\"state\", state)\n\n return \"Recipe updated!\"\n\n\nshared_state_router = get_ag_ui_workflow_router(\n llm=OpenAI(model=\"gpt-4.1\"),\n frontend_tools=[update_recipe],\n initial_state={\n \"recipe\": None,\n }\n)\n\n\n", - "language": "python", - "type": "file" - } - ], - "crewai::agentic_chat": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const 
[background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤖 Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interacts with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend and automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agentic_chat.py", - "content": "\"\"\"\nA simple agentic chat flow.\n\"\"\"\n\nfrom crewai.flow.flow import Flow, start\nfrom litellm import completion\nfrom ..sdk import copilotkit_stream, CopilotKitState\n\nclass AgenticChatFlow(Flow[CopilotKitState]):\n\n @start()\n async def chat(self):\n system_prompt = \"You are a helpful assistant.\"\n\n # 1. Run the model and stream the response\n # Note: In order to stream the response, wrap the completion call in\n # copilotkit_stream and set stream=True.\n response = await copilotkit_stream(\n completion(\n\n # 1.1 Specify the model to use\n model=\"openai/gpt-4o\",\n messages=[\n {\n \"role\": \"system\", \n \"content\": system_prompt\n },\n *self.state.messages\n ],\n\n # 1.2 Bind the available tools to the model\n tools=[\n *self.state.copilotkit.actions,\n ],\n\n # 1.3 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n stream=True\n )\n )\n\n message = response.choices[0].message\n\n # 2. 
Append the message to the messages in state\n self.state.messages.append(message)\n", - "language": "python", - "type": "file" - } - ], - "crewai::human_in_the_loop": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // LangGraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // LangGraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for langgraph integration.\n available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return <StepsFeedback args={args} respond={respond} status={status} />;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🤝 Human-in-the-Loop Task Planner\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **human-in-the-loop** capabilities:\n\n1. **Collaborative Planning**: The Copilot generates task steps and lets you\n decide which ones to perform\n2. **Interactive Decision Making**: Select or deselect steps to customize the\n execution plan\n3. **Adaptive Responses**: The Copilot adapts its execution based on your\n choices, even handling missing steps\n\n## How to Interact\n\nTry these steps to experience the demo:\n\n1. Ask your Copilot to help with a task, such as:\n\n - \"Make me a sandwich\"\n - \"Plan a weekend trip\"\n - \"Organize a birthday party\"\n - \"Start a garden\"\n\n2. Review the suggested steps provided by your Copilot\n\n3. Select or deselect steps using the checkboxes to customize the plan\n\n - Try removing essential steps to see how the Copilot adapts!\n\n4. Click \"Execute Plan\" to see the outcome based on your selections\n\n## ✨ Human-in-the-Loop Magic in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and breaks it down into logical steps\n- These steps are presented to you through a dynamic UI component\n- Your selections are captured as user input\n- The agent considers your choices when executing the plan\n- The agent adapts to missing steps with creative problem-solving\n\n**What you'll see in this demo:**\n\n- The Copilot provides a detailed, step-by-step plan for your task\n- You have complete control over which steps to include\n- If you remove essential steps, the Copilot provides entertaining and creative\n workarounds\n- The final execution reflects your choices, showing how human input shapes the\n outcome\n- Each response is tailored to your specific selections\n\nThis human-in-the-loop pattern creates a powerful collaborative experience where\nboth human judgment and AI capabilities work together to achieve better results\nthan either could alone!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "human_in_the_loop.py", - "content": "\"\"\"\nAn example demonstrating agentic generative UI.\n\"\"\"\n\nfrom crewai.flow.flow import Flow, start, router, listen\nfrom litellm import completion\nfrom pydantic import BaseModel\nfrom typing import Literal, List\nfrom ..sdk import (\n copilotkit_stream,\n CopilotKitState,\n)\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\nDEFINE_TASK_TOOL = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"generate_task_steps\",\n \"description\": \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in imperative form (i.e. 
Dig hole, Open door, ...)\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"steps\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"description\": {\n \"type\": \"string\",\n \"description\": \"The text of the step in imperative form\"\n },\n \"status\": {\n \"type\": \"string\",\n \"enum\": [\"enabled\"],\n \"description\": \"The status of the step, always 'enabled'\"\n }\n },\n \"required\": [\"description\", \"status\"]\n },\n \"description\": \"An array of 10 step objects, each containing text and status\"\n }\n },\n \"required\": [\"steps\"]\n }\n }\n}\n\nclass TaskStep(BaseModel):\n description: str\n status: Literal[\"enabled\", \"disabled\"]\n\nclass AgentState(CopilotKitState):\n \"\"\"\n Here we define the state of the agent\n\n In this instance, we're inheriting from CopilotKitState, which will bring in\n the CopilotKitState fields. We're also adding a custom field, `steps`,\n which will be used to store the steps of the task.\n \"\"\"\n steps: List[TaskStep] = []\n\n\nclass HumanInTheLoopFlow(Flow[AgentState]):\n \"\"\"\n This is a sample flow that uses the CopilotKit framework to create a chat agent.\n \"\"\"\n\n @start()\n @listen(\"route_follow_up\")\n async def start_flow(self):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n @router(start_flow)\n async def chat(self):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `generate_task_steps` function when the user asks you to perform a task.\n When the function `generate_task_steps` is called, the user will decide to enable or disable a step.\n After the user has decided which steps to perform, provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n # 1. Run the model and stream the response\n # Note: In order to stream the response, wrap the completion call in\n # copilotkit_stream and set stream=True.\n response = await copilotkit_stream(\n completion(\n\n # 1.1 Specify the model to use\n model=\"openai/gpt-4o\",\n messages=[\n {\n \"role\": \"system\", \n \"content\": system_prompt\n },\n *self.state.messages\n ],\n\n # 1.2 Bind the tools to the model\n tools=[\n *self.state.copilotkit.actions,\n DEFINE_TASK_TOOL\n ],\n\n # 1.3 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n stream=True\n )\n )\n\n message = response.choices[0].message\n\n # 2. 
Append the message to the messages in state\n self.state.messages.append(message)\n\n return \"route_end\"\n\n @listen(\"route_end\")\n async def end(self):\n \"\"\"\n End the flow.\n \"\"\"\n", - "language": "python", - "type": "file" - } - ], - "crewai::tool_based_generative_ui": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n
\n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n}\n\n.page-background {\n /* Darker gradient background */\n background: linear-gradient(170deg, #e9ecef 0%, #ced4da 100%);\n}\n\n@keyframes fade-scale-in {\n from {\n opacity: 0;\n transform: translateY(10px) scale(0.98);\n }\n to {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Updated card entry animation */\n@keyframes pop-in {\n 0% {\n opacity: 0;\n transform: translateY(15px) scale(0.95);\n }\n 70% {\n opacity: 1;\n transform: translateY(-2px) scale(1.02);\n }\n 100% {\n opacity: 1;\n transform: translateY(0) scale(1);\n }\n}\n\n/* Animation for subtle background gradient movement */\n@keyframes animated-gradient {\n 0% {\n background-position: 0% 50%;\n }\n 50% {\n background-position: 100% 50%;\n }\n 100% {\n background-position: 0% 50%;\n }\n}\n\n/* Animation for flash effect on apply */\n@keyframes flash-border-glow {\n 0% {\n /* Start slightly intensified */\n border-top-color: #ff5b4a !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n }\n 50% {\n /* Peak intensity */\n border-top-color: #ff4733 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.08),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 35px rgba(255, 71, 51, 0.7);\n }\n 100% {\n /* Return to default state appearance */\n border-top-color: #ff6f61 !important;\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 10px rgba(255, 111, 97, 0.15);\n }\n}\n\n/* Existing animation for haiku lines */\n@keyframes fade-slide-in {\n from {\n opacity: 0;\n transform: translateX(-15px);\n }\n to {\n opacity: 1;\n transform: translateX(0);\n }\n}\n\n.animated-fade-in {\n /* Use the new pop-in animation */\n animation: pop-in 0.6s ease-out forwards;\n}\n\n.haiku-card {\n /* Subtle animated gradient background */\n background: linear-gradient(120deg, #ffffff 0%, #fdfdfd 50%, #ffffff 100%);\n background-size: 200% 200%;\n animation: animated-gradient 10s ease infinite;\n\n /* === Explicit Border Override Attempt === */\n /* 1. Set the default grey border for all sides */\n border: 1px solid #dee2e6;\n\n /* 2. 
Explicitly override the top border immediately after */\n border-top: 10px solid #ff6f61 !important; /* Orange top - Added !important */\n /* === End Explicit Border Override Attempt === */\n\n padding: 2.5rem 3rem;\n border-radius: 20px;\n\n /* Default glow intensity */\n box-shadow: 0 10px 30px rgba(0, 0, 0, 0.07),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 15px rgba(255, 111, 97, 0.25);\n text-align: left;\n max-width: 745px;\n margin: 3rem auto;\n min-width: 600px;\n\n /* Transition */\n transition: transform 0.35s ease, box-shadow 0.35s ease, border-top-width 0.35s ease, border-top-color 0.35s ease;\n}\n\n.haiku-card:hover {\n transform: translateY(-8px) scale(1.03);\n /* Enhanced shadow + Glow */\n box-shadow: 0 15px 35px rgba(0, 0, 0, 0.1),\n inset 0 1px 2px rgba(0, 0, 0, 0.01),\n 0 0 25px rgba(255, 91, 74, 0.5);\n /* Modify only top border properties */\n border-top-width: 14px !important; /* Added !important */\n border-top-color: #ff5b4a !important; /* Added !important */\n}\n\n.haiku-card .flex {\n margin-bottom: 1.5rem;\n}\n\n.haiku-card .flex.haiku-line { /* Target the lines specifically */\n margin-bottom: 1.5rem;\n opacity: 0; /* Start hidden for animation */\n animation: fade-slide-in 0.5s ease-out forwards;\n /* animation-delay is set inline in page.tsx */\n}\n\n/* Remove previous explicit color overrides - rely on Tailwind */\n/* .haiku-card p.text-4xl {\n color: #212529;\n}\n\n.haiku-card p.text-base {\n color: #495057;\n} */\n\n.haiku-card.applied-flash {\n /* Apply the flash animation once */\n /* Note: animation itself has !important on border-top-color */\n animation: flash-border-glow 0.6s ease-out forwards;\n}\n\n/* Styling for images within the main haiku card */\n.haiku-card-image {\n width: 9.5rem; /* Increased size (approx w-48) */\n height: 9.5rem; /* Increased size (approx h-48) */\n object-fit: cover;\n border-radius: 1.5rem; /* rounded-xl */\n border: 1px solid #e5e7eb;\n /* Enhanced shadow with subtle orange hint */\n box-shadow: 0 8px 15px rgba(0, 0, 0, 0.1),\n 0 3px 6px rgba(0, 0, 0, 0.08),\n 0 0 10px rgba(255, 111, 97, 0.2);\n /* Inherit animation delay from inline style */\n animation-name: fadeIn;\n animation-duration: 0.5s;\n animation-fill-mode: both;\n}\n\n/* Styling for images within the suggestion card */\n.suggestion-card-image {\n width: 6.5rem; /* Increased slightly (w-20) */\n height: 6.5rem; /* Increased slightly (h-20) */\n object-fit: cover;\n border-radius: 1rem; /* Equivalent to rounded-md */\n border: 1px solid #d1d5db; /* Equivalent to border (using Tailwind gray-300) */\n margin-top: 0.5rem;\n /* Added shadow for suggestion images */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1),\n 0 2px 4px rgba(0, 0, 0, 0.06);\n transition: all 0.2s ease-in-out; /* Added for smooth deselection */\n}\n\n/* Styling for the focused suggestion card image */\n.suggestion-card-image-focus {\n width: 6.5rem;\n height: 6.5rem;\n object-fit: cover;\n border-radius: 1rem;\n margin-top: 0.5rem;\n /* Highlight styles */\n border: 2px solid #ff6f61; /* Thicker, themed border */\n box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1), /* Base shadow for depth */\n 0 0 12px rgba(255, 111, 97, 0.6); /* Orange glow */\n transform: scale(1.05); /* Slightly scale up */\n transition: all 0.2s ease-in-out; /* Smooth transition for focus */\n}\n\n/* Styling for the suggestion card container in the sidebar */\n.suggestion-card {\n border: 1px solid #dee2e6; /* Same default border as haiku-card */\n border-top: 10px solid #ff6f61; /* Same orange top border */\n border-radius: 0.375rem; 
/* Default rounded-md */\n /* Note: background-color is set by Tailwind bg-gray-100 */\n /* Other styles like padding, margin, flex are handled by Tailwind */\n}\n\n.suggestion-image-container {\n display: flex;\n gap: 1rem;\n justify-content: space-between;\n width: 100%;\n height: 6.5rem;\n}\n\n/* Mobile responsive styles - matches useMobileView hook breakpoint */\n@media (max-width: 767px) {\n .haiku-card {\n padding: 1rem 1.5rem; /* Reduced from 2.5rem 3rem */\n min-width: auto; /* Remove min-width constraint */\n max-width: 100%; /* Full width on mobile */\n margin: 1rem auto; /* Reduced margin */\n }\n\n .haiku-card-image {\n width: 5.625rem; /* 90px - smaller on mobile */\n height: 5.625rem; /* 90px - smaller on mobile */\n }\n\n .suggestion-card-image {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n\n .suggestion-card-image-focus {\n width: 5rem; /* Slightly smaller on mobile */\n height: 5rem; /* Slightly smaller on mobile */\n }\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🪶 Tool-Based Generative UI Haiku Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **tool-based generative UI** capabilities:\n\n1. **Frontend Rendering of Tool Calls**: Backend tool calls are automatically\n rendered in the UI\n2. **Dynamic UI Generation**: The UI updates in real-time as the agent generates\n content\n3. **Elegant Content Presentation**: Complex structured data (haikus) are\n beautifully displayed\n\n## How to Interact\n\nChat with your Copilot and ask for haikus about different topics:\n\n- \"Create a haiku about nature\"\n- \"Write a haiku about technology\"\n- \"Generate a haiku about the changing seasons\"\n- \"Make a humorous haiku about programming\"\n\nEach request will trigger the agent to generate a haiku and display it in a\nvisually appealing card format in the UI.\n\n## ✨ Tool-Based Generative UI in Action\n\n**What's happening technically:**\n\n- The agent processes your request and determines it should create a haiku\n- It calls a backend tool that returns structured haiku data\n- CopilotKit automatically renders this tool call in the frontend\n- The rendering is handled by the registered tool component in your React app\n- No manual state management is required to display the results\n\n**What you'll see in this demo:**\n\n- As you request a haiku, a beautifully formatted card appears in the UI\n- The haiku follows the traditional 5-7-5 syllable structure\n- Each haiku is presented with consistent styling\n- Multiple haikus can be generated in sequence\n- The UI adapts to display each new piece of content\n\nThis pattern of tool-based generative UI can be extended to create any kind of\ndynamic content - from data visualizations to interactive components, all driven\nby your Copilot's tool calls!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "tool_based_generative_ui.py", - "content": "\"\"\"\nAn example demonstrating tool-based generative UI.\n\"\"\"\n\nfrom crewai.flow.flow import Flow, start\nfrom litellm import completion\nfrom ..sdk import copilotkit_stream, CopilotKitState\n\n\n# This tool generates a haiku on the server.\n# The tool call will be streamed to the frontend as it is being generated.\nGENERATE_HAIKU_TOOL = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"generate_haiku\",\n \"description\": \"Generate a haiku in Japanese and its English translation\",\n \"parameters\": {\n \"type\": \"object\",\n 
\"properties\": {\n \"japanese\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"An array of three lines of the haiku in Japanese\"\n },\n \"english\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"An array of three lines of the haiku in English\"\n },\n \"image_names\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"Names of 3 relevant images from the provided list\"\n }\n },\n \"required\": [\"japanese\", \"english\", \"image_names\"]\n }\n }\n}\n\n\nclass ToolBasedGenerativeUIFlow(Flow[CopilotKitState]):\n \"\"\"\n A flow that demonstrates tool-based generative UI.\n \"\"\"\n\n @start()\n async def chat(self):\n \"\"\"\n The main function handling chat and tool calls.\n \"\"\"\n system_prompt = \"You assist the user in generating a haiku. When generating a haiku using the 'generate_haiku' tool, you MUST also select exactly 3 image filenames from the following list that are most relevant to the haiku's content or theme. Return the filenames in the 'image_names' parameter. Dont provide the relavent image names in your final response to the user. \"\n\n\n # 1. Run the model and stream the response\n # Note: In order to stream the response, wrap the completion call in\n # copilotkit_stream and set stream=True.\n response = await copilotkit_stream(\n completion(\n\n # 1.1 Specify the model to use\n model=\"openai/gpt-4o\",\n messages=[\n {\n \"role\": \"system\", \n \"content\": system_prompt\n },\n *self.state.messages\n ],\n\n # 1.2 Bind the available tools to the model\n tools=[ GENERATE_HAIKU_TOOL ],\n\n # 1.3 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n stream=True\n )\n )\n message = response.choices[0].message\n\n # 2. Append the message to the messages in state\n self.state.messages.append(message)\n\n # 3. If there are tool calls, append a tool message to the messages in state\n if message.tool_calls:\n self.state.messages.append(\n {\n \"tool_call_id\": message.tool_calls[0].id,\n \"role\": \"tool\",\n \"content\": \"Haiku generated.\"\n }\n )\n", - "language": "python", - "type": "file" - } - ], - "crewai::agentic_generative_ui": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter(step => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n
\n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n \n {/* Progress Bar */}\n
\n
\n
\n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending = step.status === \"pending\" && \n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n
\n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n
\n )}\n\n {/* Status Icon */}\n
\n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n
\n {step.description}\n
\n {isCurrentPending && (\n
\n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n
\n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n
\n
\n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🚀 Agentic Generative UI Task Executor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic generative UI** capabilities:\n\n1. **Real-time Status Updates**: The Copilot provides live feedback as it works\n through complex tasks\n2. **Long-running Task Execution**: See how agents can handle extended processes\n with continuous feedback\n3. **Dynamic UI Generation**: The interface updates in real-time to reflect the\n agent's progress\n\n## How to Interact\n\nSimply ask your Copilot to perform any moderately complex task:\n\n- \"Make me a sandwich\"\n- \"Plan a vacation to Japan\"\n- \"Create a weekly workout routine\"\n\nThe Copilot will break down the task into steps and begin \"executing\" them,\nproviding real-time status updates as it progresses.\n\n## ✨ Agentic Generative UI in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and creates a detailed execution plan\n- Each step is processed sequentially with realistic timing\n- Status updates are streamed to the frontend using CopilotKit's streaming\n capabilities\n- The UI dynamically renders these updates without page refreshes\n- The entire flow is managed by the agent, requiring no manual intervention\n\n**What you'll see in this demo:**\n\n- The Copilot breaks your task into logical steps\n- A status indicator shows the current progress\n- Each step is highlighted as it's being executed\n- Detailed status messages explain what's happening at each moment\n- Upon completion, you receive a summary of the task execution\n\nThis pattern of providing real-time progress for long-running tasks is perfect\nfor scenarios where users benefit from transparency into complex processes -\nfrom data analysis to content creation, system configurations, or multi-stage\nworkflows!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "agentic_generative_ui.py", - "content": "\"\"\"\nAn example demonstrating agentic generative UI.\n\"\"\"\n\nimport json\nimport asyncio\nfrom crewai.flow.flow import Flow, start, router, listen, or_\nfrom litellm import completion\nfrom pydantic import BaseModel\nfrom typing import Literal, List\n\nfrom ..sdk import (\n copilotkit_stream,\n CopilotKitState,\n copilotkit_predict_state,\n copilotkit_emit_state\n)\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\nPERFORM_TASK_TOOL = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"generate_task_steps\",\n \"description\": \"Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in gerund form (i.e. 
Digging hole, opening door, ...)\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"steps\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"description\": {\n \"type\": \"string\",\n \"description\": \"The text of the step in gerund form\"\n },\n \"status\": {\n \"type\": \"string\",\n \"enum\": [\"pending\"],\n \"description\": \"The status of the step, always 'pending'\"\n }\n },\n \"required\": [\"description\", \"status\"]\n },\n \"description\": \"An array of 10 step objects, each containing text and status\"\n }\n },\n \"required\": [\"steps\"]\n }\n }\n}\n\nclass TaskStep(BaseModel):\n description: str\n status: Literal[\"pending\", \"completed\"]\n\nclass AgentState(CopilotKitState):\n \"\"\"\n Here we define the state of the agent\n\n In this instance, we're inheriting from CopilotKitState, which will bring in\n the CopilotKitState fields. We're also adding a custom field, `steps`,\n which will be used to store the steps of the task.\n \"\"\"\n steps: List[TaskStep] = []\n\n\nclass AgenticGenerativeUIFlow(Flow[AgentState]):\n \"\"\"\n This is a sample flow that uses the CopilotKit framework to create a chat agent.\n \"\"\"\n\n \n @start()\n async def start_flow(self):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n self.state.steps = []\n\n @router(or_(start_flow, \"simulate_task\"))\n async def chat(self):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. \n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # 1. Here we specify that we want to stream the tool call to generate_task_steps\n # to the frontend as state.\n await copilotkit_predict_state({\n \"steps\": {\n \"tool_name\": \"generate_task_steps\",\n \"tool_argument\": \"steps\"\n }\n })\n\n # 2. Run the model and stream the response\n # Note: In order to stream the response, wrap the completion call in\n # copilotkit_stream and set stream=True.\n response = await copilotkit_stream(\n completion(\n\n # 2.1 Specify the model to use\n model=\"openai/gpt-4o\",\n messages=[\n {\n \"role\": \"system\", \n \"content\": system_prompt\n },\n *self.state.messages\n ],\n\n # 2.2 Bind the tools to the model\n tools=[\n *self.state.copilotkit.actions,\n PERFORM_TASK_TOOL\n ],\n\n # 2.3 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n stream=True\n )\n )\n\n message = response.choices[0].message\n\n # 3. Append the message to the messages in state\n self.state.messages.append(message)\n\n # 4. 
Handle tool call\n if message.get(\"tool_calls\"):\n tool_call = message[\"tool_calls\"][0]\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"function\"][\"name\"]\n tool_call_args = json.loads(tool_call[\"function\"][\"arguments\"])\n\n if tool_call_name == \"generate_task_steps\":\n # Convert each step in the JSON array to a TaskStep instance\n self.state.steps = [TaskStep(**step) for step in tool_call_args[\"steps\"]]\n\n # 4.1 Append the result to the messages in state\n self.state.messages.append({\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call_id\n })\n return \"route_simulate_task\"\n\n # 5. If our tool was not called, return to the end route\n return \"route_end\"\n\n @listen(\"route_simulate_task\")\n async def simulate_task(self):\n \"\"\"\n Simulate the task.\n \"\"\"\n for step in self.state.steps:\n # simulate executing the step\n await asyncio.sleep(1)\n step.status = \"completed\"\n await copilotkit_emit_state(self.state)\n\n @listen(\"route_end\")\n async def end(self):\n \"\"\"\n End the flow.\n \"\"\"\n", - "language": "python", - "type": "file" - } - ], - "crewai::shared_state": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, 
preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n background-color: #fff;\n color: #000;\n border-bottom: 0px;\n}\n\n/* Recipe App Styles */\n.app-container {\n min-height: 100vh;\n width: 100%;\n display: flex;\n align-items: center;\n justify-content: center;\n background-size: cover;\n background-position: center;\n background-repeat: no-repeat;\n background-attachment: fixed;\n position: relative;\n overflow: auto;\n}\n\n.recipe-card {\n background-color: rgba(255, 255, 255, 0.97);\n border-radius: 16px;\n box-shadow: 0 15px 30px rgba(0, 0, 0, 0.25), 0 5px 15px rgba(0, 0, 0, 0.15);\n width: 100%;\n max-width: 750px;\n margin: 20px auto;\n padding: 14px 32px;\n position: relative;\n z-index: 1;\n backdrop-filter: blur(5px);\n border: 1px solid rgba(255, 255, 255, 0.3);\n transition: transform 0.2s ease, box-shadow 0.2s ease;\n animation: fadeIn 0.5s ease-out forwards;\n box-sizing: border-box;\n overflow: hidden;\n}\n\n.recipe-card:hover {\n transform: translateY(-5px);\n box-shadow: 0 20px 40px rgba(0, 0, 0, 0.3), 0 10px 20px rgba(0, 0, 0, 0.2);\n}\n\n/* Recipe Header */\n.recipe-header {\n margin-bottom: 24px;\n}\n\n.recipe-title-input {\n width: 100%;\n font-size: 24px;\n font-weight: bold;\n border: none;\n outline: none;\n padding: 8px 0;\n margin-bottom: 0px;\n}\n\n.recipe-meta {\n display: flex;\n align-items: center;\n gap: 20px;\n margin-top: 5px;\n margin-bottom: 14px;\n}\n\n.meta-item {\n display: flex;\n align-items: center;\n gap: 8px;\n color: #555;\n}\n\n.meta-icon {\n font-size: 20px;\n color: #777;\n}\n\n.meta-text {\n font-size: 15px;\n}\n\n/* Recipe Meta Selects */\n.meta-item select {\n border: none;\n background: transparent;\n font-size: 15px;\n color: #555;\n cursor: pointer;\n outline: none;\n padding-right: 18px;\n transition: color 0.2s, transform 0.1s;\n font-weight: 500;\n}\n\n.meta-item select:hover,\n.meta-item select:focus {\n color: #FF5722;\n}\n\n.meta-item select:active {\n transform: scale(0.98);\n}\n\n.meta-item select option {\n color: #333;\n background-color: white;\n font-weight: normal;\n padding: 8px;\n}\n\n/* Section Container */\n.section-container {\n margin-bottom: 20px;\n position: relative;\n width: 100%;\n}\n\n.section-title {\n font-size: 20px;\n font-weight: 700;\n margin-bottom: 20px;\n color: #333;\n position: relative;\n display: inline-block;\n}\n\n.section-title:after {\n content: \"\";\n position: absolute;\n bottom: -8px;\n left: 0;\n width: 40px;\n height: 3px;\n background-color: #ff7043;\n border-radius: 3px;\n}\n\n/* Dietary Preferences */\n.dietary-options {\n display: flex;\n flex-wrap: wrap;\n gap: 10px 16px;\n margin-bottom: 16px;\n width: 100%;\n}\n\n.dietary-option {\n display: flex;\n align-items: center;\n gap: 6px;\n font-size: 14px;\n cursor: pointer;\n margin-bottom: 4px;\n}\n\n.dietary-option input {\n cursor: pointer;\n}\n\n/* Ingredients */\n.ingredients-container {\n display: flex;\n flex-wrap: wrap;\n gap: 10px;\n margin-bottom: 15px;\n width: 100%;\n box-sizing: border-box;\n}\n\n.ingredient-card {\n display: flex;\n align-items: center;\n background-color: rgba(255, 255, 255, 0.9);\n border-radius: 12px;\n padding: 12px;\n margin-bottom: 10px;\n box-shadow: 0 4px 10px rgba(0, 0, 0, 0.08);\n position: relative;\n transition: all 0.2s ease;\n border: 1px solid rgba(240, 
240, 240, 0.8);\n width: calc(33.333% - 7px);\n box-sizing: border-box;\n}\n\n.ingredient-card:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 15px rgba(0, 0, 0, 0.12);\n}\n\n.ingredient-card .remove-button {\n position: absolute;\n right: 10px;\n top: 10px;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 24px;\n height: 24px;\n line-height: 1;\n}\n\n.ingredient-card:hover .remove-button {\n display: block;\n}\n\n.ingredient-icon {\n font-size: 24px;\n margin-right: 12px;\n display: flex;\n align-items: center;\n justify-content: center;\n width: 40px;\n height: 40px;\n background-color: #f7f7f7;\n border-radius: 50%;\n flex-shrink: 0;\n}\n\n.ingredient-content {\n flex: 1;\n display: flex;\n flex-direction: column;\n gap: 3px;\n min-width: 0;\n}\n\n.ingredient-name-input,\n.ingredient-amount-input {\n border: none;\n background: transparent;\n outline: none;\n width: 100%;\n padding: 0;\n text-overflow: ellipsis;\n overflow: hidden;\n white-space: nowrap;\n}\n\n.ingredient-name-input {\n font-weight: 500;\n font-size: 14px;\n}\n\n.ingredient-amount-input {\n font-size: 13px;\n color: #666;\n}\n\n.ingredient-name-input::placeholder,\n.ingredient-amount-input::placeholder {\n color: #aaa;\n}\n\n.remove-button {\n background: none;\n border: none;\n color: #999;\n font-size: 20px;\n cursor: pointer;\n padding: 0;\n width: 28px;\n height: 28px;\n display: flex;\n align-items: center;\n justify-content: center;\n margin-left: 10px;\n}\n\n.remove-button:hover {\n color: #FF5722;\n}\n\n/* Instructions */\n.instructions-container {\n display: flex;\n flex-direction: column;\n gap: 6px;\n position: relative;\n margin-bottom: 12px;\n width: 100%;\n}\n\n.instruction-item {\n position: relative;\n display: flex;\n width: 100%;\n box-sizing: border-box;\n margin-bottom: 8px;\n align-items: flex-start;\n}\n\n.instruction-number {\n display: flex;\n align-items: center;\n justify-content: center;\n min-width: 26px;\n height: 26px;\n background-color: #ff7043;\n color: white;\n border-radius: 50%;\n font-weight: 600;\n flex-shrink: 0;\n box-shadow: 0 2px 4px rgba(255, 112, 67, 0.3);\n z-index: 1;\n font-size: 13px;\n margin-top: 2px;\n}\n\n.instruction-line {\n position: absolute;\n left: 13px; /* Half of the number circle width */\n top: 22px;\n bottom: -18px;\n width: 2px;\n background: linear-gradient(to bottom, #ff7043 60%, rgba(255, 112, 67, 0.4));\n z-index: 0;\n}\n\n.instruction-content {\n background-color: white;\n border-radius: 10px;\n padding: 10px 14px;\n margin-left: 12px;\n flex-grow: 1;\n transition: all 0.2s ease;\n box-shadow: 0 2px 6px rgba(0, 0, 0, 0.08);\n border: 1px solid rgba(240, 240, 240, 0.8);\n position: relative;\n width: calc(100% - 38px);\n box-sizing: border-box;\n display: flex;\n align-items: center;\n}\n\n.instruction-content-editing {\n background-color: #fff9f6;\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12), 0 0 0 2px rgba(255, 112, 67, 0.2);\n}\n\n.instruction-content:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12);\n}\n\n.instruction-textarea {\n width: 100%;\n background: transparent;\n border: none;\n resize: vertical;\n font-family: inherit;\n font-size: 14px;\n line-height: 1.4;\n min-height: 20px;\n outline: none;\n padding: 0;\n margin: 0;\n}\n\n.instruction-delete-btn {\n position: absolute;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 20px;\n height: 
20px;\n line-height: 1;\n top: 50%;\n transform: translateY(-50%);\n right: 8px;\n}\n\n.instruction-content:hover .instruction-delete-btn {\n display: flex;\n align-items: center;\n justify-content: center;\n}\n\n/* Action Button */\n.action-container {\n display: flex;\n justify-content: center;\n margin-top: 40px;\n padding-bottom: 20px;\n position: relative;\n}\n\n.improve-button {\n background-color: #ff7043;\n border: none;\n color: white;\n border-radius: 30px;\n font-size: 18px;\n font-weight: 600;\n padding: 14px 28px;\n cursor: pointer;\n transition: all 0.3s ease;\n box-shadow: 0 4px 15px rgba(255, 112, 67, 0.4);\n display: flex;\n align-items: center;\n justify-content: center;\n text-align: center;\n position: relative;\n min-width: 180px;\n}\n\n.improve-button:hover {\n background-color: #ff5722;\n transform: translateY(-2px);\n box-shadow: 0 8px 20px rgba(255, 112, 67, 0.5);\n}\n\n.improve-button.loading {\n background-color: #ff7043;\n opacity: 0.8;\n cursor: not-allowed;\n padding-left: 42px; /* Reduced padding to bring text closer to icon */\n padding-right: 22px; /* Balance the button */\n justify-content: flex-start; /* Left align text for better alignment with icon */\n}\n\n.improve-button.loading:after {\n content: \"\"; /* Add space between icon and text */\n display: inline-block;\n width: 8px; /* Width of the space */\n}\n\n.improve-button:before {\n content: \"\";\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='white' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpath d='M12 2v4M12 18v4M4.93 4.93l2.83 2.83M16.24 16.24l2.83 2.83M2 12h4M18 12h4M4.93 19.07l2.83-2.83M16.24 7.76l2.83-2.83'/%3E%3C/svg%3E\");\n width: 20px; /* Slightly smaller icon */\n height: 20px;\n background-repeat: no-repeat;\n background-size: contain;\n position: absolute;\n left: 16px; /* Slightly adjusted */\n top: 50%;\n transform: translateY(-50%);\n display: none;\n}\n\n.improve-button.loading:before {\n display: block;\n animation: spin 1.5s linear infinite;\n}\n\n@keyframes spin {\n 0% { transform: translateY(-50%) rotate(0deg); }\n 100% { transform: translateY(-50%) rotate(360deg); }\n}\n\n/* Ping Animation */\n.ping-animation {\n position: absolute;\n display: flex;\n width: 12px;\n height: 12px;\n top: 0;\n right: 0;\n}\n\n.ping-circle {\n position: absolute;\n display: inline-flex;\n width: 100%;\n height: 100%;\n border-radius: 50%;\n background-color: #38BDF8;\n opacity: 0.75;\n animation: ping 1.5s cubic-bezier(0, 0, 0.2, 1) infinite;\n}\n\n.ping-dot {\n position: relative;\n display: inline-flex;\n width: 12px;\n height: 12px;\n border-radius: 50%;\n background-color: #0EA5E9;\n}\n\n@keyframes ping {\n 75%, 100% {\n transform: scale(2);\n opacity: 0;\n }\n}\n\n/* Instruction hover effects */\n.instruction-item:hover .instruction-delete-btn {\n display: flex !important;\n}\n\n/* Add some subtle animations */\n@keyframes fadeIn {\n from { opacity: 0; transform: translateY(20px); }\n to { opacity: 1; transform: translateY(0); }\n}\n\n/* Better center alignment for the recipe card */\n.recipe-card-container {\n display: flex;\n justify-content: center;\n width: 100%;\n position: relative;\n z-index: 1;\n margin: 0 auto;\n box-sizing: border-box;\n}\n\n/* Add Buttons */\n.add-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 8px;\n padding: 10px 16px;\n cursor: pointer;\n font-weight: 500;\n display: 
inline-block;\n font-size: 14px;\n margin-bottom: 0;\n}\n\n.add-step-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 6px;\n padding: 6px 12px;\n cursor: pointer;\n font-weight: 500;\n font-size: 13px;\n}\n\n/* Section Headers */\n.section-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 12px;\n}", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 🍳 Shared State Recipe Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **shared state** functionality - a powerful\nfeature that enables bidirectional data flow between:\n\n1. **Frontend → Agent**: UI controls update the agent's context in real-time\n2. **Agent → Frontend**: The Copilot's recipe creations instantly update the UI\n components\n\nIt's like having a cooking buddy who not only listens to what you want but also\nupdates your recipe card as you chat - no refresh needed! ✨\n\n## How to Interact\n\nMix and match any of these parameters (or none at all - it's up to you!):\n\n- **Skill Level**: Beginner to expert 👨‍🍳\n- **Cooking Time**: Quick meals or slow cooking ⏱️\n- **Special Preferences**: Dietary needs, flavor profiles, health goals 🥗\n- **Ingredients**: Items you want to include 🧅🥩🍄\n- **Instructions**: Any specific steps\n\nThen chat with your Copilot chef with prompts like:\n\n- \"I'm a beginner cook. Can you make me a quick dinner?\"\n- \"I need something spicy with chicken that takes under 30 minutes!\"\n\n## ✨ Shared State Magic in Action\n\n**What's happening technically:**\n\n- The UI and Copilot agent share the same state object (**Agent State = UI\n State**)\n- Changes from either side automatically update the other\n- Neither side needs to manually request updates from the other\n\n**What you'll see in this demo:**\n\n- Set cooking time to 20 minutes in the UI and watch the Copilot immediately\n respect your time constraint\n- Add ingredients through the UI and see them appear in your recipe\n- When the Copilot suggests new ingredients, watch them automatically appear in\n the UI ingredients list\n- Change your skill level and see how the Copilot adapts its instructions in\n real-time\n\nThis synchronized state creates a seamless experience where the agent always has\nyour current preferences, and any updates to the recipe are instantly reflected\nin both places.\n\nThis shared state pattern can be applied to any application where you want your\nUI and Copilot to work together in perfect harmony!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "shared_state.py", - "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import List, Optional\nfrom litellm import completion\nfrom pydantic import BaseModel, Field\nfrom crewai.flow.flow import Flow, start, router, listen\nfrom ..sdk import (\n copilotkit_stream, \n copilotkit_predict_state,\n CopilotKitState\n)\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient with its details.\n \"\"\"\n icon: str = Field(..., 
description=\"Emoji icon representing the ingredient.\")\n name: str = Field(..., description=\"Name of the ingredient.\")\n amount: str = Field(..., description=\"Amount or quantity of the ingredient.\")\n\nGENERATE_RECIPE_TOOL = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"generate_recipe\",\n \"description\": \" \".join(\"\"\"Generate or modify an existing recipe. \n When creating a new recipe, specify all fields. \n When modifying, only fill optional fields if they need changes; \n otherwise, leave them empty.\"\"\".split()),\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"recipe\": {\n \"description\": \"The recipe object containing all details.\",\n \"type\": \"object\",\n \"properties\": {\n \"title\": {\n \"type\": \"string\",\n \"description\": \"The title of the recipe.\"\n },\n \"skill_level\": {\n \"type\": \"string\",\n \"enum\": [level.value for level in SkillLevel],\n \"description\": \"The skill level required for the recipe.\"\n },\n \"special_preferences\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"A list of dietary preferences (e.g., Vegetarian, Gluten-free).\"\n },\n \"cooking_time\": {\n \"type\": \"string\",\n \"enum\": [time.value for time in CookingTime],\n \"description\": \"The estimated cooking time for the recipe.\"\n },\n \"ingredients\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"icon\": {\"type\": \"string\", \"description\": \"Emoji icon for the ingredient.\"},\n \"name\": {\"type\": \"string\", \"description\": \"Name of the ingredient.\"},\n \"amount\": {\"type\": \"string\", \"description\": \"Amount/quantity of the ingredient.\"}\n },\n \"required\": [\"icon\", \"name\", \"amount\"]\n },\n \"description\": \"A list of ingredients required for the recipe.\"\n },\n \"instructions\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n \"description\": \"Step-by-step instructions for preparing the recipe.\"\n }\n },\n \"required\": [\"title\", \"skill_level\", \"cooking_time\", \"special_preferences\", \"ingredients\", \"instructions\"]\n }\n },\n \"required\": [\"recipe\"]\n }\n }\n}\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n title: str\n skill_level: SkillLevel\n special_preferences: List[str] = Field(default_factory=list)\n cooking_time: CookingTime\n ingredients: List[Ingredient] = Field(default_factory=list)\n instructions: List[str] = Field(default_factory=list)\n\n\nclass AgentState(CopilotKitState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Recipe] = None\n\nclass SharedStateFlow(Flow[AgentState]):\n \"\"\"\n This is a sample flow that demonstrates shared state between the agent and CopilotKit.\n \"\"\"\n\n @start()\n @listen(\"route_follow_up\")\n async def start_flow(self):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n print(f\"start_flow\")\n print(f\"self.state: {self.state}\")\n\n @router(start_flow)\n async def chat(self):\n \"\"\"\n Standard chat node.\n \"\"\"\n \n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {self.state.model_dump_json(indent=2)}\n You can modify the recipe by calling the generate_recipe tool.\n If you have just created or modified the recipe, just answer in one sentence what you did.\n \"\"\"\n\n # 1. 
Here we specify that we want to stream the tool call to generate_recipe\n # to the frontend as state.\n await copilotkit_predict_state({\n \"recipe\": {\n \"tool_name\": \"generate_recipe\",\n \"tool_argument\": \"recipe\"\n }\n })\n\n # 2. Run the model and stream the response\n # Note: In order to stream the response, wrap the completion call in\n # copilotkit_stream and set stream=True.\n response = await copilotkit_stream(\n completion(\n\n # 2.1 Specify the model to use\n model=\"openai/gpt-4o\",\n messages=[\n {\n \"role\": \"system\", \n \"content\": system_prompt\n },\n *self.state.messages\n ],\n\n # 2.2 Bind the tools to the model\n tools=[\n *self.state.copilotkit.actions,\n GENERATE_RECIPE_TOOL\n ],\n\n # 2.3 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n stream=True\n )\n )\n\n message = response.choices[0].message\n\n # 3. Append the message to the messages in state\n self.state.messages.append(message)\n\n # 4. Handle tool call\n if message.get(\"tool_calls\"):\n tool_call = message[\"tool_calls\"][0]\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"function\"][\"name\"]\n tool_call_args = json.loads(tool_call[\"function\"][\"arguments\"])\n\n if tool_call_name == \"generate_recipe\":\n # Attempt to update the recipe state using the data from the tool call\n try:\n updated_recipe_data = tool_call_args[\"recipe\"]\n # Validate and update the state. Pydantic will raise an error if the structure is wrong.\n self.state.recipe = Recipe(**updated_recipe_data)\n\n # 4.1 Append the result to the messages in state\n self.state.messages.append({\n \"role\": \"tool\",\n \"content\": \"Recipe updated.\", # More accurate message\n \"tool_call_id\": tool_call_id\n })\n return \"route_follow_up\"\n except Exception as e:\n # Handle validation or other errors during update\n print(f\"Error updating recipe state: {e}\") # Log the error server-side\n # Optionally inform the user via a tool message, though it might be noisy\n # self.state.messages.append({\"role\": \"tool\", \"content\": f\"Error processing recipe update: {e}\", \"tool_call_id\": tool_call_id})\n return \"route_end\" # End the flow on error for now\n\n # 5. 
If our tool was not called, return to the end route\n return \"route_end\"\n\n @listen(\"route_end\")\n async def end(self):\n \"\"\"\n End the flow.\n \"\"\"\n", - "language": "python", - "type": "file" - } - ], - "crewai::predictive_state_updates": [ - { - "name": "page.tsx", - "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n const chatTitle = 'AI Document Editor'\n const chatDescription = 'Ask me to create or edit a document'\n const initialLabel = 'Hi 👋 How can I help with your document?'\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction({\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n }, [agentState?.document]);\n\n // Action to write the document.\n useCopilotAction({\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n }, [agentState?.document]);\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n
\n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n
\n )}\n
\n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", - "language": "typescript", - "type": "file" - }, - { - "name": "style.css", - "content": "/* Basic editor styles */\n.tiptap-container {\n height: 100vh; /* Full viewport height */\n width: 100vw; /* Full viewport width */\n display: flex;\n flex-direction: column;\n}\n\n.tiptap {\n flex: 1; /* Take up remaining space */\n overflow: auto; /* Allow scrolling if content overflows */\n}\n\n.tiptap :first-child {\n margin-top: 0;\n}\n\n/* List styles */\n.tiptap ul,\n.tiptap ol {\n padding: 0 1rem;\n margin: 1.25rem 1rem 1.25rem 0.4rem;\n}\n\n.tiptap ul li p,\n.tiptap ol li p {\n margin-top: 0.25em;\n margin-bottom: 0.25em;\n}\n\n/* Heading styles */\n.tiptap h1,\n.tiptap h2,\n.tiptap h3,\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n line-height: 1.1;\n margin-top: 2.5rem;\n text-wrap: pretty;\n font-weight: bold;\n}\n\n.tiptap h1,\n.tiptap h2,\n.tiptap h3,\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n margin-top: 3.5rem;\n margin-bottom: 1.5rem;\n}\n\n.tiptap p {\n margin-bottom: 1rem;\n}\n\n.tiptap h1 {\n font-size: 1.4rem;\n}\n\n.tiptap h2 {\n font-size: 1.2rem;\n}\n\n.tiptap h3 {\n font-size: 1.1rem;\n}\n\n.tiptap h4,\n.tiptap h5,\n.tiptap h6 {\n font-size: 1rem;\n}\n\n/* Code and preformatted text styles */\n.tiptap code {\n background-color: var(--purple-light);\n border-radius: 0.4rem;\n color: var(--black);\n font-size: 0.85rem;\n padding: 0.25em 0.3em;\n}\n\n.tiptap pre {\n background: var(--black);\n border-radius: 0.5rem;\n color: var(--white);\n font-family: \"JetBrainsMono\", monospace;\n margin: 1.5rem 0;\n padding: 0.75rem 1rem;\n}\n\n.tiptap pre code {\n background: none;\n color: inherit;\n font-size: 0.8rem;\n padding: 0;\n}\n\n.tiptap blockquote {\n border-left: 3px solid var(--gray-3);\n margin: 1.5rem 0;\n padding-left: 1rem;\n}\n\n.tiptap hr {\n border: none;\n border-top: 1px solid var(--gray-2);\n margin: 2rem 0;\n}\n\n.tiptap s {\n background-color: #f9818150;\n padding: 2px;\n font-weight: bold;\n color: rgba(0, 0, 0, 0.7);\n}\n\n.tiptap em {\n background-color: #b2f2bb;\n padding: 2px;\n font-weight: bold;\n font-style: normal;\n}\n\n.copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n", - "language": "css", - "type": "file" - }, - { - "name": "README.mdx", - "content": "# 📝 Predictive State Updates Document Editor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **predictive state updates** for real-time\ndocument collaboration:\n\n1. **Live Document Editing**: Watch as your Copilot makes changes to a document\n in real-time\n2. **Diff Visualization**: See exactly what's being changed as it happens\n3. 
**Streaming Updates**: Changes are displayed character-by-character as the\n Copilot works\n\n## How to Interact\n\nTry these interactions with the collaborative document editor:\n\n- \"Fix the grammar and typos in this document\"\n- \"Make this text more professional\"\n- \"Add a section about [topic]\"\n- \"Summarize this content in bullet points\"\n- \"Change the tone to be more casual\"\n\nWatch as the Copilot processes your request and edits the document in real-time\nright before your eyes.\n\n## ✨ Predictive State Updates in Action\n\n**What's happening technically:**\n\n- The document state is shared between your UI and the Copilot\n- As the Copilot generates content, changes are streamed to the UI\n- Each modification is visualized with additions and deletions\n- The UI renders these changes progressively, without waiting for completion\n- All edits are tracked and displayed in a visually intuitive way\n\n**What you'll see in this demo:**\n\n- Text changes are highlighted in different colors (green for additions, red for\n deletions)\n- The document updates character-by-character, creating a typing-like effect\n- You can see the Copilot's thought process as it refines the content\n- The final document seamlessly incorporates all changes\n- The experience feels collaborative, as if someone is editing alongside you\n\nThis pattern of real-time collaborative editing with diff visualization is\nperfect for document editors, code review tools, content creation platforms, or\nany application where users benefit from seeing exactly how content is being\ntransformed!\n", - "language": "markdown", - "type": "file" - }, - { - "name": "predictive_state_updates.py", - "content": "\"\"\"\nA demo of predictive state updates.\n\"\"\"\n\nimport json\nimport uuid\nfrom typing import Optional\nfrom litellm import completion\nfrom crewai.flow.flow import Flow, start, router, listen\nfrom ..sdk import (\n copilotkit_stream, \n copilotkit_predict_state,\n CopilotKitState\n)\n\nWRITE_DOCUMENT_TOOL = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"write_document_local\",\n \"description\": \" \".join(\"\"\"\n Write a document. Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\".split()),\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"document\": {\n \"type\": \"string\",\n \"description\": \"The document to write\"\n },\n },\n }\n }\n}\n\n\nclass AgentState(CopilotKitState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n\nclass PredictiveStateUpdatesFlow(Flow[AgentState]):\n \"\"\"\n This is a sample flow that demonstrates predictive state updates.\n \"\"\"\n\n @start()\n @listen(\"route_follow_up\")\n async def start_flow(self):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n @router(start_flow)\n async def chat(self):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message. 
\n Just briefly summarize the changes you made. 2 sentences max.\n This is the current state of the document: ----\\n {self.state.document}\\n-----\n \"\"\"\n\n # 1. Here we specify that we want to stream the tool call to write_document_local\n # to the frontend as state.\n await copilotkit_predict_state({\n \"document\": {\n \"tool_name\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }\n })\n\n # 2. Run the model and stream the response\n # Note: In order to stream the response, wrap the completion call in\n # copilotkit_stream and set stream=True.\n response = await copilotkit_stream(\n completion(\n\n # 2.1 Specify the model to use\n model=\"openai/gpt-4o\",\n messages=[\n {\n \"role\": \"system\", \n \"content\": system_prompt\n },\n *self.state.messages\n ],\n\n # 2.2 Bind the tools to the model\n tools=[\n *self.state.copilotkit.actions,\n WRITE_DOCUMENT_TOOL\n ],\n\n # 2.3 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n stream=True\n )\n )\n\n message = response.choices[0].message\n\n # 3. Append the message to the messages in state\n self.state.messages.append(message)\n\n # 4. Handle tool call\n if message.get(\"tool_calls\"):\n tool_call = message[\"tool_calls\"][0]\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"function\"][\"name\"]\n tool_call_args = json.loads(tool_call[\"function\"][\"arguments\"])\n\n if tool_call_name == \"write_document_local\":\n self.state.document = tool_call_args[\"document\"]\n\n # 4.1 Append the result to the messages in state\n self.state.messages.append({\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n })\n\n # 4.2 Append a tool call to confirm changes\n self.state.messages.append({\n \"role\": \"assistant\",\n \"content\": \"\",\n \"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n })\n\n return \"route_end\"\n\n # 5. 
If our tool was not called, return to the end route\n return \"route_end\"\n\n @listen(\"route_end\")\n async def end(self):\n \"\"\"\n End the flow.\n \"\"\"\n", + "content": "from typing import Literal, List\nfrom pydantic import BaseModel\n\nfrom llama_index.core.workflow import Context\nfrom llama_index.llms.openai import OpenAI\nfrom llama_index.protocols.ag_ui.events import StateSnapshotWorkflowEvent\nfrom llama_index.protocols.ag_ui.router import get_ag_ui_workflow_router\n\n\nclass Ingredient(BaseModel):\n icon: str\n name: str\n amount: str\n\nclass Recipe(BaseModel):\n skill_level: str\n special_preferences: List[str]\n cooking_time: str\n ingredients: List[Ingredient]\n instructions: List[str]\n\n\nasync def update_recipe(ctx: Context, recipe: Recipe) -> str:\n \"\"\"Useful for recording a recipe to shared state.\"\"\"\n recipe = Recipe.model_validate(recipe)\n\n async with ctx.store.edit_state() as global_state:\n state = global_state.get(\"state\", {})\n if state is None:\n state = {}\n\n state[\"recipe\"] = recipe.model_dump()\n\n ctx.write_event_to_stream(\n StateSnapshotWorkflowEvent(\n snapshot=state\n )\n )\n\n global_state[\"state\"] = state\n\n return \"Recipe updated!\"\n\n\nshared_state_router = get_ag_ui_workflow_router(\n llm=OpenAI(model=\"gpt-4.1\"),\n frontend_tools=[update_recipe],\n initial_state={\n \"recipe\": None,\n }\n)\n\n\n", "language": "python", "type": "file" } diff --git a/typescript-sdk/integrations/llamaindex/server-py/pyproject.toml b/typescript-sdk/integrations/llamaindex/server-py/pyproject.toml index 737496655..6c323b06a 100644 --- a/typescript-sdk/integrations/llamaindex/server-py/pyproject.toml +++ b/typescript-sdk/integrations/llamaindex/server-py/pyproject.toml @@ -9,9 +9,9 @@ description = "Add your description here" readme = "README.md" requires-python = ">=3.9, <3.14" dependencies = [ - "llama-index-core>=0.12.41,<0.13", - "llama-index-agent-openai>=0.4.9,<0.5", - "llama-index-protocols-ag-ui>=0.1.2", + "llama-index-core>=0.14.0,<0.15", + "llama-index-llms-openai>=0.5.0,<0.6.0", + "llama-index-protocols-ag-ui>=0.2.2,<0.3", "jsonpatch>=1.33", "uvicorn>=0.27.0", "fastapi>=0.100.0", @@ -26,5 +26,8 @@ include = ["server/"] [tool.hatch.build.targets.wheel] include = ["server/"] +[tool.hatch.metadata] +allow-direct-references = true + [project.scripts] dev = "server:main" diff --git a/typescript-sdk/integrations/llamaindex/server-py/server/routers/agentic_generative_ui.py b/typescript-sdk/integrations/llamaindex/server-py/server/routers/agentic_generative_ui.py index cfd6ab4c2..3b2facd52 100644 --- a/typescript-sdk/integrations/llamaindex/server-py/server/routers/agentic_generative_ui.py +++ b/typescript-sdk/integrations/llamaindex/server-py/server/routers/agentic_generative_ui.py @@ -19,65 +19,69 @@ async def run_task( ctx: Context, task: Task, ) -> str: """Execute any list of steps needed to complete a task. 
Useful for anything the user wants to do.""" - state = await ctx.get("state", default={}) - task = Task.model_validate(task) - state = { - "steps": [ - { - "description": step.description, - "status": "pending" - } - for step in task.steps - ] - } + async with ctx.store.edit_state() as global_state: + state = global_state.get("state", {}) + task = Task.model_validate(task) - # Send initial state snapshot - ctx.write_event_to_stream( - StateSnapshotWorkflowEvent( - snapshot=state - ) - ) - - # Sleep for 1 second - await asyncio.sleep(1.0) - - # Create a copy to track changes for JSON patches - previous_state = copy.deepcopy(state) + state = { + "steps": [ + { + "description": step.description, + "status": "pending" + } + for step in task.steps + ] + } - # Update each step and send deltas - for i, step in enumerate(state["steps"]): - step["status"] = "completed" - - # Generate JSON patch from previous state to current state - patch = jsonpatch.make_patch(previous_state, state) - - # Send state delta event + # Send initial state snapshot ctx.write_event_to_stream( - StateDeltaWorkflowEvent( - delta=patch.patch + StateSnapshotWorkflowEvent( + snapshot=state ) ) - - # Update previous state for next iteration - previous_state = copy.deepcopy(state) - + # Sleep for 1 second await asyncio.sleep(1.0) - # Optionally send a final snapshot to the client - ctx.write_event_to_stream( - StateSnapshotWorkflowEvent( - snapshot=state + # Create a copy to track changes for JSON patches + previous_state = copy.deepcopy(state) + + # Update each step and send deltas + for i, step in enumerate(state["steps"]): + step["status"] = "completed" + + # Generate JSON patch from previous state to current state + patch = jsonpatch.make_patch(previous_state, state) + + # Send state delta event + ctx.write_event_to_stream( + StateDeltaWorkflowEvent( + delta=patch.patch + ) + ) + + # Update previous state for next iteration + previous_state = copy.deepcopy(state) + + # Sleep for 1 second + await asyncio.sleep(1.0) + + # Optionally send a final snapshot to the client + ctx.write_event_to_stream( + StateSnapshotWorkflowEvent( + snapshot=state + ) ) - ) - return "Done!" + global_state["state"] = state + + return "Task Done!" agentic_generative_ui_router = get_ag_ui_workflow_router( llm=OpenAI(model="gpt-4.1"), - frontend_tools=[run_task], + backend_tools=[run_task], initial_state={}, system_prompt=( "You are a helpful assistant that can help the user with their task. " diff --git a/typescript-sdk/integrations/llamaindex/server-py/server/routers/shared_state.py b/typescript-sdk/integrations/llamaindex/server-py/server/routers/shared_state.py index a81d5db25..cf29a0f7b 100644 --- a/typescript-sdk/integrations/llamaindex/server-py/server/routers/shared_state.py +++ b/typescript-sdk/integrations/llamaindex/server-py/server/routers/shared_state.py @@ -24,19 +24,20 @@ async def update_recipe(ctx: Context, recipe: Recipe) -> str: """Useful for recording a recipe to shared state.""" recipe = Recipe.model_validate(recipe) - state = await ctx.get("state") - if state is None: - state = {} + async with ctx.store.edit_state() as global_state: + state = global_state.get("state", {}) + if state is None: + state = {} - state["recipe"] = recipe.model_dump() + state["recipe"] = recipe.model_dump() - ctx.write_event_to_stream( - StateSnapshotWorkflowEvent( - snapshot=state + ctx.write_event_to_stream( + StateSnapshotWorkflowEvent( + snapshot=state + ) ) - ) - await ctx.set("state", state) + global_state["state"] = state return "Recipe updated!" 
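The two router hunks above share one migration: the removed `await ctx.get("state")` / `await ctx.set("state", state)` pair becomes the `async with ctx.store.edit_state()` context manager that accompanies the new llama-index-core 0.14 pin in pyproject.toml, so the read, the mutation, and the write-back all happen while the state is held open. A minimal sketch of the pattern, assuming only the names visible in the diff (`Context`, `ctx.store.edit_state()`, `StateSnapshotWorkflowEvent`); the `bump_counter` tool and its `counter` key are hypothetical:

    from llama_index.core.workflow import Context
    from llama_index.protocols.ag_ui.events import StateSnapshotWorkflowEvent

    async def bump_counter(ctx: Context) -> str:
        """Hypothetical tool: increment a counter in shared state."""
        async with ctx.store.edit_state() as global_state:
            # Tolerate a None slot, as update_recipe does above.
            state = global_state.get("state", {}) or {}
            state["counter"] = state.get("counter", 0) + 1

            # Stream the fresh snapshot while the state is still held open.
            ctx.write_event_to_stream(StateSnapshotWorkflowEvent(snapshot=state))

            # Assigning back inside the block is what persists the change on exit.
            global_state["state"] = state
        return "Counter updated!"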
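Where a whole-object snapshot per change would be wasteful, the agentic_generative_ui router layers RFC 6902 deltas on the same pattern: one StateSnapshotWorkflowEvent up front, then a StateDeltaWorkflowEvent per completed step carrying `jsonpatch.make_patch(previous, current).patch`. A sketch of just that loop, assuming StateDeltaWorkflowEvent sits beside StateSnapshotWorkflowEvent in `llama_index.protocols.ag_ui.events` (the router's usage suggests it) and eliding the surrounding tool:

    import asyncio
    import copy

    import jsonpatch
    from llama_index.protocols.ag_ui.events import (
        StateDeltaWorkflowEvent,
        StateSnapshotWorkflowEvent,
    )

    async def stream_step_progress(ctx, state: dict) -> None:
        # One full snapshot up front, then a cheap delta per completed step.
        ctx.write_event_to_stream(StateSnapshotWorkflowEvent(snapshot=state))
        previous_state = copy.deepcopy(state)

        for step in state["steps"]:
            step["status"] = "completed"

            # RFC 6902 patch describing only what changed since the last event.
            patch = jsonpatch.make_patch(previous_state, state)
            ctx.write_event_to_stream(StateDeltaWorkflowEvent(delta=patch.patch))

            previous_state = copy.deepcopy(state)
            await asyncio.sleep(1.0)  # pacing, matching the hunk above

Clients replay each delta against the last state they received, which is why the hunk deep-copies previous_state after every emission rather than diffing against the original.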
diff --git a/typescript-sdk/integrations/llamaindex/server-py/uv.lock b/typescript-sdk/integrations/llamaindex/server-py/uv.lock index d1fa52c5b..72fbbd7fb 100644 --- a/typescript-sdk/integrations/llamaindex/server-py/uv.lock +++ b/typescript-sdk/integrations/llamaindex/server-py/uv.lock @@ -199,7 +199,7 @@ wheels = [ [[package]] name = "banks" -version = "2.1.2" +version = "2.2.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "deprecated" }, @@ -209,9 +209,9 @@ dependencies = [ { name = "platformdirs" }, { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/77/34/2b6697f02ffb68bee50e5fd37d6c64432244d3245603fd62950169dfed7e/banks-2.1.2.tar.gz", hash = "sha256:a0651db9d14b57fa2e115e78f68dbb1b36fe226ad6eef96192542908b1d20c1f", size = 173332, upload-time = "2025-04-20T07:09:21.674Z" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/f8/25ef24814f77f3fd7f0fd3bd1ef3749e38a9dbd23502fbb53034de49900c/banks-2.2.0.tar.gz", hash = "sha256:d1446280ce6e00301e3e952dd754fd8cee23ff277d29ed160994a84d0d7ffe62", size = 179052, upload-time = "2025-07-18T16:28:26.892Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/04/4a/7fdca29d1db62f5f5c3446bf8f668beacdb0b5a8aff4247574ddfddc6bcd/banks-2.1.2-py3-none-any.whl", hash = "sha256:7fba451069f6bea376483b8136a0f29cb1e6883133626d00e077e20a3d102c0e", size = 28064, upload-time = "2025-04-20T07:09:20.201Z" }, + { url = "https://files.pythonhosted.org/packages/b4/d6/f9168956276934162ec8d48232f9920f2985ee45aa7602e3c6b4bc203613/banks-2.2.0-py3-none-any.whl", hash = "sha256:963cd5c85a587b122abde4f4064078def35c50c688c1b9d36f43c92503854e7d", size = 29244, upload-time = "2025-07-18T16:28:27.835Z" }, ] [[package]] @@ -670,73 +670,74 @@ wheels = [ [[package]] name = "jiter" -version = "0.10.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ee/9d/ae7ddb4b8ab3fb1b51faf4deb36cb48a4fbbd7cb36bad6a5fca4741306f7/jiter-0.10.0.tar.gz", hash = "sha256:07a7142c38aacc85194391108dc91b5b57093c978a9932bd86a36862759d9500", size = 162759, upload-time = "2025-05-18T19:04:59.73Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/be/7e/4011b5c77bec97cb2b572f566220364e3e21b51c48c5bd9c4a9c26b41b67/jiter-0.10.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:cd2fb72b02478f06a900a5782de2ef47e0396b3e1f7d5aba30daeb1fce66f303", size = 317215, upload-time = "2025-05-18T19:03:04.303Z" }, - { url = "https://files.pythonhosted.org/packages/8a/4f/144c1b57c39692efc7ea7d8e247acf28e47d0912800b34d0ad815f6b2824/jiter-0.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:32bb468e3af278f095d3fa5b90314728a6916d89ba3d0ffb726dd9bf7367285e", size = 322814, upload-time = "2025-05-18T19:03:06.433Z" }, - { url = "https://files.pythonhosted.org/packages/63/1f/db977336d332a9406c0b1f0b82be6f71f72526a806cbb2281baf201d38e3/jiter-0.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa8b3e0068c26ddedc7abc6fac37da2d0af16b921e288a5a613f4b86f050354f", size = 345237, upload-time = "2025-05-18T19:03:07.833Z" }, - { url = "https://files.pythonhosted.org/packages/d7/1c/aa30a4a775e8a672ad7f21532bdbfb269f0706b39c6ff14e1f86bdd9e5ff/jiter-0.10.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:286299b74cc49e25cd42eea19b72aa82c515d2f2ee12d11392c56d8701f52224", size = 370999, upload-time = "2025-05-18T19:03:09.338Z" }, - { url = 
"https://files.pythonhosted.org/packages/35/df/f8257abc4207830cb18880781b5f5b716bad5b2a22fb4330cfd357407c5b/jiter-0.10.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6ed5649ceeaeffc28d87fb012d25a4cd356dcd53eff5acff1f0466b831dda2a7", size = 491109, upload-time = "2025-05-18T19:03:11.13Z" }, - { url = "https://files.pythonhosted.org/packages/06/76/9e1516fd7b4278aa13a2cc7f159e56befbea9aa65c71586305e7afa8b0b3/jiter-0.10.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2ab0051160cb758a70716448908ef14ad476c3774bd03ddce075f3c1f90a3d6", size = 388608, upload-time = "2025-05-18T19:03:12.911Z" }, - { url = "https://files.pythonhosted.org/packages/6d/64/67750672b4354ca20ca18d3d1ccf2c62a072e8a2d452ac3cf8ced73571ef/jiter-0.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03997d2f37f6b67d2f5c475da4412be584e1cec273c1cfc03d642c46db43f8cf", size = 352454, upload-time = "2025-05-18T19:03:14.741Z" }, - { url = "https://files.pythonhosted.org/packages/96/4d/5c4e36d48f169a54b53a305114be3efa2bbffd33b648cd1478a688f639c1/jiter-0.10.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c404a99352d839fed80d6afd6c1d66071f3bacaaa5c4268983fc10f769112e90", size = 391833, upload-time = "2025-05-18T19:03:16.426Z" }, - { url = "https://files.pythonhosted.org/packages/0b/de/ce4a6166a78810bd83763d2fa13f85f73cbd3743a325469a4a9289af6dae/jiter-0.10.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66e989410b6666d3ddb27a74c7e50d0829704ede652fd4c858e91f8d64b403d0", size = 523646, upload-time = "2025-05-18T19:03:17.704Z" }, - { url = "https://files.pythonhosted.org/packages/a2/a6/3bc9acce53466972964cf4ad85efecb94f9244539ab6da1107f7aed82934/jiter-0.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b532d3af9ef4f6374609a3bcb5e05a1951d3bf6190dc6b176fdb277c9bbf15ee", size = 514735, upload-time = "2025-05-18T19:03:19.44Z" }, - { url = "https://files.pythonhosted.org/packages/b4/d8/243c2ab8426a2a4dea85ba2a2ba43df379ccece2145320dfd4799b9633c5/jiter-0.10.0-cp310-cp310-win32.whl", hash = "sha256:da9be20b333970e28b72edc4dff63d4fec3398e05770fb3205f7fb460eb48dd4", size = 210747, upload-time = "2025-05-18T19:03:21.184Z" }, - { url = "https://files.pythonhosted.org/packages/37/7a/8021bd615ef7788b98fc76ff533eaac846322c170e93cbffa01979197a45/jiter-0.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:f59e533afed0c5b0ac3eba20d2548c4a550336d8282ee69eb07b37ea526ee4e5", size = 207484, upload-time = "2025-05-18T19:03:23.046Z" }, - { url = "https://files.pythonhosted.org/packages/1b/dd/6cefc6bd68b1c3c979cecfa7029ab582b57690a31cd2f346c4d0ce7951b6/jiter-0.10.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:3bebe0c558e19902c96e99217e0b8e8b17d570906e72ed8a87170bc290b1e978", size = 317473, upload-time = "2025-05-18T19:03:25.942Z" }, - { url = "https://files.pythonhosted.org/packages/be/cf/fc33f5159ce132be1d8dd57251a1ec7a631c7df4bd11e1cd198308c6ae32/jiter-0.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:558cc7e44fd8e507a236bee6a02fa17199ba752874400a0ca6cd6e2196cdb7dc", size = 321971, upload-time = "2025-05-18T19:03:27.255Z" }, - { url = "https://files.pythonhosted.org/packages/68/a4/da3f150cf1d51f6c472616fb7650429c7ce053e0c962b41b68557fdf6379/jiter-0.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d613e4b379a07d7c8453c5712ce7014e86c6ac93d990a0b8e7377e18505e98d", size = 345574, upload-time = "2025-05-18T19:03:28.63Z" }, - { url = 
"https://files.pythonhosted.org/packages/84/34/6e8d412e60ff06b186040e77da5f83bc158e9735759fcae65b37d681f28b/jiter-0.10.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f62cf8ba0618eda841b9bf61797f21c5ebd15a7a1e19daab76e4e4b498d515b2", size = 371028, upload-time = "2025-05-18T19:03:30.292Z" }, - { url = "https://files.pythonhosted.org/packages/fb/d9/9ee86173aae4576c35a2f50ae930d2ccb4c4c236f6cb9353267aa1d626b7/jiter-0.10.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:919d139cdfa8ae8945112398511cb7fca58a77382617d279556b344867a37e61", size = 491083, upload-time = "2025-05-18T19:03:31.654Z" }, - { url = "https://files.pythonhosted.org/packages/d9/2c/f955de55e74771493ac9e188b0f731524c6a995dffdcb8c255b89c6fb74b/jiter-0.10.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13ddbc6ae311175a3b03bd8994881bc4635c923754932918e18da841632349db", size = 388821, upload-time = "2025-05-18T19:03:33.184Z" }, - { url = "https://files.pythonhosted.org/packages/81/5a/0e73541b6edd3f4aada586c24e50626c7815c561a7ba337d6a7eb0a915b4/jiter-0.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c440ea003ad10927a30521a9062ce10b5479592e8a70da27f21eeb457b4a9c5", size = 352174, upload-time = "2025-05-18T19:03:34.965Z" }, - { url = "https://files.pythonhosted.org/packages/1c/c0/61eeec33b8c75b31cae42be14d44f9e6fe3ac15a4e58010256ac3abf3638/jiter-0.10.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc347c87944983481e138dea467c0551080c86b9d21de6ea9306efb12ca8f606", size = 391869, upload-time = "2025-05-18T19:03:36.436Z" }, - { url = "https://files.pythonhosted.org/packages/41/22/5beb5ee4ad4ef7d86f5ea5b4509f680a20706c4a7659e74344777efb7739/jiter-0.10.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:13252b58c1f4d8c5b63ab103c03d909e8e1e7842d302473f482915d95fefd605", size = 523741, upload-time = "2025-05-18T19:03:38.168Z" }, - { url = "https://files.pythonhosted.org/packages/ea/10/768e8818538e5817c637b0df52e54366ec4cebc3346108a4457ea7a98f32/jiter-0.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7d1bbf3c465de4a24ab12fb7766a0003f6f9bce48b8b6a886158c4d569452dc5", size = 514527, upload-time = "2025-05-18T19:03:39.577Z" }, - { url = "https://files.pythonhosted.org/packages/73/6d/29b7c2dc76ce93cbedabfd842fc9096d01a0550c52692dfc33d3cc889815/jiter-0.10.0-cp311-cp311-win32.whl", hash = "sha256:db16e4848b7e826edca4ccdd5b145939758dadf0dc06e7007ad0e9cfb5928ae7", size = 210765, upload-time = "2025-05-18T19:03:41.271Z" }, - { url = "https://files.pythonhosted.org/packages/c2/c9/d394706deb4c660137caf13e33d05a031d734eb99c051142e039d8ceb794/jiter-0.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c9c1d5f10e18909e993f9641f12fe1c77b3e9b533ee94ffa970acc14ded3812", size = 209234, upload-time = "2025-05-18T19:03:42.918Z" }, - { url = "https://files.pythonhosted.org/packages/6d/b5/348b3313c58f5fbfb2194eb4d07e46a35748ba6e5b3b3046143f3040bafa/jiter-0.10.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1e274728e4a5345a6dde2d343c8da018b9d4bd4350f5a472fa91f66fda44911b", size = 312262, upload-time = "2025-05-18T19:03:44.637Z" }, - { url = "https://files.pythonhosted.org/packages/9c/4a/6a2397096162b21645162825f058d1709a02965606e537e3304b02742e9b/jiter-0.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7202ae396446c988cb2a5feb33a543ab2165b786ac97f53b59aafb803fef0744", size = 320124, upload-time = "2025-05-18T19:03:46.341Z" }, - { url = 
"https://files.pythonhosted.org/packages/2a/85/1ce02cade7516b726dd88f59a4ee46914bf79d1676d1228ef2002ed2f1c9/jiter-0.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23ba7722d6748b6920ed02a8f1726fb4b33e0fd2f3f621816a8b486c66410ab2", size = 345330, upload-time = "2025-05-18T19:03:47.596Z" }, - { url = "https://files.pythonhosted.org/packages/75/d0/bb6b4f209a77190ce10ea8d7e50bf3725fc16d3372d0a9f11985a2b23eff/jiter-0.10.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:371eab43c0a288537d30e1f0b193bc4eca90439fc08a022dd83e5e07500ed026", size = 369670, upload-time = "2025-05-18T19:03:49.334Z" }, - { url = "https://files.pythonhosted.org/packages/a0/f5/a61787da9b8847a601e6827fbc42ecb12be2c925ced3252c8ffcb56afcaf/jiter-0.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c675736059020365cebc845a820214765162728b51ab1e03a1b7b3abb70f74c", size = 489057, upload-time = "2025-05-18T19:03:50.66Z" }, - { url = "https://files.pythonhosted.org/packages/12/e4/6f906272810a7b21406c760a53aadbe52e99ee070fc5c0cb191e316de30b/jiter-0.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c5867d40ab716e4684858e4887489685968a47e3ba222e44cde6e4a2154f959", size = 389372, upload-time = "2025-05-18T19:03:51.98Z" }, - { url = "https://files.pythonhosted.org/packages/e2/ba/77013b0b8ba904bf3762f11e0129b8928bff7f978a81838dfcc958ad5728/jiter-0.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:395bb9a26111b60141757d874d27fdea01b17e8fac958b91c20128ba8f4acc8a", size = 352038, upload-time = "2025-05-18T19:03:53.703Z" }, - { url = "https://files.pythonhosted.org/packages/67/27/c62568e3ccb03368dbcc44a1ef3a423cb86778a4389e995125d3d1aaa0a4/jiter-0.10.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6842184aed5cdb07e0c7e20e5bdcfafe33515ee1741a6835353bb45fe5d1bd95", size = 391538, upload-time = "2025-05-18T19:03:55.046Z" }, - { url = "https://files.pythonhosted.org/packages/c0/72/0d6b7e31fc17a8fdce76164884edef0698ba556b8eb0af9546ae1a06b91d/jiter-0.10.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:62755d1bcea9876770d4df713d82606c8c1a3dca88ff39046b85a048566d56ea", size = 523557, upload-time = "2025-05-18T19:03:56.386Z" }, - { url = "https://files.pythonhosted.org/packages/2f/09/bc1661fbbcbeb6244bd2904ff3a06f340aa77a2b94e5a7373fd165960ea3/jiter-0.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:533efbce2cacec78d5ba73a41756beff8431dfa1694b6346ce7af3a12c42202b", size = 514202, upload-time = "2025-05-18T19:03:57.675Z" }, - { url = "https://files.pythonhosted.org/packages/1b/84/5a5d5400e9d4d54b8004c9673bbe4403928a00d28529ff35b19e9d176b19/jiter-0.10.0-cp312-cp312-win32.whl", hash = "sha256:8be921f0cadd245e981b964dfbcd6fd4bc4e254cdc069490416dd7a2632ecc01", size = 211781, upload-time = "2025-05-18T19:03:59.025Z" }, - { url = "https://files.pythonhosted.org/packages/9b/52/7ec47455e26f2d6e5f2ea4951a0652c06e5b995c291f723973ae9e724a65/jiter-0.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7c7d785ae9dda68c2678532a5a1581347e9c15362ae9f6e68f3fdbfb64f2e49", size = 206176, upload-time = "2025-05-18T19:04:00.305Z" }, - { url = "https://files.pythonhosted.org/packages/2e/b0/279597e7a270e8d22623fea6c5d4eeac328e7d95c236ed51a2b884c54f70/jiter-0.10.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0588107ec8e11b6f5ef0e0d656fb2803ac6cf94a96b2b9fc675c0e3ab5e8644", size = 311617, upload-time = "2025-05-18T19:04:02.078Z" }, - { url = 
"https://files.pythonhosted.org/packages/91/e3/0916334936f356d605f54cc164af4060e3e7094364add445a3bc79335d46/jiter-0.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cafc4628b616dc32530c20ee53d71589816cf385dd9449633e910d596b1f5c8a", size = 318947, upload-time = "2025-05-18T19:04:03.347Z" }, - { url = "https://files.pythonhosted.org/packages/6a/8e/fd94e8c02d0e94539b7d669a7ebbd2776e51f329bb2c84d4385e8063a2ad/jiter-0.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:520ef6d981172693786a49ff5b09eda72a42e539f14788124a07530f785c3ad6", size = 344618, upload-time = "2025-05-18T19:04:04.709Z" }, - { url = "https://files.pythonhosted.org/packages/6f/b0/f9f0a2ec42c6e9c2e61c327824687f1e2415b767e1089c1d9135f43816bd/jiter-0.10.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:554dedfd05937f8fc45d17ebdf298fe7e0c77458232bcb73d9fbbf4c6455f5b3", size = 368829, upload-time = "2025-05-18T19:04:06.912Z" }, - { url = "https://files.pythonhosted.org/packages/e8/57/5bbcd5331910595ad53b9fd0c610392ac68692176f05ae48d6ce5c852967/jiter-0.10.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bc299da7789deacf95f64052d97f75c16d4fc8c4c214a22bf8d859a4288a1c2", size = 491034, upload-time = "2025-05-18T19:04:08.222Z" }, - { url = "https://files.pythonhosted.org/packages/9b/be/c393df00e6e6e9e623a73551774449f2f23b6ec6a502a3297aeeece2c65a/jiter-0.10.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5161e201172de298a8a1baad95eb85db4fb90e902353b1f6a41d64ea64644e25", size = 388529, upload-time = "2025-05-18T19:04:09.566Z" }, - { url = "https://files.pythonhosted.org/packages/42/3e/df2235c54d365434c7f150b986a6e35f41ebdc2f95acea3036d99613025d/jiter-0.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e2227db6ba93cb3e2bf67c87e594adde0609f146344e8207e8730364db27041", size = 350671, upload-time = "2025-05-18T19:04:10.98Z" }, - { url = "https://files.pythonhosted.org/packages/c6/77/71b0b24cbcc28f55ab4dbfe029f9a5b73aeadaba677843fc6dc9ed2b1d0a/jiter-0.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15acb267ea5e2c64515574b06a8bf393fbfee6a50eb1673614aa45f4613c0cca", size = 390864, upload-time = "2025-05-18T19:04:12.722Z" }, - { url = "https://files.pythonhosted.org/packages/6a/d3/ef774b6969b9b6178e1d1e7a89a3bd37d241f3d3ec5f8deb37bbd203714a/jiter-0.10.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:901b92f2e2947dc6dfcb52fd624453862e16665ea909a08398dde19c0731b7f4", size = 522989, upload-time = "2025-05-18T19:04:14.261Z" }, - { url = "https://files.pythonhosted.org/packages/0c/41/9becdb1d8dd5d854142f45a9d71949ed7e87a8e312b0bede2de849388cb9/jiter-0.10.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d0cb9a125d5a3ec971a094a845eadde2db0de85b33c9f13eb94a0c63d463879e", size = 513495, upload-time = "2025-05-18T19:04:15.603Z" }, - { url = "https://files.pythonhosted.org/packages/9c/36/3468e5a18238bdedae7c4d19461265b5e9b8e288d3f86cd89d00cbb48686/jiter-0.10.0-cp313-cp313-win32.whl", hash = "sha256:48a403277ad1ee208fb930bdf91745e4d2d6e47253eedc96e2559d1e6527006d", size = 211289, upload-time = "2025-05-18T19:04:17.541Z" }, - { url = "https://files.pythonhosted.org/packages/7e/07/1c96b623128bcb913706e294adb5f768fb7baf8db5e1338ce7b4ee8c78ef/jiter-0.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:75f9eb72ecb640619c29bf714e78c9c46c9c4eaafd644bf78577ede459f330d4", size = 205074, upload-time = "2025-05-18T19:04:19.21Z" }, - { url = 
"https://files.pythonhosted.org/packages/54/46/caa2c1342655f57d8f0f2519774c6d67132205909c65e9aa8255e1d7b4f4/jiter-0.10.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:28ed2a4c05a1f32ef0e1d24c2611330219fed727dae01789f4a335617634b1ca", size = 318225, upload-time = "2025-05-18T19:04:20.583Z" }, - { url = "https://files.pythonhosted.org/packages/43/84/c7d44c75767e18946219ba2d703a5a32ab37b0bc21886a97bc6062e4da42/jiter-0.10.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14a4c418b1ec86a195f1ca69da8b23e8926c752b685af665ce30777233dfe070", size = 350235, upload-time = "2025-05-18T19:04:22.363Z" }, - { url = "https://files.pythonhosted.org/packages/01/16/f5a0135ccd968b480daad0e6ab34b0c7c5ba3bc447e5088152696140dcb3/jiter-0.10.0-cp313-cp313t-win_amd64.whl", hash = "sha256:d7bfed2fe1fe0e4dda6ef682cee888ba444b21e7a6553e03252e4feb6cf0adca", size = 207278, upload-time = "2025-05-18T19:04:23.627Z" }, - { url = "https://files.pythonhosted.org/packages/98/fd/aced428e2bd3c6c1132f67c5a708f9e7fd161d0ca8f8c5862b17b93cdf0a/jiter-0.10.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bd6292a43c0fc09ce7c154ec0fa646a536b877d1e8f2f96c19707f65355b5a4d", size = 317665, upload-time = "2025-05-18T19:04:43.417Z" }, - { url = "https://files.pythonhosted.org/packages/b6/2e/47d42f15d53ed382aef8212a737101ae2720e3697a954f9b95af06d34e89/jiter-0.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:39de429dcaeb6808d75ffe9effefe96a4903c6a4b376b2f6d08d77c1aaee2f18", size = 312152, upload-time = "2025-05-18T19:04:44.797Z" }, - { url = "https://files.pythonhosted.org/packages/7b/02/aae834228ef4834fc18718724017995ace8da5f70aa1ec225b9bc2b2d7aa/jiter-0.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52ce124f13a7a616fad3bb723f2bfb537d78239d1f7f219566dc52b6f2a9e48d", size = 346708, upload-time = "2025-05-18T19:04:46.127Z" }, - { url = "https://files.pythonhosted.org/packages/35/d4/6ff39dee2d0a9abd69d8a3832ce48a3aa644eed75e8515b5ff86c526ca9a/jiter-0.10.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:166f3606f11920f9a1746b2eea84fa2c0a5d50fd313c38bdea4edc072000b0af", size = 371360, upload-time = "2025-05-18T19:04:47.448Z" }, - { url = "https://files.pythonhosted.org/packages/a9/67/c749d962b4eb62445867ae4e64a543cbb5d63cc7d78ada274ac515500a7f/jiter-0.10.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28dcecbb4ba402916034fc14eba7709f250c4d24b0c43fc94d187ee0580af181", size = 492105, upload-time = "2025-05-18T19:04:48.792Z" }, - { url = "https://files.pythonhosted.org/packages/f6/d3/8fe1b1bae5161f27b1891c256668f598fa4c30c0a7dacd668046a6215fca/jiter-0.10.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86c5aa6910f9bebcc7bc4f8bc461aff68504388b43bfe5e5c0bd21efa33b52f4", size = 389577, upload-time = "2025-05-18T19:04:50.13Z" }, - { url = "https://files.pythonhosted.org/packages/ef/28/ecb19d789b4777898a4252bfaac35e3f8caf16c93becd58dcbaac0dc24ad/jiter-0.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ceeb52d242b315d7f1f74b441b6a167f78cea801ad7c11c36da77ff2d42e8a28", size = 353849, upload-time = "2025-05-18T19:04:51.443Z" }, - { url = "https://files.pythonhosted.org/packages/77/69/261f798f84790da6482ebd8c87ec976192b8c846e79444d0a2e0d33ebed8/jiter-0.10.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ff76d8887c8c8ee1e772274fcf8cc1071c2c58590d13e33bd12d02dc9a560397", size = 392029, upload-time = "2025-05-18T19:04:52.792Z" }, - { url = 
"https://files.pythonhosted.org/packages/cb/08/b8d15140d4d91f16faa2f5d416c1a71ab1bbe2b66c57197b692d04c0335f/jiter-0.10.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a9be4d0fa2b79f7222a88aa488bd89e2ae0a0a5b189462a12def6ece2faa45f1", size = 524386, upload-time = "2025-05-18T19:04:54.203Z" }, - { url = "https://files.pythonhosted.org/packages/9b/1d/23c41765cc95c0e23ac492a88450d34bf0fd87a37218d1b97000bffe0f53/jiter-0.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9ab7fd8738094139b6c1ab1822d6f2000ebe41515c537235fd45dabe13ec9324", size = 515234, upload-time = "2025-05-18T19:04:55.838Z" }, - { url = "https://files.pythonhosted.org/packages/9f/14/381d8b151132e79790579819c3775be32820569f23806769658535fe467f/jiter-0.10.0-cp39-cp39-win32.whl", hash = "sha256:5f51e048540dd27f204ff4a87f5d79294ea0aa3aa552aca34934588cf27023cf", size = 211436, upload-time = "2025-05-18T19:04:57.183Z" }, - { url = "https://files.pythonhosted.org/packages/59/66/f23ae51dea8ee8ce429027b60008ca895d0fa0704f0c7fe5f09014a6cffb/jiter-0.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:1b28302349dc65703a9e4ead16f163b1c339efffbe1049c30a44b001a2a4fff9", size = 208777, upload-time = "2025-05-18T19:04:58.454Z" }, +version = "0.11.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9d/c0/a3bb4cc13aced219dd18191ea66e874266bd8aa7b96744e495e1c733aa2d/jiter-0.11.0.tar.gz", hash = "sha256:1d9637eaf8c1d6a63d6562f2a6e5ab3af946c66037eb1b894e8fad75422266e4", size = 167094, upload-time = "2025-09-15T09:20:38.212Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/21/7dd1235a19e26979be6098e87e4cced2e061752f3a40a17bbce6dea7fae1/jiter-0.11.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3893ce831e1c0094a83eeaf56c635a167d6fa8cc14393cc14298fd6fdc2a2449", size = 309875, upload-time = "2025-09-15T09:18:48.41Z" }, + { url = "https://files.pythonhosted.org/packages/71/f9/462b54708aa85b135733ccba70529dd68a18511bf367a87c5fd28676c841/jiter-0.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:25c625b9b61b5a8725267fdf867ef2e51b429687f6a4eef211f4612e95607179", size = 316505, upload-time = "2025-09-15T09:18:51.057Z" }, + { url = "https://files.pythonhosted.org/packages/bd/40/14e2eeaac6a47bff27d213834795472355fd39769272eb53cb7aa83d5aa8/jiter-0.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd4ca85fb6a62cf72e1c7f5e34ddef1b660ce4ed0886ec94a1ef9777d35eaa1f", size = 337613, upload-time = "2025-09-15T09:18:52.358Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ed/a5f1f8419c92b150a7c7fb5ccba1fb1e192887ad713d780e70874f0ce996/jiter-0.11.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:572208127034725e79c28437b82414028c3562335f2b4f451d98136d0fc5f9cd", size = 361438, upload-time = "2025-09-15T09:18:54.637Z" }, + { url = "https://files.pythonhosted.org/packages/dd/f5/70682c023dfcdd463a53faf5d30205a7d99c51d70d3e303c932d0936e5a2/jiter-0.11.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:494ba627c7f550ad3dabb21862864b8f2216098dc18ff62f37b37796f2f7c325", size = 486180, upload-time = "2025-09-15T09:18:56.158Z" }, + { url = "https://files.pythonhosted.org/packages/7c/39/020d08cbab4eab48142ad88b837c41eb08a15c0767fdb7c0d3265128a44b/jiter-0.11.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8da18a99f58bca3ecc2d2bba99cac000a924e115b6c4f0a2b98f752b6fbf39a", size = 376681, upload-time = "2025-09-15T09:18:57.553Z" }, + { url = 
"https://files.pythonhosted.org/packages/52/10/b86733f6e594cf51dd142f37c602d8df87c554c5844958deaab0de30eb5d/jiter-0.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4ffd3b0fff3fabbb02cc09910c08144db6bb5697a98d227a074401e01ee63dd", size = 348685, upload-time = "2025-09-15T09:18:59.208Z" }, + { url = "https://files.pythonhosted.org/packages/fb/ee/8861665e83a9e703aa5f65fddddb6225428e163e6b0baa95a7f9a8fb9aae/jiter-0.11.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8fe6530aa738a4f7d4e4702aa8f9581425d04036a5f9e25af65ebe1f708f23be", size = 385573, upload-time = "2025-09-15T09:19:00.593Z" }, + { url = "https://files.pythonhosted.org/packages/25/74/05afec03600951f128293813b5a208c9ba1bf587c57a344c05a42a69e1b1/jiter-0.11.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e35d66681c133a03d7e974e7eedae89720fe8ca3bd09f01a4909b86a8adf31f5", size = 516669, upload-time = "2025-09-15T09:19:02.369Z" }, + { url = "https://files.pythonhosted.org/packages/93/d1/2e5bfe147cfbc2a5eef7f73eb75dc5c6669da4fa10fc7937181d93af9495/jiter-0.11.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c59459beca2fbc9718b6f1acb7bfb59ebc3eb4294fa4d40e9cb679dafdcc6c60", size = 508767, upload-time = "2025-09-15T09:19:04.011Z" }, + { url = "https://files.pythonhosted.org/packages/87/50/597f71307e10426b5c082fd05d38c615ddbdd08c3348d8502963307f0652/jiter-0.11.0-cp310-cp310-win32.whl", hash = "sha256:b7b0178417b0dcfc5f259edbc6db2b1f5896093ed9035ee7bab0f2be8854726d", size = 205476, upload-time = "2025-09-15T09:19:05.594Z" }, + { url = "https://files.pythonhosted.org/packages/c7/86/1e5214b3272e311754da26e63edec93a183811d4fc2e0118addec365df8b/jiter-0.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:11df2bf99fb4754abddd7f5d940a48e51f9d11624d6313ca4314145fcad347f0", size = 204708, upload-time = "2025-09-15T09:19:06.955Z" }, + { url = "https://files.pythonhosted.org/packages/38/55/a69fefeef09c2eaabae44b935a1aa81517e49639c0a0c25d861cb18cd7ac/jiter-0.11.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:cb5d9db02979c3f49071fce51a48f4b4e4cf574175fb2b11c7a535fa4867b222", size = 309503, upload-time = "2025-09-15T09:19:08.191Z" }, + { url = "https://files.pythonhosted.org/packages/bd/d5/a6aba9e6551f32f9c127184f398208e4eddb96c59ac065c8a92056089d28/jiter-0.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1dc6a123f3471c4730db7ca8ba75f1bb3dcb6faeb8d46dd781083e7dee88b32d", size = 317688, upload-time = "2025-09-15T09:19:09.918Z" }, + { url = "https://files.pythonhosted.org/packages/bb/f3/5e86f57c1883971cdc8535d0429c2787bf734840a231da30a3be12850562/jiter-0.11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09858f8d230f031c7b8e557429102bf050eea29c77ad9c34c8fe253c5329acb7", size = 337418, upload-time = "2025-09-15T09:19:11.078Z" }, + { url = "https://files.pythonhosted.org/packages/5e/4f/a71d8a24c2a70664970574a8e0b766663f5ef788f7fe1cc20ee0c016d488/jiter-0.11.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dbe2196c4a0ce760925a74ab4456bf644748ab0979762139626ad138f6dac72d", size = 361423, upload-time = "2025-09-15T09:19:13.286Z" }, + { url = "https://files.pythonhosted.org/packages/8f/e5/b09076f4e7fd9471b91e16f9f3dc7330b161b738f3b39b2c37054a36e26a/jiter-0.11.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5beb56d22b63647bafd0b74979216fdee80c580c0c63410be8c11053860ffd09", size = 486367, upload-time = "2025-09-15T09:19:14.546Z" }, + { url = 
"https://files.pythonhosted.org/packages/fb/f1/98cb3a36f5e62f80cd860f0179f948d9eab5a316d55d3e1bab98d9767af5/jiter-0.11.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97025d09ef549795d8dc720a824312cee3253c890ac73c621721ddfc75066789", size = 376335, upload-time = "2025-09-15T09:19:15.939Z" }, + { url = "https://files.pythonhosted.org/packages/9f/d8/ec74886497ea393c29dbd7651ddecc1899e86404a6b1f84a3ddab0ab59fd/jiter-0.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d50880a6da65d8c23a2cf53c412847d9757e74cc9a3b95c5704a1d1a24667347", size = 348981, upload-time = "2025-09-15T09:19:17.568Z" }, + { url = "https://files.pythonhosted.org/packages/24/93/d22ad7fa3b86ade66c86153ceea73094fc2af8b20c59cb7fceab9fea4704/jiter-0.11.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:452d80a1c86c095a242007bd9fc5d21b8a8442307193378f891cb8727e469648", size = 385797, upload-time = "2025-09-15T09:19:19.121Z" }, + { url = "https://files.pythonhosted.org/packages/c8/bd/e25ff4a4df226e9b885f7cb01ee4b9dc74e3000e612d6f723860d71a1f34/jiter-0.11.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e84e58198d4894668eec2da660ffff60e0f3e60afa790ecc50cb12b0e02ca1d4", size = 516597, upload-time = "2025-09-15T09:19:20.301Z" }, + { url = "https://files.pythonhosted.org/packages/be/fb/beda613db7d93ffa2fdd2683f90f2f5dce8daf4bc2d0d2829e7de35308c6/jiter-0.11.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df64edcfc5dd5279a791eea52aa113d432c933119a025b0b5739f90d2e4e75f1", size = 508853, upload-time = "2025-09-15T09:19:22.075Z" }, + { url = "https://files.pythonhosted.org/packages/20/64/c5b0d93490634e41e38e2a15de5d54fdbd2c9f64a19abb0f95305b63373c/jiter-0.11.0-cp311-cp311-win32.whl", hash = "sha256:144fc21337d21b1d048f7f44bf70881e1586401d405ed3a98c95a114a9994982", size = 205140, upload-time = "2025-09-15T09:19:23.351Z" }, + { url = "https://files.pythonhosted.org/packages/a1/e6/c347c0e6f5796e97d4356b7e5ff0ce336498b7f4ef848fae621a56f1ccf3/jiter-0.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:b0f32e644d241293b892b1a6dd8f0b9cc029bfd94c97376b2681c36548aabab7", size = 204311, upload-time = "2025-09-15T09:19:24.591Z" }, + { url = "https://files.pythonhosted.org/packages/ba/b5/3009b112b8f673e568ef79af9863d8309a15f0a8cdcc06ed6092051f377e/jiter-0.11.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:2fb7b377688cc3850bbe5c192a6bd493562a0bc50cbc8b047316428fbae00ada", size = 305510, upload-time = "2025-09-15T09:19:25.893Z" }, + { url = "https://files.pythonhosted.org/packages/fe/82/15514244e03b9e71e086bbe2a6de3e4616b48f07d5f834200c873956fb8c/jiter-0.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a1b7cbe3f25bd0d8abb468ba4302a5d45617ee61b2a7a638f63fee1dc086be99", size = 316521, upload-time = "2025-09-15T09:19:27.525Z" }, + { url = "https://files.pythonhosted.org/packages/92/94/7a2e905f40ad2d6d660e00b68d818f9e29fb87ffe82774f06191e93cbe4a/jiter-0.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0a7f0ec81d5b7588c5cade1eb1925b91436ae6726dc2df2348524aeabad5de6", size = 338214, upload-time = "2025-09-15T09:19:28.727Z" }, + { url = "https://files.pythonhosted.org/packages/a8/9c/5791ed5bdc76f12110158d3316a7a3ec0b1413d018b41c5ed399549d3ad5/jiter-0.11.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07630bb46ea2a6b9c6ed986c6e17e35b26148cce2c535454b26ee3f0e8dcaba1", size = 361280, upload-time = "2025-09-15T09:19:30.013Z" }, + { url = 
"https://files.pythonhosted.org/packages/d4/7f/b7d82d77ff0d2cb06424141000176b53a9e6b16a1125525bb51ea4990c2e/jiter-0.11.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7764f27d28cd4a9cbc61704dfcd80c903ce3aad106a37902d3270cd6673d17f4", size = 487895, upload-time = "2025-09-15T09:19:31.424Z" }, + { url = "https://files.pythonhosted.org/packages/42/44/10a1475d46f1fc1fd5cc2e82c58e7bca0ce5852208e0fa5df2f949353321/jiter-0.11.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d4a6c4a737d486f77f842aeb22807edecb4a9417e6700c7b981e16d34ba7c72", size = 378421, upload-time = "2025-09-15T09:19:32.746Z" }, + { url = "https://files.pythonhosted.org/packages/9a/5f/0dc34563d8164d31d07bc09d141d3da08157a68dcd1f9b886fa4e917805b/jiter-0.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf408d2a0abd919b60de8c2e7bc5eeab72d4dafd18784152acc7c9adc3291591", size = 347932, upload-time = "2025-09-15T09:19:34.612Z" }, + { url = "https://files.pythonhosted.org/packages/f7/de/b68f32a4fcb7b4a682b37c73a0e5dae32180140cd1caf11aef6ad40ddbf2/jiter-0.11.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cdef53eda7d18e799625023e1e250dbc18fbc275153039b873ec74d7e8883e09", size = 386959, upload-time = "2025-09-15T09:19:35.994Z" }, + { url = "https://files.pythonhosted.org/packages/76/0a/c08c92e713b6e28972a846a81ce374883dac2f78ec6f39a0dad9f2339c3a/jiter-0.11.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:53933a38ef7b551dd9c7f1064f9d7bb235bb3168d0fa5f14f0798d1b7ea0d9c5", size = 517187, upload-time = "2025-09-15T09:19:37.426Z" }, + { url = "https://files.pythonhosted.org/packages/89/b5/4a283bec43b15aad54fcae18d951f06a2ec3f78db5708d3b59a48e9c3fbd/jiter-0.11.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:11840d2324c9ab5162fc1abba23bc922124fedcff0d7b7f85fffa291e2f69206", size = 509461, upload-time = "2025-09-15T09:19:38.761Z" }, + { url = "https://files.pythonhosted.org/packages/34/a5/f8bad793010534ea73c985caaeef8cc22dfb1fedb15220ecdf15c623c07a/jiter-0.11.0-cp312-cp312-win32.whl", hash = "sha256:4f01a744d24a5f2bb4a11657a1b27b61dc038ae2e674621a74020406e08f749b", size = 206664, upload-time = "2025-09-15T09:19:40.096Z" }, + { url = "https://files.pythonhosted.org/packages/ed/42/5823ec2b1469395a160b4bf5f14326b4a098f3b6898fbd327366789fa5d3/jiter-0.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:29fff31190ab3a26de026da2f187814f4b9c6695361e20a9ac2123e4d4378a4c", size = 203520, upload-time = "2025-09-15T09:19:41.798Z" }, + { url = "https://files.pythonhosted.org/packages/97/c4/d530e514d0f4f29b2b68145e7b389cbc7cac7f9c8c23df43b04d3d10fa3e/jiter-0.11.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:4441a91b80a80249f9a6452c14b2c24708f139f64de959943dfeaa6cb915e8eb", size = 305021, upload-time = "2025-09-15T09:19:43.523Z" }, + { url = "https://files.pythonhosted.org/packages/7a/77/796a19c567c5734cbfc736a6f987affc0d5f240af8e12063c0fb93990ffa/jiter-0.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ff85fc6d2a431251ad82dbd1ea953affb5a60376b62e7d6809c5cd058bb39471", size = 314384, upload-time = "2025-09-15T09:19:44.849Z" }, + { url = "https://files.pythonhosted.org/packages/14/9c/824334de0b037b91b6f3fa9fe5a191c83977c7ec4abe17795d3cb6d174cf/jiter-0.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5e86126d64706fd28dfc46f910d496923c6f95b395138c02d0e252947f452bd", size = 337389, upload-time = "2025-09-15T09:19:46.094Z" }, + { url = 
"https://files.pythonhosted.org/packages/a2/95/ed4feab69e6cf9b2176ea29d4ef9d01a01db210a3a2c8a31a44ecdc68c38/jiter-0.11.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ad8bd82165961867a10f52010590ce0b7a8c53da5ddd8bbb62fef68c181b921", size = 360519, upload-time = "2025-09-15T09:19:47.494Z" }, + { url = "https://files.pythonhosted.org/packages/b5/0c/2ad00f38d3e583caba3909d95b7da1c3a7cd82c0aa81ff4317a8016fb581/jiter-0.11.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b42c2cd74273455ce439fd9528db0c6e84b5623cb74572305bdd9f2f2961d3df", size = 487198, upload-time = "2025-09-15T09:19:49.116Z" }, + { url = "https://files.pythonhosted.org/packages/ea/8b/919b64cf3499b79bdfba6036da7b0cac5d62d5c75a28fb45bad7819e22f0/jiter-0.11.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0062dab98172dd0599fcdbf90214d0dcde070b1ff38a00cc1b90e111f071982", size = 377835, upload-time = "2025-09-15T09:19:50.468Z" }, + { url = "https://files.pythonhosted.org/packages/29/7f/8ebe15b6e0a8026b0d286c083b553779b4dd63db35b43a3f171b544de91d/jiter-0.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb948402821bc76d1f6ef0f9e19b816f9b09f8577844ba7140f0b6afe994bc64", size = 347655, upload-time = "2025-09-15T09:19:51.726Z" }, + { url = "https://files.pythonhosted.org/packages/8e/64/332127cef7e94ac75719dda07b9a472af6158ba819088d87f17f3226a769/jiter-0.11.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25a5b1110cca7329fd0daf5060faa1234be5c11e988948e4f1a1923b6a457fe1", size = 386135, upload-time = "2025-09-15T09:19:53.075Z" }, + { url = "https://files.pythonhosted.org/packages/20/c8/557b63527442f84c14774159948262a9d4fabb0d61166f11568f22fc60d2/jiter-0.11.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:bf11807e802a214daf6c485037778843fadd3e2ec29377ae17e0706ec1a25758", size = 516063, upload-time = "2025-09-15T09:19:54.447Z" }, + { url = "https://files.pythonhosted.org/packages/86/13/4164c819df4a43cdc8047f9a42880f0ceef5afeb22e8b9675c0528ebdccd/jiter-0.11.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:dbb57da40631c267861dd0090461222060960012d70fd6e4c799b0f62d0ba166", size = 508139, upload-time = "2025-09-15T09:19:55.764Z" }, + { url = "https://files.pythonhosted.org/packages/fa/70/6e06929b401b331d41ddb4afb9f91cd1168218e3371972f0afa51c9f3c31/jiter-0.11.0-cp313-cp313-win32.whl", hash = "sha256:8e36924dad32c48d3c5e188d169e71dc6e84d6cb8dedefea089de5739d1d2f80", size = 206369, upload-time = "2025-09-15T09:19:57.048Z" }, + { url = "https://files.pythonhosted.org/packages/f4/0d/8185b8e15de6dce24f6afae63380e16377dd75686d56007baa4f29723ea1/jiter-0.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:452d13e4fd59698408087235259cebe67d9d49173b4dacb3e8d35ce4acf385d6", size = 202538, upload-time = "2025-09-15T09:19:58.35Z" }, + { url = "https://files.pythonhosted.org/packages/13/3a/d61707803260d59520721fa326babfae25e9573a88d8b7b9cb54c5423a59/jiter-0.11.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:089f9df9f69532d1339e83142438668f52c97cd22ee2d1195551c2b1a9e6cf33", size = 313737, upload-time = "2025-09-15T09:19:59.638Z" }, + { url = "https://files.pythonhosted.org/packages/cd/cc/c9f0eec5d00f2a1da89f6bdfac12b8afdf8d5ad974184863c75060026457/jiter-0.11.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29ed1fe69a8c69bf0f2a962d8d706c7b89b50f1332cd6b9fbda014f60bd03a03", size = 346183, upload-time = "2025-09-15T09:20:01.442Z" }, + { url = 
"https://files.pythonhosted.org/packages/a6/87/fc632776344e7aabbab05a95a0075476f418c5d29ab0f2eec672b7a1f0ac/jiter-0.11.0-cp313-cp313t-win_amd64.whl", hash = "sha256:a4d71d7ea6ea8786291423fe209acf6f8d398a0759d03e7f24094acb8ab686ba", size = 204225, upload-time = "2025-09-15T09:20:03.102Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d9/51cf35d92bea21f2051da8ca2328831589e67e2bf971e85b1a6e6c0d2030/jiter-0.11.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:719891c2fb7628a41adff4f2f54c19380a27e6fdfdb743c24680ef1a54c67bd0", size = 312764, upload-time = "2025-09-15T09:20:21.378Z" }, + { url = "https://files.pythonhosted.org/packages/da/48/eae309ce5c180faa1bb45e378a503717da22ceb2b0488f78e548f97c2b6b/jiter-0.11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:df7f1927cbdf34cb91262a5418ca06920fd42f1cf733936d863aeb29b45a14ef", size = 305861, upload-time = "2025-09-15T09:20:22.603Z" }, + { url = "https://files.pythonhosted.org/packages/83/4f/13b80e18b0331f0fecc09cb2f09f722530b9a395006941b01491fe58baea/jiter-0.11.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e71ae6d969d0c9bab336c5e9e2fabad31e74d823f19e3604eaf96d9a97f463df", size = 339507, upload-time = "2025-09-15T09:20:23.857Z" }, + { url = "https://files.pythonhosted.org/packages/97/6d/c2fd1512873d3f23d24537e97765e7090a00de466516aa442b994b064918/jiter-0.11.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5661469a7b2be25ade3a4bb6c21ffd1e142e13351a0759f264dfdd3ad99af1ab", size = 363751, upload-time = "2025-09-15T09:20:25.12Z" }, + { url = "https://files.pythonhosted.org/packages/b0/99/48d156c742e75d33b9c8be44b1142d233823be491acdb1009629e4109e6a/jiter-0.11.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76c15ef0d3d02f8b389066fa4c410a0b89e9cc6468a1f0674c5925d2f3c3e890", size = 488591, upload-time = "2025-09-15T09:20:26.397Z" }, + { url = "https://files.pythonhosted.org/packages/ba/fd/214452149f63847b791b1f6e9558f59e94674c47418c03e9787236ac8656/jiter-0.11.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:63782a1350917a27817030716566ed3d5b3c731500fd42d483cbd7094e2c5b25", size = 378906, upload-time = "2025-09-15T09:20:27.637Z" }, + { url = "https://files.pythonhosted.org/packages/de/91/25e38fbbfc17111d7b70b24290a41d611cc2a27fa6cd0ed84ddae38ec3e6/jiter-0.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a7092b699646a1ddc03a7b112622d9c066172627c7382659befb0d2996f1659", size = 350288, upload-time = "2025-09-15T09:20:28.951Z" }, + { url = "https://files.pythonhosted.org/packages/b5/d8/d6d2eefa9f0ff6ac6b725f5164a94f15bb4dee68584b5054043d735803da/jiter-0.11.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f637b8e818f6d75540f350a6011ce21252573c0998ea1b4365ee54b7672c23c5", size = 388442, upload-time = "2025-09-15T09:20:30.223Z" }, + { url = "https://files.pythonhosted.org/packages/ed/e4/cd7e27852de498d441a575a147ac7a15cf66768ae2cde8c43ea5b464c827/jiter-0.11.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a624d87719e1b5d09c15286eaee7e1532a40c692a096ea7ca791121365f548c1", size = 518273, upload-time = "2025-09-15T09:20:31.569Z" }, + { url = "https://files.pythonhosted.org/packages/77/a2/6681a9a503141752b33c92c58512ed8da13849ed3dbf88a3f8aba9bfb255/jiter-0.11.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9d0146d8d9b3995821bb586fc8256636258947c2f39da5bab709f3a28fb1a0b", size = 510254, upload-time = "2025-09-15T09:20:32.889Z" }, + { url = 
"https://files.pythonhosted.org/packages/38/32/df1a06f397074da35cf8fe79ec07da203358a2912b2a6349a0d4a88a1e0a/jiter-0.11.0-cp39-cp39-win32.whl", hash = "sha256:d067655a7cf0831eb8ec3e39cbd752995e9b69a2206df3535b3a067fac23b032", size = 207076, upload-time = "2025-09-15T09:20:34.196Z" }, + { url = "https://files.pythonhosted.org/packages/1d/91/11bc61fa76fd6197f5baa8576614852ee8586a16c2f25085edc6b47a369d/jiter-0.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:f05d03775a11aaf132c447436983169958439f1219069abf24662a672851f94e", size = 206077, upload-time = "2025-09-15T09:20:35.598Z" }, + { url = "https://files.pythonhosted.org/packages/70/f3/ce100253c80063a7b8b406e1d1562657fd4b9b4e1b562db40e68645342fb/jiter-0.11.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:902b43386c04739229076bd1c4c69de5d115553d982ab442a8ae82947c72ede7", size = 336380, upload-time = "2025-09-15T09:20:36.867Z" }, ] [[package]] @@ -769,23 +770,9 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, upload-time = "2024-06-10T19:24:40.698Z" }, ] -[[package]] -name = "llama-index-agent-openai" -version = "0.4.9" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "llama-index-core" }, - { name = "llama-index-llms-openai" }, - { name = "openai" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/28/e5/2bf8d39a62b10b5a4901e3ff8664abe5a8b91c95720775c0a6b559f53c5e/llama_index_agent_openai-0.4.9.tar.gz", hash = "sha256:153cc0f49dcaa0cc44795e2d3ea20efb7dd1251368c0d7704a6e26aac6611c9d", size = 12226, upload-time = "2025-05-30T20:11:00.444Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ec/3e/24fcc3e486c730e4f0bbd7bd94f22adce2e6f570689b8fa1f0eddf7ad71b/llama_index_agent_openai-0.4.9-py3-none-any.whl", hash = "sha256:d696b014ef5652cdae3fe934cc2146fb05ffa978a242d432a2ae895524935c20", size = 14205, upload-time = "2025-05-30T20:10:59.198Z" }, -] - [[package]] name = "llama-index-core" -version = "0.12.41" +version = "0.14.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -798,6 +785,7 @@ dependencies = [ { name = "filetype" }, { name = "fsspec" }, { name = "httpx" }, + { name = "llama-index-workflows" }, { name = "nest-asyncio" }, { name = "networkx", version = "3.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, @@ -807,9 +795,11 @@ dependencies = [ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, { name = "numpy", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, { name = "pillow" }, + { name = "platformdirs" }, { name = "pydantic" }, { name = "pyyaml" }, { name = "requests" }, + { name = "setuptools" }, { name = "sqlalchemy", extra = ["asyncio"] }, { name = "tenacity" }, { name = "tiktoken" }, @@ -818,35 +808,63 @@ dependencies = [ { name = "typing-inspect" }, { name = "wrapt" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/5b/2d/9b18c26212558464c65037823385e5ee9361c99bcc22fd81480cc2760be9/llama_index_core-0.12.41.tar.gz", hash = 
"sha256:34f77c51b12b11ee1d743ff183c1b61afe2975f9970357723a744d4e080f5b3d", size = 7292626, upload-time = "2025-06-07T20:31:51.582Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/82/a89342afc1639d2186546fca1c17a0b0d57e21e62e2bb16e838e41f22f71/llama_index_core-0.14.2.tar.gz", hash = "sha256:352eb4c3d73fb848bfe08f2d0f9f9aefd4c0789b651ada3c9fd17f638ac1c87b", size = 11577624, upload-time = "2025-09-16T03:43:21.598Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fa/07/d1427e7d6b13664edd878696233bd98a02f67d708c133b66ffb563b5add6/llama_index_core-0.12.41-py3-none-any.whl", hash = "sha256:61fc1e912303250ee32778d5db566e242f7bd9ee043940860a436e3c93f17ef5", size = 7665699, upload-time = "2025-06-07T20:31:47.4Z" }, + { url = "https://files.pythonhosted.org/packages/37/93/df13d337edc51e77a449e21299cf82d4f267018d73fefd05659e269edaa4/llama_index_core-0.14.2-py3-none-any.whl", hash = "sha256:96e536ece3f81bbf2169c772965d23e215f9e31aa15a761dd50fa05d3d1e53b9", size = 11918780, upload-time = "2025-09-16T03:43:18.849Z" }, +] + +[[package]] +name = "llama-index-instrumentation" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/e5/a3628da5d716d6bbc2c0a8d39b629dff81b33d5625c5b934e1456370064f/llama_index_instrumentation-0.4.1.tar.gz", hash = "sha256:a79d0dd2baba34f05ff4354d63a99b212322635b8afa6cc96ed00a7e11ebfdc3", size = 45788, upload-time = "2025-09-15T03:53:00.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/7a/c414f4dc9a7dd90d050c387489436bab2d678a566b704ede2f5b62f82ad7/llama_index_instrumentation-0.4.1-py3-none-any.whl", hash = "sha256:0d3ac926d0db3d39c0ec34ee72da5322d61e06b87fe956407e4a1e7a2708b936", size = 15063, upload-time = "2025-09-15T03:52:59.098Z" }, ] [[package]] name = "llama-index-llms-openai" -version = "0.4.5" +version = "0.5.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "llama-index-core" }, { name = "openai" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b1/76/3f196018354c5a230b863f2a527ccbb4a32cae2b9674a8a6ed3788ae1736/llama_index_llms_openai-0.4.5.tar.gz", hash = "sha256:c22dd37597826c2c2f91f45e80e2d4313499560525ace3ac7ad1a50a54a67b17", size = 24179, upload-time = "2025-06-10T19:09:09.637Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/fe/ac57ecb9b5ea4243e097fbc3f5de22f6bd1a787b72a7c80542af80afbf4d/llama_index_llms_openai-0.5.6.tar.gz", hash = "sha256:92533e83be2eb321d84a01a84fb2bf4506bf684c410cd94ccb29ae6c949a27d4", size = 24239, upload-time = "2025-09-08T20:46:25.018Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d0/22/73bc65916b369ef763c03161e1414cba930c753325f9892ab6a0b4be7025/llama_index_llms_openai-0.4.5-py3-none-any.whl", hash = "sha256:4704412b298eec5f6e2c31b12a09512f8883829b5bb398ddc533329078af2af7", size = 25298, upload-time = "2025-06-10T19:09:08.272Z" }, + { url = "https://files.pythonhosted.org/packages/b4/e2/d78be8cbc645668eba088223d63114a076758626fe12e3b4ec9efa2ba342/llama_index_llms_openai-0.5.6-py3-none-any.whl", hash = "sha256:a93a897fe733a6d7b668cbc6cca546e644054ddf5497821141b2d4b5ffb6ea80", size = 25368, upload-time = "2025-09-08T20:46:23.79Z" }, ] [[package]] name = "llama-index-protocols-ag-ui" -version = "0.1.2" +version = "0.2.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "ag-ui-protocol" }, { name = "llama-index-core" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/a0/eb/5ee4925732835611fc298677c0848eef128c2f8ddbe23081ee83c1804c6d/llama_index_protocols_ag_ui-0.1.2.tar.gz", hash = "sha256:344013766dc300ff9d0692685929ad709026d87af575f420a68005358c379252", size = 8244, upload-time = "2025-06-16T20:52:36.214Z" } +sdist = { url = "https://files.pythonhosted.org/packages/2e/7a/dccb404712bddc8203b806f5b276365e081a30c4ffcdce67a72f1b99b57a/llama_index_protocols_ag_ui-0.2.2.tar.gz", hash = "sha256:33c9c0a435411c0336b35b3bfb08c93bb069e64b3e4a36deb32f517c6b49ba1f", size = 8251, upload-time = "2025-09-24T17:39:38.389Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/92/dc/9cb1a834e8cb74af60f143e71140d9bb8db7b85e18ff5c6cd63ad6678990/llama_index_protocols_ag_ui-0.1.2-py3-none-any.whl", hash = "sha256:405c13eed60095e7013e98062bf6c33dd524e1978c509a0d99147543bad7cae2", size = 9799, upload-time = "2025-06-16T20:52:35.389Z" }, + { url = "https://files.pythonhosted.org/packages/74/b0/cf863f8fabcddbc79170211cae49d85957bf3601df870ea58aeee28ff999/llama_index_protocols_ag_ui-0.2.2-py3-none-any.whl", hash = "sha256:5097b563464514013644fa9ef55252231cb3632396517593d93355b8cda81e85", size = 9761, upload-time = "2025-09-24T17:39:37.616Z" }, +] + +[[package]] +name = "llama-index-workflows" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "eval-type-backport", marker = "python_full_version < '3.10'" }, + { name = "llama-index-instrumentation" }, + { name = "pydantic" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8a/be/f1358bc7c0a624b6f6239fdf460060b06699d1259bdea12722ab9617fa20/llama_index_workflows-2.4.0.tar.gz", hash = "sha256:24adc797409b2b575f942ffeec687773126843d251bec4c2b9fd984acf1e6ccb", size = 1290953, upload-time = "2025-09-23T22:58:51.178Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/a9/97d1e96be588d97a27ab2a966ca2c456eac06ebf2df9af36bacb07debd28/llama_index_workflows-2.4.0-py3-none-any.whl", hash = "sha256:639536d67fbb9c9d75c0789763f774231e6957397afc7368a79900b86a73d44b", size = 62109, upload-time = "2025-09-23T22:58:50.186Z" }, ] [[package]] @@ -1296,7 +1314,7 @@ wheels = [ [[package]] name = "openai" -version = "1.86.0" +version = "1.109.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1308,9 +1326,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ec/7a/9ad4a61f1502f0e59d8c27fb629e28a63259a44d8d31cd2314e1534a2d9f/openai-1.86.0.tar.gz", hash = "sha256:c64d5b788359a8fdf69bd605ae804ce41c1ce2e78b8dd93e2542e0ee267f1e4b", size = 468272, upload-time = "2025-06-10T16:50:32.962Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c6/a1/a303104dc55fc546a3f6914c842d3da471c64eec92043aef8f652eb6c524/openai-1.109.1.tar.gz", hash = "sha256:d173ed8dbca665892a6db099b4a2dfac624f94d20a93f46eb0b56aae940ed869", size = 564133, upload-time = "2025-09-24T13:00:53.075Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/58/c1/dfb16b3432810fc9758564f9d1a4dbce6b93b7fb763ba57530c7fc48316d/openai-1.86.0-py3-none-any.whl", hash = "sha256:c8889c39410621fe955c230cc4c21bfe36ec887f4e60a957de05f507d7e1f349", size = 730296, upload-time = "2025-06-10T16:50:30.495Z" }, + { url = "https://files.pythonhosted.org/packages/1d/2a/7dd3d207ec669cacc1f186fd856a0f61dbc255d24f6fdc1a6715d6051b0f/openai-1.109.1-py3-none-any.whl", hash = 
"sha256:6bcaf57086cf59159b8e27447e4e7dd019db5d29a438072fbd49c290c7e65315", size = 948627, upload-time = "2025-09-24T13:00:50.754Z" }, ] [[package]] @@ -1808,8 +1826,8 @@ source = { editable = "." } dependencies = [ { name = "fastapi" }, { name = "jsonpatch" }, - { name = "llama-index-agent-openai" }, { name = "llama-index-core" }, + { name = "llama-index-llms-openai" }, { name = "llama-index-protocols-ag-ui" }, { name = "uvicorn" }, ] @@ -1818,12 +1836,21 @@ dependencies = [ requires-dist = [ { name = "fastapi", specifier = ">=0.100.0" }, { name = "jsonpatch", specifier = ">=1.33" }, - { name = "llama-index-agent-openai", specifier = ">=0.4.9,<0.5" }, - { name = "llama-index-core", specifier = ">=0.12.41,<0.13" }, - { name = "llama-index-protocols-ag-ui", specifier = ">=0.1.2" }, + { name = "llama-index-core", specifier = ">=0.14.0,<0.15" }, + { name = "llama-index-llms-openai", specifier = ">=0.5.0,<0.6.0" }, + { name = "llama-index-protocols-ag-ui", specifier = ">=0.2.2,<0.3" }, { name = "uvicorn", specifier = ">=0.27.0" }, ] +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, +] + [[package]] name = "sniffio" version = "1.3.1"