From f1d4599364af9a60683b09993a9808e4b14e0120 Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Wed, 10 Sep 2025 16:59:46 -0300 Subject: [PATCH 01/34] feat: Add support for cloud Signed-off-by: Luis Valdes --- .../feature/agentic_chat/page.tsx | 11 +- .../feature/agentic_chat_reasoning/page.tsx | 11 +- .../feature/agentic_generative_ui/page.tsx | 193 ++++---- .../feature/human_in_the_loop/page.tsx | 332 ++++++++------ .../feature/predictive_state_updates/page.tsx | 189 ++++---- .../feature/shared_state/page.tsx | 95 ++-- .../feature/subgraphs/page.tsx | 186 +++++--- .../feature/tool_based_generative_ui/page.tsx | 431 ++++++++++-------- typescript-sdk/apps/dojo/src/cloudAgents.ts | 42 ++ .../integrations/langgraph/python/.gitignore | 3 + 10 files changed, 891 insertions(+), 602 deletions(-) create mode 100644 typescript-sdk/apps/dojo/src/cloudAgents.ts diff --git a/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/agentic_chat/page.tsx b/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/agentic_chat/page.tsx index f635969aa..29bdcced2 100644 --- a/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/agentic_chat/page.tsx +++ b/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/agentic_chat/page.tsx @@ -4,6 +4,7 @@ import "@copilotkit/react-ui/styles.css"; import "./style.css"; import { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from "@copilotkit/react-core"; import { CopilotChat } from "@copilotkit/react-ui"; +import { cloudAgents } from "@/cloudAgents"; interface AgenticChatProps { params: Promise<{ @@ -14,10 +15,18 @@ interface AgenticChatProps { const AgenticChat: React.FC = ({ params }) => { const { integrationId } = React.use(params); + let runtimeUrl = `/api/copilotkit/${integrationId}`; + let publicApiKey: string | undefined = undefined; + if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) { + runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL; + publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey; + } + return ( diff --git a/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/agentic_chat_reasoning/page.tsx b/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/agentic_chat_reasoning/page.tsx index ee7333c09..c5bb4aa27 100644 --- a/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/agentic_chat_reasoning/page.tsx +++ b/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/agentic_chat_reasoning/page.tsx @@ -5,6 +5,7 @@ import "./style.css"; import { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from "@copilotkit/react-core"; import { CopilotChat } from "@copilotkit/react-ui"; import { ChevronDown } from "lucide-react"; +import { cloudAgents } from "@/cloudAgents"; import { Button } from "@/components/ui/button"; import { DropdownMenu, @@ -24,10 +25,18 @@ interface AgenticChatProps { const AgenticChat: React.FC = ({ params }) => { const { integrationId } = React.use(params); + let runtimeUrl = `/api/copilotkit/${integrationId}`; + let publicApiKey: string | undefined = undefined; + if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) { + runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL; + publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey; + } + return ( diff --git a/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/agentic_generative_ui/page.tsx b/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/agentic_generative_ui/page.tsx index 36e32531c..124fed804 100644 --- 
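The diffstat above lists a new typescript-sdk/apps/dojo/src/cloudAgents.ts, but its body is not included in these hunks. Judging only from the call sites (cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey), it exports a registry keyed by integration id; a minimal sketch, with a hypothetical CloudAgent type name and placeholder keys, could look like:

// Hypothetical sketch of src/cloudAgents.ts; only the `cloudAgents` export and
// its `id`/`publicApiKey` fields are implied by the call sites in this patch.
export interface CloudAgent {
  id: string; // matches the `integrationId` route segment
  publicApiKey: string; // public API key used when running against the cloud runtime
}

export const cloudAgents: CloudAgent[] = [
  // Placeholder entry, not a real key.
  { id: "langgraph", publicApiKey: "ck_pub_placeholder" },
];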
a/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/agentic_generative_ui/page.tsx +++ b/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/agentic_generative_ui/page.tsx @@ -5,6 +5,7 @@ import "./style.css"; import { CopilotKit, useCoAgentStateRender } from "@copilotkit/react-core"; import { CopilotChat } from "@copilotkit/react-ui"; import { useTheme } from "next-themes"; +import { cloudAgents } from "@/cloudAgents"; interface AgenticGenerativeUIProps { params: Promise<{ @@ -14,10 +15,17 @@ interface AgenticGenerativeUIProps { const AgenticGenerativeUI: React.FC = ({ params }) => { const { integrationId } = React.use(params); + let runtimeUrl = `/api/copilotkit/${integrationId}`; + let publicApiKey: string | undefined = undefined; + if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) { + runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL; + publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey; + } return ( @@ -42,18 +50,19 @@ const Chat = () => { return null; } - const completedCount = state.steps.filter(step => step.status === "completed").length; + const completedCount = state.steps.filter((step) => step.status === "completed").length; const progressPercentage = (completedCount / state.steps.length) * 100; return (
-
+
{/* Header */}
@@ -64,16 +73,20 @@ const Chat = () => { {completedCount}/{state.steps.length} Complete
- + {/* Progress Bar */} -
-
+
-
+
@@ -81,48 +94,55 @@ const Chat = () => {
{state.steps.map((step, index) => { const isCompleted = step.status === "completed"; - const isCurrentPending = step.status === "pending" && + const isCurrentPending = + step.status === "pending" && index === state.steps.findIndex((s) => s.status === "pending"); const isFuturePending = step.status === "pending" && !isCurrentPending; return ( -
{/* Connector Line */} {index < state.steps.length - 1 && ( -
+
)} {/* Status Icon */} -
+
{isCompleted ? ( ) : isCurrentPending ? ( @@ -134,21 +154,30 @@ const Chat = () => { {/* Step Content */}
-
+
{step.description}
{isCurrentPending && ( -
+
Processing...
)} @@ -156,11 +185,13 @@ const Chat = () => { {/* Animated Background for Current Step */} {isCurrentPending && ( -
+
)}
); @@ -168,16 +199,20 @@ const Chat = () => {
{/* Decorative Elements */} -
-
+
+
); @@ -216,14 +251,7 @@ function SpinnerIcon() { fill="none" viewBox="0 0 24 24" > - + - - + + + ); } diff --git a/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/human_in_the_loop/page.tsx b/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/human_in_the_loop/page.tsx index 4ea41f853..c615fa800 100644 --- a/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/human_in_the_loop/page.tsx +++ b/typescript-sdk/apps/dojo/src/app/[integrationId]/feature/human_in_the_loop/page.tsx @@ -5,6 +5,7 @@ import "./style.css"; import { CopilotKit, useCopilotAction, useLangGraphInterrupt } from "@copilotkit/react-core"; import { CopilotChat } from "@copilotkit/react-ui"; import { useTheme } from "next-themes"; +import { cloudAgents } from "@/cloudAgents"; interface HumanInTheLoopProps { params: Promise<{ @@ -15,10 +16,18 @@ interface HumanInTheLoopProps { const HumanInTheLoop: React.FC = ({ params }) => { const { integrationId } = React.use(params); + let runtimeUrl = `/api/copilotkit/${integrationId}`; + let publicApiKey: string | undefined = undefined; + if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) { + runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL; + publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey; + } + return ( @@ -34,29 +43,29 @@ interface Step { // Shared UI Components const StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => ( -
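The same six-line resolution block recurs in every feature page this patch touches: default to the local /api/copilotkit/${integrationId} proxy route, and only when NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL is set switch to that shared cloud runtime URL and look up the agent's publicApiKey. A small helper, not part of the patch but a sketch of how the repetition could be hoisted, makes the rule explicit:

import { cloudAgents } from "@/cloudAgents";

// Sketch only: consolidates the per-page logic repeated throughout this patch.
export function resolveCopilotRuntime(integrationId: string): {
  runtimeUrl: string;
  publicApiKey?: string;
} {
  const cloudUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;
  if (!cloudUrl) {
    // Default: proxy through the local Next.js API route for this integration.
    return { runtimeUrl: `/api/copilotkit/${integrationId}` };
  }
  // Cloud mode: one shared runtime URL plus the agent's own public key.
  return {
    runtimeUrl: cloudUrl,
    publicApiKey: cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey,
  };
}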
-
+
+
{children}
); -const StepHeader = ({ - theme, - enabledCount, - totalCount, - status, - showStatus = false -}: { - theme?: string; - enabledCount: number; - totalCount: number; +const StepHeader = ({ + theme, + enabledCount, + totalCount, + status, + showStatus = false, +}: { + theme?: string; + enabledCount: number; + totalCount: number; status?: string; showStatus?: boolean; }) => ( @@ -70,23 +79,27 @@ const StepHeader = ({ {enabledCount}/{totalCount} Selected
{showStatus && ( -
+
{status === "executing" ? "Ready" : "Waiting"}
)}
- -
-
+
0 ? (enabledCount / totalCount) * 100 : 0}%` }} /> @@ -94,31 +107,31 @@ const StepHeader = ({
); -const StepItem = ({ - step, - theme, - status, - onToggle, - disabled = false -}: { - step: { description: string; status: string }; - theme?: string; +const StepItem = ({ + step, + theme, + status, + onToggle, + disabled = false, +}: { + step: { description: string; status: string }; + theme?: string; status?: string; onToggle: () => void; disabled?: boolean; }) => ( -
-
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, 
preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n \n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n
\n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -158,7 +158,7 @@ "pydantic-ai::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n
\n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
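MobileChat above drives the pull-up sheet from useMobileChat(defaultChatHeight), destructuring isChatOpen, setIsChatOpen, chatHeight, setChatHeight, isDragging, and handleDragStart. The hook itself lives in @/utils/use-mobile-chat and is not part of this patch; a rough sketch of the surface those call sites assume (heights in vh units, pointer-driven resizing elided) might be:

import { useCallback, useState } from "react";

// Hypothetical sketch of @/utils/use-mobile-chat, inferred from its call sites here.
export function useMobileChat(defaultChatHeight: number) {
  const [isChatOpen, setIsChatOpen] = useState(false);
  const [chatHeight, setChatHeight] = useState(defaultChatHeight); // height in vh
  const [isDragging, setIsDragging] = useState(false);

  // A real implementation would track pointer moves to resize; elided here.
  const handleDragStart = useCallback(() => setIsDragging(true), []);

  return { isChatOpen, setIsChatOpen, chatHeight, setChatHeight, isDragging, handleDragStart };
}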
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
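validateAndCorrectImageNames above normalizes the model's image picks: it returns null unless exactly three names were supplied, keeps only unique entries found in VALID_IMAGE_NAMES, then tops the list back up to three with random unused valid names. For example (the random fill is nondeterministic):

// Duplicate and invalid picks are dropped, then refilled to exactly three.
const picks = validateAndCorrectImageNames([
  "Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg",
  "Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg", // duplicate: dropped
  "not_in_the_list.jpg", // invalid: dropped
]);
// picks -> ["Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg", <random valid>, <random valid>]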
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n
\n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n
\n
\n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n
\n ))}\n
\n )\n\n}", + "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [];\n english: string[] | [];\n image_names: string[] | [];\n selectedImage: string | null;\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial;\n setHaikus: Dispatch>;\n haikus: GenerateHaiku[];\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n const chatTitle = \"Haiku Generator\";\n const chatDescription = \"Ask me to create haikus\";\n const initialLabel = \"I'm a haiku generator 👋. How can I help you?\";\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && (\n \n )}\n
\n \n );\n}\n\nfunction MobileChat({\n chatTitle,\n chatDescription,\n initialLabel,\n}: {\n chatTitle: string;\n chatDescription: string;\n initialLabel: string;\n}) {\n const defaultChatHeight = 50;\n\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n
\n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n
\n\n {/* Backdrop */}\n {isChatOpen &&
setIsChatOpen(false)} />}\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

{generatedHaiku.english?.[index]}

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus((prevHaikus) => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName,\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n
\n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_names: [],\n selectedImage: null,\n },\n ]);\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction(\n {\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join(\"\\n -\")}`,\n },\n ],\n followUp: false,\n handler: async ({\n japanese,\n english,\n image_names,\n }: {\n japanese: string[];\n english: string[];\n image_names: string[];\n }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus((prev) =>\n [newHaiku, ...prev].filter((h) => h.english[0] !== \"A placeholder verse—\"),\n );\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return ;\n },\n },\n [haikus],\n );\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n \n
\n {haikus.map(\n (haiku, index) =>\n (haikus.length == 1 || index == activeIndex) && (\n \n {haiku.japanese.map((line, lineIndex) => (\n \n \n {line}\n

\n \n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n \n {haiku.image_names.map((imageName, imgIndex) => (\n \n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName };\n } else {\n return { ...h };\n }\n });\n })\n }\n />\n ))}\n
\n )}\n
\n ),\n )}\n
\n \n \n );\n}\n\nfunction Thumbnails({\n haikus,\n activeIndex,\n setActiveIndex,\n isMobile,\n}: {\n haikus: Haiku[];\n activeIndex: number;\n setActiveIndex: (index: number) => void;\n isMobile: boolean;\n}) {\n if (haikus.length == 0 || isMobile) {\n return null;\n }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n
\n

{line}

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n
\n ))}\n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -184,7 +184,7 @@ "server-starter::agentic_chat": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n
\n );\n};\n\nconst Chat = () => {\n  const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n  useCopilotAction({\n    name: \"change_background\",\n    description:\n      \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients, etc.\",\n    parameters: [\n      {\n        name: \"background\",\n        type: \"string\",\n        description: \"The background. Prefer gradients.\",\n      },\n    ],\n    handler: ({ background }) => {\n      setBackground(background);\n      return {\n        status: \"success\",\n        message: `Background changed to ${background}`,\n      };\n    },\n  });\n\n  return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n
\n );\n};\n\nconst Chat = () => {\n  const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n  useCopilotAction({\n    name: \"change_background\",\n    description:\n      \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients, etc.\",\n    parameters: [\n      {\n        name: \"background\",\n        type: \"string\",\n        description: \"The background. Prefer gradients.\",\n      },\n    ],\n    handler: ({ background }) => {\n      setBackground(background);\n      return {\n        status: \"success\",\n        message: `Background changed to ${background}`,\n      };\n    },\n  });\n\n  return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", "language": "typescript", "type": "file" }, @@ -210,7 +210,7 @@ "server-starter-all-features::agentic_chat": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n
\n );\n};\n\nconst Chat = () => {\n  const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n  useCopilotAction({\n    name: \"change_background\",\n    description:\n      \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients, etc.\",\n    parameters: [\n      {\n        name: \"background\",\n        type: \"string\",\n        description: \"The background. Prefer gradients.\",\n      },\n    ],\n    handler: ({ background }) => {\n      setBackground(background);\n      return {\n        status: \"success\",\n        message: `Background changed to ${background}`,\n      };\n    },\n  });\n\n  return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", "language": "typescript", "type": "file" }, @@ -236,7 +236,7 @@ "server-starter-all-features::human_in_the_loop": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n  // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts;\n  // this hook won't do anything for other integrations.\n  useLangGraphInterrupt({\n    render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n  });\n  useCopilotAction({\n    name: \"generate_task_steps\",\n    description: \"Generates a list of steps for the user to perform\",\n    parameters: [\n      {\n        name: \"steps\",\n        type: \"object[]\",\n        attributes: [\n          {\n            name: \"description\",\n            type: \"string\",\n          },\n          {\n            name: \"status\",\n            type: \"string\",\n            enum: [\"enabled\", \"disabled\", \"executing\"],\n          },\n        ],\n      },\n    ],\n    // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts,\n    // so don't use this action for langgraph integration.\n    available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n    renderAndWaitForResponse: ({ args, respond, status }) => {\n      return <StepsFeedback args={args} respond={respond} status={status} />;\n    },\n  });\n\n  return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", + "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n \n {children}\n
\n
\n);\n\nconst StepHeader = ({\n theme,\n enabledCount,\n totalCount,\n status,\n showStatus = false,\n}: {\n theme?: string;\n enabledCount: number;\n totalCount: number;\n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n \n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n\n \n 0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({\n step,\n theme,\n status,\n onToggle,\n disabled = false,\n}: {\n step: { description: string; status: string };\n theme?: string;\n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n \n \n \n);\n\nconst ActionButton = ({\n variant,\n theme,\n disabled,\n onClick,\n children,\n}: {\n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n\n const variantClasses = {\n primary:\n \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary:\n theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success:\n \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger:\n \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\",\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({\n theme,\n variant = \"default\",\n}: {\n theme?: string;\n variant?: \"default\" | \"success\" | \"danger\";\n}) => (\n <>\n \n \n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n\n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter((step) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n\n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n  // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts;\n  // this hook won't do anything for other integrations.\n  useLangGraphInterrupt({\n    render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n  });\n  useCopilotAction({\n    name: \"generate_task_steps\",\n    description: \"Generates a list of steps for the user to perform\",\n    parameters: [\n      {\n        name: \"steps\",\n        type: \"object[]\",\n        attributes: [\n          {\n            name: \"description\",\n            type: \"string\",\n          },\n          {\n            name: \"status\",\n            type: \"string\",\n            enum: [\"enabled\", \"disabled\", \"executing\"],\n          },\n        ],\n      },\n    ],\n    // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts,\n    // so don't use this action for langgraph integration.\n    available: [\"langgraph\", \"langgraph-fastapi\", \"langgraph-typescript\"].includes(integrationId)\n      ? \"disabled\"\n      : \"enabled\",\n    renderAndWaitForResponse: ({ args, respond, status }) => {\n      return <StepsFeedback args={args} respond={respond} status={status} />;\n    },\n  });\n\n  return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter((step) => step.status === \"enabled\") });\n }\n };\n\n return (\n \n \n\n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n \n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n \n )}\n\n \n
\n );\n};\n\nexport default HumanInTheLoop;\n", "language": "typescript", "type": "file" }, @@ -262,7 +262,7 @@ "server-starter-all-features::agentic_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter(step => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n
\n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n \n {/* Progress Bar */}\n
\n
\n
\n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending = step.status === \"pending\" && \n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n
\n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n
\n )}\n\n {/* Status Icon */}\n
\n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n
\n {step.description}\n
\n {isCurrentPending && (\n
\n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n
\n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n
\n
\n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", + "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter((step) => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n \n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n\n {/* Progress Bar */}\n \n \n \n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending =\n step.status === \"pending\" &&\n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n \n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n \n )}\n\n {/* Status Icon */}\n \n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n \n {step.description}\n
\n {isCurrentPending && (\n \n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n \n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n \n \n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", "language": "typescript", "type": "file" }, @@ -288,7 +288,7 @@ "server-starter-all-features::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n \n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", + "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [];\n english: string[] | [];\n image_names: string[] | [];\n selectedImage: string | null;\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial;\n setHaikus: Dispatch>;\n haikus: GenerateHaiku[];\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n const chatTitle = \"Haiku Generator\";\n const chatDescription = \"Ask me to create haikus\";\n const initialLabel = \"I'm a haiku generator 👋. How can I help you?\";\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && (\n \n )}\n \n \n );\n}\n\nfunction MobileChat({\n chatTitle,\n chatDescription,\n initialLabel,\n}: {\n chatTitle: string;\n chatDescription: string;\n initialLabel: string;\n}) {\n const defaultChatHeight = 50;\n\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to a good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n <div
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen &&
setIsChatOpen(false)} />}\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

{generatedHaiku.english?.[index]}

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus((prevHaikus) => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName,\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n
\n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_names: [],\n selectedImage: null,\n },\n ]);\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction(\n {\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join(\"\\n -\")}`,\n },\n ],\n followUp: false,\n handler: async ({\n japanese,\n english,\n image_names,\n }: {\n japanese: string[];\n english: string[];\n image_names: string[];\n }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus((prev) =>\n [newHaiku, ...prev].filter((h) => h.english[0] !== \"A placeholder verse—\"),\n );\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return ;\n },\n },\n [haikus],\n );\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n \n
\n {haikus.map(\n (haiku, index) =>\n (haikus.length == 1 || index == activeIndex) && (\n \n {haiku.japanese.map((line, lineIndex) => (\n \n \n {line}\n

\n \n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n \n {haiku.image_names.map((imageName, imgIndex) => (\n \n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName };\n } else {\n return { ...h };\n }\n });\n })\n }\n />\n ))}\n
\n )}\n \n ),\n )}\n \n \n \n );\n}\n\nfunction Thumbnails({\n haikus,\n activeIndex,\n setActiveIndex,\n isMobile,\n}: {\n haikus: Haiku[];\n activeIndex: number;\n setActiveIndex: (index: number) => void;\n isMobile: boolean;\n}) {\n if (haikus.length == 0 || isMobile) {\n return null;\n }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n
\n

{line}

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n
\n ))}\n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -314,7 +314,7 @@ "server-starter-all-features::shared_state": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => 
{\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n \n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n
\n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", + "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n\n const chatTitle = \"AI Recipe Assistant\";\n const chatDescription = \"Ask me to craft recipes\";\n const initialLabel = \"Hi 👋 How can I help with your recipe?\";\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to a good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n <div
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, 
preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n \n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value ?? 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n </div
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n
\n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -340,7 +340,7 @@ "server-starter-all-features::predictive_state_updates": [ { "name": "page.tsx", - "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n const chatTitle = 'AI Document Editor'\n const chatDescription = 'Ask me to create or edit a document'\n const initialLabel = 'Hi 👋 How can I help with your document?'\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction({\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n }, [agentState?.document]);\n\n // Action to write the document.\n useCopilotAction({\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n }, [agentState?.document]);\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n
\n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n
\n )}\n
\n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", + "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n const chatTitle = \"AI Document Editor\";\n const chatDescription = \"Ask me to create or edit a document\";\n const initialLabel = \"Hi 👋 How can I help with your document?\";\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to a good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n <div
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n \n
\n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction(\n {\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n },\n [agentState?.document],\n );\n\n // Action to write the document.\n useCopilotAction(\n {\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n },\n [agentState?.document],\n );\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n \n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n \n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n \n )}\n \n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", "language": "typescript", "type": "file" }, @@ -366,7 +366,7 @@ "vercel-ai-sdk::agentic_chat": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", "language": "typescript", "type": "file" }, @@ -392,7 +392,7 @@ "langgraph::agentic_chat": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", "language": "typescript", "type": "file" }, @@ -410,7 +410,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. 
We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. 
We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -424,7 +424,7 @@ "langgraph::agentic_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter(step => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n
\n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n \n {/* Progress Bar */}\n
\n
\n
\n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending = step.status === \"pending\" && \n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n
\n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n
\n )}\n\n {/* Status Icon */}\n
\n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n
\n {step.description}\n
\n {isCurrentPending && (\n
\n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n
\n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n
\n
\n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", + "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter((step) => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n \n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n\n {/* Progress Bar */}\n \n \n \n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending =\n step.status === \"pending\" &&\n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n \n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n \n )}\n\n {/* Status Icon */}\n \n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n \n {step.description}\n
\n {isCurrentPending && (\n \n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n \n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n \n \n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", "language": "typescript", "type": "file" }, @@ -442,7 +442,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. 
\n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages 
import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. 
\n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -456,7 +456,7 @@ "langgraph::human_in_the_loop": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from 
\"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for langgraph integration.\n available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return ;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", + "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n \n {children}\n
\n
\n);\n\nconst StepHeader = ({\n theme,\n enabledCount,\n totalCount,\n status,\n showStatus = false,\n}: {\n theme?: string;\n enabledCount: number;\n totalCount: number;\n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n \n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n\n \n 0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({\n step,\n theme,\n status,\n onToggle,\n disabled = false,\n}: {\n step: { description: string; status: string };\n theme?: string;\n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n \n \n \n);\n\nconst ActionButton = ({\n variant,\n theme,\n disabled,\n onClick,\n children,\n}: {\n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n\n const variantClasses = {\n primary:\n \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary:\n theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success:\n \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger:\n \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\",\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({\n theme,\n variant = \"default\",\n}: {\n theme?: string;\n variant?: \"default\" | \"success\" | \"danger\";\n}) => (\n <>\n \n \n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n\n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter((step) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n\n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for langgraph integration.\n available: [\"langgraph\", \"langgraph-fastapi\", \"langgraph-typescript\"].includes(integrationId)\n ? \"disabled\"\n : \"enabled\",\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return ;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter((step) => step.status === \"enabled\") });\n }\n };\n\n return (\n \n \n\n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n \n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n \n )}\n\n \n
\n );\n};\n\nexport default HumanInTheLoop;\n", "language": "typescript", "type": "file" }, @@ -474,7 +474,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in imperative form (i.e. Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any]\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == 
\"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final response\n return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add 
edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in imperative form (i.e. 
Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any] = []\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final response\n 
return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -488,7 +488,7 @@ "langgraph::predictive_state_updates": [ { "name": "page.tsx", - "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport StarterKit from 
\"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n const chatTitle = 'AI Document Editor'\n const chatDescription = 'Ask me to create or edit a document'\n const initialLabel = 'Hi 👋 How can I help with your document?'\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction({\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n }, [agentState?.document]);\n\n // Action to write the document.\n useCopilotAction({\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n }, [agentState?.document]);\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n
\n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n
\n )}\n
\n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", + "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n const chatTitle = \"AI Document Editor\";\n const chatDescription = \"Ask me to create or edit a document\";\n const initialLabel = \"Hi 👋 How can I help with your document?\";\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
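In this copy, `diffPartialText` appears with its wrapping markup stripped by extraction (both the added and removed branches render as bare `${part.value}` template literals). A minimal sketch of the apparent intent, assuming additions are wrapped in italic and removals in strike-through tags (the agent prompt above reserves those two styles for exactly this purpose), is:

```typescript
import { diffWords } from "diff";

// Sketch only: the tag choices <em>/<s> are assumptions, since the original
// wrappers are not visible in this copy of the patch.
function diffPartialTextSketch(oldText: string, newText: string, isComplete = false): string {
  // While the agent is still streaming, compare only against the prefix of
  // the old text that the new text has reached so far.
  let oldTextToCompare = oldText;
  if (oldText.length > newText.length && !isComplete) {
    oldTextToCompare = oldText.slice(0, newText.length);
  }

  let result = "";
  for (const part of diffWords(oldTextToCompare, newText)) {
    if (part.added) {
      result += `<em>${part.value}</em>`; // assumed: italic marks insertions
    } else if (part.removed) {
      result += `<s>${part.value}</s>`; // assumed: strike-through marks deletions
    } else {
      result += part.value;
    }
  }

  // Re-attach the tail of the old text the stream has not replaced yet.
  if (oldText.length > newText.length && !isComplete) {
    result += oldText.slice(newText.length);
  }
  return result;
}

console.log(diffPartialTextSketch("The quick brown fox", "The slow brown fox"));
// -> roughly "The <s>quick </s><em>slow </em>brown fox" (diffWords keeps whitespace)
```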
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n \n
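Every feature page in this patch repeats the same runtime-resolution preamble: `runtimeUrl` defaults to `/api/copilotkit/${integrationId}` unless `NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL` is set, in which case the page also looks up a `publicApiKey` in `cloudAgents`. A hypothetical consolidation (the helper `resolveRuntime` is not part of the patch; the logic mirrors the pages verbatim) could look like:

```typescript
import { cloudAgents } from "@/cloudAgents";

// Hypothetical helper: resolves where CopilotKit should connect for a given
// integration, matching the per-page conditional in this patch.
export function resolveRuntime(integrationId: string): {
  runtimeUrl: string;
  publicApiKey?: string;
} {
  // Default: proxy through the dojo's own Next.js API route.
  if (!process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {
    return { runtimeUrl: `/api/copilotkit/${integrationId}` };
  }
  // Cloud mode: use the hosted runtime and attach the matching agent's key.
  return {
    runtimeUrl: process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL,
    publicApiKey: cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey,
  };
}
```

Each page could then use `const { runtimeUrl, publicApiKey } = resolveRuntime(integrationId);` instead of repeating the conditional.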
\n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction(\n {\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n },\n [agentState?.document],\n );\n\n // Action to write the document.\n useCopilotAction(\n {\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n },\n [agentState?.document],\n );\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n \n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n \n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n \n )}\n \n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", "language": "typescript", "type": "file" }, @@ -506,7 +506,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n return Command(\n goto=\"chat_node\"\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 
2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n \"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", + "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph 
import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n 
\"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", "language": "python", "type": "file" }, @@ -520,7 +520,7 @@ "langgraph::shared_state": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => 
{\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n \n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n
\n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", + "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n\n const chatTitle = \"AI Recipe Assistant\";\n const chatDescription = \"Ask me to craft recipes\";\n const initialLabel = \"Hi 👋 How can I help with your recipe?\";\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, 
preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n \n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n
\n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -538,7 +538,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any]\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
dont describe the recipe, just say what you did.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"recipe\",\n \"tool\": \"generate_recipe\",\n \"tool_argument\": \"recipe\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_recipe\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n # Check if args is already a dict or needs to be parsed\n tool_call_args = (tool_call[\"args\"]\n if isinstance(tool_call[\"args\"], dict)\n else json.loads(tool_call[\"args\"]))\n\n if tool_call[\"name\"] == \"generate_recipe\":\n # Update recipe state with tool_call_args\n recipe_data = tool_call_args[\"recipe\"]\n\n # If we have an existing recipe, update it\n if \"recipe\" in state and state[\"recipe\"] is not None:\n recipe = state[\"recipe\"]\n for key, value in recipe_data.items():\n if value is not None: # Only update fields that were provided\n recipe[key] = value\n else:\n # Create a new recipe\n recipe = {\n \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n \"ingredients\": recipe_data.get(\"ingredients\", []),\n \"instructions\": recipe_data.get(\"instructions\", [])\n }\n\n # Add tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Recipe generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Explicitly emit the updated state to ensure it's shared with frontend\n state[\"recipe\"] = recipe\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n # Return command with updated recipe\n return Command(\n goto=\"start_node\",\n update={\n \"messages\": messages,\n \"recipe\": recipe\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"recipe\": state[\"recipe\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n if \"tools\" not in state:\n state[\"tools\"] = []\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"],\n \"tools\": state[\"tools\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
dont describe the recipe, just say what you did.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"recipe\",\n \"tool\": \"generate_recipe\",\n \"tool_argument\": \"recipe\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_recipe\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n # Check if args is already a dict or needs to be parsed\n tool_call_args = (tool_call[\"args\"]\n if isinstance(tool_call[\"args\"], dict)\n else json.loads(tool_call[\"args\"]))\n\n if tool_call[\"name\"] == \"generate_recipe\":\n # Update recipe state with tool_call_args\n recipe_data = tool_call_args[\"recipe\"]\n\n # If we have an existing recipe, update it\n if \"recipe\" in state and state[\"recipe\"] is not None:\n recipe = state[\"recipe\"]\n for key, value in recipe_data.items():\n if value is not None: # Only update fields that were provided\n recipe[key] = value\n else:\n # Create a new recipe\n recipe = {\n \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n \"ingredients\": recipe_data.get(\"ingredients\", []),\n \"instructions\": recipe_data.get(\"instructions\", [])\n }\n\n # Add tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Recipe generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Explicitly emit the updated state to ensure it's shared with frontend\n state[\"recipe\"] = recipe\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n # Return command with updated recipe\n return Command(\n goto=\"start_node\",\n update={\n \"messages\": messages,\n \"recipe\": recipe\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"recipe\": state[\"recipe\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -552,7 +552,7 @@ "langgraph::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n \n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", + "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [];\n english: string[] | [];\n image_names: string[] | [];\n selectedImage: string | null;\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial;\n setHaikus: Dispatch>;\n haikus: GenerateHaiku[];\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n const chatTitle = \"Haiku Generator\";\n const chatDescription = \"Ask me to create haikus\";\n const initialLabel = \"I'm a haiku generator 👋. How can I help you?\";\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && (\n \n )}\n \n \n );\n}\n\nfunction MobileChat({\n chatTitle,\n chatDescription,\n initialLabel,\n}: {\n chatTitle: string;\n chatDescription: string;\n initialLabel: string;\n}) {\n const defaultChatHeight = 50;\n\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen &&
setIsChatOpen(false)} />}\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

{generatedHaiku.english?.[index]}

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus((prevHaikus) => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName,\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n
\n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_names: [],\n selectedImage: null,\n },\n ]);\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction(\n {\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join(\"\\n -\")}`,\n },\n ],\n followUp: false,\n handler: async ({\n japanese,\n english,\n image_names,\n }: {\n japanese: string[];\n english: string[];\n image_names: string[];\n }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus((prev) =>\n [newHaiku, ...prev].filter((h) => h.english[0] !== \"A placeholder verse—\"),\n );\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return ;\n },\n },\n [haikus],\n );\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n \n
\n {haikus.map(\n (haiku, index) =>\n (haikus.length == 1 || index == activeIndex) && (\n \n {haiku.japanese.map((line, lineIndex) => (\n \n \n {line}\n

\n \n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n \n {haiku.image_names.map((imageName, imgIndex) => (\n \n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName };\n } else {\n return { ...h };\n }\n });\n })\n }\n />\n ))}\n
\n )}\n \n ),\n )}\n \n \n \n );\n}\n\nfunction Thumbnails({\n haikus,\n activeIndex,\n setActiveIndex,\n isMobile,\n}: {\n haikus: Haiku[];\n activeIndex: number;\n setActiveIndex: (index: number) => void;\n isMobile: boolean;\n}) {\n if (haikus.length == 0 || isMobile) {\n return null;\n }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n
\n

{line}

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n
\n ))}\n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -570,7 +570,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. 
It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -584,7 +584,7 @@ "langgraph::agentic_chat_reasoning": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { ChevronDown } from \"lucide-react\";\nimport { Button } from \"@/components/ui/button\";\nimport {\n DropdownMenu,\n DropdownMenuContent,\n DropdownMenuItem,\n DropdownMenuLabel,\n DropdownMenuSeparator,\n DropdownMenuTrigger,\n} from \"@/components/ui/dropdown-menu\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n model: string;\n}\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"agentic_chat_reasoning\",\n initialState: {\n model: \"OpenAI\",\n },\n });\n\n // Initialize model if not set\n const selectedModel = agentState?.model || \"OpenAI\";\n\n const handleModelChange = (model: string) => {\n setAgentState({ model });\n };\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n },\n });\n\n return (\n
\n {/* Reasoning Model Dropdown */}\n
\n
\n
\n \n Reasoning Model:\n \n \n \n \n \n \n Select Model\n \n handleModelChange(\"OpenAI\")}>\n OpenAI\n \n handleModelChange(\"Anthropic\")}>\n Anthropic\n \n handleModelChange(\"Gemini\")}>\n Gemini\n \n \n \n
\n
\n
\n\n {/* Chat Container */}\n
\n
\n \n
\n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { ChevronDown } from \"lucide-react\";\nimport { cloudAgents } from \"@/cloudAgents\";\nimport { Button } from \"@/components/ui/button\";\nimport {\n DropdownMenu,\n DropdownMenuContent,\n DropdownMenuItem,\n DropdownMenuLabel,\n DropdownMenuSeparator,\n DropdownMenuTrigger,\n} from \"@/components/ui/dropdown-menu\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n model: string;\n}\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"agentic_chat_reasoning\",\n initialState: {\n model: \"OpenAI\",\n },\n });\n\n // Initialize model if not set\n const selectedModel = agentState?.model || \"OpenAI\";\n\n const handleModelChange = (model: string) => {\n setAgentState({ model });\n };\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n },\n });\n\n return (\n
\n {/* Reasoning Model Dropdown */}\n
\n
\n
\n \n Reasoning Model:\n \n \n \n \n \n \n Select Model\n \n handleModelChange(\"OpenAI\")}>\n OpenAI\n \n handleModelChange(\"Anthropic\")}>\n Anthropic\n \n handleModelChange(\"Gemini\")}>\n Gemini\n \n \n \n
\n
\n
\n\n {/* Chat Container */}\n
\n
\n \n
\n
\n
\n );\n};\n\nexport default AgenticChat;\n", "language": "typescript", "type": "file" }, @@ -602,7 +602,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any]\n model: str\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n\n # 1. Define the model\n model = ChatOpenAI(model=\"o3\")\n if state[\"model\"] == \"Anthropic\":\n model = ChatAnthropic(\n model=\"claude-sonnet-4-20250514\",\n thinking={\"type\": \"enabled\", \"budget_tokens\": 2000}\n )\n elif state[\"model\"] == \"Gemini\":\n model = ChatGoogleGenerativeAI(model=\"gemini-2.5-pro\", thinking_budget=1024)\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. 
We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()", + "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n model: str\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n\n # 1. Define the model\n model = ChatOpenAI(model=\"o3\")\n if state[\"model\"] == \"Anthropic\":\n model = ChatAnthropic(\n model=\"claude-sonnet-4-20250514\",\n thinking={\"type\": \"enabled\", \"budget_tokens\": 2000}\n )\n elif state[\"model\"] == \"Gemini\":\n model = ChatGoogleGenerativeAI(model=\"gemini-2.5-pro\", thinking_budget=1024)\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. 
We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()", "language": "python", "type": "file" }, @@ -611,7 +611,7 @@ "langgraph::subgraphs": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SubgraphsProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\n// Travel planning data types\ninterface Flight {\n airline: string;\n arrival: string;\n departure: string;\n duration: string;\n price: string;\n}\n\ninterface Hotel {\n location: string;\n name: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n description: string;\n location: string;\n type: string;\n}\n\ninterface Itinerary {\n hotel?: Hotel;\n flight?: Flight;\n experiences?: Experience[];\n}\n\ntype AvailableAgents = 'flights' | 'hotels' | 'experiences' | 'supervisor'\n\ninterface TravelAgentState {\n experiences: Experience[],\n flights: Flight[],\n hotels: Hotel[],\n itinerary: Itinerary\n planning_step: string\n active_agent: AvailableAgents\n}\n\nconst INITIAL_STATE: TravelAgentState = {\n itinerary: {},\n experiences: [],\n flights: [],\n hotels: [],\n planning_step: \"start\",\n active_agent: 'supervisor'\n};\n\ninterface InterruptEvent {\n message: string;\n options: TAgent extends 'flights' ? Flight[] : TAgent extends 'hotels' ? Hotel[] : never,\n recommendation: TAgent extends 'flights' ? Flight : TAgent extends 'hotels' ? Hotel : never,\n agent: TAgent\n}\n\nfunction InterruptHumanInTheLoop({\n event,\n resolve,\n}: {\n event: { value: InterruptEvent };\n resolve: (value: string) => void;\n}) {\n const { message, options, agent, recommendation } = event.value;\n\n // Format agent name with emoji\n const formatAgentName = (agent: string) => {\n switch (agent) {\n case 'flights': return 'Flights Agent';\n case 'hotels': return 'Hotels Agent';\n case 'experiences': return 'Experiences Agent';\n default: return `${agent} Agent`;\n }\n };\n\n const handleOptionSelect = (option: any) => {\n resolve(JSON.stringify(option));\n };\n\n return (\n
\n

{formatAgentName(agent)}: {message}

\n\n
\n {options.map((opt, idx) => {\n if ('airline' in opt) {\n const isRecommended = (recommendation as Flight).airline === opt.airline;\n // Flight options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.airline}\n {opt.price}\n
\n
\n {opt.departure} → {opt.arrival}\n
\n
\n {opt.duration}\n
\n \n );\n }\n const isRecommended = (recommendation as Hotel).name === opt.name;\n\n // Hotel options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.name}\n {opt.rating}\n
\n
\n 📍 {opt.location}\n
\n
\n {opt.price_per_night}\n
\n \n );\n })}\n
\n
\n )\n}\n\nexport default function Subgraphs({ params }: SubgraphsProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight);\n\n const chatTitle = 'Travel Planning Assistant';\n const chatDescription = 'Plan your perfect trip with AI specialists';\n const initialLabel = 'Hi! ✈️ Ready to plan an amazing trip? Try saying \"Plan a trip to Paris\" or \"Find me flights to Tokyo\"';\n\n return (\n \n
\n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight);\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nfunction TravelPlanner() {\n const { isMobile } = useMobileView();\n const { state: agentState, nodeName } = useCoAgent({\n name: \"subgraphs\",\n initialState: INITIAL_STATE,\n config: {\n streamSubgraphs: true,\n }\n });\n\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n\n // Current itinerary strip\n const ItineraryStrip = () => {\n const selectedFlight = agentState?.itinerary?.flight;\n const selectedHotel = agentState?.itinerary?.hotel;\n const hasExperiences = agentState?.experiences?.length > 0;\n\n return (\n
\n
Current Itinerary:
\n
\n
\n 📍\n Amsterdam → San Francisco\n
\n {selectedFlight && (\n
\n ✈️\n {selectedFlight.airline} - {selectedFlight.price}\n
\n )}\n {selectedHotel && (\n
\n 🏨\n {selectedHotel.name}\n
\n )}\n {hasExperiences && (\n
\n 🎯\n {agentState.experiences.length} experiences planned\n
\n )}\n
\n
\n );\n };\n\n // Compact agent status\n const AgentStatus = () => {\n let activeAgent = 'supervisor';\n if (nodeName?.includes('flights_agent')) {\n activeAgent = 'flights';\n }\n if (nodeName?.includes('hotels_agent')) {\n activeAgent = 'hotels';\n }\n if (nodeName?.includes('experiences_agent')) {\n activeAgent = 'experiences';\n }\n return (\n
\n
Active Agent:
\n
\n
\n 👨‍💼\n Supervisor\n
\n
\n ✈️\n Flights\n
\n
\n 🏨\n Hotels\n
\n
\n 🎯\n Experiences\n
\n
\n
\n )\n };\n\n // Travel details component\n const TravelDetails = () => (\n
\n
\n

✈️ Flight Options

\n
\n {agentState?.flights?.length > 0 ? (\n agentState.flights.map((flight, index) => (\n
\n {flight.airline}:\n {flight.departure} → {flight.arrival} ({flight.duration}) - {flight.price}\n
\n ))\n ) : (\n

No flights found yet

\n )}\n {agentState?.itinerary?.flight && (\n
\n Selected: {agentState.itinerary.flight.airline} - {agentState.itinerary.flight.price}\n
\n )}\n
\n
\n\n
\n

🏨 Hotel Options

\n
\n {agentState?.hotels?.length > 0 ? (\n agentState.hotels.map((hotel, index) => (\n
\n {hotel.name}:\n {hotel.location} - {hotel.price_per_night} ({hotel.rating})\n
\n ))\n ) : (\n

No hotels found yet

\n )}\n {agentState?.itinerary?.hotel && (\n
\n Selected: {agentState.itinerary.hotel.name} - {agentState.itinerary.hotel.price_per_night}\n
\n )}\n
\n
\n\n
\n

🎯 Experiences

\n
\n {agentState?.experiences?.length > 0 ? (\n agentState.experiences.map((experience, index) => (\n
\n
{experience.name}
\n
{experience.type}
\n
{experience.description}
\n
Location: {experience.location}
\n
\n ))\n ) : (\n

No experiences planned yet

\n )}\n
\n
\n
\n );\n\n return (\n
\n \n \n \n
\n );\n}", + "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface SubgraphsProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\n// Travel planning data types\ninterface Flight {\n airline: string;\n arrival: string;\n departure: string;\n duration: string;\n price: string;\n}\n\ninterface Hotel {\n location: string;\n name: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n description: string;\n location: string;\n type: string;\n}\n\ninterface Itinerary {\n hotel?: Hotel;\n flight?: Flight;\n experiences?: Experience[];\n}\n\ntype AvailableAgents = \"flights\" | \"hotels\" | \"experiences\" | \"supervisor\";\n\ninterface TravelAgentState {\n experiences: Experience[];\n flights: Flight[];\n hotels: Hotel[];\n itinerary: Itinerary;\n planning_step: string;\n active_agent: AvailableAgents;\n}\n\nconst INITIAL_STATE: TravelAgentState = {\n itinerary: {},\n experiences: [],\n flights: [],\n hotels: [],\n planning_step: \"start\",\n active_agent: \"supervisor\",\n};\n\ninterface InterruptEvent {\n message: string;\n options: TAgent extends \"flights\" ? Flight[] : TAgent extends \"hotels\" ? Hotel[] : never;\n recommendation: TAgent extends \"flights\" ? Flight : TAgent extends \"hotels\" ? Hotel : never;\n agent: TAgent;\n}\n\nfunction InterruptHumanInTheLoop({\n event,\n resolve,\n}: {\n event: { value: InterruptEvent };\n resolve: (value: string) => void;\n}) {\n const { message, options, agent, recommendation } = event.value;\n\n // Format agent name with emoji\n const formatAgentName = (agent: string) => {\n switch (agent) {\n case \"flights\":\n return \"Flights Agent\";\n case \"hotels\":\n return \"Hotels Agent\";\n case \"experiences\":\n return \"Experiences Agent\";\n default:\n return `${agent} Agent`;\n }\n };\n\n const handleOptionSelect = (option: any) => {\n resolve(JSON.stringify(option));\n };\n\n return (\n
\n

\n {formatAgentName(agent)}: {message}\n

\n\n
\n {options.map((opt, idx) => {\n if (\"airline\" in opt) {\n const isRecommended = (recommendation as Flight).airline === opt.airline;\n // Flight options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.airline}\n {opt.price}\n
\n
\n {opt.departure} → {opt.arrival}\n
\n
{opt.duration}
\n \n );\n }\n const isRecommended = (recommendation as Hotel).name === opt.name;\n\n // Hotel options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.name}\n {opt.rating}\n
\n
📍 {opt.location}
\n
{opt.price_per_night}
\n \n );\n })}\n
\n
\n );\n}\n\nexport default function Subgraphs({ params }: SubgraphsProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n\n const chatTitle = \"Travel Planning Assistant\";\n const chatDescription = \"Plan your perfect trip with AI specialists\";\n const initialLabel =\n 'Hi! ✈️ Ready to plan an amazing trip? Try saying \"Plan a trip to Paris\" or \"Find me flights to Tokyo\"';\n\n return (\n \n
\n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight);\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nfunction TravelPlanner() {\n const { isMobile } = useMobileView();\n const { state: agentState, nodeName } = useCoAgent({\n name: \"subgraphs\",\n initialState: INITIAL_STATE,\n config: {\n streamSubgraphs: true,\n },\n });\n\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n\n // Current itinerary strip\n const ItineraryStrip = () => {\n const selectedFlight = agentState?.itinerary?.flight;\n const selectedHotel = agentState?.itinerary?.hotel;\n const hasExperiences = agentState?.experiences?.length > 0;\n\n return (\n
\n
Current Itinerary:
\n
\n
\n 📍\n Amsterdam → San Francisco\n
\n {selectedFlight && (\n
\n ✈️\n \n {selectedFlight.airline} - {selectedFlight.price}\n \n
\n )}\n {selectedHotel && (\n
\n 🏨\n {selectedHotel.name}\n
\n )}\n {hasExperiences && (\n
\n 🎯\n {agentState.experiences.length} experiences planned\n
\n )}\n
\n
\n );\n };\n\n // Compact agent status\n const AgentStatus = () => {\n let activeAgent = \"supervisor\";\n if (nodeName?.includes(\"flights_agent\")) {\n activeAgent = \"flights\";\n }\n if (nodeName?.includes(\"hotels_agent\")) {\n activeAgent = \"hotels\";\n }\n if (nodeName?.includes(\"experiences_agent\")) {\n activeAgent = \"experiences\";\n }\n return (\n
\n
Active Agent:
\n
\n \n 👨‍💼\n Supervisor\n
\n \n ✈️\n Flights\n
\n \n 🏨\n Hotels\n \n \n 🎯\n Experiences\n \n \n \n );\n };\n\n // Travel details component\n const TravelDetails = () => (\n
\n
\n

✈️ Flight Options

\n
\n {agentState?.flights?.length > 0 ? (\n agentState.flights.map((flight, index) => (\n
\n {flight.airline}:\n \n {flight.departure} → {flight.arrival} ({flight.duration}) - {flight.price}\n \n
\n ))\n ) : (\n

No flights found yet

\n )}\n {agentState?.itinerary?.flight && (\n
\n Selected: {agentState.itinerary.flight.airline} -{\" \"}\n {agentState.itinerary.flight.price}\n
\n )}\n
\n
\n\n
\n

🏨 Hotel Options

\n
\n {agentState?.hotels?.length > 0 ? (\n agentState.hotels.map((hotel, index) => (\n
\n {hotel.name}:\n \n {hotel.location} - {hotel.price_per_night} ({hotel.rating})\n \n
\n ))\n ) : (\n

No hotels found yet

\n )}\n {agentState?.itinerary?.hotel && (\n
\n Selected: {agentState.itinerary.hotel.name} -{\" \"}\n {agentState.itinerary.hotel.price_per_night}\n
\n )}\n
\n
\n\n
\n

🎯 Experiences

\n
\n {agentState?.experiences?.length > 0 ? (\n agentState.experiences.map((experience, index) => (\n
\n
{experience.name}
\n
{experience.type}
\n
{experience.description}
\n
Location: {experience.location}
\n
\n ))\n ) : (\n

No experiences planned yet

\n )}\n
\n
\n
\n );\n\n return (\n
\n \n \n \n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -629,7 +629,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\nThe supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n\"\"\"\n\nfrom typing import Dict, List, Any, Optional, Annotated, Union\nfrom dataclasses import dataclass\nimport json\nimport os\nfrom pydantic import BaseModel, Field\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\n\n# OpenAI imports\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage, AIMessage\n\ndef create_interrupt(message: str, options: List[Any], recommendation: Any, agent: str):\n return interrupt({\n \"message\": message,\n \"options\": options,\n \"recommendation\": recommendation,\n \"agent\": agent,\n })\n\n# State schema for travel planning\n@dataclass\nclass Flight:\n airline: str\n departure: str\n arrival: str\n price: str\n duration: str\n\n@dataclass\nclass Hotel:\n name: str\n location: str\n price_per_night: str\n rating: str\n\n@dataclass\nclass Experience:\n name: str\n type: str # \"restaurant\" or \"activity\"\n description: str\n location: str\n\ndef merge_itinerary(left: Union[dict, None] = None, right: Union[dict, None] = None) -> dict:\n \"\"\"Custom reducer to merge shopping cart updates.\"\"\"\n if not left:\n left = {}\n if not right:\n right = {}\n\n return {**left, **right}\n\nclass TravelAgentState(MessagesState):\n \"\"\"Shared state for the travel agent system\"\"\"\n # Travel request details\n origin: str = \"\"\n destination: str = \"\"\n\n # Results from each agent\n flights: List[Flight] = None\n hotels: List[Hotel] = None\n experiences: List[Experience] = None\n\n itinerary: Annotated[dict, merge_itinerary] = None\n\n # Tools available to all agents\n tools: List[Any] = None\n\n # Supervisor routing\n next_agent: Optional[str] = None\n\n# Static data for demonstration\nSTATIC_FLIGHTS = [\n Flight(\"KLM\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$650\", \"11h 30m\"),\n Flight(\"United\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$720\", \"12h 15m\")\n]\n\nSTATIC_HOTELS = [\n Hotel(\"Hotel Zephyr\", \"Fisherman's Wharf\", \"$280/night\", \"4.2 stars\"),\n Hotel(\"The Ritz-Carlton\", \"Nob Hill\", \"$550/night\", \"4.8 stars\"),\n Hotel(\"Hotel Zoe\", \"Union Square\", \"$320/night\", \"4.4 stars\")\n]\n\nSTATIC_EXPERIENCES = [\n Experience(\"Pier 39\", \"activity\", \"Iconic waterfront destination with shops and sea lions\", \"Fisherman's Wharf\"),\n Experience(\"Golden Gate Bridge\", \"activity\", \"World-famous suspension bridge with stunning views\", \"Golden Gate\"),\n Experience(\"Swan Oyster Depot\", \"restaurant\", \"Historic seafood counter serving fresh oysters\", \"Polk Street\"),\n Experience(\"Tartine Bakery\", \"restaurant\", \"Artisanal bakery famous for bread and pastries\", \"Mission District\")\n]\n\n# Flights finder subgraph\nasync def flights_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds flight options\"\"\"\n\n # Simulate flight search with static data\n flights = STATIC_FLIGHTS\n\n selected_flight = state.get('itinerary', {}).get('flight', None)\n if not selected_flight:\n selected_flight = create_interrupt(\n message=f\"\"\"\n Found {len(flights)} flight options 
from {state.get('origin', 'Amsterdam')} to {state.get('destination', 'San Francisco')}.\n I recommend choosing the flight by {flights[0].airline} since it's known to be on time and cheaper.\n \"\"\",\n options=flights,\n recommendation=flights[0],\n agent=\"flights\"\n )\n\n if isinstance(selected_flight, str):\n selected_flight = json.loads(selected_flight)\n return Command(\n goto=END,\n update={\n \"flights\": flights,\n \"itinerary\": {\n \"flight\": selected_flight\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Flights Agent: Great. I'll book you the {selected_flight[\"airline\"]} flight from {selected_flight[\"departure\"]} to {selected_flight[\"arrival\"]}.\"\n }]\n }\n )\n\n# Hotels finder subgraph\nasync def hotels_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds hotel options\"\"\"\n\n # Simulate hotel search with static data\n hotels = STATIC_HOTELS\n selected_hotel = state.get('itinerary', {}).get('hotel', None)\n if not selected_hotel:\n selected_hotel = create_interrupt(\n message=f\"\"\"\n Found {len(hotels)} accommodation options in {state.get('destination', 'San Francisco')}.\n I recommend choosing the {hotels[2].name} since it strikes the balance between rating, price, and location.\n \"\"\",\n options=hotels,\n recommendation=hotels[2],\n agent=\"hotels\"\n )\n\n if isinstance(selected_hotel, str):\n selected_hotel = json.loads(selected_hotel)\n return Command(\n goto=END,\n update={\n \"hotels\": hotels,\n \"itinerary\": {\n \"hotel\": selected_hotel\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Hotels Agent: Excellent choice! You'll like {selected_hotel[\"name\"]}.\"\n }]\n }\n )\n\n# Experiences finder subgraph\nasync def experiences_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds restaurant and activity recommendations\"\"\"\n\n # Filter experiences (2 restaurants, 2 activities)\n restaurants = [exp for exp in STATIC_EXPERIENCES if exp.type == \"restaurant\"][:2]\n activities = [exp for exp in STATIC_EXPERIENCES if exp.type == \"activity\"][:2]\n experiences = restaurants + activities\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n itinerary = state.get(\"itinerary\", {})\n\n system_prompt = f\"\"\"\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now, is to let the user know of your findings.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flight chosen: {itinerary.get(\"hotel\", None)}\n - Hotel chosen: {itinerary.get(\"hotel\", None)}\n - activities found: {activities}\n - restaurants found: {restaurants}\n \"\"\"\n\n # Get supervisor decision\n response = await model.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"experiences\": experiences,\n \"messages\": state[\"messages\"] + [response]\n }\n )\n\nclass SupervisorResponseFormatter(BaseModel):\n \"\"\"Always use this tool to structure your response to the user.\"\"\"\n answer: str = Field(description=\"The answer to the user\")\n next_agent: str | None = Field(description=\"The agent to go to. 
Not required if you do not want to route to another agent.\")\n\n# Supervisor agent\nasync def supervisor_agent(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Main supervisor that coordinates all subgraphs\"\"\"\n\n itinerary = state.get(\"itinerary\", {})\n\n # Check what's already completed\n has_flights = itinerary.get(\"flight\", None) is not None\n has_hotels = itinerary.get(\"hotel\", None) is not None\n has_experiences = state.get(\"experiences\", None) is not None\n\n system_prompt = f\"\"\"\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flights found: {has_flights}\n - Hotels found: {has_hotels}\n - Experiences found: {has_experiences}\n - Itinerary (Things that the user has already confirmed selection on): {json.dumps(itinerary, indent=2)}\n \n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options \n - experiences_agent: Finds restaurant and activity recommendations\n - {END}: Mark task as complete when all information is gathered\n \n You must route to the appropriate agent based on what's missing. Once all agents have completed their tasks, route to 'complete'.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Bind the routing tool\n model_with_tools = model.bind_tools(\n [SupervisorResponseFormatter],\n parallel_tool_calls=False,\n )\n\n # Get supervisor decision\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls for routing\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n if isinstance(tool_call, dict):\n tool_call_args = tool_call[\"args\"]\n else:\n tool_call_args = tool_call.args\n\n next_agent = tool_call_args[\"next_agent\"]\n\n # Add tool response\n tool_response = {\n \"role\": \"tool\",\n \"content\": f\"Routing to {next_agent} and providing the answer\",\n \"tool_call_id\": tool_call.id if hasattr(tool_call, 'id') else tool_call[\"id\"]\n }\n\n messages = messages + [tool_response, AIMessage(content=tool_call_args[\"answer\"])]\n\n if next_agent is not None:\n return Command(goto=next_agent)\n\n # Fallback if no tool call\n return Command(\n goto=END,\n update={\"messages\": messages}\n )\n\n# Create subgraphs\nflights_graph = StateGraph(TravelAgentState)\nflights_graph.add_node(\"flights_agent_chat_node\", flights_finder)\nflights_graph.set_entry_point(\"flights_agent_chat_node\")\nflights_graph.add_edge(START, \"flights_agent_chat_node\")\nflights_graph.add_edge(\"flights_agent_chat_node\", END)\nflights_subgraph = flights_graph.compile()\n\nhotels_graph = StateGraph(TravelAgentState)\nhotels_graph.add_node(\"hotels_agent_chat_node\", hotels_finder)\nhotels_graph.set_entry_point(\"hotels_agent_chat_node\")\nhotels_graph.add_edge(START, \"hotels_agent_chat_node\")\nhotels_graph.add_edge(\"hotels_agent_chat_node\", END)\nhotels_subgraph = hotels_graph.compile()\n\nexperiences_graph = StateGraph(TravelAgentState)\nexperiences_graph.add_node(\"experiences_agent_chat_node\", experiences_finder)\nexperiences_graph.set_entry_point(\"experiences_agent_chat_node\")\nexperiences_graph.add_edge(START, 
\"experiences_agent_chat_node\")\nexperiences_graph.add_edge(\"experiences_agent_chat_node\", END)\nexperiences_subgraph = experiences_graph.compile()\n\n# Main supervisor workflow\nworkflow = StateGraph(TravelAgentState)\n\n# Add supervisor and subgraphs as nodes\nworkflow.add_node(\"supervisor\", supervisor_agent)\nworkflow.add_node(\"flights_agent\", flights_subgraph)\nworkflow.add_node(\"hotels_agent\", hotels_subgraph)\nworkflow.add_node(\"experiences_agent\", experiences_subgraph)\n\n# Set entry point\nworkflow.set_entry_point(\"supervisor\")\nworkflow.add_edge(START, \"supervisor\")\n\n# Add edges back to supervisor after each subgraph\nworkflow.add_edge(\"flights_agent\", \"supervisor\")\nworkflow.add_edge(\"hotels_agent\", \"supervisor\")\nworkflow.add_edge(\"experiences_agent\", \"supervisor\")\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\nThe supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n\"\"\"\n\nfrom typing import Dict, List, Any, Optional, Annotated, Union\nfrom dataclasses import dataclass\nimport json\nimport os\nfrom pydantic import BaseModel, Field\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\n\n# OpenAI imports\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage, AIMessage\n\ndef create_interrupt(message: str, options: List[Any], recommendation: Any, agent: str):\n return interrupt({\n \"message\": message,\n \"options\": options,\n \"recommendation\": recommendation,\n \"agent\": agent,\n })\n\n# State schema for travel planning\n@dataclass\nclass Flight:\n airline: str\n departure: str\n arrival: str\n price: str\n duration: str\n\n@dataclass\nclass Hotel:\n name: str\n location: str\n price_per_night: str\n rating: str\n\n@dataclass\nclass Experience:\n name: str\n type: str # \"restaurant\" or \"activity\"\n description: str\n location: str\n\ndef merge_itinerary(left: Union[dict, None] = None, right: Union[dict, None] = None) -> dict:\n \"\"\"Custom reducer to merge shopping cart updates.\"\"\"\n if not left:\n left = {}\n if not right:\n right = {}\n\n return {**left, **right}\n\nclass TravelAgentState(MessagesState):\n \"\"\"Shared state for the travel agent system\"\"\"\n # Travel request details\n origin: str = \"\"\n destination: str = \"\"\n\n # Results from each agent\n flights: List[Flight] = None\n hotels: List[Hotel] = None\n experiences: List[Experience] = None\n\n itinerary: Annotated[dict, merge_itinerary] = None\n\n # Tools available to all agents\n tools: List[Any] = []\n\n # Supervisor routing\n next_agent: Optional[str] = None\n\n# Static data for demonstration\nSTATIC_FLIGHTS = [\n Flight(\"KLM\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$650\", \"11h 30m\"),\n 
Flight(\"United\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$720\", \"12h 15m\")\n]\n\nSTATIC_HOTELS = [\n Hotel(\"Hotel Zephyr\", \"Fisherman's Wharf\", \"$280/night\", \"4.2 stars\"),\n Hotel(\"The Ritz-Carlton\", \"Nob Hill\", \"$550/night\", \"4.8 stars\"),\n Hotel(\"Hotel Zoe\", \"Union Square\", \"$320/night\", \"4.4 stars\")\n]\n\nSTATIC_EXPERIENCES = [\n Experience(\"Pier 39\", \"activity\", \"Iconic waterfront destination with shops and sea lions\", \"Fisherman's Wharf\"),\n Experience(\"Golden Gate Bridge\", \"activity\", \"World-famous suspension bridge with stunning views\", \"Golden Gate\"),\n Experience(\"Swan Oyster Depot\", \"restaurant\", \"Historic seafood counter serving fresh oysters\", \"Polk Street\"),\n Experience(\"Tartine Bakery\", \"restaurant\", \"Artisanal bakery famous for bread and pastries\", \"Mission District\")\n]\n\n# Flights finder subgraph\nasync def flights_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds flight options\"\"\"\n\n # Simulate flight search with static data\n flights = STATIC_FLIGHTS\n\n selected_flight = state.get('itinerary', {}).get('flight', None)\n if not selected_flight:\n selected_flight = create_interrupt(\n message=f\"\"\"\n Found {len(flights)} flight options from {state.get('origin', 'Amsterdam')} to {state.get('destination', 'San Francisco')}.\n I recommend choosing the flight by {flights[0].airline} since it's known to be on time and cheaper.\n \"\"\",\n options=flights,\n recommendation=flights[0],\n agent=\"flights\"\n )\n\n if isinstance(selected_flight, str):\n selected_flight = json.loads(selected_flight)\n return Command(\n goto=END,\n update={\n \"flights\": flights,\n \"itinerary\": {\n \"flight\": selected_flight\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Flights Agent: Great. I'll book you the {selected_flight[\"airline\"]} flight from {selected_flight[\"departure\"]} to {selected_flight[\"arrival\"]}.\"\n }]\n }\n )\n\n# Hotels finder subgraph\nasync def hotels_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds hotel options\"\"\"\n\n # Simulate hotel search with static data\n hotels = STATIC_HOTELS\n selected_hotel = state.get('itinerary', {}).get('hotel', None)\n if not selected_hotel:\n selected_hotel = create_interrupt(\n message=f\"\"\"\n Found {len(hotels)} accommodation options in {state.get('destination', 'San Francisco')}.\n I recommend choosing the {hotels[2].name} since it strikes the balance between rating, price, and location.\n \"\"\",\n options=hotels,\n recommendation=hotels[2],\n agent=\"hotels\"\n )\n\n if isinstance(selected_hotel, str):\n selected_hotel = json.loads(selected_hotel)\n return Command(\n goto=END,\n update={\n \"hotels\": hotels,\n \"itinerary\": {\n \"hotel\": selected_hotel\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Hotels Agent: Excellent choice! 
You'll like {selected_hotel[\"name\"]}.\"\n }]\n }\n )\n\n# Experiences finder subgraph\nasync def experiences_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds restaurant and activity recommendations\"\"\"\n\n # Filter experiences (2 restaurants, 2 activities)\n restaurants = [exp for exp in STATIC_EXPERIENCES if exp.type == \"restaurant\"][:2]\n activities = [exp for exp in STATIC_EXPERIENCES if exp.type == \"activity\"][:2]\n experiences = restaurants + activities\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n itinerary = state.get(\"itinerary\", {})\n\n system_prompt = f\"\"\"\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now is let the user know of your findings.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flight chosen: {itinerary.get(\"flight\", None)}\n - Hotel chosen: {itinerary.get(\"hotel\", None)}\n - activities found: {activities}\n - restaurants found: {restaurants}\n \"\"\"\n\n # Get the experiences agent's response\n response = await model.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"experiences\": experiences,\n \"messages\": state[\"messages\"] + [response]\n }\n )\n\nclass SupervisorResponseFormatter(BaseModel):\n \"\"\"Always use this tool to structure your response to the user.\"\"\"\n answer: str = Field(description=\"The answer to the user\")\n next_agent: str | None = Field(description=\"The agent to go to. Not required if you do not want to route to another agent.\")\n\n# Supervisor agent\nasync def supervisor_agent(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Main supervisor that coordinates all subgraphs\"\"\"\n\n itinerary = state.get(\"itinerary\", {})\n\n # Check what's already completed\n has_flights = itinerary.get(\"flight\", None) is not None\n has_hotels = itinerary.get(\"hotel\", None) is not None\n has_experiences = state.get(\"experiences\", None) is not None\n\n system_prompt = f\"\"\"\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flights found: {has_flights}\n - Hotels found: {has_hotels}\n - Experiences found: {has_experiences}\n - Itinerary (Things that the user has already confirmed selection on): {json.dumps(itinerary, indent=2)}\n \n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options \n - experiences_agent: Finds restaurant and activity recommendations\n - {END}: Mark task as complete when all information is gathered\n \n You must route to the appropriate agent based on what's missing. 
Once all agents have completed their tasks, route to '{END}'.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Bind the routing tool\n model_with_tools = model.bind_tools(\n [SupervisorResponseFormatter],\n parallel_tool_calls=False,\n )\n\n # Get supervisor decision\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls for routing\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n if isinstance(tool_call, dict):\n tool_call_args = tool_call[\"args\"]\n else:\n tool_call_args = tool_call.args\n\n next_agent = tool_call_args[\"next_agent\"]\n\n # Add tool response\n tool_response = {\n \"role\": \"tool\",\n \"content\": f\"Routing to {next_agent} and providing the answer\",\n \"tool_call_id\": tool_call.id if hasattr(tool_call, 'id') else tool_call[\"id\"]\n }\n\n messages = messages + [tool_response, AIMessage(content=tool_call_args[\"answer\"])]\n\n if next_agent is not None:\n # Persist the routing exchange so it is not lost on handoff\n return Command(goto=next_agent, update={\"messages\": messages})\n\n # Fallback if no tool call\n return Command(\n goto=END,\n update={\"messages\": messages}\n )\n\n# Create subgraphs\nflights_graph = StateGraph(TravelAgentState)\nflights_graph.add_node(\"flights_agent_chat_node\", flights_finder)\nflights_graph.set_entry_point(\"flights_agent_chat_node\")\nflights_graph.add_edge(START, \"flights_agent_chat_node\")\nflights_graph.add_edge(\"flights_agent_chat_node\", END)\nflights_subgraph = flights_graph.compile()\n\nhotels_graph = StateGraph(TravelAgentState)\nhotels_graph.add_node(\"hotels_agent_chat_node\", hotels_finder)\nhotels_graph.set_entry_point(\"hotels_agent_chat_node\")\nhotels_graph.add_edge(START, \"hotels_agent_chat_node\")\nhotels_graph.add_edge(\"hotels_agent_chat_node\", END)\nhotels_subgraph = hotels_graph.compile()\n\nexperiences_graph = StateGraph(TravelAgentState)\nexperiences_graph.add_node(\"experiences_agent_chat_node\", experiences_finder)\nexperiences_graph.set_entry_point(\"experiences_agent_chat_node\")\nexperiences_graph.add_edge(START, \"experiences_agent_chat_node\")\nexperiences_graph.add_edge(\"experiences_agent_chat_node\", END)\nexperiences_subgraph = experiences_graph.compile()\n\n# Main supervisor workflow\nworkflow = StateGraph(TravelAgentState)\n\n# Add supervisor and subgraphs as nodes\nworkflow.add_node(\"supervisor\", supervisor_agent)\nworkflow.add_node(\"flights_agent\", flights_subgraph)\nworkflow.add_node(\"hotels_agent\", hotels_subgraph)\nworkflow.add_node(\"experiences_agent\", experiences_subgraph)\n\n# Set entry point\nworkflow.set_entry_point(\"supervisor\")\nworkflow.add_edge(START, \"supervisor\")\n\n# Add edges back to supervisor after each subgraph\nworkflow.add_edge(\"flights_agent\", \"supervisor\")\nworkflow.add_edge(\"hotels_agent\", \"supervisor\")\nworkflow.add_edge(\"experiences_agent\", \"supervisor\")\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, 
don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -643,7 +643,7 @@ "langgraph-fastapi::agentic_chat": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
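The `TravelAgentState` above merges each subgraph's partial `itinerary` update through the `merge_itinerary` reducer instead of overwriting the whole dict. A minimal, self-contained sketch of that Annotated-reducer pattern, assuming a recent `langgraph`; the state shape, node names, and values here are illustrative, not code from this patch:

```python
from typing import Annotated, Optional

from typing_extensions import TypedDict
from langgraph.graph import StateGraph, START, END


def merge_dicts(left: Optional[dict], right: Optional[dict]) -> dict:
    """Shallow-merge a partial update into the existing channel value."""
    return {**(left or {}), **(right or {})}


class State(TypedDict):
    # LangGraph feeds every node's update for this key through merge_dicts.
    itinerary: Annotated[dict, merge_dicts]


def pick_flight(state: State) -> dict:
    return {"itinerary": {"flight": "KLM"}}  # partial update


def pick_hotel(state: State) -> dict:
    return {"itinerary": {"hotel": "Hotel Zoe"}}  # merged, not overwritten


builder = StateGraph(State)
builder.add_node("pick_flight", pick_flight)
builder.add_node("pick_hotel", pick_hotel)
builder.add_edge(START, "pick_flight")
builder.add_edge("pick_flight", "pick_hotel")
builder.add_edge("pick_hotel", END)
graph = builder.compile()

print(graph.invoke({"itinerary": {}}))
# {'itinerary': {'flight': 'KLM', 'hotel': 'Hotel Zoe'}}
```

Without the reducer, the second node's update would replace the first one's, which is why each finder above can return only the slice of the itinerary it owns.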
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n 
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", "language": "typescript", "type": "file" }, @@ -661,7 +661,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. 
We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. 
We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" } @@ -669,7 +669,7 @@ "langgraph-fastapi::agentic_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter(step => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
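Every agent.py in this patch ends with the same compile-time gate: attach a `MemorySaver` only when the graph is self-hosted behind the FastAPI integration, because `langgraph dev` and the LangGraph API inject their own persistence. A condensed sketch of just that gate, with the graph body reduced to a single placeholder node; the env var name comes from the files above, everything else is illustrative:

```python
import os

from typing_extensions import TypedDict
from langgraph.graph import StateGraph, START, END


class State(TypedDict):
    count: int


def bump(state: State) -> dict:
    return {"count": state["count"] + 1}


builder = StateGraph(State)
builder.add_node("bump", bump)
builder.add_edge(START, "bump")
builder.add_edge("bump", END)

if os.environ.get("LANGGRAPH_FAST_API", "false").lower() == "true":
    # Self-hosted (FastAPI/CopilotKit) mode: bring our own checkpointer.
    from langgraph.checkpoint.memory import MemorySaver

    graph = builder.compile(checkpointer=MemorySaver())
    # Checkpointed runs are addressed by thread:
    # graph.invoke({"count": 0}, {"configurable": {"thread_id": "t1"}})
else:
    # LangGraph API/dev supplies persistence; a custom one would conflict.
    graph = builder.compile()
```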
\n
\n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n \n {/* Progress Bar */}\n
\n
\n
\n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending = step.status === \"pending\" && \n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n
\n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n
\n )}\n\n {/* Status Icon */}\n
\n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n
\n {step.description}\n
\n {isCurrentPending && (\n
\n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n
\n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n
\n
\n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", + "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter((step) => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n \n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n\n {/* Progress Bar */}\n \n \n \n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending =\n step.status === \"pending\" &&\n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n \n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n \n )}\n\n {/* Status Icon */}\n \n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n \n {step.description}\n
\n {isCurrentPending && (\n \n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n \n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n \n \n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", "language": "typescript", "type": "file" }, @@ -687,7 +687,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. 
\n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages 
import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. 
\n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" } @@ -695,7 +695,7 @@ "langgraph-fastapi::human_in_the_loop": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from 
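Two CopilotKit-specific hooks do the streaming work in the generative-UI agent above: the `predict_state` metadata entry tells the runtime to mirror a tool argument into frontend state while it is still being generated, and the `manually_emit_state` custom event pushes snapshots mid-node. Neither is core LangGraph API; the sketch below only restates the convention as the files above use it, with a `setdefault` guard on `config["metadata"]` added as an assumption:

```python
import asyncio

from langchain_core.callbacks.manager import adispatch_custom_event
from langchain_core.runnables import RunnableConfig


async def run_steps(state: dict, config: RunnableConfig) -> dict:
    """Meant to run as a LangGraph node; custom events need a live run context."""
    # 1. Map a streaming tool argument onto a frontend state key.
    config.setdefault("metadata", {})["predict_state"] = [{
        "state_key": "steps",
        "tool": "generate_task_steps_generative_ui",
        "tool_argument": "steps",
    }]

    # 2. Emit intermediate snapshots so the UI updates before the node returns.
    for step in state["steps"]:
        await asyncio.sleep(1)  # stand-in for real work
        step["status"] = "completed"
        await adispatch_custom_event("manually_emit_state", state, config=config)

    return state
```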
\"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for langgraph integration.\n available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return ;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", + "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n \n {children}\n
\n
\n);\n\nconst StepHeader = ({\n theme,\n enabledCount,\n totalCount,\n status,\n showStatus = false,\n}: {\n theme?: string;\n enabledCount: number;\n totalCount: number;\n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n \n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n\n \n 0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({\n step,\n theme,\n status,\n onToggle,\n disabled = false,\n}: {\n step: { description: string; status: string };\n theme?: string;\n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n \n \n \n);\n\nconst ActionButton = ({\n variant,\n theme,\n disabled,\n onClick,\n children,\n}: {\n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n\n const variantClasses = {\n primary:\n \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary:\n theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success:\n \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger:\n \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\",\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({\n theme,\n variant = \"default\",\n}: {\n theme?: string;\n variant?: \"default\" | \"success\" | \"danger\";\n}) => (\n <>\n \n \n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n\n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter((step) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n\n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for langgraph integration.\n available: [\"langgraph\", \"langgraph-fastapi\", \"langgraph-typescript\"].includes(integrationId)\n ? \"disabled\"\n : \"enabled\",\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return ;\n },\n });\n\n return (\n 
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter((step) => step.status === \"enabled\") });\n }\n };\n\n return (\n \n \n\n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n \n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n \n )}\n\n \n
\n );\n};\n\nexport default HumanInTheLoop;\n", "language": "typescript", "type": "file" }, @@ -713,7 +713,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in imperative form (i.e. Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any]\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == 
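The chat nodes in these agents share one tool-calling shape: a Pydantic schema is exposed via `bind_tools`, parallel calls are disabled, and the first tool call's arguments come back as plain dicts. A compact sketch of that shape under stated assumptions (the `PlanSteps` schema, model name, and prompt are placeholders, and a configured `langchain-openai` is assumed):

```python
from pydantic import BaseModel, Field
from langchain_core.messages import SystemMessage
from langchain_openai import ChatOpenAI


class PlanSteps(BaseModel):
    """Propose the steps required for the user's task."""
    steps: list[str] = Field(description="Short imperative steps")


async def plan(user_message: str) -> list[str]:
    model = ChatOpenAI(model="gpt-4o-mini").bind_tools(
        [PlanSteps],
        parallel_tool_calls=False,  # one call at a time, as in the agents above
    )
    response = await model.ainvoke(
        [SystemMessage(content="Call PlanSteps for any task."), ("user", user_message)]
    )
    if response.tool_calls:  # list of dicts on the AIMessage
        return response.tool_calls[0]["args"]["steps"]
    return []
```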
\"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final response\n return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add 
edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in imperative form (i.e. 
Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any] = []\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final response\n 
return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" } @@ -721,7 +721,7 @@ "langgraph-fastapi::predictive_state_updates": [ { "name": "page.tsx", - "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport 
StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n const chatTitle = 'AI Document Editor'\n const chatDescription = 'Ask me to create or edit a document'\n const initialLabel = 'Hi 👋 How can I help with your document?'\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction({\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n }, [agentState?.document]);\n\n // Action to write the document.\n useCopilotAction({\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n }, [agentState?.document]);\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n
\n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n
\n )}\n
\n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", + "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n const chatTitle = \"AI Document Editor\";\n const chatDescription = \"Ask me to create or edit a document\";\n const initialLabel = \"Hi 👋 How can I help with your document?\";\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n \n
\n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction(\n {\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n },\n [agentState?.document],\n );\n\n // Action to write the document.\n useCopilotAction(\n {\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n },\n [agentState?.document],\n );\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n \n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n \n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n \n )}\n \n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", "language": "typescript", "type": "file" }, @@ -739,7 +739,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n return Command(\n goto=\"chat_node\"\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 
2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n \"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", + "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph 
import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n 
\"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", "language": "python", "type": "file" } @@ -747,7 +747,7 @@ "langgraph-fastapi::shared_state": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => 
{\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n \n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n
\n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", + "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n\n const chatTitle = \"AI Recipe Assistant\";\n const chatDescription = \"Ask me to craft recipes\";\n const initialLabel = \"Hi 👋 How can I help with your recipe?\";\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, 
preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n \n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n
\n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -765,7 +765,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any]\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
dont describe the recipe, just say what you did.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"recipe\",\n \"tool\": \"generate_recipe\",\n \"tool_argument\": \"recipe\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_recipe\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n # Check if args is already a dict or needs to be parsed\n tool_call_args = (tool_call[\"args\"]\n if isinstance(tool_call[\"args\"], dict)\n else json.loads(tool_call[\"args\"]))\n\n if tool_call[\"name\"] == \"generate_recipe\":\n # Update recipe state with tool_call_args\n recipe_data = tool_call_args[\"recipe\"]\n\n # If we have an existing recipe, update it\n if \"recipe\" in state and state[\"recipe\"] is not None:\n recipe = state[\"recipe\"]\n for key, value in recipe_data.items():\n if value is not None: # Only update fields that were provided\n recipe[key] = value\n else:\n # Create a new recipe\n recipe = {\n \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n \"ingredients\": recipe_data.get(\"ingredients\", []),\n \"instructions\": recipe_data.get(\"instructions\", [])\n }\n\n # Add tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Recipe generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Explicitly emit the updated state to ensure it's shared with frontend\n state[\"recipe\"] = recipe\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n # Return command with updated recipe\n return Command(\n goto=\"start_node\",\n update={\n \"messages\": messages,\n \"recipe\": recipe\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"recipe\": state[\"recipe\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n if \"tools\" not in state:\n state[\"tools\"] = []\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"],\n \"tools\": state[\"tools\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
don't describe the recipe, just say what you did.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the generate_recipe tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"recipe\",\n \"tool\": \"generate_recipe\",\n \"tool_argument\": \"recipe\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_recipe\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n # Handle dicts or objects (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n # Check if args is already a dict or needs to be parsed\n tool_call_args = (tool_call[\"args\"]\n if isinstance(tool_call[\"args\"], dict)\n else json.loads(tool_call[\"args\"]))\n\n if tool_call[\"name\"] == \"generate_recipe\":\n # Update recipe state with tool_call_args\n recipe_data = tool_call_args[\"recipe\"]\n\n # If we have an existing recipe, update it\n if \"recipe\" in state and state[\"recipe\"] is not None:\n recipe = state[\"recipe\"]\n for key, value in recipe_data.items():\n if value is not None: # Only update fields that were provided\n recipe[key] = value\n else:\n # Create a new recipe\n recipe = {\n \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n \"ingredients\": recipe_data.get(\"ingredients\", []),\n \"instructions\": recipe_data.get(\"instructions\", [])\n }\n\n # Add tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Recipe generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Explicitly emit the updated state to ensure it's shared with frontend\n state[\"recipe\"] = recipe\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n # Return command with updated recipe\n return Command(\n goto=\"start_node\",\n update={\n \"messages\": messages,\n \"recipe\": recipe\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"recipe\": state[\"recipe\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" } @@ -773,7 +773,7 @@ "langgraph-fastapi::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n \n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", + "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [];\n english: string[] | [];\n image_names: string[] | [];\n selectedImage: string | null;\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial;\n setHaikus: Dispatch>;\n haikus: GenerateHaiku[];\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n const chatTitle = \"Haiku Generator\";\n const chatDescription = \"Ask me to create haikus\";\n const initialLabel = \"I'm a haiku generator 👋. How can I help you?\";\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && (\n \n )}\n \n \n );\n}\n\nfunction MobileChat({\n chatTitle,\n chatDescription,\n initialLabel,\n}: {\n chatTitle: string;\n chatDescription: string;\n initialLabel: string;\n}) {\n const defaultChatHeight = 50;\n\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen &&
setIsChatOpen(false)} />}\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

{generatedHaiku.english?.[index]}

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus((prevHaikus) => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName,\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n
\n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_names: [],\n selectedImage: null,\n },\n ]);\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction(\n {\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join(\"\\n -\")}`,\n },\n ],\n followUp: false,\n handler: async ({\n japanese,\n english,\n image_names,\n }: {\n japanese: string[];\n english: string[];\n image_names: string[];\n }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus((prev) =>\n [newHaiku, ...prev].filter((h) => h.english[0] !== \"A placeholder verse—\"),\n );\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return ;\n },\n },\n [haikus],\n );\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n \n
\n {haikus.map(\n (haiku, index) =>\n (haikus.length == 1 || index == activeIndex) && (\n \n {haiku.japanese.map((line, lineIndex) => (\n \n \n {line}\n

\n \n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n \n {haiku.image_names.map((imageName, imgIndex) => (\n \n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName };\n } else {\n return { ...h };\n }\n });\n })\n }\n />\n ))}\n
\n )}\n \n ),\n )}\n \n \n \n );\n}\n\nfunction Thumbnails({\n haikus,\n activeIndex,\n setActiveIndex,\n isMobile,\n}: {\n haikus: Haiku[];\n activeIndex: number;\n setActiveIndex: (index: number) => void;\n isMobile: boolean;\n}) {\n if (haikus.length == 0 || isMobile) {\n return null;\n }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n
\n

{line}

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n
\n ))}\n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -791,7 +791,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. 
It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" } @@ -799,7 +799,7 @@ "langgraph-fastapi::agentic_chat_reasoning": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { ChevronDown } from \"lucide-react\";\nimport { Button } from \"@/components/ui/button\";\nimport {\n DropdownMenu,\n DropdownMenuContent,\n DropdownMenuItem,\n DropdownMenuLabel,\n DropdownMenuSeparator,\n DropdownMenuTrigger,\n} from \"@/components/ui/dropdown-menu\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n model: string;\n}\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"agentic_chat_reasoning\",\n initialState: {\n model: \"OpenAI\",\n },\n });\n\n // Initialize model if not set\n const selectedModel = agentState?.model || \"OpenAI\";\n\n const handleModelChange = (model: string) => {\n setAgentState({ model });\n };\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. 
Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n },\n });\n\n return (\n
\n {/* Reasoning Model Dropdown */}\n
\n
\n
\n \n Reasoning Model:\n \n \n \n \n \n \n Select Model\n \n handleModelChange(\"OpenAI\")}>\n OpenAI\n \n handleModelChange(\"Anthropic\")}>\n Anthropic\n \n handleModelChange(\"Gemini\")}>\n Gemini\n \n \n \n
\n
\n
\n\n {/* Chat Container */}\n
\n
\n \n
\n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { ChevronDown } from \"lucide-react\";\nimport { cloudAgents } from \"@/cloudAgents\";\nimport { Button } from \"@/components/ui/button\";\nimport {\n DropdownMenu,\n DropdownMenuContent,\n DropdownMenuItem,\n DropdownMenuLabel,\n DropdownMenuSeparator,\n DropdownMenuTrigger,\n} from \"@/components/ui/dropdown-menu\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n model: string;\n}\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"agentic_chat_reasoning\",\n initialState: {\n model: \"OpenAI\",\n },\n });\n\n // Initialize model if not set\n const selectedModel = agentState?.model || \"OpenAI\";\n\n const handleModelChange = (model: string) => {\n setAgentState({ model });\n };\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n },\n });\n\n return (\n
\n {/* Reasoning Model Dropdown */}\n
\n
\n
\n \n Reasoning Model:\n \n \n \n \n \n \n Select Model\n \n handleModelChange(\"OpenAI\")}>\n OpenAI\n \n handleModelChange(\"Anthropic\")}>\n Anthropic\n \n handleModelChange(\"Gemini\")}>\n Gemini\n \n \n \n
\n
\n
\n\n {/* Chat Container */}\n
\n
\n \n
\n
\n
\n );\n};\n\nexport default AgenticChat;\n", "language": "typescript", "type": "file" }, @@ -817,7 +817,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any]\n model: str\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n\n # 1. Define the model\n model = ChatOpenAI(model=\"o3\")\n if state[\"model\"] == \"Anthropic\":\n model = ChatAnthropic(\n model=\"claude-sonnet-4-20250514\",\n thinking={\"type\": \"enabled\", \"budget_tokens\": 2000}\n )\n elif state[\"model\"] == \"Gemini\":\n model = ChatGoogleGenerativeAI(model=\"gemini-2.5-pro\", thinking_budget=1024)\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. 
We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()", + "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n model: str\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n\n # 1. Define the model\n model = ChatOpenAI(model=\"o3\")\n if state[\"model\"] == \"Anthropic\":\n model = ChatAnthropic(\n model=\"claude-sonnet-4-20250514\",\n thinking={\"type\": \"enabled\", \"budget_tokens\": 2000}\n )\n elif state[\"model\"] == \"Gemini\":\n model = ChatGoogleGenerativeAI(model=\"gemini-2.5-pro\", thinking_budget=1024)\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 5. 
We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()", "language": "python", "type": "file" } @@ -825,7 +825,7 @@ "langgraph-fastapi::subgraphs": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SubgraphsProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\n// Travel planning data types\ninterface Flight {\n airline: string;\n arrival: string;\n departure: string;\n duration: string;\n price: string;\n}\n\ninterface Hotel {\n location: string;\n name: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n description: string;\n location: string;\n type: string;\n}\n\ninterface Itinerary {\n hotel?: Hotel;\n flight?: Flight;\n experiences?: Experience[];\n}\n\ntype AvailableAgents = 'flights' | 'hotels' | 'experiences' | 'supervisor'\n\ninterface TravelAgentState {\n experiences: Experience[],\n flights: Flight[],\n hotels: Hotel[],\n itinerary: Itinerary\n planning_step: string\n active_agent: AvailableAgents\n}\n\nconst INITIAL_STATE: TravelAgentState = {\n itinerary: {},\n experiences: [],\n flights: [],\n hotels: [],\n planning_step: \"start\",\n active_agent: 'supervisor'\n};\n\ninterface InterruptEvent {\n message: string;\n options: TAgent extends 'flights' ? Flight[] : TAgent extends 'hotels' ? Hotel[] : never,\n recommendation: TAgent extends 'flights' ? Flight : TAgent extends 'hotels' ? Hotel : never,\n agent: TAgent\n}\n\nfunction InterruptHumanInTheLoop({\n event,\n resolve,\n}: {\n event: { value: InterruptEvent };\n resolve: (value: string) => void;\n}) {\n const { message, options, agent, recommendation } = event.value;\n\n // Format agent name with emoji\n const formatAgentName = (agent: string) => {\n switch (agent) {\n case 'flights': return 'Flights Agent';\n case 'hotels': return 'Hotels Agent';\n case 'experiences': return 'Experiences Agent';\n default: return `${agent} Agent`;\n }\n };\n\n const handleOptionSelect = (option: any) => {\n resolve(JSON.stringify(option));\n };\n\n return (\n
\n

{formatAgentName(agent)}: {message}

\n\n
\n {options.map((opt, idx) => {\n if ('airline' in opt) {\n const isRecommended = (recommendation as Flight).airline === opt.airline;\n // Flight options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.airline}\n {opt.price}\n
\n
\n {opt.departure} → {opt.arrival}\n
\n
\n {opt.duration}\n
\n \n );\n }\n const isRecommended = (recommendation as Hotel).name === opt.name;\n\n // Hotel options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.name}\n {opt.rating}\n
\n
\n 📍 {opt.location}\n
\n
\n {opt.price_per_night}\n
\n \n );\n })}\n
\n
\n )\n}\n\nexport default function Subgraphs({ params }: SubgraphsProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight);\n\n const chatTitle = 'Travel Planning Assistant';\n const chatDescription = 'Plan your perfect trip with AI specialists';\n const initialLabel = 'Hi! ✈️ Ready to plan an amazing trip? Try saying \"Plan a trip to Paris\" or \"Find me flights to Tokyo\"';\n\n return (\n \n
\n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight);\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nfunction TravelPlanner() {\n const { isMobile } = useMobileView();\n const { state: agentState, nodeName } = useCoAgent({\n name: \"subgraphs\",\n initialState: INITIAL_STATE,\n config: {\n streamSubgraphs: true,\n }\n });\n\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n\n // Current itinerary strip\n const ItineraryStrip = () => {\n const selectedFlight = agentState?.itinerary?.flight;\n const selectedHotel = agentState?.itinerary?.hotel;\n const hasExperiences = agentState?.experiences?.length > 0;\n\n return (\n
\n
Current Itinerary:
\n
\n
\n 📍\n Amsterdam → San Francisco\n
\n {selectedFlight && (\n
\n ✈️\n {selectedFlight.airline} - {selectedFlight.price}\n
\n )}\n {selectedHotel && (\n
\n 🏨\n {selectedHotel.name}\n
\n )}\n {hasExperiences && (\n
\n 🎯\n {agentState.experiences.length} experiences planned\n
\n )}\n
\n
\n );\n };\n\n // Compact agent status\n const AgentStatus = () => {\n let activeAgent = 'supervisor';\n if (nodeName?.includes('flights_agent')) {\n activeAgent = 'flights';\n }\n if (nodeName?.includes('hotels_agent')) {\n activeAgent = 'hotels';\n }\n if (nodeName?.includes('experiences_agent')) {\n activeAgent = 'experiences';\n }\n return (\n
\n
Active Agent:
\n
\n
\n 👨‍💼\n Supervisor\n
\n
\n ✈️\n Flights\n
\n
\n 🏨\n Hotels\n
\n
\n 🎯\n Experiences\n
\n
\n
\n )\n };\n\n // Travel details component\n const TravelDetails = () => (\n
\n
\n

✈️ Flight Options

\n
\n {agentState?.flights?.length > 0 ? (\n agentState.flights.map((flight, index) => (\n
\n {flight.airline}:\n {flight.departure} → {flight.arrival} ({flight.duration}) - {flight.price}\n
\n ))\n ) : (\n

No flights found yet

\n )}\n {agentState?.itinerary?.flight && (\n
\n Selected: {agentState.itinerary.flight.airline} - {agentState.itinerary.flight.price}\n
\n )}\n
\n
\n\n
\n

🏨 Hotel Options

\n
\n {agentState?.hotels?.length > 0 ? (\n agentState.hotels.map((hotel, index) => (\n
\n {hotel.name}:\n {hotel.location} - {hotel.price_per_night} ({hotel.rating})\n
\n ))\n ) : (\n

No hotels found yet

\n )}\n {agentState?.itinerary?.hotel && (\n
\n Selected: {agentState.itinerary.hotel.name} - {agentState.itinerary.hotel.price_per_night}\n
\n )}\n
\n
\n\n
\n

🎯 Experiences

\n
\n {agentState?.experiences?.length > 0 ? (\n agentState.experiences.map((experience, index) => (\n
\n
{experience.name}
\n
{experience.type}
\n
{experience.description}
\n
Location: {experience.location}
\n
\n ))\n ) : (\n

No experiences planned yet

\n )}\n
\n
\n
\n );\n\n return (\n
\n \n \n \n
\n );\n}", + "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface SubgraphsProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\n// Travel planning data types\ninterface Flight {\n airline: string;\n arrival: string;\n departure: string;\n duration: string;\n price: string;\n}\n\ninterface Hotel {\n location: string;\n name: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n description: string;\n location: string;\n type: string;\n}\n\ninterface Itinerary {\n hotel?: Hotel;\n flight?: Flight;\n experiences?: Experience[];\n}\n\ntype AvailableAgents = \"flights\" | \"hotels\" | \"experiences\" | \"supervisor\";\n\ninterface TravelAgentState {\n experiences: Experience[];\n flights: Flight[];\n hotels: Hotel[];\n itinerary: Itinerary;\n planning_step: string;\n active_agent: AvailableAgents;\n}\n\nconst INITIAL_STATE: TravelAgentState = {\n itinerary: {},\n experiences: [],\n flights: [],\n hotels: [],\n planning_step: \"start\",\n active_agent: \"supervisor\",\n};\n\ninterface InterruptEvent {\n message: string;\n options: TAgent extends \"flights\" ? Flight[] : TAgent extends \"hotels\" ? Hotel[] : never;\n recommendation: TAgent extends \"flights\" ? Flight : TAgent extends \"hotels\" ? Hotel : never;\n agent: TAgent;\n}\n\nfunction InterruptHumanInTheLoop({\n event,\n resolve,\n}: {\n event: { value: InterruptEvent };\n resolve: (value: string) => void;\n}) {\n const { message, options, agent, recommendation } = event.value;\n\n // Format agent name with emoji\n const formatAgentName = (agent: string) => {\n switch (agent) {\n case \"flights\":\n return \"Flights Agent\";\n case \"hotels\":\n return \"Hotels Agent\";\n case \"experiences\":\n return \"Experiences Agent\";\n default:\n return `${agent} Agent`;\n }\n };\n\n const handleOptionSelect = (option: any) => {\n resolve(JSON.stringify(option));\n };\n\n return (\n
\n

\n {formatAgentName(agent)}: {message}\n

\n\n
\n {options.map((opt, idx) => {\n if (\"airline\" in opt) {\n const isRecommended = (recommendation as Flight).airline === opt.airline;\n // Flight options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.airline}\n {opt.price}\n
\n
\n {opt.departure} → {opt.arrival}\n
\n
{opt.duration}
\n \n );\n }\n const isRecommended = (recommendation as Hotel).name === opt.name;\n\n // Hotel options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.name}\n {opt.rating}\n
\n
📍 {opt.location}
\n
{opt.price_per_night}
\n \n );\n })}\n
\n
\n );\n}\n\nexport default function Subgraphs({ params }: SubgraphsProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n\n const chatTitle = \"Travel Planning Assistant\";\n const chatDescription = \"Plan your perfect trip with AI specialists\";\n const initialLabel =\n 'Hi! ✈️ Ready to plan an amazing trip? Try saying \"Plan a trip to Paris\" or \"Find me flights to Tokyo\"';\n\n return (\n \n
\n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight);\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nfunction TravelPlanner() {\n const { isMobile } = useMobileView();\n const { state: agentState, nodeName } = useCoAgent({\n name: \"subgraphs\",\n initialState: INITIAL_STATE,\n config: {\n streamSubgraphs: true,\n },\n });\n\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n\n // Current itinerary strip\n const ItineraryStrip = () => {\n const selectedFlight = agentState?.itinerary?.flight;\n const selectedHotel = agentState?.itinerary?.hotel;\n const hasExperiences = agentState?.experiences?.length > 0;\n\n return (\n
\n
Current Itinerary:
\n
\n
\n 📍\n Amsterdam → San Francisco\n
\n {selectedFlight && (\n
\n ✈️\n \n {selectedFlight.airline} - {selectedFlight.price}\n \n
\n )}\n {selectedHotel && (\n
\n 🏨\n {selectedHotel.name}\n
\n )}\n {hasExperiences && (\n
\n 🎯\n {agentState.experiences.length} experiences planned\n
\n )}\n
\n
\n );\n };\n\n // Compact agent status\n const AgentStatus = () => {\n let activeAgent = \"supervisor\";\n if (nodeName?.includes(\"flights_agent\")) {\n activeAgent = \"flights\";\n }\n if (nodeName?.includes(\"hotels_agent\")) {\n activeAgent = \"hotels\";\n }\n if (nodeName?.includes(\"experiences_agent\")) {\n activeAgent = \"experiences\";\n }\n return (\n
\n
Active Agent:
\n
\n \n 👨‍💼\n Supervisor\n
\n \n ✈️\n Flights\n
\n \n 🏨\n Hotels\n \n \n 🎯\n Experiences\n \n \n \n );\n };\n\n // Travel details component\n const TravelDetails = () => (\n
\n
\n

✈️ Flight Options

\n
\n {agentState?.flights?.length > 0 ? (\n agentState.flights.map((flight, index) => (\n
\n {flight.airline}:\n \n {flight.departure} → {flight.arrival} ({flight.duration}) - {flight.price}\n \n
\n ))\n ) : (\n

No flights found yet

\n )}\n {agentState?.itinerary?.flight && (\n
\n Selected: {agentState.itinerary.flight.airline} -{\" \"}\n {agentState.itinerary.flight.price}\n
\n )}\n
\n
\n\n
\n

🏨 Hotel Options

\n
\n {agentState?.hotels?.length > 0 ? (\n agentState.hotels.map((hotel, index) => (\n
\n {hotel.name}:\n \n {hotel.location} - {hotel.price_per_night} ({hotel.rating})\n \n
\n ))\n ) : (\n

No hotels found yet

\n )}\n {agentState?.itinerary?.hotel && (\n
\n Selected: {agentState.itinerary.hotel.name} -{\" \"}\n {agentState.itinerary.hotel.price_per_night}\n
\n )}\n
\n
\n\n
\n

🎯 Experiences

\n
\n {agentState?.experiences?.length > 0 ? (\n agentState.experiences.map((experience, index) => (\n
\n
{experience.name}
\n
{experience.type}
\n
{experience.description}
\n
Location: {experience.location}
\n
\n ))\n ) : (\n

No experiences planned yet

\n )}\n
\n
\n
\n );\n\n return (\n
\n \n \n \n
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -843,7 +843,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\nThe supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n\"\"\"\n\nfrom typing import Dict, List, Any, Optional, Annotated, Union\nfrom dataclasses import dataclass\nimport json\nimport os\nfrom pydantic import BaseModel, Field\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\n\n# OpenAI imports\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage, AIMessage\n\ndef create_interrupt(message: str, options: List[Any], recommendation: Any, agent: str):\n return interrupt({\n \"message\": message,\n \"options\": options,\n \"recommendation\": recommendation,\n \"agent\": agent,\n })\n\n# State schema for travel planning\n@dataclass\nclass Flight:\n airline: str\n departure: str\n arrival: str\n price: str\n duration: str\n\n@dataclass\nclass Hotel:\n name: str\n location: str\n price_per_night: str\n rating: str\n\n@dataclass\nclass Experience:\n name: str\n type: str # \"restaurant\" or \"activity\"\n description: str\n location: str\n\ndef merge_itinerary(left: Union[dict, None] = None, right: Union[dict, None] = None) -> dict:\n \"\"\"Custom reducer to merge shopping cart updates.\"\"\"\n if not left:\n left = {}\n if not right:\n right = {}\n\n return {**left, **right}\n\nclass TravelAgentState(MessagesState):\n \"\"\"Shared state for the travel agent system\"\"\"\n # Travel request details\n origin: str = \"\"\n destination: str = \"\"\n\n # Results from each agent\n flights: List[Flight] = None\n hotels: List[Hotel] = None\n experiences: List[Experience] = None\n\n itinerary: Annotated[dict, merge_itinerary] = None\n\n # Tools available to all agents\n tools: List[Any] = None\n\n # Supervisor routing\n next_agent: Optional[str] = None\n\n# Static data for demonstration\nSTATIC_FLIGHTS = [\n Flight(\"KLM\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$650\", \"11h 30m\"),\n Flight(\"United\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$720\", \"12h 15m\")\n]\n\nSTATIC_HOTELS = [\n Hotel(\"Hotel Zephyr\", \"Fisherman's Wharf\", \"$280/night\", \"4.2 stars\"),\n Hotel(\"The Ritz-Carlton\", \"Nob Hill\", \"$550/night\", \"4.8 stars\"),\n Hotel(\"Hotel Zoe\", \"Union Square\", \"$320/night\", \"4.4 stars\")\n]\n\nSTATIC_EXPERIENCES = [\n Experience(\"Pier 39\", \"activity\", \"Iconic waterfront destination with shops and sea lions\", \"Fisherman's Wharf\"),\n Experience(\"Golden Gate Bridge\", \"activity\", \"World-famous suspension bridge with stunning views\", \"Golden Gate\"),\n Experience(\"Swan Oyster Depot\", \"restaurant\", \"Historic seafood counter serving fresh oysters\", \"Polk Street\"),\n Experience(\"Tartine Bakery\", \"restaurant\", \"Artisanal bakery famous for bread and pastries\", \"Mission District\")\n]\n\n# Flights finder subgraph\nasync def flights_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds flight options\"\"\"\n\n # Simulate flight search with static data\n flights = STATIC_FLIGHTS\n\n selected_flight = state.get('itinerary', {}).get('flight', None)\n if not selected_flight:\n selected_flight = create_interrupt(\n message=f\"\"\"\n Found {len(flights)} flight options 
from {state.get('origin', 'Amsterdam')} to {state.get('destination', 'San Francisco')}.\n I recommend choosing the flight by {flights[0].airline} since it's known to be on time and cheaper.\n \"\"\",\n options=flights,\n recommendation=flights[0],\n agent=\"flights\"\n )\n\n if isinstance(selected_flight, str):\n selected_flight = json.loads(selected_flight)\n return Command(\n goto=END,\n update={\n \"flights\": flights,\n \"itinerary\": {\n \"flight\": selected_flight\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Flights Agent: Great. I'll book you the {selected_flight[\"airline\"]} flight from {selected_flight[\"departure\"]} to {selected_flight[\"arrival\"]}.\"\n }]\n }\n )\n\n# Hotels finder subgraph\nasync def hotels_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds hotel options\"\"\"\n\n # Simulate hotel search with static data\n hotels = STATIC_HOTELS\n selected_hotel = state.get('itinerary', {}).get('hotel', None)\n if not selected_hotel:\n selected_hotel = create_interrupt(\n message=f\"\"\"\n Found {len(hotels)} accommodation options in {state.get('destination', 'San Francisco')}.\n I recommend choosing the {hotels[2].name} since it strikes the balance between rating, price, and location.\n \"\"\",\n options=hotels,\n recommendation=hotels[2],\n agent=\"hotels\"\n )\n\n if isinstance(selected_hotel, str):\n selected_hotel = json.loads(selected_hotel)\n return Command(\n goto=END,\n update={\n \"hotels\": hotels,\n \"itinerary\": {\n \"hotel\": selected_hotel\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Hotels Agent: Excellent choice! You'll like {selected_hotel[\"name\"]}.\"\n }]\n }\n )\n\n# Experiences finder subgraph\nasync def experiences_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds restaurant and activity recommendations\"\"\"\n\n # Filter experiences (2 restaurants, 2 activities)\n restaurants = [exp for exp in STATIC_EXPERIENCES if exp.type == \"restaurant\"][:2]\n activities = [exp for exp in STATIC_EXPERIENCES if exp.type == \"activity\"][:2]\n experiences = restaurants + activities\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n itinerary = state.get(\"itinerary\", {})\n\n system_prompt = f\"\"\"\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now, is to let the user know of your findings.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flight chosen: {itinerary.get(\"hotel\", None)}\n - Hotel chosen: {itinerary.get(\"hotel\", None)}\n - activities found: {activities}\n - restaurants found: {restaurants}\n \"\"\"\n\n # Get supervisor decision\n response = await model.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"experiences\": experiences,\n \"messages\": state[\"messages\"] + [response]\n }\n )\n\nclass SupervisorResponseFormatter(BaseModel):\n \"\"\"Always use this tool to structure your response to the user.\"\"\"\n answer: str = Field(description=\"The answer to the user\")\n next_agent: str | None = Field(description=\"The agent to go to. 
Not required if you do not want to route to another agent.\")\n\n# Supervisor agent\nasync def supervisor_agent(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Main supervisor that coordinates all subgraphs\"\"\"\n\n itinerary = state.get(\"itinerary\", {})\n\n # Check what's already completed\n has_flights = itinerary.get(\"flight\", None) is not None\n has_hotels = itinerary.get(\"hotel\", None) is not None\n has_experiences = state.get(\"experiences\", None) is not None\n\n system_prompt = f\"\"\"\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flights found: {has_flights}\n - Hotels found: {has_hotels}\n - Experiences found: {has_experiences}\n - Itinerary (Things that the user has already confirmed selection on): {json.dumps(itinerary, indent=2)}\n \n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options \n - experiences_agent: Finds restaurant and activity recommendations\n - {END}: Mark task as complete when all information is gathered\n \n You must route to the appropriate agent based on what's missing. Once all agents have completed their tasks, route to 'complete'.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Bind the routing tool\n model_with_tools = model.bind_tools(\n [SupervisorResponseFormatter],\n parallel_tool_calls=False,\n )\n\n # Get supervisor decision\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls for routing\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n if isinstance(tool_call, dict):\n tool_call_args = tool_call[\"args\"]\n else:\n tool_call_args = tool_call.args\n\n next_agent = tool_call_args[\"next_agent\"]\n\n # Add tool response\n tool_response = {\n \"role\": \"tool\",\n \"content\": f\"Routing to {next_agent} and providing the answer\",\n \"tool_call_id\": tool_call.id if hasattr(tool_call, 'id') else tool_call[\"id\"]\n }\n\n messages = messages + [tool_response, AIMessage(content=tool_call_args[\"answer\"])]\n\n if next_agent is not None:\n return Command(goto=next_agent)\n\n # Fallback if no tool call\n return Command(\n goto=END,\n update={\"messages\": messages}\n )\n\n# Create subgraphs\nflights_graph = StateGraph(TravelAgentState)\nflights_graph.add_node(\"flights_agent_chat_node\", flights_finder)\nflights_graph.set_entry_point(\"flights_agent_chat_node\")\nflights_graph.add_edge(START, \"flights_agent_chat_node\")\nflights_graph.add_edge(\"flights_agent_chat_node\", END)\nflights_subgraph = flights_graph.compile()\n\nhotels_graph = StateGraph(TravelAgentState)\nhotels_graph.add_node(\"hotels_agent_chat_node\", hotels_finder)\nhotels_graph.set_entry_point(\"hotels_agent_chat_node\")\nhotels_graph.add_edge(START, \"hotels_agent_chat_node\")\nhotels_graph.add_edge(\"hotels_agent_chat_node\", END)\nhotels_subgraph = hotels_graph.compile()\n\nexperiences_graph = StateGraph(TravelAgentState)\nexperiences_graph.add_node(\"experiences_agent_chat_node\", experiences_finder)\nexperiences_graph.set_entry_point(\"experiences_agent_chat_node\")\nexperiences_graph.add_edge(START, 
\"experiences_agent_chat_node\")\nexperiences_graph.add_edge(\"experiences_agent_chat_node\", END)\nexperiences_subgraph = experiences_graph.compile()\n\n# Main supervisor workflow\nworkflow = StateGraph(TravelAgentState)\n\n# Add supervisor and subgraphs as nodes\nworkflow.add_node(\"supervisor\", supervisor_agent)\nworkflow.add_node(\"flights_agent\", flights_subgraph)\nworkflow.add_node(\"hotels_agent\", hotels_subgraph)\nworkflow.add_node(\"experiences_agent\", experiences_subgraph)\n\n# Set entry point\nworkflow.set_entry_point(\"supervisor\")\nworkflow.add_edge(START, \"supervisor\")\n\n# Add edges back to supervisor after each subgraph\nworkflow.add_edge(\"flights_agent\", \"supervisor\")\nworkflow.add_edge(\"hotels_agent\", \"supervisor\")\nworkflow.add_edge(\"experiences_agent\", \"supervisor\")\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\nThe supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n\"\"\"\n\nfrom typing import Dict, List, Any, Optional, Annotated, Union\nfrom dataclasses import dataclass\nimport json\nimport os\nfrom pydantic import BaseModel, Field\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\n\n# OpenAI imports\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage, AIMessage\n\ndef create_interrupt(message: str, options: List[Any], recommendation: Any, agent: str):\n return interrupt({\n \"message\": message,\n \"options\": options,\n \"recommendation\": recommendation,\n \"agent\": agent,\n })\n\n# State schema for travel planning\n@dataclass\nclass Flight:\n airline: str\n departure: str\n arrival: str\n price: str\n duration: str\n\n@dataclass\nclass Hotel:\n name: str\n location: str\n price_per_night: str\n rating: str\n\n@dataclass\nclass Experience:\n name: str\n type: str # \"restaurant\" or \"activity\"\n description: str\n location: str\n\ndef merge_itinerary(left: Union[dict, None] = None, right: Union[dict, None] = None) -> dict:\n \"\"\"Custom reducer to merge shopping cart updates.\"\"\"\n if not left:\n left = {}\n if not right:\n right = {}\n\n return {**left, **right}\n\nclass TravelAgentState(MessagesState):\n \"\"\"Shared state for the travel agent system\"\"\"\n # Travel request details\n origin: str = \"\"\n destination: str = \"\"\n\n # Results from each agent\n flights: List[Flight] = None\n hotels: List[Hotel] = None\n experiences: List[Experience] = None\n\n itinerary: Annotated[dict, merge_itinerary] = None\n\n # Tools available to all agents\n tools: List[Any] = []\n\n # Supervisor routing\n next_agent: Optional[str] = None\n\n# Static data for demonstration\nSTATIC_FLIGHTS = [\n Flight(\"KLM\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$650\", \"11h 30m\"),\n 
Flight(\"United\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$720\", \"12h 15m\")\n]\n\nSTATIC_HOTELS = [\n Hotel(\"Hotel Zephyr\", \"Fisherman's Wharf\", \"$280/night\", \"4.2 stars\"),\n Hotel(\"The Ritz-Carlton\", \"Nob Hill\", \"$550/night\", \"4.8 stars\"),\n Hotel(\"Hotel Zoe\", \"Union Square\", \"$320/night\", \"4.4 stars\")\n]\n\nSTATIC_EXPERIENCES = [\n Experience(\"Pier 39\", \"activity\", \"Iconic waterfront destination with shops and sea lions\", \"Fisherman's Wharf\"),\n Experience(\"Golden Gate Bridge\", \"activity\", \"World-famous suspension bridge with stunning views\", \"Golden Gate\"),\n Experience(\"Swan Oyster Depot\", \"restaurant\", \"Historic seafood counter serving fresh oysters\", \"Polk Street\"),\n Experience(\"Tartine Bakery\", \"restaurant\", \"Artisanal bakery famous for bread and pastries\", \"Mission District\")\n]\n\n# Flights finder subgraph\nasync def flights_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds flight options\"\"\"\n\n # Simulate flight search with static data\n flights = STATIC_FLIGHTS\n\n selected_flight = state.get('itinerary', {}).get('flight', None)\n if not selected_flight:\n selected_flight = create_interrupt(\n message=f\"\"\"\n Found {len(flights)} flight options from {state.get('origin', 'Amsterdam')} to {state.get('destination', 'San Francisco')}.\n I recommend choosing the flight by {flights[0].airline} since it's known to be on time and cheaper.\n \"\"\",\n options=flights,\n recommendation=flights[0],\n agent=\"flights\"\n )\n\n if isinstance(selected_flight, str):\n selected_flight = json.loads(selected_flight)\n return Command(\n goto=END,\n update={\n \"flights\": flights,\n \"itinerary\": {\n \"flight\": selected_flight\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Flights Agent: Great. I'll book you the {selected_flight[\"airline\"]} flight from {selected_flight[\"departure\"]} to {selected_flight[\"arrival\"]}.\"\n }]\n }\n )\n\n# Hotels finder subgraph\nasync def hotels_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds hotel options\"\"\"\n\n # Simulate hotel search with static data\n hotels = STATIC_HOTELS\n selected_hotel = state.get('itinerary', {}).get('hotel', None)\n if not selected_hotel:\n selected_hotel = create_interrupt(\n message=f\"\"\"\n Found {len(hotels)} accommodation options in {state.get('destination', 'San Francisco')}.\n I recommend choosing the {hotels[2].name} since it strikes the balance between rating, price, and location.\n \"\"\",\n options=hotels,\n recommendation=hotels[2],\n agent=\"hotels\"\n )\n\n if isinstance(selected_hotel, str):\n selected_hotel = json.loads(selected_hotel)\n return Command(\n goto=END,\n update={\n \"hotels\": hotels,\n \"itinerary\": {\n \"hotel\": selected_hotel\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Hotels Agent: Excellent choice! 
You'll like {selected_hotel[\"name\"]}.\"\n }]\n }\n )\n\n# Experiences finder subgraph\nasync def experiences_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds restaurant and activity recommendations\"\"\"\n\n # Filter experiences (2 restaurants, 2 activities)\n restaurants = [exp for exp in STATIC_EXPERIENCES if exp.type == \"restaurant\"][:2]\n activities = [exp for exp in STATIC_EXPERIENCES if exp.type == \"activity\"][:2]\n experiences = restaurants + activities\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n itinerary = state.get(\"itinerary\", {})\n\n system_prompt = f\"\"\"\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now is let the user know of your findings.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flight chosen: {itinerary.get(\"flight\", None)}\n - Hotel chosen: {itinerary.get(\"hotel\", None)}\n - activities found: {activities}\n - restaurants found: {restaurants}\n \"\"\"\n\n # Get the experiences agent's summary response\n response = await model.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"experiences\": experiences,\n \"messages\": state[\"messages\"] + [response]\n }\n )\n\nclass SupervisorResponseFormatter(BaseModel):\n \"\"\"Always use this tool to structure your response to the user.\"\"\"\n answer: str = Field(description=\"The answer to the user\")\n next_agent: str | None = Field(description=\"The agent to go to. Not required if you do not want to route to another agent.\")\n\n# Supervisor agent\nasync def supervisor_agent(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Main supervisor that coordinates all subgraphs\"\"\"\n\n itinerary = state.get(\"itinerary\", {})\n\n # Check what's already completed\n has_flights = itinerary.get(\"flight\", None) is not None\n has_hotels = itinerary.get(\"hotel\", None) is not None\n has_experiences = state.get(\"experiences\", None) is not None\n\n system_prompt = f\"\"\"\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flights found: {has_flights}\n - Hotels found: {has_hotels}\n - Experiences found: {has_experiences}\n - Itinerary (Things that the user has already confirmed selection on): {json.dumps(itinerary, indent=2)}\n \n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options \n - experiences_agent: Finds restaurant and activity recommendations\n - {END}: Mark task as complete when all information is gathered\n \n You must route to the appropriate agent based on what's missing. 
Once all agents have completed their tasks, route to 'complete'.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Bind the routing tool\n model_with_tools = model.bind_tools(\n [SupervisorResponseFormatter],\n parallel_tool_calls=False,\n )\n\n # Get supervisor decision\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls for routing\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n if isinstance(tool_call, dict):\n tool_call_args = tool_call[\"args\"]\n else:\n tool_call_args = tool_call.args\n\n next_agent = tool_call_args[\"next_agent\"]\n\n # Add tool response\n tool_response = {\n \"role\": \"tool\",\n \"content\": f\"Routing to {next_agent} and providing the answer\",\n \"tool_call_id\": tool_call.id if hasattr(tool_call, 'id') else tool_call[\"id\"]\n }\n\n messages = messages + [tool_response, AIMessage(content=tool_call_args[\"answer\"])]\n\n if next_agent is not None:\n return Command(goto=next_agent)\n\n # Fallback if no tool call\n return Command(\n goto=END,\n update={\"messages\": messages}\n )\n\n# Create subgraphs\nflights_graph = StateGraph(TravelAgentState)\nflights_graph.add_node(\"flights_agent_chat_node\", flights_finder)\nflights_graph.set_entry_point(\"flights_agent_chat_node\")\nflights_graph.add_edge(START, \"flights_agent_chat_node\")\nflights_graph.add_edge(\"flights_agent_chat_node\", END)\nflights_subgraph = flights_graph.compile()\n\nhotels_graph = StateGraph(TravelAgentState)\nhotels_graph.add_node(\"hotels_agent_chat_node\", hotels_finder)\nhotels_graph.set_entry_point(\"hotels_agent_chat_node\")\nhotels_graph.add_edge(START, \"hotels_agent_chat_node\")\nhotels_graph.add_edge(\"hotels_agent_chat_node\", END)\nhotels_subgraph = hotels_graph.compile()\n\nexperiences_graph = StateGraph(TravelAgentState)\nexperiences_graph.add_node(\"experiences_agent_chat_node\", experiences_finder)\nexperiences_graph.set_entry_point(\"experiences_agent_chat_node\")\nexperiences_graph.add_edge(START, \"experiences_agent_chat_node\")\nexperiences_graph.add_edge(\"experiences_agent_chat_node\", END)\nexperiences_subgraph = experiences_graph.compile()\n\n# Main supervisor workflow\nworkflow = StateGraph(TravelAgentState)\n\n# Add supervisor and subgraphs as nodes\nworkflow.add_node(\"supervisor\", supervisor_agent)\nworkflow.add_node(\"flights_agent\", flights_subgraph)\nworkflow.add_node(\"hotels_agent\", hotels_subgraph)\nworkflow.add_node(\"experiences_agent\", experiences_subgraph)\n\n# Set entry point\nworkflow.set_entry_point(\"supervisor\")\nworkflow.add_edge(START, \"supervisor\")\n\n# Add edges back to supervisor after each subgraph\nworkflow.add_edge(\"flights_agent\", \"supervisor\")\nworkflow.add_edge(\"hotels_agent\", \"supervisor\")\nworkflow.add_edge(\"experiences_agent\", \"supervisor\")\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, 
don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" } @@ -851,7 +851,7 @@ "langgraph-typescript::agentic_chat": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", "language": "typescript", "type": "file" }, @@ -869,7 +869,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. 
We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. 
We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -883,7 +883,7 @@ "langgraph-typescript::agentic_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter(step => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n
\n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n \n {/* Progress Bar */}\n
\n
\n
\n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending = step.status === \"pending\" && \n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n
\n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n
\n )}\n\n {/* Status Icon */}\n
\n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n
\n {step.description}\n
\n {isCurrentPending && (\n
\n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n
\n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n
\n
\n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", + "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter((step) => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n \n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n\n {/* Progress Bar */}\n \n \n \n
\n
\n\n {/* Steps */}\n
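{/* state.steps streams in while the agent works: the Python side completes one step per second and re-emits state, which re-renders this progress view. */}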
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending =\n step.status === \"pending\" &&\n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n \n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n \n )}\n\n {/* Status Icon */}\n \n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n \n {step.description}\n
\n {isCurrentPending && (\n \n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n \n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n \n \n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", "language": "typescript", "type": "file" }, @@ -901,7 +901,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. 
\n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages 
import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. 
\n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -915,7 +915,7 @@ "langgraph-typescript::human_in_the_loop": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } 
from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for langgraph integration.\n available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return ;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", + "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n \n {children}\n
\n
\n);\n\nconst StepHeader = ({\n theme,\n enabledCount,\n totalCount,\n status,\n showStatus = false,\n}: {\n theme?: string;\n enabledCount: number;\n totalCount: number;\n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n \n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n\n \n 0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({\n step,\n theme,\n status,\n onToggle,\n disabled = false,\n}: {\n step: { description: string; status: string };\n theme?: string;\n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n \n \n \n);\n\nconst ActionButton = ({\n variant,\n theme,\n disabled,\n onClick,\n children,\n}: {\n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n\n const variantClasses = {\n primary:\n \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary:\n theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success:\n \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger:\n \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\",\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({\n theme,\n variant = \"default\",\n}: {\n theme?: string;\n variant?: \"default\" | \"success\" | \"danger\";\n}) => (\n <>\n \n \n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n\n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter((step) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n\n
\n        {localSteps.map((step, index) => (\n          <StepItem\n            key={index}\n            step={step}\n            theme={theme}\n            onToggle={() => handleStepToggle(index)}\n          />\n        ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n  );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n  // LangGraph uses its own hook to handle human-in-the-loop interactions via LangGraph interrupts;\n  // this hook won't do anything for other integrations.\n  useLangGraphInterrupt({\n    render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n  });\n  useCopilotAction({\n    name: \"generate_task_steps\",\n    description: \"Generates a list of steps for the user to perform\",\n    parameters: [\n      {\n        name: \"steps\",\n        type: \"object[]\",\n        attributes: [\n          {\n            name: \"description\",\n            type: \"string\",\n          },\n          {\n            name: \"status\",\n            type: \"string\",\n            enum: [\"enabled\", \"disabled\", \"executing\"],\n          },\n        ],\n      },\n    ],\n    // LangGraph uses its own hook to handle human-in-the-loop interactions via LangGraph interrupts,\n    // so don't use this action for the LangGraph integrations.\n    available: [\"langgraph\", \"langgraph-fastapi\", \"langgraph-typescript\"].includes(integrationId)\n      ? \"disabled\"\n      : \"enabled\",\n    renderAndWaitForResponse: ({ args, respond, status }) => {\n      return <StepsFeedback args={args} respond={respond} status={status} />;\n    },\n  });\n\n  return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter((step) => step.status === \"enabled\") });\n }\n };\n\n return (\n \n \n\n
\n        {steps.map((step: any, index: any) => (\n          <StepItem\n            key={index}\n            step={step}\n            theme={theme}\n            status={status}\n            onToggle={() => handleStepToggle(index)}\n            disabled={status !== \"executing\"}\n          />\n        ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n \n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n \n )}\n\n \n
\n );\n};\n\nexport default HumanInTheLoop;\n", "language": "typescript", "type": "file" }, @@ -933,7 +933,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in imperative form (i.e. Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any]\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == 
\"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final response\n return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add 
edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in imperative form (i.e. 
Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any] = []\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final response\n 
return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -947,7 +947,7 @@ "langgraph-typescript::predictive_state_updates": [ { "name": "page.tsx", - "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport 
StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n const chatTitle = 'AI Document Editor'\n const chatDescription = 'Ask me to create or edit a document'\n const initialLabel = 'Hi 👋 How can I help with your document?'\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction({\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n }, [agentState?.document]);\n\n // Action to write the document.\n useCopilotAction({\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n }, [agentState?.document]);\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n
\n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n
\n )}\n
\n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", + "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n const chatTitle = \"AI Document Editor\";\n const chatDescription = \"Ask me to create or edit a document\";\n const initialLabel = \"Hi 👋 How can I help with your document?\";\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n \n
\n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction(\n {\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n },\n [agentState?.document],\n );\n\n // Action to write the document.\n useCopilotAction(\n {\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n },\n [agentState?.document],\n );\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n \n

Confirm Changes

\n

Do you want to accept the changes?

\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n \n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n \n )}\n \n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", "language": "typescript", "type": "file" }, @@ -965,7 +965,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any]\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n return Command(\n goto=\"chat_node\"\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 
2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n \"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", + "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph 
import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n 
\"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", "language": "python", "type": "file" }, @@ -979,7 +979,7 @@ "langgraph-typescript::shared_state": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => 
{\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n \n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n
\n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", + "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n\n const chatTitle = \"AI Recipe Assistant\";\n const chatDescription = \"Ask me to craft recipes\";\n const initialLabel = \"Hi 👋 How can I help with your recipe?\";\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, 
preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n \n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n
\n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -997,7 +997,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any]\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
don't describe the recipe, just say what you did.\n    \"\"\"\n\n    # Define the model\n    model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n    # Define config for the model\n    if config is None:\n        config = RunnableConfig(recursion_limit=25)\n\n    # Use \"predict_state\" metadata to set up streaming for the generate_recipe tool\n    config[\"metadata\"][\"predict_state\"] = [{\n        \"state_key\": \"recipe\",\n        \"tool\": \"generate_recipe\",\n        \"tool_argument\": \"recipe\"\n    }]\n\n    # Bind the tools to the model\n    model_with_tools = model.bind_tools(\n        [\n            *state[\"tools\"],\n            generate_recipe\n        ],\n        # Disable parallel tool calls to avoid race conditions\n        parallel_tool_calls=False,\n    )\n\n    # Run the model and generate a response\n    response = await model_with_tools.ainvoke([\n        SystemMessage(content=system_prompt),\n        *state[\"messages\"],\n    ], config)\n\n    # Update messages with the response\n    messages = state[\"messages\"] + [response]\n\n    # Handle tool calls\n    if hasattr(response, \"tool_calls\") and response.tool_calls:\n        # Handle dicts or object (backward compatibility)\n        tool_call = (response.tool_calls[0]\n                     if isinstance(response.tool_calls[0], dict)\n                     else vars(response.tool_calls[0]))\n\n        # Check if args is already a dict or needs to be parsed\n        tool_call_args = (tool_call[\"args\"]\n                          if isinstance(tool_call[\"args\"], dict)\n                          else json.loads(tool_call[\"args\"]))\n\n        if tool_call[\"name\"] == \"generate_recipe\":\n            # Update recipe state with tool_call_args\n            recipe_data = tool_call_args[\"recipe\"]\n\n            # If we have an existing recipe, update it\n            if \"recipe\" in state and state[\"recipe\"] is not None:\n                recipe = state[\"recipe\"]\n                for key, value in recipe_data.items():\n                    if value is not None: # Only update fields that were provided\n                        recipe[key] = value\n            else:\n                # Create a new recipe\n                recipe = {\n                    \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n                    \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n                    \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n                    \"ingredients\": recipe_data.get(\"ingredients\", []),\n                    \"instructions\": recipe_data.get(\"instructions\", [])\n                }\n\n            # Add tool response to messages\n            tool_response = {\n                \"role\": \"tool\",\n                \"content\": \"Recipe generated.\",\n                \"tool_call_id\": tool_call[\"id\"]\n            }\n\n            messages = messages + [tool_response]\n\n            # Explicitly emit the updated state to ensure it's shared with frontend\n            state[\"recipe\"] = recipe\n            await adispatch_custom_event(\n                \"manually_emit_intermediate_state\",\n                state,\n                config=config,\n            )\n\n            # Return command with updated recipe\n            return Command(\n                goto=\"start_node\",\n                update={\n                    \"messages\": messages,\n                    \"recipe\": recipe\n                }\n            )\n\n    return Command(\n        goto=END,\n        update={\n            \"messages\": messages,\n            \"recipe\": state[\"recipe\"]\n        }\n    )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n    # For CopilotKit and other contexts, use MemorySaver\n    from langgraph.checkpoint.memory import MemorySaver\n    memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n if \"tools\" not in state:\n state[\"tools\"] = []\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"],\n \"tools\": state[\"tools\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
don't describe the recipe, just say what you did.\n    \"\"\"\n\n    # Define the model\n    model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n    # Define config for the model\n    if config is None:\n        config = RunnableConfig(recursion_limit=25)\n\n    # Use \"predict_state\" metadata to set up streaming for the generate_recipe tool\n    config[\"metadata\"][\"predict_state\"] = [{\n        \"state_key\": \"recipe\",\n        \"tool\": \"generate_recipe\",\n        \"tool_argument\": \"recipe\"\n    }]\n\n    # Bind the tools to the model\n    model_with_tools = model.bind_tools(\n        [\n            *state[\"tools\"],\n            generate_recipe\n        ],\n        # Disable parallel tool calls to avoid race conditions\n        parallel_tool_calls=False,\n    )\n\n    # Run the model and generate a response\n    response = await model_with_tools.ainvoke([\n        SystemMessage(content=system_prompt),\n        *state[\"messages\"],\n    ], config)\n\n    # Update messages with the response\n    messages = state[\"messages\"] + [response]\n\n    # Handle tool calls\n    if hasattr(response, \"tool_calls\") and response.tool_calls:\n        # Handle dicts or object (backward compatibility)\n        tool_call = (response.tool_calls[0]\n                     if isinstance(response.tool_calls[0], dict)\n                     else vars(response.tool_calls[0]))\n\n        # Check if args is already a dict or needs to be parsed\n        tool_call_args = (tool_call[\"args\"]\n                          if isinstance(tool_call[\"args\"], dict)\n                          else json.loads(tool_call[\"args\"]))\n\n        if tool_call[\"name\"] == \"generate_recipe\":\n            # Update recipe state with tool_call_args\n            recipe_data = tool_call_args[\"recipe\"]\n\n            # If we have an existing recipe, update it\n            if \"recipe\" in state and state[\"recipe\"] is not None:\n                recipe = state[\"recipe\"]\n                for key, value in recipe_data.items():\n                    if value is not None: # Only update fields that were provided\n                        recipe[key] = value\n            else:\n                # Create a new recipe\n                recipe = {\n                    \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n                    \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n                    \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n                    \"ingredients\": recipe_data.get(\"ingredients\", []),\n                    \"instructions\": recipe_data.get(\"instructions\", [])\n                }\n\n            # Add tool response to messages\n            tool_response = {\n                \"role\": \"tool\",\n                \"content\": \"Recipe generated.\",\n                \"tool_call_id\": tool_call[\"id\"]\n            }\n\n            messages = messages + [tool_response]\n\n            # Explicitly emit the updated state to ensure it's shared with frontend\n            state[\"recipe\"] = recipe\n            await adispatch_custom_event(\n                \"manually_emit_intermediate_state\",\n                state,\n                config=config,\n            )\n\n            # Return command with updated recipe\n            return Command(\n                goto=\"start_node\",\n                update={\n                    \"messages\": messages,\n                    \"recipe\": recipe\n                }\n            )\n\n    return Command(\n        goto=END,\n        update={\n            \"messages\": messages,\n            \"recipe\": state[\"recipe\"]\n        }\n    )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n    # For CopilotKit and other contexts, use MemorySaver\n    from langgraph.checkpoint.memory import MemorySaver\n    memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -1011,7 +1011,7 @@ "langgraph-typescript::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n \n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", + "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [];\n english: string[] | [];\n image_names: string[] | [];\n selectedImage: string | null;\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial;\n setHaikus: Dispatch>;\n haikus: GenerateHaiku[];\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n const chatTitle = \"Haiku Generator\";\n const chatDescription = \"Ask me to create haikus\";\n const initialLabel = \"I'm a haiku generator 👋. How can I help you?\";\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && (\n \n )}\n \n \n );\n}\n\nfunction MobileChat({\n chatTitle,\n chatDescription,\n initialLabel,\n}: {\n chatTitle: string;\n chatDescription: string;\n initialLabel: string;\n}) {\n const defaultChatHeight = 50;\n\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen &&
setIsChatOpen(false)} />}\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

{generatedHaiku.english?.[index]}

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus((prevHaikus) => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName,\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n
\n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_names: [],\n selectedImage: null,\n },\n ]);\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction(\n {\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join(\"\\n -\")}`,\n },\n ],\n followUp: false,\n handler: async ({\n japanese,\n english,\n image_names,\n }: {\n japanese: string[];\n english: string[];\n image_names: string[];\n }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus((prev) =>\n [newHaiku, ...prev].filter((h) => h.english[0] !== \"A placeholder verse—\"),\n );\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return ;\n },\n },\n [haikus],\n );\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n \n
\n {haikus.map(\n (haiku, index) =>\n (haikus.length == 1 || index == activeIndex) && (\n \n {haiku.japanese.map((line, lineIndex) => (\n \n \n {line}\n

\n \n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n \n {haiku.image_names.map((imageName, imgIndex) => (\n \n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName };\n } else {\n return { ...h };\n }\n });\n })\n }\n />\n ))}\n
\n )}\n \n ),\n )}\n \n \n \n );\n}\n\nfunction Thumbnails({\n haikus,\n activeIndex,\n setActiveIndex,\n isMobile,\n}: {\n haikus: Haiku[];\n activeIndex: number;\n setActiveIndex: (index: number) => void;\n isMobile: boolean;\n}) {\n if (haikus.length == 0 || isMobile) {\n return null;\n }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n
\n

{line}

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n
\n ))}\n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -1029,7 +1029,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. 
It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -1043,7 +1043,7 @@ "langgraph-typescript::subgraphs": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SubgraphsProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\n// Travel planning data types\ninterface Flight {\n airline: string;\n arrival: string;\n departure: string;\n duration: string;\n price: string;\n}\n\ninterface Hotel {\n location: string;\n name: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n description: string;\n location: string;\n type: string;\n}\n\ninterface Itinerary {\n hotel?: Hotel;\n flight?: Flight;\n experiences?: Experience[];\n}\n\ntype AvailableAgents = 'flights' | 'hotels' | 'experiences' | 'supervisor'\n\ninterface TravelAgentState {\n experiences: Experience[],\n flights: Flight[],\n hotels: Hotel[],\n itinerary: Itinerary\n planning_step: string\n active_agent: AvailableAgents\n}\n\nconst INITIAL_STATE: TravelAgentState = {\n itinerary: {},\n experiences: [],\n flights: [],\n hotels: [],\n planning_step: \"start\",\n active_agent: 'supervisor'\n};\n\ninterface InterruptEvent {\n message: string;\n options: TAgent extends 'flights' ? Flight[] : TAgent extends 'hotels' ? Hotel[] : never,\n recommendation: TAgent extends 'flights' ? Flight : TAgent extends 'hotels' ? 
Hotel : never,\n agent: TAgent\n}\n\nfunction InterruptHumanInTheLoop({\n event,\n resolve,\n}: {\n event: { value: InterruptEvent };\n resolve: (value: string) => void;\n}) {\n const { message, options, agent, recommendation } = event.value;\n\n // Format agent name with emoji\n const formatAgentName = (agent: string) => {\n switch (agent) {\n case 'flights': return 'Flights Agent';\n case 'hotels': return 'Hotels Agent';\n case 'experiences': return 'Experiences Agent';\n default: return `${agent} Agent`;\n }\n };\n\n const handleOptionSelect = (option: any) => {\n resolve(JSON.stringify(option));\n };\n\n return (\n
\n

{formatAgentName(agent)}: {message}

\n\n
\n {options.map((opt, idx) => {\n if ('airline' in opt) {\n const isRecommended = (recommendation as Flight).airline === opt.airline;\n // Flight options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.airline}\n {opt.price}\n
\n
\n {opt.departure} → {opt.arrival}\n
\n
\n {opt.duration}\n
\n \n );\n }\n const isRecommended = (recommendation as Hotel).name === opt.name;\n\n // Hotel options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.name}\n {opt.rating}\n
\n
\n 📍 {opt.location}\n
\n
\n {opt.price_per_night}\n
\n \n );\n })}\n
\n
\n )\n}\n\nexport default function Subgraphs({ params }: SubgraphsProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight);\n\n const chatTitle = 'Travel Planning Assistant';\n const chatDescription = 'Plan your perfect trip with AI specialists';\n const initialLabel = 'Hi! ✈️ Ready to plan an amazing trip? Try saying \"Plan a trip to Paris\" or \"Find me flights to Tokyo\"';\n\n return (\n \n
\n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight);\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n
\n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nfunction TravelPlanner() {\n const { isMobile } = useMobileView();\n const { state: agentState, nodeName } = useCoAgent({\n name: \"subgraphs\",\n initialState: INITIAL_STATE,\n config: {\n streamSubgraphs: true,\n }\n });\n\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n\n // Current itinerary strip\n const ItineraryStrip = () => {\n const selectedFlight = agentState?.itinerary?.flight;\n const selectedHotel = agentState?.itinerary?.hotel;\n const hasExperiences = agentState?.experiences?.length > 0;\n\n return (\n
\n
Current Itinerary:
\n
\n
\n 📍\n Amsterdam → San Francisco\n
\n {selectedFlight && (\n
\n ✈️\n {selectedFlight.airline} - {selectedFlight.price}\n
\n )}\n {selectedHotel && (\n
\n 🏨\n {selectedHotel.name}\n
\n )}\n {hasExperiences && (\n
\n 🎯\n {agentState.experiences.length} experiences planned\n
\n )}\n
\n
\n );\n };\n\n // Compact agent status\n const AgentStatus = () => {\n let activeAgent = 'supervisor';\n if (nodeName?.includes('flights_agent')) {\n activeAgent = 'flights';\n }\n if (nodeName?.includes('hotels_agent')) {\n activeAgent = 'hotels';\n }\n if (nodeName?.includes('experiences_agent')) {\n activeAgent = 'experiences';\n }\n return (\n
\n
Active Agent:
\n
\n
\n 👨‍💼\n Supervisor\n
\n
\n ✈️\n Flights\n
\n
\n 🏨\n Hotels\n
\n
\n 🎯\n Experiences\n
\n
\n
\n )\n };\n\n // Travel details component\n const TravelDetails = () => (\n
\n
\n

✈️ Flight Options

\n
\n {agentState?.flights?.length > 0 ? (\n agentState.flights.map((flight, index) => (\n
\n {flight.airline}:\n {flight.departure} → {flight.arrival} ({flight.duration}) - {flight.price}\n
\n ))\n ) : (\n

No flights found yet

\n )}\n {agentState?.itinerary?.flight && (\n
\n Selected: {agentState.itinerary.flight.airline} - {agentState.itinerary.flight.price}\n
\n )}\n
\n
\n\n
\n

🏨 Hotel Options

\n
\n {agentState?.hotels?.length > 0 ? (\n agentState.hotels.map((hotel, index) => (\n
\n {hotel.name}:\n {hotel.location} - {hotel.price_per_night} ({hotel.rating})\n
\n ))\n ) : (\n

No hotels found yet

\n )}\n {agentState?.itinerary?.hotel && (\n
\n Selected: {agentState.itinerary.hotel.name} - {agentState.itinerary.hotel.price_per_night}\n
\n )}\n
\n
\n\n
\n

🎯 Experiences

\n
\n {agentState?.experiences?.length > 0 ? (\n agentState.experiences.map((experience, index) => (\n
\n
{experience.name}
\n
{experience.type}
\n
{experience.description}
\n
Location: {experience.location}
\n
\n ))\n ) : (\n

No experiences planned yet

\n )}\n
\n
\n
\n );\n\n return (\n
\n \n \n \n
\n );\n}", + "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface SubgraphsProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\n// Travel planning data types\ninterface Flight {\n airline: string;\n arrival: string;\n departure: string;\n duration: string;\n price: string;\n}\n\ninterface Hotel {\n location: string;\n name: string;\n price_per_night: string;\n rating: string;\n}\n\ninterface Experience {\n name: string;\n description: string;\n location: string;\n type: string;\n}\n\ninterface Itinerary {\n hotel?: Hotel;\n flight?: Flight;\n experiences?: Experience[];\n}\n\ntype AvailableAgents = \"flights\" | \"hotels\" | \"experiences\" | \"supervisor\";\n\ninterface TravelAgentState {\n experiences: Experience[];\n flights: Flight[];\n hotels: Hotel[];\n itinerary: Itinerary;\n planning_step: string;\n active_agent: AvailableAgents;\n}\n\nconst INITIAL_STATE: TravelAgentState = {\n itinerary: {},\n experiences: [],\n flights: [],\n hotels: [],\n planning_step: \"start\",\n active_agent: \"supervisor\",\n};\n\ninterface InterruptEvent {\n message: string;\n options: TAgent extends \"flights\" ? Flight[] : TAgent extends \"hotels\" ? Hotel[] : never;\n recommendation: TAgent extends \"flights\" ? Flight : TAgent extends \"hotels\" ? Hotel : never;\n agent: TAgent;\n}\n\nfunction InterruptHumanInTheLoop({\n event,\n resolve,\n}: {\n event: { value: InterruptEvent };\n resolve: (value: string) => void;\n}) {\n const { message, options, agent, recommendation } = event.value;\n\n // Format agent name with emoji\n const formatAgentName = (agent: string) => {\n switch (agent) {\n case \"flights\":\n return \"Flights Agent\";\n case \"hotels\":\n return \"Hotels Agent\";\n case \"experiences\":\n return \"Experiences Agent\";\n default:\n return `${agent} Agent`;\n }\n };\n\n const handleOptionSelect = (option: any) => {\n resolve(JSON.stringify(option));\n };\n\n return (\n
\n

\n {formatAgentName(agent)}: {message}\n

\n\n
\n {options.map((opt, idx) => {\n if (\"airline\" in opt) {\n const isRecommended = (recommendation as Flight).airline === opt.airline;\n // Flight options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.airline}\n {opt.price}\n
\n
\n {opt.departure} → {opt.arrival}\n
\n
{opt.duration}
\n \n );\n }\n const isRecommended = (recommendation as Hotel).name === opt.name;\n\n // Hotel options\n return (\n handleOptionSelect(opt)}\n >\n {isRecommended && ⭐ Recommended}\n
\n {opt.name}\n {opt.rating}\n
\n
📍 {opt.location}
\n
{opt.price_per_night}
\n \n );\n })}\n
\n
\n );\n}\n\nexport default function Subgraphs({ params }: SubgraphsProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n\n const chatTitle = \"Travel Planning Assistant\";\n const chatDescription = \"Plan your perfect trip with AI specialists\";\n const initialLabel =\n 'Hi! ✈️ Ready to plan an amazing trip? Try saying \"Plan a trip to Paris\" or \"Find me flights to Tokyo\"';\n\n return (\n \n
\n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight);\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n
\n      \n    );\n}\n\nfunction TravelPlanner() {\n  const { isMobile } = useMobileView();\n  const { state: agentState, nodeName } = useCoAgent<TravelAgentState>({\n    name: \"subgraphs\",\n    initialState: INITIAL_STATE,\n    config: {\n      streamSubgraphs: true,\n    },\n  });\n\n  useLangGraphInterrupt({\n    render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n  });\n\n  // Current itinerary strip\n  const ItineraryStrip = () => {\n    const selectedFlight = agentState?.itinerary?.flight;\n    const selectedHotel = agentState?.itinerary?.hotel;\n    const hasExperiences = agentState?.experiences?.length > 0;\n\n    return (\n      
\n
Current Itinerary:
\n
\n
\n 📍\n Amsterdam → San Francisco\n
\n {selectedFlight && (\n
\n ✈️\n \n {selectedFlight.airline} - {selectedFlight.price}\n \n
\n )}\n {selectedHotel && (\n
\n 🏨\n {selectedHotel.name}\n
\n )}\n {hasExperiences && (\n
\n 🎯\n {agentState.experiences.length} experiences planned\n
\n )}\n
\n
\n );\n };\n\n // Compact agent status\n const AgentStatus = () => {\n let activeAgent = \"supervisor\";\n if (nodeName?.includes(\"flights_agent\")) {\n activeAgent = \"flights\";\n }\n if (nodeName?.includes(\"hotels_agent\")) {\n activeAgent = \"hotels\";\n }\n if (nodeName?.includes(\"experiences_agent\")) {\n activeAgent = \"experiences\";\n }\n return (\n
\n
Active Agent:
\n
\n \n 👨‍💼\n Supervisor\n
\n \n ✈️\n Flights\n
\n \n 🏨\n Hotels\n \n \n 🎯\n Experiences\n \n \n \n );\n };\n\n // Travel details component\n const TravelDetails = () => (\n
\n
\n

✈️ Flight Options

\n
\n {agentState?.flights?.length > 0 ? (\n agentState.flights.map((flight, index) => (\n
\n {flight.airline}:\n \n {flight.departure} → {flight.arrival} ({flight.duration}) - {flight.price}\n \n
\n ))\n ) : (\n

No flights found yet

\n )}\n {agentState?.itinerary?.flight && (\n
\n Selected: {agentState.itinerary.flight.airline} -{\" \"}\n {agentState.itinerary.flight.price}\n
\n )}\n
\n
\n\n
\n

🏨 Hotel Options

\n
\n {agentState?.hotels?.length > 0 ? (\n agentState.hotels.map((hotel, index) => (\n
\n {hotel.name}:\n \n {hotel.location} - {hotel.price_per_night} ({hotel.rating})\n \n
\n ))\n ) : (\n

No hotels found yet

\n )}\n {agentState?.itinerary?.hotel && (\n
\n Selected: {agentState.itinerary.hotel.name} -{\" \"}\n {agentState.itinerary.hotel.price_per_night}\n
\n )}\n
\n
\n\n
\n

🎯 Experiences

\n
\n {agentState?.experiences?.length > 0 ? (\n agentState.experiences.map((experience, index) => (\n
\n
{experience.name}
\n
{experience.type}
\n
{experience.description}
\n
Location: {experience.location}
\n
\n ))\n ) : (\n

No experiences planned yet

\n )}\n
\n
\n
\n );\n\n return (\n
\n        <ItineraryStrip />\n        <AgentStatus />\n        <TravelDetails />\n      
\n );\n}\n", "language": "typescript", "type": "file" }, @@ -1061,7 +1061,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\nThe supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n\"\"\"\n\nfrom typing import Dict, List, Any, Optional, Annotated, Union\nfrom dataclasses import dataclass\nimport json\nimport os\nfrom pydantic import BaseModel, Field\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\n\n# OpenAI imports\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage, AIMessage\n\ndef create_interrupt(message: str, options: List[Any], recommendation: Any, agent: str):\n return interrupt({\n \"message\": message,\n \"options\": options,\n \"recommendation\": recommendation,\n \"agent\": agent,\n })\n\n# State schema for travel planning\n@dataclass\nclass Flight:\n airline: str\n departure: str\n arrival: str\n price: str\n duration: str\n\n@dataclass\nclass Hotel:\n name: str\n location: str\n price_per_night: str\n rating: str\n\n@dataclass\nclass Experience:\n name: str\n type: str # \"restaurant\" or \"activity\"\n description: str\n location: str\n\ndef merge_itinerary(left: Union[dict, None] = None, right: Union[dict, None] = None) -> dict:\n \"\"\"Custom reducer to merge shopping cart updates.\"\"\"\n if not left:\n left = {}\n if not right:\n right = {}\n\n return {**left, **right}\n\nclass TravelAgentState(MessagesState):\n \"\"\"Shared state for the travel agent system\"\"\"\n # Travel request details\n origin: str = \"\"\n destination: str = \"\"\n\n # Results from each agent\n flights: List[Flight] = None\n hotels: List[Hotel] = None\n experiences: List[Experience] = None\n\n itinerary: Annotated[dict, merge_itinerary] = None\n\n # Tools available to all agents\n tools: List[Any] = None\n\n # Supervisor routing\n next_agent: Optional[str] = None\n\n# Static data for demonstration\nSTATIC_FLIGHTS = [\n Flight(\"KLM\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$650\", \"11h 30m\"),\n Flight(\"United\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$720\", \"12h 15m\")\n]\n\nSTATIC_HOTELS = [\n Hotel(\"Hotel Zephyr\", \"Fisherman's Wharf\", \"$280/night\", \"4.2 stars\"),\n Hotel(\"The Ritz-Carlton\", \"Nob Hill\", \"$550/night\", \"4.8 stars\"),\n Hotel(\"Hotel Zoe\", \"Union Square\", \"$320/night\", \"4.4 stars\")\n]\n\nSTATIC_EXPERIENCES = [\n Experience(\"Pier 39\", \"activity\", \"Iconic waterfront destination with shops and sea lions\", \"Fisherman's Wharf\"),\n Experience(\"Golden Gate Bridge\", \"activity\", \"World-famous suspension bridge with stunning views\", \"Golden Gate\"),\n Experience(\"Swan Oyster Depot\", \"restaurant\", \"Historic seafood counter serving fresh oysters\", \"Polk Street\"),\n Experience(\"Tartine Bakery\", \"restaurant\", \"Artisanal bakery famous for bread and pastries\", \"Mission District\")\n]\n\n# Flights finder subgraph\nasync def flights_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds flight options\"\"\"\n\n # Simulate flight search with static data\n flights = STATIC_FLIGHTS\n\n selected_flight = state.get('itinerary', {}).get('flight', None)\n if not selected_flight:\n selected_flight = create_interrupt(\n message=f\"\"\"\n Found {len(flights)} flight 
options from {state.get('origin', 'Amsterdam')} to {state.get('destination', 'San Francisco')}.\n I recommend choosing the flight by {flights[0].airline} since it's known to be on time and cheaper.\n \"\"\",\n options=flights,\n recommendation=flights[0],\n agent=\"flights\"\n )\n\n if isinstance(selected_flight, str):\n selected_flight = json.loads(selected_flight)\n return Command(\n goto=END,\n update={\n \"flights\": flights,\n \"itinerary\": {\n \"flight\": selected_flight\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Flights Agent: Great. I'll book you the {selected_flight[\"airline\"]} flight from {selected_flight[\"departure\"]} to {selected_flight[\"arrival\"]}.\"\n }]\n }\n )\n\n# Hotels finder subgraph\nasync def hotels_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds hotel options\"\"\"\n\n # Simulate hotel search with static data\n hotels = STATIC_HOTELS\n selected_hotel = state.get('itinerary', {}).get('hotel', None)\n if not selected_hotel:\n selected_hotel = create_interrupt(\n message=f\"\"\"\n Found {len(hotels)} accommodation options in {state.get('destination', 'San Francisco')}.\n I recommend choosing the {hotels[2].name} since it strikes the balance between rating, price, and location.\n \"\"\",\n options=hotels,\n recommendation=hotels[2],\n agent=\"hotels\"\n )\n\n if isinstance(selected_hotel, str):\n selected_hotel = json.loads(selected_hotel)\n return Command(\n goto=END,\n update={\n \"hotels\": hotels,\n \"itinerary\": {\n \"hotel\": selected_hotel\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Hotels Agent: Excellent choice! You'll like {selected_hotel[\"name\"]}.\"\n }]\n }\n )\n\n# Experiences finder subgraph\nasync def experiences_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds restaurant and activity recommendations\"\"\"\n\n # Filter experiences (2 restaurants, 2 activities)\n restaurants = [exp for exp in STATIC_EXPERIENCES if exp.type == \"restaurant\"][:2]\n activities = [exp for exp in STATIC_EXPERIENCES if exp.type == \"activity\"][:2]\n experiences = restaurants + activities\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n itinerary = state.get(\"itinerary\", {})\n\n system_prompt = f\"\"\"\n You are the experiences agent. Your job is to find restaurants and activities for the user.\n You already went ahead and found a bunch of experiences. All you have to do now, is to let the user know of your findings.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flight chosen: {itinerary.get(\"hotel\", None)}\n - Hotel chosen: {itinerary.get(\"hotel\", None)}\n - activities found: {activities}\n - restaurants found: {restaurants}\n \"\"\"\n\n # Get supervisor decision\n response = await model.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"experiences\": experiences,\n \"messages\": state[\"messages\"] + [response]\n }\n )\n\nclass SupervisorResponseFormatter(BaseModel):\n \"\"\"Always use this tool to structure your response to the user.\"\"\"\n answer: str = Field(description=\"The answer to the user\")\n next_agent: str | None = Field(description=\"The agent to go to. 
Not required if you do not want to route to another agent.\")\n\n# Supervisor agent\nasync def supervisor_agent(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Main supervisor that coordinates all subgraphs\"\"\"\n\n itinerary = state.get(\"itinerary\", {})\n\n # Check what's already completed\n has_flights = itinerary.get(\"flight\", None) is not None\n has_hotels = itinerary.get(\"hotel\", None) is not None\n has_experiences = state.get(\"experiences\", None) is not None\n\n system_prompt = f\"\"\"\n You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n \n Current status:\n - Origin: {state.get('origin', 'Amsterdam')}\n - Destination: {state.get('destination', 'San Francisco')}\n - Flights found: {has_flights}\n - Hotels found: {has_hotels}\n - Experiences found: {has_experiences}\n - Itinerary (Things that the user has already confirmed selection on): {json.dumps(itinerary, indent=2)}\n \n Available agents:\n - flights_agent: Finds flight options\n - hotels_agent: Finds hotel options \n - experiences_agent: Finds restaurant and activity recommendations\n - {END}: Mark task as complete when all information is gathered\n \n You must route to the appropriate agent based on what's missing. Once all agents have completed their tasks, route to 'complete'.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Bind the routing tool\n model_with_tools = model.bind_tools(\n [SupervisorResponseFormatter],\n parallel_tool_calls=False,\n )\n\n # Get supervisor decision\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls for routing\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n if isinstance(tool_call, dict):\n tool_call_args = tool_call[\"args\"]\n else:\n tool_call_args = tool_call.args\n\n next_agent = tool_call_args[\"next_agent\"]\n\n # Add tool response\n tool_response = {\n \"role\": \"tool\",\n \"content\": f\"Routing to {next_agent} and providing the answer\",\n \"tool_call_id\": tool_call.id if hasattr(tool_call, 'id') else tool_call[\"id\"]\n }\n\n messages = messages + [tool_response, AIMessage(content=tool_call_args[\"answer\"])]\n\n if next_agent is not None:\n return Command(goto=next_agent)\n\n # Fallback if no tool call\n return Command(\n goto=END,\n update={\"messages\": messages}\n )\n\n# Create subgraphs\nflights_graph = StateGraph(TravelAgentState)\nflights_graph.add_node(\"flights_agent_chat_node\", flights_finder)\nflights_graph.set_entry_point(\"flights_agent_chat_node\")\nflights_graph.add_edge(START, \"flights_agent_chat_node\")\nflights_graph.add_edge(\"flights_agent_chat_node\", END)\nflights_subgraph = flights_graph.compile()\n\nhotels_graph = StateGraph(TravelAgentState)\nhotels_graph.add_node(\"hotels_agent_chat_node\", hotels_finder)\nhotels_graph.set_entry_point(\"hotels_agent_chat_node\")\nhotels_graph.add_edge(START, \"hotels_agent_chat_node\")\nhotels_graph.add_edge(\"hotels_agent_chat_node\", END)\nhotels_subgraph = hotels_graph.compile()\n\nexperiences_graph = StateGraph(TravelAgentState)\nexperiences_graph.add_node(\"experiences_agent_chat_node\", experiences_finder)\nexperiences_graph.set_entry_point(\"experiences_agent_chat_node\")\nexperiences_graph.add_edge(START, 
\"experiences_agent_chat_node\")\nexperiences_graph.add_edge(\"experiences_agent_chat_node\", END)\nexperiences_subgraph = experiences_graph.compile()\n\n# Main supervisor workflow\nworkflow = StateGraph(TravelAgentState)\n\n# Add supervisor and subgraphs as nodes\nworkflow.add_node(\"supervisor\", supervisor_agent)\nworkflow.add_node(\"flights_agent\", flights_subgraph)\nworkflow.add_node(\"hotels_agent\", hotels_subgraph)\nworkflow.add_node(\"experiences_agent\", experiences_subgraph)\n\n# Set entry point\nworkflow.set_entry_point(\"supervisor\")\nworkflow.add_edge(START, \"supervisor\")\n\n# Add edges back to supervisor after each subgraph\nworkflow.add_edge(\"flights_agent\", \"supervisor\")\nworkflow.add_edge(\"hotels_agent\", \"supervisor\")\nworkflow.add_edge(\"experiences_agent\", \"supervisor\")\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA travel agent supervisor demo showcasing multi-agent architecture with subgraphs.\nThe supervisor coordinates specialized agents: flights finder, hotels finder, and experiences finder.\n\"\"\"\n\nfrom typing import Dict, List, Any, Optional, Annotated, Union\nfrom dataclasses import dataclass\nimport json\nimport os\nfrom pydantic import BaseModel, Field\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\n\n# OpenAI imports\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage, AIMessage\n\ndef create_interrupt(message: str, options: List[Any], recommendation: Any, agent: str):\n return interrupt({\n \"message\": message,\n \"options\": options,\n \"recommendation\": recommendation,\n \"agent\": agent,\n })\n\n# State schema for travel planning\n@dataclass\nclass Flight:\n airline: str\n departure: str\n arrival: str\n price: str\n duration: str\n\n@dataclass\nclass Hotel:\n name: str\n location: str\n price_per_night: str\n rating: str\n\n@dataclass\nclass Experience:\n name: str\n type: str # \"restaurant\" or \"activity\"\n description: str\n location: str\n\ndef merge_itinerary(left: Union[dict, None] = None, right: Union[dict, None] = None) -> dict:\n \"\"\"Custom reducer to merge shopping cart updates.\"\"\"\n if not left:\n left = {}\n if not right:\n right = {}\n\n return {**left, **right}\n\nclass TravelAgentState(MessagesState):\n \"\"\"Shared state for the travel agent system\"\"\"\n # Travel request details\n origin: str = \"\"\n destination: str = \"\"\n\n # Results from each agent\n flights: List[Flight] = None\n hotels: List[Hotel] = None\n experiences: List[Experience] = None\n\n itinerary: Annotated[dict, merge_itinerary] = None\n\n # Tools available to all agents\n tools: List[Any] = []\n\n # Supervisor routing\n next_agent: Optional[str] = None\n\n# Static data for demonstration\nSTATIC_FLIGHTS = [\n Flight(\"KLM\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$650\", \"11h 30m\"),\n 
Flight(\"United\", \"Amsterdam (AMS)\", \"San Francisco (SFO)\", \"$720\", \"12h 15m\")\n]\n\nSTATIC_HOTELS = [\n Hotel(\"Hotel Zephyr\", \"Fisherman's Wharf\", \"$280/night\", \"4.2 stars\"),\n Hotel(\"The Ritz-Carlton\", \"Nob Hill\", \"$550/night\", \"4.8 stars\"),\n Hotel(\"Hotel Zoe\", \"Union Square\", \"$320/night\", \"4.4 stars\")\n]\n\nSTATIC_EXPERIENCES = [\n Experience(\"Pier 39\", \"activity\", \"Iconic waterfront destination with shops and sea lions\", \"Fisherman's Wharf\"),\n Experience(\"Golden Gate Bridge\", \"activity\", \"World-famous suspension bridge with stunning views\", \"Golden Gate\"),\n Experience(\"Swan Oyster Depot\", \"restaurant\", \"Historic seafood counter serving fresh oysters\", \"Polk Street\"),\n Experience(\"Tartine Bakery\", \"restaurant\", \"Artisanal bakery famous for bread and pastries\", \"Mission District\")\n]\n\n# Flights finder subgraph\nasync def flights_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds flight options\"\"\"\n\n # Simulate flight search with static data\n flights = STATIC_FLIGHTS\n\n selected_flight = state.get('itinerary', {}).get('flight', None)\n if not selected_flight:\n selected_flight = create_interrupt(\n message=f\"\"\"\n Found {len(flights)} flight options from {state.get('origin', 'Amsterdam')} to {state.get('destination', 'San Francisco')}.\n I recommend choosing the flight by {flights[0].airline} since it's known to be on time and cheaper.\n \"\"\",\n options=flights,\n recommendation=flights[0],\n agent=\"flights\"\n )\n\n if isinstance(selected_flight, str):\n selected_flight = json.loads(selected_flight)\n return Command(\n goto=END,\n update={\n \"flights\": flights,\n \"itinerary\": {\n \"flight\": selected_flight\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Flights Agent: Great. I'll book you the {selected_flight[\"airline\"]} flight from {selected_flight[\"departure\"]} to {selected_flight[\"arrival\"]}.\"\n }]\n }\n )\n\n# Hotels finder subgraph\nasync def hotels_finder(state: TravelAgentState, config: RunnableConfig):\n \"\"\"Subgraph that finds hotel options\"\"\"\n\n # Simulate hotel search with static data\n hotels = STATIC_HOTELS\n selected_hotel = state.get('itinerary', {}).get('hotel', None)\n if not selected_hotel:\n selected_hotel = create_interrupt(\n message=f\"\"\"\n Found {len(hotels)} accommodation options in {state.get('destination', 'San Francisco')}.\n I recommend choosing the {hotels[2].name} since it strikes the balance between rating, price, and location.\n \"\"\",\n options=hotels,\n recommendation=hotels[2],\n agent=\"hotels\"\n )\n\n if isinstance(selected_hotel, str):\n selected_hotel = json.loads(selected_hotel)\n return Command(\n goto=END,\n update={\n \"hotels\": hotels,\n \"itinerary\": {\n \"hotel\": selected_hotel\n },\n \"messages\": state[\"messages\"] + [{\n \"role\": \"assistant\",\n \"content\": f\"Hotels Agent: Excellent choice! 
You'll like {selected_hotel['name']}.\"\n            }]\n        }\n    )\n\n# Experiences finder subgraph\nasync def experiences_finder(state: TravelAgentState, config: RunnableConfig):\n    \"\"\"Subgraph that finds restaurant and activity recommendations\"\"\"\n\n    # Filter experiences (2 restaurants, 2 activities)\n    restaurants = [exp for exp in STATIC_EXPERIENCES if exp.type == \"restaurant\"][:2]\n    activities = [exp for exp in STATIC_EXPERIENCES if exp.type == \"activity\"][:2]\n    experiences = restaurants + activities\n\n    model = ChatOpenAI(model=\"gpt-4o\")\n\n    if config is None:\n        config = RunnableConfig(recursion_limit=25)\n\n    itinerary = state.get(\"itinerary\", {})\n\n    system_prompt = f\"\"\"\n    You are the experiences agent. Your job is to find restaurants and activities for the user.\n    You already went ahead and found a bunch of experiences. All you have to do now is let the user know of your findings.\n\n    Current status:\n    - Origin: {state.get('origin', 'Amsterdam')}\n    - Destination: {state.get('destination', 'San Francisco')}\n    - Flight chosen: {itinerary.get(\"flight\", None)}\n    - Hotel chosen: {itinerary.get(\"hotel\", None)}\n    - activities found: {activities}\n    - restaurants found: {restaurants}\n    \"\"\"\n\n    # Let the model present the findings to the user\n    response = await model.ainvoke([\n        SystemMessage(content=system_prompt),\n        *state[\"messages\"],\n    ], config)\n\n    return Command(\n        goto=END,\n        update={\n            \"experiences\": experiences,\n            \"messages\": state[\"messages\"] + [response]\n        }\n    )\n\nclass SupervisorResponseFormatter(BaseModel):\n    \"\"\"Always use this tool to structure your response to the user.\"\"\"\n    answer: str = Field(description=\"The answer to the user\")\n    next_agent: str | None = Field(description=\"The agent to go to. Not required if you do not want to route to another agent.\")\n\n# Supervisor agent\nasync def supervisor_agent(state: TravelAgentState, config: RunnableConfig):\n    \"\"\"Main supervisor that coordinates all subgraphs\"\"\"\n\n    itinerary = state.get(\"itinerary\", {})\n\n    # Check what's already completed\n    has_flights = itinerary.get(\"flight\", None) is not None\n    has_hotels = itinerary.get(\"hotel\", None) is not None\n    has_experiences = state.get(\"experiences\", None) is not None\n\n    system_prompt = f\"\"\"\n    You are a travel planning supervisor. Your job is to coordinate specialized agents to help plan a trip.\n\n    Current status:\n    - Origin: {state.get('origin', 'Amsterdam')}\n    - Destination: {state.get('destination', 'San Francisco')}\n    - Flights found: {has_flights}\n    - Hotels found: {has_hotels}\n    - Experiences found: {has_experiences}\n    - Itinerary (Things that the user has already confirmed selection on): {json.dumps(itinerary, indent=2)}\n\n    Available agents:\n    - flights_agent: Finds flight options\n    - hotels_agent: Finds hotel options\n    - experiences_agent: Finds restaurant and activity recommendations\n    - {END}: Mark task as complete when all information is gathered\n\n    You must route to the appropriate agent based on what's missing. 
Once all agents have completed their tasks, route to 'complete'.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Bind the routing tool\n model_with_tools = model.bind_tools(\n [SupervisorResponseFormatter],\n parallel_tool_calls=False,\n )\n\n # Get supervisor decision\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls for routing\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n if isinstance(tool_call, dict):\n tool_call_args = tool_call[\"args\"]\n else:\n tool_call_args = tool_call.args\n\n next_agent = tool_call_args[\"next_agent\"]\n\n # Add tool response\n tool_response = {\n \"role\": \"tool\",\n \"content\": f\"Routing to {next_agent} and providing the answer\",\n \"tool_call_id\": tool_call.id if hasattr(tool_call, 'id') else tool_call[\"id\"]\n }\n\n messages = messages + [tool_response, AIMessage(content=tool_call_args[\"answer\"])]\n\n if next_agent is not None:\n return Command(goto=next_agent)\n\n # Fallback if no tool call\n return Command(\n goto=END,\n update={\"messages\": messages}\n )\n\n# Create subgraphs\nflights_graph = StateGraph(TravelAgentState)\nflights_graph.add_node(\"flights_agent_chat_node\", flights_finder)\nflights_graph.set_entry_point(\"flights_agent_chat_node\")\nflights_graph.add_edge(START, \"flights_agent_chat_node\")\nflights_graph.add_edge(\"flights_agent_chat_node\", END)\nflights_subgraph = flights_graph.compile()\n\nhotels_graph = StateGraph(TravelAgentState)\nhotels_graph.add_node(\"hotels_agent_chat_node\", hotels_finder)\nhotels_graph.set_entry_point(\"hotels_agent_chat_node\")\nhotels_graph.add_edge(START, \"hotels_agent_chat_node\")\nhotels_graph.add_edge(\"hotels_agent_chat_node\", END)\nhotels_subgraph = hotels_graph.compile()\n\nexperiences_graph = StateGraph(TravelAgentState)\nexperiences_graph.add_node(\"experiences_agent_chat_node\", experiences_finder)\nexperiences_graph.set_entry_point(\"experiences_agent_chat_node\")\nexperiences_graph.add_edge(START, \"experiences_agent_chat_node\")\nexperiences_graph.add_edge(\"experiences_agent_chat_node\", END)\nexperiences_subgraph = experiences_graph.compile()\n\n# Main supervisor workflow\nworkflow = StateGraph(TravelAgentState)\n\n# Add supervisor and subgraphs as nodes\nworkflow.add_node(\"supervisor\", supervisor_agent)\nworkflow.add_node(\"flights_agent\", flights_subgraph)\nworkflow.add_node(\"hotels_agent\", hotels_subgraph)\nworkflow.add_node(\"experiences_agent\", experiences_subgraph)\n\n# Set entry point\nworkflow.set_entry_point(\"supervisor\")\nworkflow.add_edge(START, \"supervisor\")\n\n# Add edges back to supervisor after each subgraph\nworkflow.add_edge(\"flights_agent\", \"supervisor\")\nworkflow.add_edge(\"hotels_agent\", \"supervisor\")\nworkflow.add_edge(\"experiences_agent\", \"supervisor\")\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, 
don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -1075,7 +1075,7 @@ "agno::agentic_chat": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", "language": "typescript", "type": "file" }, @@ -1095,7 +1095,7 @@ "agno::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n \n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", + "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [];\n english: string[] | [];\n image_names: string[] | [];\n selectedImage: string | null;\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial;\n setHaikus: Dispatch>;\n haikus: GenerateHaiku[];\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n const chatTitle = \"Haiku Generator\";\n const chatDescription = \"Ask me to create haikus\";\n const initialLabel = \"I'm a haiku generator 👋. How can I help you?\";\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && (\n \n )}\n \n \n );\n}\n\nfunction MobileChat({\n chatTitle,\n chatDescription,\n initialLabel,\n}: {\n chatTitle: string;\n chatDescription: string;\n initialLabel: string;\n}) {\n const defaultChatHeight = 50;\n\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen &&
setIsChatOpen(false)} />}\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

{generatedHaiku.english?.[index]}

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus((prevHaikus) => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName,\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n
\n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_names: [],\n selectedImage: null,\n },\n ]);\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction(\n {\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join(\"\\n -\")}`,\n },\n ],\n followUp: false,\n handler: async ({\n japanese,\n english,\n image_names,\n }: {\n japanese: string[];\n english: string[];\n image_names: string[];\n }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus((prev) =>\n [newHaiku, ...prev].filter((h) => h.english[0] !== \"A placeholder verse—\"),\n );\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return ;\n },\n },\n [haikus],\n );\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n \n
\n {haikus.map(\n (haiku, index) =>\n (haikus.length == 1 || index == activeIndex) && (\n \n {haiku.japanese.map((line, lineIndex) => (\n \n \n {line}\n

\n \n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n \n {haiku.image_names.map((imageName, imgIndex) => (\n \n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName };\n } else {\n return { ...h };\n }\n });\n })\n }\n />\n ))}\n
\n )}\n \n ),\n )}\n \n \n \n );\n}\n\nfunction Thumbnails({\n haikus,\n activeIndex,\n setActiveIndex,\n isMobile,\n}: {\n haikus: Haiku[];\n activeIndex: number;\n setActiveIndex: (index: number) => void;\n isMobile: boolean;\n}) {\n if (haikus.length == 0 || isMobile) {\n return null;\n }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n
\n

{line}

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n
\n ))}\n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -1115,7 +1115,7 @@ "llama-index::agentic_chat": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", "language": "typescript", "type": "file" }, @@ -1141,7 +1141,7 @@ "llama-index::human_in_the_loop": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n          {localSteps.map((step, index) => (\n            <StepItem\n              key={index}\n              step={step}\n              theme={theme}\n              onToggle={() => handleStepToggle(index)}\n            />\n          ))}\n        
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n    );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n  // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts.\n  // This hook won't do anything for other integrations.\n  useLangGraphInterrupt({\n    render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n  });\n  useCopilotAction({\n    name: \"generate_task_steps\",\n    description: \"Generates a list of steps for the user to perform\",\n    parameters: [\n      {\n        name: \"steps\",\n        type: \"object[]\",\n        attributes: [\n          {\n            name: \"description\",\n            type: \"string\",\n          },\n          {\n            name: \"status\",\n            type: \"string\",\n            enum: [\"enabled\", \"disabled\", \"executing\"],\n          },\n        ],\n      },\n    ],\n    // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts,\n    // so don't use this action for the langgraph integration.\n    available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n    renderAndWaitForResponse: ({ args, respond, status }) => {\n      return <StepsFeedback args={args} respond={respond} status={status} />;\n    },\n  });\n\n  return (\n    
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n          {steps.map((step: any, index: any) => (\n            <StepItem\n              key={index}\n              step={step}\n              theme={theme}\n              status={status}\n              onToggle={() => handleStepToggle(index)}\n              disabled={status !== \"executing\"}\n            />\n          ))}\n        
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", + "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n \n {children}\n
\n
\n);\n\nconst StepHeader = ({\n theme,\n enabledCount,\n totalCount,\n status,\n showStatus = false,\n}: {\n theme?: string;\n enabledCount: number;\n totalCount: number;\n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n \n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n\n \n 0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({\n step,\n theme,\n status,\n onToggle,\n disabled = false,\n}: {\n step: { description: string; status: string };\n theme?: string;\n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n \n \n \n);\n\nconst ActionButton = ({\n variant,\n theme,\n disabled,\n onClick,\n children,\n}: {\n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n\n const variantClasses = {\n primary:\n \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary:\n theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success:\n \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger:\n \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\",\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({\n theme,\n variant = \"default\",\n}: {\n theme?: string;\n variant?: \"default\" | \"success\" | \"danger\";\n}) => (\n <>\n \n \n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n\n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter((step) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n\n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts.\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for langgraph integration.\n available: [\"langgraph\", \"langgraph-fastapi\", \"langgraph-typescript\"].includes(integrationId)\n ? \"disabled\"\n : \"enabled\",\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return <StepsFeedback args={args} respond={respond} status={status} />;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter((step) => step.status === \"enabled\") });\n }\n };\n\n return (\n \n \n\n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n \n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n \n )}\n\n \n
\n );\n};\n\nexport default HumanInTheLoop;\n", "language": "typescript", "type": "file" }, @@ -1167,7 +1167,7 @@ "llama-index::agentic_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter(step => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n
\n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n \n {/* Progress Bar */}\n
\n
\n
\n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending = step.status === \"pending\" && \n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n
\n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n
\n )}\n\n {/* Status Icon */}\n
\n {isCompleted ? (\n <CheckIcon />\n ) : isCurrentPending ? (\n <SpinnerIcon />\n ) : (\n <ClockIcon theme={theme} />\n )}\n
\n\n {/* Step Content */}\n
\n
\n {step.description}\n
\n {isCurrentPending && (\n
\n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n
\n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n
\n
\n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", + "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter((step) => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n \n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n\n {/* Progress Bar */}\n \n \n \n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending =\n step.status === \"pending\" &&\n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n \n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n \n )}\n\n {/* Status Icon */}\n \n {isCompleted ? (\n <CheckIcon />\n ) : isCurrentPending ? (\n <SpinnerIcon />\n ) : (\n <ClockIcon theme={theme} />\n )}\n
\n\n {/* Step Content */}\n
\n \n {step.description}\n
\n {isCurrentPending && (\n \n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n \n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n \n \n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", "language": "typescript", "type": "file" }, @@ -1193,7 +1193,7 @@ "llama-index::shared_state": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => 
{\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n \n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n
\n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", + "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n\n const chatTitle = \"AI Recipe Assistant\";\n const chatDescription = \"Ask me to craft recipes\";\n const initialLabel = \"Hi 👋 How can I help with your recipe?\";\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, 
preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n \n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n
\n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -1219,7 +1219,7 @@ "crewai::agentic_chat": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", "language": "typescript", "type": "file" }, @@ -1245,7 +1245,7 @@ "crewai::human_in_the_loop": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts.\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for langgraph integration.\n available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return <StepsFeedback args={args} respond={respond} status={status} />;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", + "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n \n {children}\n
\n
\n);\n\nconst StepHeader = ({\n theme,\n enabledCount,\n totalCount,\n status,\n showStatus = false,\n}: {\n theme?: string;\n enabledCount: number;\n totalCount: number;\n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n \n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n\n \n 0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({\n step,\n theme,\n status,\n onToggle,\n disabled = false,\n}: {\n step: { description: string; status: string };\n theme?: string;\n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n \n \n \n);\n\nconst ActionButton = ({\n variant,\n theme,\n disabled,\n onClick,\n children,\n}: {\n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n\n const variantClasses = {\n primary:\n \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary:\n theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success:\n \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger:\n \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\",\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({\n theme,\n variant = \"default\",\n}: {\n theme?: string;\n variant?: \"default\" | \"success\" | \"danger\";\n}) => (\n <>\n \n \n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n\n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter((step) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n\n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts.\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => <InterruptHumanInTheLoop event={event} resolve={resolve} />,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for langgraph integration.\n available: [\"langgraph\", \"langgraph-fastapi\", \"langgraph-typescript\"].includes(integrationId)\n ? \"disabled\"\n : \"enabled\",\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return <StepsFeedback args={args} respond={respond} status={status} />;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter((step) => step.status === \"enabled\") });\n }\n };\n\n return (\n \n \n\n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n \n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n \n )}\n\n \n
\n );\n};\n\nexport default HumanInTheLoop;\n", "language": "typescript", "type": "file" }, @@ -1271,7 +1271,7 @@ "crewai::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n \n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n
\n {line}\n
\n
\n {haiku.english?.[lineIndex]}\n
\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n
{line}
\n
{haiku.english?.[lineIndex]}
\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", + "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [];\n english: string[] | [];\n image_names: string[] | [];\n selectedImage: string | null;\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial;\n setHaikus: Dispatch>;\n haikus: GenerateHaiku[];\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n const chatTitle = \"Haiku Generator\";\n const chatDescription = \"Ask me to create haikus\";\n const initialLabel = \"I'm a haiku generator 👋. How can I help you?\";\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && (\n \n )}\n \n \n );\n}\n\nfunction MobileChat({\n chatTitle,\n chatDescription,\n initialLabel,\n}: {\n chatTitle: string;\n chatDescription: string;\n initialLabel: string;\n}) {\n const defaultChatHeight = 50;\n\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n
{chatTitle}
\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen &&
setIsChatOpen(false)} />}\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n
{line}
\n
{generatedHaiku.english?.[index]}
\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus((prevHaikus) => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName,\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n
\n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_names: [],\n selectedImage: null,\n },\n ]);\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction(\n {\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join(\"\\n -\")}`,\n },\n ],\n followUp: false,\n handler: async ({\n japanese,\n english,\n image_names,\n }: {\n japanese: string[];\n english: string[];\n image_names: string[];\n }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus((prev) =>\n [newHaiku, ...prev].filter((h) => h.english[0] !== \"A placeholder verse—\"),\n );\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return ;\n },\n },\n [haikus],\n );\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n \n
\n {haikus.map(\n (haiku, index) =>\n (haikus.length == 1 || index == activeIndex) && (\n \n {haiku.japanese.map((line, lineIndex) => (\n \n \n {line}\n
\n \n {haiku.english?.[lineIndex]}\n
\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n \n {haiku.image_names.map((imageName, imgIndex) => (\n \n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName };\n } else {\n return { ...h };\n }\n });\n })\n }\n />\n ))}\n
\n )}\n \n ),\n )}\n \n \n \n );\n}\n\nfunction Thumbnails({\n haikus,\n activeIndex,\n setActiveIndex,\n isMobile,\n}: {\n haikus: Haiku[];\n activeIndex: number;\n setActiveIndex: (index: number) => void;\n isMobile: boolean;\n}) {\n if (haikus.length == 0 || isMobile) {\n return null;\n }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n
\n
{line}
\n
\n {haiku.english?.[lineIndex]}\n
\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n
\n ))}\n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -1297,7 +1297,7 @@ "crewai::agentic_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter(step => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n
\n {/* Header */}\n
\n
\n
\n Task Progress\n
\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n \n {/* Progress Bar */}\n
\n
\n
\n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending = step.status === \"pending\" && \n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n
\n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n
\n )}\n\n {/* Status Icon */}\n
\n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n
\n {step.description}\n
\n {isCurrentPending && (\n
\n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n
\n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n
\n
\n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", + "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter((step) => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n \n {/* Header */}\n
\n
\n
\n Task Progress\n
\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n\n {/* Progress Bar */}\n \n \n \n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending =\n step.status === \"pending\" &&\n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n \n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n \n )}\n\n {/* Status Icon */}\n \n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n \n {step.description}\n
\n {isCurrentPending && (\n \n Processing...\n
\n )}\n
\n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n \n )}\n
\n );\n })}\n
\n\n {/* Decorative Elements */}\n \n \n
\n
\n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", "language": "typescript", "type": "file" }, @@ -1323,7 +1323,7 @@ "crewai::shared_state": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
{chatDescription}
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n
{chatTitle}
\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => 
{\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n
Dietary Preferences
\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n
Ingredients
\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n \n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n
Instructions
\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n
\n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", + "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n\n const chatTitle = \"AI Recipe Assistant\";\n const chatDescription = \"Ask me to craft recipes\";\n const initialLabel = \"Hi 👋 How can I help with your recipe?\";\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
{chatDescription}
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n
{chatTitle}
\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, 
preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n \n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n
Dietary Preferences
\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n
Ingredients
\n \n + Add Ingredient\n \n
\n
\n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n
Instructions
\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -1349,7 +1349,7 @@ "crewai::predictive_state_updates": [ { "name": "page.tsx", - "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n const chatTitle = 'AI Document Editor'\n const chatDescription = 'Ask me to create or edit a document'\n const initialLabel = 'Hi 👋 How can I help with your document?'\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
{chatDescription}
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n
{chatTitle}
\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction({\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n }, [agentState?.document]);\n\n // Action to write the document.\n useCopilotAction({\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n }, [agentState?.document]);\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n
\n
Confirm Changes
\n
Do you want to accept the changes?
\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n
\n )}\n
\n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", + "content": "\"use client\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\n\nimport MarkdownIt from \"markdown-it\";\nimport React from \"react\";\n\nimport { diffWords } from \"diff\";\nimport { useEditor, EditorContent } from \"@tiptap/react\";\nimport StarterKit from \"@tiptap/starter-kit\";\nimport { useEffect, useState } from \"react\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\nconst extensions = [StarterKit];\n\ninterface PredictiveStateUpdatesProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function PredictiveStateUpdates({ params }: PredictiveStateUpdatesProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n const chatTitle = \"AI Document Editor\";\n const chatDescription = \"Ask me to create or edit a document\";\n const initialLabel = \"Hi 👋 How can I help with your document?\";\n\n return (\n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
{chatDescription}
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n
{chatTitle}
\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n \n
\n \n );\n}\n\ninterface AgentState {\n document: string;\n}\n\nconst DocumentEditor = () => {\n const editor = useEditor({\n extensions,\n immediatelyRender: false,\n editorProps: {\n attributes: { class: \"min-h-screen p-10\" },\n },\n });\n const [placeholderVisible, setPlaceholderVisible] = useState(false);\n const [currentDocument, setCurrentDocument] = useState(\"\");\n const { isLoading } = useCopilotChat();\n\n const {\n state: agentState,\n setState: setAgentState,\n nodeName,\n } = useCoAgent({\n name: \"predictive_state_updates\",\n initialState: {\n document: \"\",\n },\n });\n\n useEffect(() => {\n if (isLoading) {\n setCurrentDocument(editor?.getText() || \"\");\n }\n editor?.setEditable(!isLoading);\n }, [isLoading]);\n\n useEffect(() => {\n if (nodeName == \"end\") {\n // set the text one final time when loading is done\n if (currentDocument.trim().length > 0 && currentDocument !== agentState?.document) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument, true);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n }\n }\n }, [nodeName]);\n\n useEffect(() => {\n if (isLoading) {\n if (currentDocument.trim().length > 0) {\n const newDocument = agentState?.document || \"\";\n const diff = diffPartialText(currentDocument, newDocument);\n const markdown = fromMarkdown(diff);\n editor?.commands.setContent(markdown);\n } else {\n const markdown = fromMarkdown(agentState?.document || \"\");\n editor?.commands.setContent(markdown);\n }\n }\n }, [agentState?.document]);\n\n const text = editor?.getText() || \"\";\n\n useEffect(() => {\n setPlaceholderVisible(text.length === 0);\n\n if (!isLoading) {\n setCurrentDocument(text);\n setAgentState({\n document: text,\n });\n }\n }, [text]);\n\n // TODO(steve): Remove this when all agents have been updated to use write_document tool.\n useCopilotAction(\n {\n name: \"confirm_changes\",\n renderAndWaitForResponse: ({ args, respond, status }) => (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n ),\n },\n [agentState?.document],\n );\n\n // Action to write the document.\n useCopilotAction(\n {\n name: \"write_document\",\n description: `Present the proposed changes to the user for review`,\n parameters: [\n {\n name: \"document\",\n type: \"string\",\n description: \"The full updated document in markdown format\",\n },\n ],\n renderAndWaitForResponse({ args, status, respond }) {\n if (status === \"executing\") {\n return (\n {\n editor?.commands.setContent(fromMarkdown(currentDocument));\n setAgentState({ document: currentDocument });\n }}\n onConfirm={() => {\n editor?.commands.setContent(fromMarkdown(agentState?.document || \"\"));\n setCurrentDocument(agentState?.document || \"\");\n setAgentState({ document: agentState?.document || \"\" });\n }}\n />\n );\n }\n return <>;\n },\n },\n [agentState?.document],\n );\n\n return (\n
\n {placeholderVisible && (\n
\n Write whatever you want here in Markdown format...\n
\n )}\n \n
\n );\n};\n\ninterface ConfirmChangesProps {\n args: any;\n respond: any;\n status: any;\n onReject: () => void;\n onConfirm: () => void;\n}\n\nfunction ConfirmChanges({ args, respond, status, onReject, onConfirm }: ConfirmChangesProps) {\n const [accepted, setAccepted] = useState(null);\n return (\n \n
Confirm Changes
\n
Do you want to accept the changes?
\n {accepted === null && (\n
\n {\n if (respond) {\n setAccepted(false);\n onReject();\n respond({ accepted: false });\n }\n }}\n >\n Reject\n \n {\n if (respond) {\n setAccepted(true);\n onConfirm();\n respond({ accepted: true });\n }\n }}\n >\n Confirm\n \n
\n )}\n {accepted !== null && (\n
\n \n {accepted ? \"✓ Accepted\" : \"✗ Rejected\"}\n
\n \n )}\n \n );\n}\n\nfunction fromMarkdown(text: string) {\n const md = new MarkdownIt({\n typographer: true,\n html: true,\n });\n\n return md.render(text);\n}\n\nfunction diffPartialText(oldText: string, newText: string, isComplete: boolean = false) {\n let oldTextToCompare = oldText;\n if (oldText.length > newText.length && !isComplete) {\n // make oldText shorter\n oldTextToCompare = oldText.slice(0, newText.length);\n }\n\n const changes = diffWords(oldTextToCompare, newText);\n\n let result = \"\";\n changes.forEach((part) => {\n if (part.added) {\n result += `${part.value}`;\n } else if (part.removed) {\n result += `${part.value}`;\n } else {\n result += part.value;\n }\n });\n\n if (oldText.length > newText.length && !isComplete) {\n result += oldText.slice(newText.length);\n }\n\n return result;\n}\n\nfunction isAlpha(text: string) {\n return /[a-zA-Z\\u00C0-\\u017F]/.test(text.trim());\n}\n", "language": "typescript", "type": "file" }, From 165163666e3cdada7b418dd7d095489a780b68cc Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Wed, 17 Sep 2025 17:27:54 -0300 Subject: [PATCH 09/34] Update packages to latest version Signed-off-by: Luis Valdes --- typescript-sdk/apps/dojo/package.json | 10 +- typescript-sdk/pnpm-lock.yaml | 157 +++++++++++++------------- 2 files changed, 82 insertions(+), 85 deletions(-) diff --git a/typescript-sdk/apps/dojo/package.json b/typescript-sdk/apps/dojo/package.json index ae3d54051..809cf2220 100644 --- a/typescript-sdk/apps/dojo/package.json +++ b/typescript-sdk/apps/dojo/package.json @@ -22,11 +22,11 @@ "@ag-ui/server-starter-all-features": "workspace:*", "@ag-ui/vercel-ai-sdk": "workspace:*", "@ai-sdk/openai": "^1.3.22", - "@copilotkit/react-core": "1.10.1", - "@copilotkit/react-ui": "1.10.1", - "@copilotkit/runtime": "1.10.1", - "@copilotkit/runtime-client-gql": "1.10.1", - "@copilotkit/shared": "1.10.1", + "@copilotkit/react-core": "1.10.4", + "@copilotkit/react-ui": "1.10.4", + "@copilotkit/runtime": "1.10.4", + "@copilotkit/runtime-client-gql": "1.10.4", + "@copilotkit/shared": "1.10.4", "@mastra/client-js": "^0.10.18", "@mastra/core": "^0.13.0", "@mastra/dynamodb": "^0.13.3", diff --git a/typescript-sdk/pnpm-lock.yaml b/typescript-sdk/pnpm-lock.yaml index 8ae5a06e0..88034b214 100644 --- a/typescript-sdk/pnpm-lock.yaml +++ b/typescript-sdk/pnpm-lock.yaml @@ -112,20 +112,20 @@ importers: specifier: ^1.3.22 version: 1.3.22(zod@3.25.67) '@copilotkit/react-core': - specifier: 1.10.1 - version: 1.10.1(@types/react@19.1.5)(graphql@16.11.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + specifier: 1.10.4 + version: 1.10.4(@types/react@19.1.5)(graphql@16.11.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@copilotkit/react-ui': - specifier: 1.10.1 - version: 1.10.1(@types/react@19.1.5)(graphql@16.11.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + specifier: 1.10.4 + version: 1.10.4(@types/react@19.1.5)(graphql@16.11.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@copilotkit/runtime': - specifier: 1.10.1 - version: 
1.10.1(@ag-ui/client@0.0.35)(@ag-ui/core@0.0.35)(@ag-ui/encoder@0.0.35)(@ag-ui/proto@0.0.35)(@aws-crypto/sha256-js@5.2.0)(@aws-sdk/client-bedrock-agent-runtime@3.844.0)(@aws-sdk/client-bedrock-runtime@3.844.0)(@aws-sdk/client-dynamodb@3.859.0)(@aws-sdk/client-kendra@3.844.0)(@aws-sdk/credential-provider-node@3.859.0)(@browserbasehq/sdk@2.6.0)(@browserbasehq/stagehand@2.4.0(deepmerge@4.3.1)(dotenv@17.0.1)(react@19.1.0)(zod@3.25.67))(@ibm-cloud/watsonx-ai@1.6.8)(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(@smithy/util-utf8@2.3.0)(@upstash/redis@1.35.1)(axios@1.10.0)(cohere-ai@7.17.1)(fast-xml-parser@5.2.5)(google-auth-library@10.1.0)(ibm-cloud-sdk-core@5.4.0)(ignore@5.3.2)(jsonwebtoken@9.0.2)(lodash@4.17.21)(pg@8.16.3)(playwright@1.53.2)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(redis@5.6.1)(ws@8.18.3) + specifier: 1.10.4 + version: 1.10.4(@ag-ui/client@0.0.35)(@ag-ui/core@0.0.35)(@ag-ui/encoder@0.0.35)(@ag-ui/proto@0.0.35)(@aws-crypto/sha256-js@5.2.0)(@aws-sdk/client-bedrock-agent-runtime@3.844.0)(@aws-sdk/client-bedrock-runtime@3.844.0)(@aws-sdk/client-dynamodb@3.859.0)(@aws-sdk/client-kendra@3.844.0)(@aws-sdk/credential-provider-node@3.859.0)(@browserbasehq/sdk@2.6.0)(@browserbasehq/stagehand@2.4.0(deepmerge@4.3.1)(dotenv@17.0.1)(react@19.1.0)(zod@3.25.67))(@ibm-cloud/watsonx-ai@1.6.8)(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(@smithy/util-utf8@2.3.0)(@upstash/redis@1.35.1)(axios@1.10.0)(cohere-ai@7.17.1)(fast-xml-parser@5.2.5)(google-auth-library@10.1.0)(ibm-cloud-sdk-core@5.4.0)(ignore@5.3.2)(jsonwebtoken@9.0.2)(lodash@4.17.21)(pg@8.16.3)(playwright@1.53.2)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(redis@5.6.1)(ws@8.18.3) '@copilotkit/runtime-client-gql': - specifier: 1.10.1 - version: 1.10.1(graphql@16.11.0)(react@19.1.0) + specifier: 1.10.4 + version: 1.10.4(graphql@16.11.0)(react@19.1.0) '@copilotkit/shared': - specifier: 1.10.1 - version: 1.10.1 + specifier: 1.10.4 + version: 1.10.4 '@mastra/client-js': specifier: ^0.10.18 version: 0.10.18(@sinclair/typebox@0.34.37)(openapi-types@12.1.3)(react@19.1.0)(zod@3.25.67) @@ -362,10 +362,10 @@ importers: dependencies: '@langchain/core': specifier: ^0.3.66 - version: 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(zod@3.25.71)) + version: 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) '@langchain/langgraph-sdk': specifier: ^0.1.2 - version: 0.1.2(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(zod@3.25.71)))(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + version: 0.1.2(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(react-dom@19.1.0(react@19.1.0))(react@19.1.0) partial-json: specifier: ^0.1.7 version: 0.1.7 @@ -807,9 +807,6 @@ packages: '@ag-ui/client@0.0.35': 
resolution: {integrity: sha512-rHtMQSU232dZeVx9qAGt1+j4ar4RWqwFanXcyNxAwbAh0XrY7VZeXFBDUeazy1LtBoViS7xehX8V1Ssf1a+bUw==} - '@ag-ui/core@0.0.30': - resolution: {integrity: sha512-cBukbc2O0qMKi/BKix6Exld5zSqGKR72376KA6NZNQz/xYAiPNhmK40VX77d/hyblhtXT3BlBGrYmda9V4ETlw==} - '@ag-ui/core@0.0.35': resolution: {integrity: sha512-YAqrln3S3fdo+Hs5FFQPODXiBttyilv/E3xSSHCuxqC0Y/Fp3+VqyDx97BorO3NVp2VKZ9cG2nsO3cbmcTwkQw==} @@ -822,12 +819,15 @@ packages: '@ag-ui/encoder@0.0.37': resolution: {integrity: sha512-KD5t0ll3n1pn1ZX1xwQ1YxYZrtJjIttLEsUpj8mQgfh8+ZQ1ZSvlPSciKOQkHf7+Sw9eS6kHVDd5nOOLV1N1xw==} + '@ag-ui/langgraph@0.0.12': + resolution: {integrity: sha512-2j7IqIUYh0WAdvCXH65hd5aSHi23EOKVo78BauXa2vZI8Pricpkq9Adr2coZB2cwCbTV37lMn5yVlpDV0iQvOA==} + peerDependencies: + '@ag-ui/client': '>=0.0.37' + '@ag-ui/core': '>=0.0.37' + '@ag-ui/langgraph@0.0.7': resolution: {integrity: sha512-KARfd7xJ9iDTMF0IOhRLgeVb+Jur9sjXI4rilDTSblkGT9/L56YFTkqrkGt+E7QF+qvbReN1SQuu2JxmUFkO9Q==} - '@ag-ui/langgraph@0.0.8': - resolution: {integrity: sha512-p7oXmbnei6y8wOyASKp1CHT/6VolXiGFC3H9GwnAr8w1uM49bzWoJ7GP6iQBkexQFToc8jfaNEg28PSVGynm6A==} - '@ag-ui/proto@0.0.35': resolution: {integrity: sha512-+rz3LAYHcR3D2xVgRKa7QE5mp+cwmZs6j+1XxG5dT7HNdg51uKea12L57EVY2bxE3JzpAvCIgOjFEmQCNH82pw==} @@ -1461,29 +1461,29 @@ packages: '@cfworker/json-schema@4.1.1': resolution: {integrity: sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==} - '@copilotkit/react-core@1.10.1': - resolution: {integrity: sha512-MnIQMHsR7ooeuaDoAO2yDtPJGb3EYp5h0KkF3I8YAb2a5dah/KcbfYQ4Xcr9wfyc8GIp+aBC3L2C6UOdEYSUvg==} + '@copilotkit/react-core@1.10.4': + resolution: {integrity: sha512-x2C3eeD92zcmE2GM5/z3kxuRbxBoU1VyAxJHc02Up7TFqxEKM276Azj9OSffhRWrmx5n2tQJt3RWtH2MUEcJ4Q==} peerDependencies: react: ^18 || ^19 || ^19.0.0-rc react-dom: ^18 || ^19 || ^19.0.0-rc - '@copilotkit/react-ui@1.10.1': - resolution: {integrity: sha512-a9+PC0K3GKgmGOzItRm8xBsbf1sBKPpSCzvqdAwo2JP1FJ89A2Hkz47FGKxRTuDx0eXekzXIRAeZVHrGgmkBPA==} + '@copilotkit/react-ui@1.10.4': + resolution: {integrity: sha512-C2dmBhndsWRPKqSnScOztTuyCBcowBZyqa4JrUyVlvVsiYVa1rEqMJTo6Gkw+tq6XEK+cJgAPcRtymght3Grxg==} peerDependencies: react: ^18 || ^19 || ^19.0.0-rc - '@copilotkit/runtime-client-gql@1.10.1': - resolution: {integrity: sha512-AypscN1HB+oKdNrXZFNLKbL7owvlNsXMMzXNYDZGieqAUYM/p2/RmBF+uFPCJS+gZcR3xKv9Zda208mTrhaDWQ==} + '@copilotkit/runtime-client-gql@1.10.4': + resolution: {integrity: sha512-wOo5gzR7EiVEBpyBYYq84o5fb0OQ1pcJ8AzPZWEDc4GaV2xLDogjd7gKrP2zw5tV4tGV5HizHpo5UiE4kHZAgw==} peerDependencies: react: ^18 || ^19 || ^19.0.0-rc - '@copilotkit/runtime@1.10.1': - resolution: {integrity: sha512-q9lsihD1irbMyhdsIgXf7uUQOiP6PHASCr3bP+G4t+bA9GvH3O/YnirxA2mALSq3iFTBUtBCW9WDOX/PbLU75g==} + '@copilotkit/runtime@1.10.4': + resolution: {integrity: sha512-/KuKjISiQYoV6T8rE1khzTKO5Abctl/Flh50kIvyGAjwDSKyOTWJ/XEn8zAGP8/xo5KzSQQYaODFoz3oncGN6g==} peerDependencies: - '@ag-ui/client': '>=0.0.34' - '@ag-ui/core': '>=0.0.34' - '@ag-ui/encoder': '>=0.0.34' - '@ag-ui/proto': '>=0.0.34' + '@ag-ui/client': '>=0.0.37' + '@ag-ui/core': '>=0.0.37' + '@ag-ui/encoder': '>=0.0.37' + '@ag-ui/proto': '>=0.0.37' '@copilotkit/runtime@1.9.3': resolution: {integrity: sha512-I07oRSoanaXZPM0ex+f/6Vz+TzypQoiCOsH9w7C9ChU1wytsRN2mhucxQY8TCrocw90R5ZkoPJRr/rBCrqT/dg==} @@ -1493,8 +1493,8 @@ packages: '@ag-ui/encoder': '>=0.0.34' '@ag-ui/proto': '>=0.0.34' - '@copilotkit/shared@1.10.1': - resolution: {integrity: sha512-b5DLcgF+K8Ecd5kweNLwO7uHx3H/K8CVflyKHL7zprxTaVyi+3NirxpDnUpFvrp/NiZw3NRVDSVnqsrorFGJyw==} 
+ '@copilotkit/shared@1.10.4': + resolution: {integrity: sha512-vvF3vG03e5OZcUd0x7FdpIPRx60W3vyiK+0maGyEZoU1xUlBJbxaCkAED6gAH3JBZl/BZNWoJKsYDxiQUY2V3A==} '@copilotkit/shared@1.9.3': resolution: {integrity: sha512-6VpgvrBL1HKLV2RJ+n2l+eDyPKBUzIN+CJA8N6B0vioyUqZa3VWTRsGAKoTE9Mb112xJ/fKczN68Zo0oNNSWFw==} @@ -7181,6 +7181,7 @@ packages: libsql@0.5.17: resolution: {integrity: sha512-RRlj5XQI9+Wq+/5UY8EnugSWfRmHEw4hn3DKlPrkUgZONsge1PwTtHcpStP6MSNi8ohcbsRgEHJaymA33a8cBw==} + cpu: [x64, arm64, wasm32, arm] os: [darwin, linux, win32] lightningcss-darwin-arm64@1.30.1: @@ -9545,11 +9546,6 @@ snapshots: uuid: 11.1.0 zod: 3.25.71 - '@ag-ui/core@0.0.30': - dependencies: - rxjs: 7.8.1 - zod: 3.25.71 - '@ag-ui/core@0.0.35': dependencies: rxjs: 7.8.1 @@ -9570,11 +9566,12 @@ snapshots: '@ag-ui/core': 0.0.37 '@ag-ui/proto': 0.0.37 - '@ag-ui/langgraph@0.0.7(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71))(react@19.1.0)': + '@ag-ui/langgraph@0.0.12(@ag-ui/client@0.0.35)(@ag-ui/core@0.0.35)(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@ag-ui/client': 0.0.35 + '@ag-ui/core': 0.0.35 '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) - '@langchain/langgraph-sdk': 0.0.78(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(react@19.1.0) + '@langchain/langgraph-sdk': 0.0.105(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(react-dom@19.1.0(react@19.1.0))(react@19.1.0) partial-json: 0.1.7 rxjs: 7.8.1 transitivePeerDependencies: @@ -9583,12 +9580,13 @@ snapshots: - '@opentelemetry/sdk-trace-base' - openai - react + - react-dom - '@ag-ui/langgraph@0.0.8(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@ag-ui/langgraph@0.0.7(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71))(react@19.1.0)': dependencies: '@ag-ui/client': 0.0.35 '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) - '@langchain/langgraph-sdk': 0.0.105(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + 
'@langchain/langgraph-sdk': 0.0.78(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(react@19.1.0) partial-json: 0.1.7 rxjs: 7.8.1 transitivePeerDependencies: @@ -9597,7 +9595,6 @@ snapshots: - '@opentelemetry/sdk-trace-base' - openai - react - - react-dom '@ag-ui/proto@0.0.35': dependencies: @@ -11274,10 +11271,10 @@ snapshots: '@cfworker/json-schema@4.1.1': {} - '@copilotkit/react-core@1.10.1(@types/react@19.1.5)(graphql@16.11.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@copilotkit/react-core@1.10.4(@types/react@19.1.5)(graphql@16.11.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: - '@copilotkit/runtime-client-gql': 1.10.1(graphql@16.11.0)(react@19.1.0) - '@copilotkit/shared': 1.10.1 + '@copilotkit/runtime-client-gql': 1.10.4(graphql@16.11.0)(react@19.1.0) + '@copilotkit/shared': 1.10.4 '@scarf/scarf': 1.4.0 react: 19.1.0 react-dom: 19.1.0(react@19.1.0) @@ -11289,11 +11286,11 @@ snapshots: - graphql - supports-color - '@copilotkit/react-ui@1.10.1(@types/react@19.1.5)(graphql@16.11.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + '@copilotkit/react-ui@1.10.4(@types/react@19.1.5)(graphql@16.11.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: - '@copilotkit/react-core': 1.10.1(@types/react@19.1.5)(graphql@16.11.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@copilotkit/runtime-client-gql': 1.10.1(graphql@16.11.0)(react@19.1.0) - '@copilotkit/shared': 1.10.1 + '@copilotkit/react-core': 1.10.4(@types/react@19.1.5)(graphql@16.11.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@copilotkit/runtime-client-gql': 1.10.4(graphql@16.11.0)(react@19.1.0) + '@copilotkit/shared': 1.10.4 '@headlessui/react': 2.2.4(react-dom@19.1.0(react@19.1.0))(react@19.1.0) react: 19.1.0 react-markdown: 10.1.0(@types/react@19.1.5)(react@19.1.0) @@ -11308,9 +11305,9 @@ snapshots: - react-dom - supports-color - '@copilotkit/runtime-client-gql@1.10.1(graphql@16.11.0)(react@19.1.0)': + '@copilotkit/runtime-client-gql@1.10.4(graphql@16.11.0)(react@19.1.0)': dependencies: - '@copilotkit/shared': 1.10.1 + '@copilotkit/shared': 1.10.4 '@urql/core': 5.1.1(graphql@16.11.0) react: 19.1.0 untruncate-json: 0.0.1 @@ -11319,19 +11316,19 @@ snapshots: - encoding - graphql - '@copilotkit/runtime@1.10.1(@ag-ui/client@0.0.35)(@ag-ui/core@0.0.35)(@ag-ui/encoder@0.0.35)(@ag-ui/proto@0.0.35)(@aws-crypto/sha256-js@5.2.0)(@aws-sdk/client-bedrock-agent-runtime@3.844.0)(@aws-sdk/client-bedrock-runtime@3.844.0)(@aws-sdk/client-dynamodb@3.859.0)(@aws-sdk/client-kendra@3.844.0)(@aws-sdk/credential-provider-node@3.859.0)(@browserbasehq/sdk@2.6.0)(@browserbasehq/stagehand@2.4.0(deepmerge@4.3.1)(dotenv@17.0.1)(react@19.1.0)(zod@3.25.67))(@ibm-cloud/watsonx-ai@1.6.8)(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(@smithy/util-utf8@2.3.0)(@upstash/redis@1.35.1)(axios@1.10.0)(cohere-ai@7.17.1)(fast-xml-parser@5.2.5)(google-auth-library@10.1.0)(ibm-cloud-sdk-core@5.4.0)(ignore@5.3.2)(jsonwebtoken@9.0.2)(lodash@4.17.21)(pg@8.16.3)(playwright@1.53.2)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(redis@5.6.1)(ws@8.18.3)': + 
'@copilotkit/runtime@1.10.4(@ag-ui/client@0.0.35)(@ag-ui/core@0.0.35)(@ag-ui/encoder@0.0.35)(@ag-ui/proto@0.0.35)(@aws-crypto/sha256-js@5.2.0)(@aws-sdk/client-bedrock-agent-runtime@3.844.0)(@aws-sdk/client-bedrock-runtime@3.844.0)(@aws-sdk/client-dynamodb@3.859.0)(@aws-sdk/client-kendra@3.844.0)(@aws-sdk/credential-provider-node@3.859.0)(@browserbasehq/sdk@2.6.0)(@browserbasehq/stagehand@2.4.0(deepmerge@4.3.1)(dotenv@17.0.1)(react@19.1.0)(zod@3.25.67))(@ibm-cloud/watsonx-ai@1.6.8)(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(@smithy/util-utf8@2.3.0)(@upstash/redis@1.35.1)(axios@1.10.0)(cohere-ai@7.17.1)(fast-xml-parser@5.2.5)(google-auth-library@10.1.0)(ibm-cloud-sdk-core@5.4.0)(ignore@5.3.2)(jsonwebtoken@9.0.2)(lodash@4.17.21)(pg@8.16.3)(playwright@1.53.2)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(redis@5.6.1)(ws@8.18.3)': dependencies: '@ag-ui/client': 0.0.35 '@ag-ui/core': 0.0.35 '@ag-ui/encoder': 0.0.35 - '@ag-ui/langgraph': 0.0.8(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71))(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + '@ag-ui/langgraph': 0.0.12(@ag-ui/client@0.0.35)(@ag-ui/core@0.0.35)(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71))(react-dom@19.1.0(react@19.1.0))(react@19.1.0) '@ag-ui/proto': 0.0.35 '@anthropic-ai/sdk': 0.57.0 - '@copilotkit/shared': 1.10.1 + '@copilotkit/shared': 1.10.4 '@graphql-yoga/plugin-defer-stream': 3.13.4(graphql-yoga@5.13.4(graphql@16.11.0))(graphql@16.11.0) '@langchain/aws': 0.1.11(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71))) - '@langchain/community': 0.3.43(7diojckpk4p4gsegiv67ljolbe) - '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) + '@langchain/community': 0.3.43(6700d873fa55f615078a7d0bda5c02a5) + '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.100.0(ws@8.18.3)(zod@3.25.67)) '@langchain/google-gauth': 0.1.8(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(zod@3.25.71) '@langchain/langgraph-sdk': 0.0.70(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(react@19.1.0) '@langchain/openai': 0.4.9(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(ws@8.18.3) @@ -11514,7 +11511,7 @@ 
snapshots: '@copilotkit/shared': 1.9.3 '@graphql-yoga/plugin-defer-stream': 3.13.4(graphql-yoga@5.13.4(graphql@16.11.0))(graphql@16.11.0) '@langchain/aws': 0.1.11(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71))) - '@langchain/community': 0.3.43(7diojckpk4p4gsegiv67ljolbe) + '@langchain/community': 0.3.43(6700d873fa55f615078a7d0bda5c02a5) '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) '@langchain/google-gauth': 0.1.8(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(zod@3.25.71) '@langchain/langgraph-sdk': 0.0.70(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(react@19.1.0) @@ -11688,9 +11685,9 @@ snapshots: - ws - youtubei.js - '@copilotkit/shared@1.10.1': + '@copilotkit/shared@1.10.4': dependencies: - '@ag-ui/core': 0.0.30 + '@ag-ui/core': 0.0.37 '@segment/analytics-node': 2.2.1 chalk: 4.1.2 graphql: 16.11.0 @@ -12500,15 +12497,15 @@ snapshots: '@aws-sdk/client-bedrock-runtime': 3.844.0 '@aws-sdk/client-kendra': 3.844.0 '@aws-sdk/credential-provider-node': 3.859.0 - '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) + '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.100.0(ws@8.18.3)(zod@3.25.67)) transitivePeerDependencies: - aws-crt - '@langchain/community@0.3.43(7diojckpk4p4gsegiv67ljolbe)': + '@langchain/community@0.3.43(6700d873fa55f615078a7d0bda5c02a5)': dependencies: '@browserbasehq/stagehand': 2.4.0(deepmerge@4.3.1)(dotenv@17.0.1)(react@19.1.0)(zod@3.25.67) '@ibm-cloud/watsonx-ai': 1.6.8 - '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) + '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.100.0(ws@8.18.3)(zod@3.25.67)) '@langchain/openai': 0.4.9(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(ws@8.18.3) binary-extensions: 2.3.0 expr-eval: 2.0.2 @@ -12516,7 +12513,7 @@ snapshots: ibm-cloud-sdk-core: 5.4.0 js-yaml: 4.1.0 langchain: 
0.3.26(@langchain/aws@0.1.11(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71))))(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(axios@1.10.0)(openai@4.104.0(ws@8.18.3)(zod@3.25.71))(ws@8.18.3) - langsmith: 0.3.49(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(zod@3.25.71)) + langsmith: 0.3.49(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) openai: 4.104.0(ws@8.18.3)(zod@3.25.71) uuid: 10.0.0 zod: 3.25.71 @@ -12562,7 +12559,7 @@ snapshots: - handlebars - peggy - '@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71))': + '@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.100.0(ws@8.18.3)(zod@3.25.67))': dependencies: '@cfworker/json-schema': 4.1.1 ansi-styles: 5.2.0 @@ -12582,14 +12579,14 @@ snapshots: - '@opentelemetry/sdk-trace-base' - openai - '@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(zod@3.25.71))': + '@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71))': dependencies: '@cfworker/json-schema': 4.1.1 ansi-styles: 5.2.0 camelcase: 6.3.0 decamelize: 1.2.0 js-tiktoken: 1.0.20 - langsmith: 0.3.49(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(zod@3.25.71)) + langsmith: 0.3.49(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) mustache: 4.2.0 p-queue: 6.6.2 p-retry: 4.6.2 @@ -12604,7 +12601,7 @@ snapshots: '@langchain/google-common@0.1.8(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(zod@3.25.71)': dependencies: - '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) + '@langchain/core': 
0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.100.0(ws@8.18.3)(zod@3.25.67)) uuid: 10.0.0 zod-to-json-schema: 3.24.6(zod@3.25.71) transitivePeerDependencies: @@ -12612,7 +12609,7 @@ snapshots: '@langchain/google-gauth@0.1.8(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(zod@3.25.71)': dependencies: - '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) + '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.100.0(ws@8.18.3)(zod@3.25.67)) '@langchain/google-common': 0.1.8(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(zod@3.25.71) google-auth-library: 8.9.0 transitivePeerDependencies: @@ -12627,7 +12624,7 @@ snapshots: p-retry: 4.6.2 uuid: 9.0.1 optionalDependencies: - '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) + '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.100.0(ws@8.18.3)(zod@3.25.67)) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) @@ -12638,7 +12635,7 @@ snapshots: p-retry: 4.6.2 uuid: 9.0.1 optionalDependencies: - '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) + '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.100.0(ws@8.18.3)(zod@3.25.67)) react: 19.1.0 '@langchain/langgraph-sdk@0.0.78(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(react@19.1.0)': @@ -12651,20 +12648,20 @@ snapshots: '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) react: 19.1.0 - '@langchain/langgraph-sdk@0.1.2(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(zod@3.25.71)))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + 
'@langchain/langgraph-sdk@0.1.2(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@types/json-schema': 7.0.15 p-queue: 6.6.2 p-retry: 4.6.2 uuid: 9.0.1 optionalDependencies: - '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(zod@3.25.71)) + '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) react: 19.1.0 react-dom: 19.1.0(react@19.1.0) '@langchain/openai@0.4.9(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(ws@8.18.3)': dependencies: - '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) + '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.100.0(ws@8.18.3)(zod@3.25.67)) js-tiktoken: 1.0.20 openai: 4.104.0(ws@8.18.3)(zod@3.25.71) zod: 3.25.71 @@ -12675,7 +12672,7 @@ snapshots: '@langchain/textsplitters@0.1.0(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))': dependencies: - '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) + '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.100.0(ws@8.18.3)(zod@3.25.67)) js-tiktoken: 1.0.20 '@libsql/client@0.15.10': @@ -16539,7 +16536,7 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-module-utils@2.12.0(@typescript-eslint/parser@8.32.1(eslint@9.27.0(jiti@2.4.2))(typescript@5.8.2))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.31.0)(eslint@9.27.0(jiti@2.4.2)))(eslint@9.27.0(jiti@2.4.2)): + eslint-module-utils@2.12.0(@typescript-eslint/parser@8.32.1(eslint@9.27.0(jiti@2.4.2))(typescript@5.8.2))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.27.0(jiti@2.4.2)): dependencies: debug: 3.2.7 optionalDependencies: @@ -16561,7 +16558,7 @@ snapshots: doctrine: 2.1.0 eslint: 9.27.0(jiti@2.4.2) eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.12.0(@typescript-eslint/parser@8.32.1(eslint@9.27.0(jiti@2.4.2))(typescript@5.8.2))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.31.0)(eslint@9.27.0(jiti@2.4.2)))(eslint@9.27.0(jiti@2.4.2)) 
+ eslint-module-utils: 2.12.0(@typescript-eslint/parser@8.32.1(eslint@9.27.0(jiti@2.4.2))(typescript@5.8.2))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.27.0(jiti@2.4.2)) hasown: 2.0.2 is-core-module: 2.16.1 is-glob: 4.0.3 @@ -17506,7 +17503,7 @@ snapshots: isstream: 0.1.2 jsonwebtoken: 9.0.2 mime-types: 2.1.35 - retry-axios: 2.6.0(axios@1.10.0(debug@4.4.1)) + retry-axios: 2.6.0(axios@1.10.0) tough-cookie: 4.1.4 transitivePeerDependencies: - supports-color @@ -18279,7 +18276,7 @@ snapshots: langchain@0.3.26(@langchain/aws@0.1.11(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71))))(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(axios@1.10.0)(openai@4.104.0(ws@8.18.3)(zod@3.25.71))(ws@8.18.3): dependencies: - '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)) + '@langchain/core': 0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.100.0(ws@8.18.3)(zod@3.25.67)) '@langchain/openai': 0.4.9(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)))(ws@8.18.3) '@langchain/textsplitters': 0.1.0(@langchain/core@0.3.66(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71))) js-tiktoken: 1.0.20 @@ -18327,7 +18324,7 @@ snapshots: '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0) openai: 4.100.0(ws@8.18.3)(zod@3.25.67) - langsmith@0.3.49(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(zod@3.25.71)): + langsmith@0.3.49(@opentelemetry/api@1.9.0)(@opentelemetry/exporter-trace-otlp-proto@0.203.0(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0))(openai@4.104.0(ws@8.18.3)(zod@3.25.71)): dependencies: '@types/uuid': 10.0.0 chalk: 4.1.2 @@ -20242,7 +20239,7 @@ snapshots: onetime: 5.1.2 signal-exit: 3.0.7 - retry-axios@2.6.0(axios@1.10.0(debug@4.4.1)): + retry-axios@2.6.0(axios@1.10.0): dependencies: axios: 1.10.0(debug@4.4.1) From 0c677f7bc6bf40926adc98dc782b3d08bc313a08 Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Wed, 17 Sep 2025 19:32:55 -0300 Subject: [PATCH 10/34] Update file Signed-off-by: Luis Valdes --- typescript-sdk/apps/dojo/src/files.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/typescript-sdk/apps/dojo/src/files.json b/typescript-sdk/apps/dojo/src/files.json index 6f955a0bd..358055a4f 100644 --- a/typescript-sdk/apps/dojo/src/files.json +++ b/typescript-sdk/apps/dojo/src/files.json @@ -184,7 +184,7 @@ "adk-middleware::agentic_chat": [ { "name": "page.tsx", - 
"content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgent, useCopilotAction, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useCopilotAction({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`,\n };\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nexport default AgenticChat;\n", "language": "typescript", "type": "file" }, @@ -210,7 +210,7 @@ "adk-middleware::tool_based_generative_ui": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [],\n english: string[] | [],\n image_names: string[] | [],\n selectedImage: string | null,\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial\n setHaikus: Dispatch>\n haikus: GenerateHaiku[]\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n\n const chatTitle = 'Haiku Generator'\n const chatDescription = 'Ask me to create haikus'\n const initialLabel = 'I\\'m a haiku generator 👋. How can I help you?'\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && }\n \n \n );\n}\n\nfunction MobileChat({ chatTitle, chatDescription, initialLabel }: { chatTitle: string, chatDescription: string, initialLabel: string }) {\n const defaultChatHeight = 50\n\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n )\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\"\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

\n {generatedHaiku.english?.[index]}\n

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus(prevHaikus => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState([{\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\n \"A placeholder verse—\",\n \"even in a blank canvas,\",\n \"it beckons flowers.\",\n ],\n image_names: [],\n selectedImage: null,\n }])\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction({\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join('\\n -')}`,\n },\n ],\n followUp: false,\n handler: async ({ japanese, english, image_names }: { japanese: string[], english: string[], image_names: string[] }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus(prev => [newHaiku, ...prev].filter(h => h.english[0] !== \"A placeholder verse—\"));\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial }) => {\n return (\n \n );\n },\n }, [haikus]);\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n
\n
\n {haikus.map((haiku, index) => (\n (haikus.length == 1 || index == activeIndex) && (\n\n \n {haiku.japanese.map((line, lineIndex) => (\n \n

\n {line}\n

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName }\n } else {\n return { ...h }\n }\n })\n })}\n />\n ))}\n
\n )}\n
\n )\n ))}\n
\n \n \n );\n}\n\nfunction Thumbnails({ haikus, activeIndex, setActiveIndex, isMobile }: { haikus: Haiku[], activeIndex: number, setActiveIndex: (index: number) => void, isMobile: boolean }) {\n if (haikus.length == 0 || isMobile) { return null }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n \n

{line}

\n

{haiku.english?.[lineIndex]}

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n \n ))}\n \n )\n\n}", + "content": "\"use client\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotKitCSSProperties, CopilotSidebar, CopilotChat } from \"@copilotkit/react-ui\";\nimport { Dispatch, SetStateAction, useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport React, { useMemo } from \"react\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface ToolBasedGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\ninterface GenerateHaiku {\n japanese: string[] | [];\n english: string[] | [];\n image_names: string[] | [];\n selectedImage: string | null;\n}\n\ninterface HaikuCardProps {\n generatedHaiku: GenerateHaiku | Partial;\n setHaikus: Dispatch>;\n haikus: GenerateHaiku[];\n}\n\nexport default function ToolBasedGenerativeUI({ params }: ToolBasedGenerativeUIProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n\n const chatTitle = \"Haiku Generator\";\n const chatDescription = \"Ask me to create haikus\";\n const initialLabel = \"I'm a haiku generator 👋. How can I help you?\";\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n\n {/* Desktop Sidebar */}\n {!isMobile && (\n \n )}\n\n {/* Mobile Pull-Up Chat */}\n {isMobile && (\n \n )}\n \n \n );\n}\n\nfunction MobileChat({\n chatTitle,\n chatDescription,\n initialLabel,\n}: {\n chatTitle: string;\n chatDescription: string;\n initialLabel: string;\n}) {\n const defaultChatHeight = 50;\n\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n return (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen &&
setIsChatOpen(false)} />}\n \n );\n}\n\nconst VALID_IMAGE_NAMES = [\n \"Osaka_Castle_Turret_Stone_Wall_Pine_Trees_Daytime.jpg\",\n \"Tokyo_Skyline_Night_Tokyo_Tower_Mount_Fuji_View.jpg\",\n \"Itsukushima_Shrine_Miyajima_Floating_Torii_Gate_Sunset_Long_Exposure.jpg\",\n \"Takachiho_Gorge_Waterfall_River_Lush_Greenery_Japan.jpg\",\n \"Bonsai_Tree_Potted_Japanese_Art_Green_Foliage.jpeg\",\n \"Shirakawa-go_Gassho-zukuri_Thatched_Roof_Village_Aerial_View.jpg\",\n \"Ginkaku-ji_Silver_Pavilion_Kyoto_Japanese_Garden_Pond_Reflection.jpg\",\n \"Senso-ji_Temple_Asakusa_Cherry_Blossoms_Kimono_Umbrella.jpg\",\n \"Cherry_Blossoms_Sakura_Night_View_City_Lights_Japan.jpg\",\n \"Mount_Fuji_Lake_Reflection_Cherry_Blossoms_Sakura_Spring.jpg\",\n];\n\nfunction getRandomImage(): string {\n return VALID_IMAGE_NAMES[Math.floor(Math.random() * VALID_IMAGE_NAMES.length)];\n}\n\nconst validateAndCorrectImageNames = (rawNames: string[] | undefined): string[] | null => {\n if (!rawNames || rawNames.length !== 3) {\n return null;\n }\n\n const correctedNames: string[] = [];\n const usedValidNames = new Set();\n\n for (const name of rawNames) {\n if (VALID_IMAGE_NAMES.includes(name) && !usedValidNames.has(name)) {\n correctedNames.push(name);\n usedValidNames.add(name);\n if (correctedNames.length === 3) break;\n }\n }\n\n while (correctedNames.length < 3) {\n const nextImage = getRandomImage();\n if (!usedValidNames.has(nextImage)) {\n correctedNames.push(nextImage);\n usedValidNames.add(nextImage);\n }\n }\n\n return correctedNames.slice(0, 3);\n};\n\nfunction HaikuCard({ generatedHaiku, setHaikus, haikus }: HaikuCardProps) {\n return (\n \n
\n {generatedHaiku?.japanese?.map((line, index) => (\n
\n

{line}

\n

{generatedHaiku.english?.[index]}

\n
\n ))}\n {generatedHaiku?.japanese && generatedHaiku.japanese.length >= 2 && (\n
\n {(() => {\n const firstLine = generatedHaiku?.japanese?.[0];\n if (!firstLine) return null;\n const haikuIndex = haikus.findIndex((h: any) => h.japanese[0] === firstLine);\n const haiku = haikus[haikuIndex];\n if (!haiku?.image_names) return null;\n\n return haiku.image_names.map((imageName, imgIndex) => (\n {\n setHaikus((prevHaikus) => {\n const newHaikus = prevHaikus.map((h, idx) => {\n if (idx === haikuIndex) {\n return {\n ...h,\n selectedImage: imageName,\n };\n }\n return h;\n });\n return newHaikus;\n });\n }}\n />\n ));\n })()}\n
\n )}\n
\n
 \n );\n}\n\ninterface Haiku {\n japanese: string[];\n english: string[];\n image_names: string[];\n selectedImage: string | null;\n}\n\nfunction Haiku() {\n const [haikus, setHaikus] = useState<Haiku[]>([\n {\n japanese: [\"仮の句よ\", \"まっさらながら\", \"花を呼ぶ\"],\n english: [\"A placeholder verse—\", \"even in a blank canvas,\", \"it beckons flowers.\"],\n image_names: [],\n selectedImage: null,\n },\n ]);\n const [activeIndex, setActiveIndex] = useState(0);\n const [isJustApplied, setIsJustApplied] = useState(false);\n\n useCopilotAction(\n {\n name: \"generate_haiku\",\n parameters: [\n {\n name: \"japanese\",\n type: \"string[]\",\n },\n {\n name: \"english\",\n type: \"string[]\",\n },\n {\n name: \"image_names\",\n type: \"string[]\",\n description: `Names of 3 relevant images selected from the following: \\n -${VALID_IMAGE_NAMES.join(\"\\n -\")}`,\n },\n ],\n followUp: false,\n handler: async ({\n japanese,\n english,\n image_names,\n }: {\n japanese: string[];\n english: string[];\n image_names: string[];\n }) => {\n const finalCorrectedImages = validateAndCorrectImageNames(image_names);\n const newHaiku = {\n japanese: japanese || [],\n english: english || [],\n image_names: finalCorrectedImages || [],\n selectedImage: finalCorrectedImages?.[0] || null,\n };\n setHaikus((prev) =>\n [newHaiku, ...prev].filter((h) => h.english[0] !== \"A placeholder verse—\"),\n );\n setActiveIndex(haikus.length - 1);\n setIsJustApplied(true);\n setTimeout(() => setIsJustApplied(false), 600);\n return \"Haiku generated.\";\n },\n render: ({ args: generatedHaiku }: { args: Partial<GenerateHaiku> }) => {\n return <HaikuCard generatedHaiku={generatedHaiku} setHaikus={setHaikus} haikus={haikus} />;\n },\n },\n [haikus],\n );\n\n const { isMobile } = useMobileView();\n\n return (\n
\n \n\n {/* Main Display */}\n \n
\n {haikus.map(\n (haiku, index) =>\n (haikus.length == 1 || index == activeIndex) && (\n \n {haiku.japanese.map((line, lineIndex) => (\n \n \n {line}\n

\n \n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n \n {haiku.image_names.map((imageName, imgIndex) => (\n \n setHaikus((prevHaikus) => {\n return prevHaikus.map((h, idx) => {\n if (idx === index) {\n return { ...h, selectedImage: imageName };\n } else {\n return { ...h };\n }\n });\n })\n }\n />\n ))}\n
\n )}\n \n ),\n )}\n \n \n \n );\n}\n\nfunction Thumbnails({\n haikus,\n activeIndex,\n setActiveIndex,\n isMobile,\n}: {\n haikus: Haiku[];\n activeIndex: number;\n setActiveIndex: (index: number) => void;\n isMobile: boolean;\n}) {\n if (haikus.length == 0 || isMobile) {\n return null;\n }\n return (\n
\n {haikus.map((haiku, index) => (\n setActiveIndex(index)}\n >\n {haiku.japanese.map((line, lineIndex) => (\n
\n

{line}

\n

\n {haiku.english?.[lineIndex]}\n

\n
\n ))}\n {haiku.image_names && haiku.image_names.length === 3 && (\n
\n {haiku.image_names.map((imageName, imgIndex) => (\n \n ))}\n
\n )}\n
\n ))}\n \n );\n}\n", "language": "typescript", "type": "file" }, @@ -236,7 +236,7 @@ "adk-middleware::human_in_the_loop": [ { "name": "page.tsx", - "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n
\n {children}\n
\n
\n);\n\nconst StepHeader = ({ \n theme, \n enabledCount, \n totalCount, \n status, \n showStatus = false \n}: { \n theme?: string; \n enabledCount: number; \n totalCount: number; \n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n
\n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n \n
\n
0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({ \n step, \n theme, \n status, \n onToggle, \n disabled = false \n}: { \n step: { description: string; status: string }; \n theme?: string; \n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n
\n \n
\n);\n\nconst ActionButton = ({ \n variant, \n theme, \n disabled, \n onClick, \n children \n}: { \n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n \n const variantClasses = {\n primary: \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary: theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success: \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger: \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\"\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({ \n theme, \n variant = \"default\" \n}: { \n theme?: string; \n variant?: \"default\" | \"success\" | \"danger\" \n}) => (\n <>\n
\n
\n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n \n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter(step => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n \n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for langgraph integration.\n available: ['langgraph', 'langgraph-fastapi', 'langgraph-typescript'].includes(integrationId) ? 'disabled' : 'enabled',\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return ;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter(step => step.status === \"enabled\")});\n }\n };\n\n return (\n \n \n \n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n
\n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n
\n )}\n\n \n
\n );\n};\n\n\nexport default HumanInTheLoop;\n", + "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction, useLangGraphInterrupt } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n \n {children}\n
\n
\n);\n\nconst StepHeader = ({\n theme,\n enabledCount,\n totalCount,\n status,\n showStatus = false,\n}: {\n theme?: string;\n enabledCount: number;\n totalCount: number;\n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n \n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n\n \n 0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n
\n
\n);\n\nconst StepItem = ({\n step,\n theme,\n status,\n onToggle,\n disabled = false,\n}: {\n step: { description: string; status: string };\n theme?: string;\n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n \n \n \n);\n\nconst ActionButton = ({\n variant,\n theme,\n disabled,\n onClick,\n children,\n}: {\n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n\n const variantClasses = {\n primary:\n \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary:\n theme === \"dark\"\n ? \"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success:\n \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger:\n \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\",\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({\n theme,\n variant = \"default\",\n}: {\n theme?: string;\n variant?: \"default\" | \"success\" | \"danger\";\n}) => (\n <>\n \n \n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n\n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter((step) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n\n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n useCopilotAction({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // Langgraph uses its own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for the langgraph integration.\n available: [\"langgraph\", \"langgraph-fastapi\", \"langgraph-typescript\"].includes(integrationId)\n ? \"disabled\"\n : \"enabled\",\n renderAndWaitForResponse: ({ args, respond, status }) => {\n return ;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter((step) => step.status === \"enabled\") });\n }\n };\n\n return (\n \n \n\n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n \n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n \n )}\n\n \n
\n );\n};\n\nexport default HumanInTheLoop;\n", "language": "typescript", "type": "file" }, @@ -262,7 +262,7 @@ "adk-middleware::shared_state": [ { "name": "page.tsx", - "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50\n const {\n isChatOpen,\n setChatHeight,\n setIsChatOpen,\n isDragging,\n chatHeight,\n handleDragStart\n } = useMobileChat(defaultChatHeight)\n\n const chatTitle = 'AI Recipe Assistant'\n const chatDescription = 'Ask me to craft recipes'\n const initialLabel = 'Hi 👋 How can I help with your recipe?'\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n
\n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n setIsChatOpen(false)}\n />\n )}\n \n ) : (\n \n )}\n \n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => 
{\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n
\n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n \n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n \n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n
\n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", + "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { cloudAgents } from \"@/cloudAgents\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n let runtimeUrl = `/api/copilotkit/${integrationId}`;\n let publicApiKey: string | undefined = undefined;\n if (process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL) {\n runtimeUrl = process.env.NEXT_PUBLIC_COPILOTKIT_RUNTIME_URL;\n publicApiKey = cloudAgents.find((agent) => agent.id === integrationId)?.publicApiKey;\n }\n const { isMobile } = useMobileView();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n\n const chatTitle = \"AI Recipe Assistant\";\n const chatDescription = \"Ask me to craft recipes\";\n const initialLabel = \"Hi 👋 How can I help with your recipe?\";\n\n return (\n \n \n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to a good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n \n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = (partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, 
preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: [...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n \n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n
\n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", "language": "typescript", "type": "file" }, From a74d065f7358bea38489df5e9dd02650d152b52b Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 11:01:38 -0300 Subject: [PATCH 11/34] Set default value an empty array Signed-off-by: Luis Valdes --- .../langgraph/examples/python/agents/agentic_chat/agent.py | 2 +- .../examples/python/agents/agentic_chat_reasoning/agent.py | 2 +- .../examples/python/agents/agentic_generative_ui/agent.py | 2 +- .../langgraph/examples/python/agents/human_in_the_loop/agent.py | 2 +- .../examples/python/agents/predictive_state_updates/agent.py | 2 +- .../langgraph/examples/python/agents/shared_state/agent.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py b/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py index cf62ab0b4..695c1d485 100644 --- a/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py +++ b/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py @@ -41,7 +41,7 @@ async def chat_node(state: AgentState, config: Optional[RunnableConfig] = None): # 2. Bind the tools to the model model_with_tools = model.bind_tools( [ - *state["tools"], + *state.get("tools", []), # your_tool_here ], diff --git a/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat_reasoning/agent.py b/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat_reasoning/agent.py index 64ceb4889..26dc53f4e 100644 --- a/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat_reasoning/agent.py +++ b/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat_reasoning/agent.py @@ -52,7 +52,7 @@ async def chat_node(state: AgentState, config: Optional[RunnableConfig] = None): # 2. 
Bind the tools to the model
     model_with_tools = model.bind_tools(
         [
-            *state["tools"],
+            *state.get("tools", []),
             # your_tool_here
         ],
     )
diff --git a/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_generative_ui/agent.py b/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_generative_ui/agent.py
index 957be07c7..8734ec794 100644
--- a/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_generative_ui/agent.py
+++ b/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_generative_ui/agent.py
@@ -99,7 +99,7 @@ async def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):
     # Bind the tools to the model
     model_with_tools = model.bind_tools(
         [
-            *state["tools"],
+            *state.get("tools", []),
             generate_task_steps_generative_ui
         ],
         # Disable parallel tool calls to avoid race conditions
diff --git a/typescript-sdk/integrations/langgraph/examples/python/agents/human_in_the_loop/agent.py b/typescript-sdk/integrations/langgraph/examples/python/agents/human_in_the_loop/agent.py
index f9bb71d10..25fde3409 100644
--- a/typescript-sdk/integrations/langgraph/examples/python/agents/human_in_the_loop/agent.py
+++ b/typescript-sdk/integrations/langgraph/examples/python/agents/human_in_the_loop/agent.py
@@ -91,7 +91,7 @@ async def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):
     # Bind the tools to the model
     model_with_tools = model.bind_tools(
         [
-            *state["tools"],
+            *state.get("tools", []),
             plan_execution_steps
         ],
         # Disable parallel tool calls to avoid race conditions
diff --git a/typescript-sdk/integrations/langgraph/examples/python/agents/predictive_state_updates/agent.py b/typescript-sdk/integrations/langgraph/examples/python/agents/predictive_state_updates/agent.py
index 39b9f6ef1..a2accd7df 100644
--- a/typescript-sdk/integrations/langgraph/examples/python/agents/predictive_state_updates/agent.py
+++ b/typescript-sdk/integrations/langgraph/examples/python/agents/predictive_state_updates/agent.py
@@ -83,7 +83,7 @@ async def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):
     # Bind the tools to the model
     model_with_tools = model.bind_tools(
         [
-            *state["tools"],
+            *state.get("tools", []),
             write_document_local
         ],
         # Disable parallel tool calls to avoid race conditions
diff --git a/typescript-sdk/integrations/langgraph/examples/python/agents/shared_state/agent.py b/typescript-sdk/integrations/langgraph/examples/python/agents/shared_state/agent.py
index 4830f36b4..c8f21f09b 100644
--- a/typescript-sdk/integrations/langgraph/examples/python/agents/shared_state/agent.py
+++ b/typescript-sdk/integrations/langgraph/examples/python/agents/shared_state/agent.py
@@ -177,7 +177,7 @@ async def chat_node(state: Dict[str, Any], config: RunnableConfig):
     # Bind the tools to the model
     model_with_tools = model.bind_tools(
         [
-            *state["tools"],
+            *state.get("tools", []),
             generate_recipe
         ],
         # Disable parallel tool calls to avoid race conditions

From 3b824c9d60e5014b5f0620ab74311bf334b05132 Mon Sep 17 00:00:00 2001
From: Luis Valdes
Date: Thu, 18 Sep 2025 11:24:56 -0300
Subject: [PATCH 12/34] Add onrender python version file

Signed-off-by: Luis Valdes
---
 .../integrations/langgraph/examples/python/.python-version | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 typescript-sdk/integrations/langgraph/examples/python/.python-version

diff --git a/typescript-sdk/integrations/langgraph/examples/python/.python-version b/typescript-sdk/integrations/langgraph/examples/python/.python-version
new file mode 100644
index 000000000..be71774ed
--- /dev/null
+++ b/typescript-sdk/integrations/langgraph/examples/python/.python-version
@@ -0,0 +1 @@
+3.12.9
\ No newline at end of file

From b912e3a62b4f08a40bceea6c6c59077e4cfd4156 Mon Sep 17 00:00:00 2001
From: Luis Valdes
Date: Thu, 18 Sep 2025 11:28:52 -0300
Subject: [PATCH 13/34] Remove patch

Signed-off-by: Luis Valdes
---
 .../integrations/langgraph/examples/python/.python-version | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/typescript-sdk/integrations/langgraph/examples/python/.python-version b/typescript-sdk/integrations/langgraph/examples/python/.python-version
index be71774ed..fdcfcfdfc 100644
--- a/typescript-sdk/integrations/langgraph/examples/python/.python-version
+++ b/typescript-sdk/integrations/langgraph/examples/python/.python-version
@@ -1 +1 @@
-3.12.9
\ No newline at end of file
+3.12
\ No newline at end of file

From 5595874c0abbc878878fdd65bda22c2646a6e2e6 Mon Sep 17 00:00:00 2001
From: Luis Valdes
Date: Thu, 18 Sep 2025 11:42:14 -0300
Subject: [PATCH 14/34] Change python version

Signed-off-by: Luis Valdes
---
 .../integrations/langgraph/examples/python/.python-version | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/typescript-sdk/integrations/langgraph/examples/python/.python-version b/typescript-sdk/integrations/langgraph/examples/python/.python-version
index fdcfcfdfc..82dc52c3c 100644
--- a/typescript-sdk/integrations/langgraph/examples/python/.python-version
+++ b/typescript-sdk/integrations/langgraph/examples/python/.python-version
@@ -1 +1 @@
-3.12
\ No newline at end of file
+3.13.4
\ No newline at end of file

From e024c73c0dc8577309b3b37332b96239de5e10ea Mon Sep 17 00:00:00 2001
From: Luis Valdes
Date: Thu, 18 Sep 2025 12:04:01 -0300
Subject: [PATCH 15/34] Add configuration for onrender

Signed-off-by: Luis Valdes
---
 .../langgraph/examples/python/render.yaml | 13 +++++++++++++
 .../integrations/langgraph/examples/python/start.sh | 4 ++++
 2 files changed, 17 insertions(+)
 create mode 100644 typescript-sdk/integrations/langgraph/examples/python/render.yaml
 create mode 100644 typescript-sdk/integrations/langgraph/examples/python/start.sh

diff --git a/typescript-sdk/integrations/langgraph/examples/python/render.yaml b/typescript-sdk/integrations/langgraph/examples/python/render.yaml
new file mode 100644
index 000000000..9b98bb067
--- /dev/null
+++ b/typescript-sdk/integrations/langgraph/examples/python/render.yaml
@@ -0,0 +1,13 @@
+services:
+  - type: web
+    name: langgraph-agui-dojo
+    runtime: python
+    plan: free
+    buildCommand: |
+      chmod +x start.sh
+      python -m pip install --upgrade pip
+      pip install poetry
+    startCommand: ./start.sh
+    envVars:
+      - key: PYTHON_VERSION
+        value: "3.13.4"
diff --git a/typescript-sdk/integrations/langgraph/examples/python/start.sh b/typescript-sdk/integrations/langgraph/examples/python/start.sh
new file mode 100644
index 000000000..5b301fdf7
--- /dev/null
+++ b/typescript-sdk/integrations/langgraph/examples/python/start.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+# Start the application
+exec pnpx @langchain/langgraph-cli@latest dev --no-browser --host 0.0.0.0 --port ${PORT:-8000}

From 82cf10e837b5f0982ac953e01afaa371237925e2 Mon Sep 17 00:00:00 2001
From: Luis Valdes
Date: Thu, 18 Sep 2025 12:10:25 -0300
Subject: [PATCH 16/34] Unset environment variable

Signed-off-by: Luis Valdes
---
 .../integrations/langgraph/examples/python/render.yaml | 2 ++
 .../integrations/langgraph/examples/python/start.sh | 4 ++++
 2 files changed, 6 insertions(+)

diff --git a/typescript-sdk/integrations/langgraph/examples/python/render.yaml b/typescript-sdk/integrations/langgraph/examples/python/render.yaml
index 9b98bb067..4b70541c4 100644
--- a/typescript-sdk/integrations/langgraph/examples/python/render.yaml
+++ b/typescript-sdk/integrations/langgraph/examples/python/render.yaml
@@ -11,3 +11,5 @@ services:
     envVars:
       - key: PYTHON_VERSION
         value: "3.13.4"
+      - key: VIRTUAL_ENV
+        value: ""
diff --git a/typescript-sdk/integrations/langgraph/examples/python/start.sh b/typescript-sdk/integrations/langgraph/examples/python/start.sh
index 5b301fdf7..00be27df5 100644
--- a/typescript-sdk/integrations/langgraph/examples/python/start.sh
+++ b/typescript-sdk/integrations/langgraph/examples/python/start.sh
@@ -1,4 +1,8 @@
 #!/bin/bash
 
+# Handle virtual environment path mismatch by unsetting VIRTUAL_ENV
+# This prevents the warning about path mismatch
+unset VIRTUAL_ENV
+
 # Start the application
 exec pnpx @langchain/langgraph-cli@latest dev --no-browser --host 0.0.0.0 --port ${PORT:-8000}

From 8df85fe3fa493d2861de7b22157e83eca04f393e Mon Sep 17 00:00:00 2001
From: Luis Valdes
Date: Thu, 18 Sep 2025 12:51:19 -0300
Subject: [PATCH 17/34] Change python version

Signed-off-by: Luis Valdes
---
 .../langgraph/examples/python/.python-version | 2 +-
 .../langgraph/examples/python/render.yaml | 15 ---------------
 .../langgraph/examples/python/start.sh | 8 --------
 3 files changed, 1 insertion(+), 24 deletions(-)
 delete mode 100644 typescript-sdk/integrations/langgraph/examples/python/render.yaml
 delete mode 100644 typescript-sdk/integrations/langgraph/examples/python/start.sh

diff --git a/typescript-sdk/integrations/langgraph/examples/python/.python-version b/typescript-sdk/integrations/langgraph/examples/python/.python-version
index 82dc52c3c..be71774ed 100644
--- a/typescript-sdk/integrations/langgraph/examples/python/.python-version
+++ b/typescript-sdk/integrations/langgraph/examples/python/.python-version
@@ -1 +1 @@
-3.13.4
\ No newline at end of file
+3.12.9
\ No newline at end of file
diff --git a/typescript-sdk/integrations/langgraph/examples/python/render.yaml b/typescript-sdk/integrations/langgraph/examples/python/render.yaml
deleted file mode 100644
index 4b70541c4..000000000
--- a/typescript-sdk/integrations/langgraph/examples/python/render.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-services:
-  - type: web
-    name: langgraph-agui-dojo
-    runtime: python
-    plan: free
-    buildCommand: |
-      chmod +x start.sh
-      python -m pip install --upgrade pip
-      pip install poetry
-    startCommand: ./start.sh
-    envVars:
-      - key: PYTHON_VERSION
-        value: "3.13.4"
-      - key: VIRTUAL_ENV
-        value: ""
diff --git a/typescript-sdk/integrations/langgraph/examples/python/start.sh b/typescript-sdk/integrations/langgraph/examples/python/start.sh
deleted file mode 100644
index 00be27df5..000000000
--- a/typescript-sdk/integrations/langgraph/examples/python/start.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-# Handle virtual environment path mismatch by unsetting VIRTUAL_ENV
-# This prevents the warning about path mismatch
-unset VIRTUAL_ENV
-
-# Start the application
-exec pnpx @langchain/langgraph-cli@latest dev --no-browser --host 0.0.0.0 --port ${PORT:-8000}

From 017da0bba59667cf4e8cc47fd700a7e748573fca Mon Sep 17 00:00:00 2001
From: Luis Valdes
Date: Thu, 18 Sep 2025 13:26:03 -0300
Subject: [PATCH 18/34] Add missing tool

Signed-off-by: Luis Valdes
---
 .../agents/tool_based_generative_ui/agent.py | 37 +++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/typescript-sdk/integrations/langgraph/examples/python/agents/tool_based_generative_ui/agent.py b/typescript-sdk/integrations/langgraph/examples/python/agents/tool_based_generative_ui/agent.py
index bf683ebdb..474a56a10 100644
--- a/typescript-sdk/integrations/langgraph/examples/python/agents/tool_based_generative_ui/agent.py
+++ b/typescript-sdk/integrations/langgraph/examples/python/agents/tool_based_generative_ui/agent.py
@@ -13,6 +13,42 @@
 from langgraph.graph import MessagesState
 from langgraph.prebuilt import ToolNode
 
+# This tool generates a haiku on the server.
+# The tool call will be streamed to the frontend as it is being generated.
+GENERATE_HAIKU_TOOL = {
+    "type": "function",
+    "function": {
+        "name": "generate_haiku",
+        "description": "Generate a haiku in Japanese and its English translation",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "japanese": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    },
+                    "description": "An array of three lines of the haiku in Japanese"
+                },
+                "english": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    },
+                    "description": "An array of three lines of the haiku in English"
+                },
+                "image_names": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    },
+                    "description": "Names of 3 relevant images from the provided list"
+                }
+            },
+            "required": ["japanese", "english", "image_names"]
+        }
+    }
+}
 
 class AgentState(MessagesState):
     """
@@ -37,6 +73,7 @@ async def chat_node(state: AgentState, config: RunnableConfig) -> Command[Litera
     model_with_tools = model.bind_tools(
         [
             *state.get("tools", []), # bind tools defined by ag-ui
+            GENERATE_HAIKU_TOOL,
         ],
         parallel_tool_calls=False,
     )

From 5474e4eba2fb725637917dbd89fe618aa20af832 Mon Sep 17 00:00:00 2001
From: Luis Valdes
Date: Thu, 18 Sep 2025 13:51:55 -0300
Subject: [PATCH 19/34] Add tool to change background

Signed-off-by: Luis Valdes
---
 .../examples/python/agents/agentic_chat/agent.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py b/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py
index 695c1d485..c325d47bd 100644
--- a/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py
+++ b/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py
@@ -12,6 +12,7 @@
 from langgraph.graph import StateGraph, END, START
 from langgraph.graph import MessagesState
 from langgraph.types import Command
+from langchain_core.tools import tool
 
 class AgentState(MessagesState):
     """
@@ -19,6 +20,15 @@ class AgentState(MessagesState):
     """
     tools: List[Any] = []
 
+@tool
+def change_background(background: str) -> str: # pylint: disable=unused-argument
+    """
+    Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients etc.
+
+    Args:
+        background: str: The background color to change to. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients etc.
+    """ # pylint: disable=line-too-long
+
 async def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):
     """
     Standard chat node based on the ReAct design pattern. It handles:
@@ -42,6 +52,7 @@ async def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):
     model_with_tools = model.bind_tools(
         [
             *state.get("tools", []),
+            change_background,
 
             # your_tool_here
         ],

From c32f8bd96cbe150f50f5d6163446a87d03129cb5 Mon Sep 17 00:00:00 2001
From: Luis Valdes
Date: Thu, 18 Sep 2025 14:39:13 -0300
Subject: [PATCH 20/34] Modify testing

Signed-off-by: Luis Valdes
---
 .../apps/dojo/e2e/featurePages/ToolBaseGenUIPage.ts | 10 ++++++++++
 .../examples/python/agents/agentic_chat/agent.py | 10 +---------
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/typescript-sdk/apps/dojo/e2e/featurePages/ToolBaseGenUIPage.ts b/typescript-sdk/apps/dojo/e2e/featurePages/ToolBaseGenUIPage.ts
index f2d648a5e..06a69e777 100644
--- a/typescript-sdk/apps/dojo/e2e/featurePages/ToolBaseGenUIPage.ts
+++ b/typescript-sdk/apps/dojo/e2e/featurePages/ToolBaseGenUIPage.ts
@@ -73,7 +73,17 @@ export class ToolBaseGenUIPage {
   }
 
   async extractMainDisplayHaikuContent(page: Page): Promise<string[]> {
+    // Wait for the main haiku display to be visible
+    await page.waitForSelector('[data-testid="main-haiku-display"]', { state: 'visible' });
+
     const mainDisplayLines = page.locator('[data-testid="main-haiku-line"]');
+
+    // Wait for at least 3 haiku lines to be present
+    await page.waitForFunction(() => {
+      const elements = document.querySelectorAll('[data-testid="main-haiku-line"]');
+      return elements.length >= 3;
+    });
+
     const mainCount = await mainDisplayLines.count();
     const lines: string[] = [];
 
diff --git a/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py b/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py
index c325d47bd..2c3419477 100644
--- a/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py
+++ b/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py
@@ -14,20 +14,13 @@
 from langgraph.types import Command
 from langchain_core.tools import tool
 
+
 class AgentState(MessagesState):
     """
     State of our graph.
     """
     tools: List[Any] = []
 
-@tool
-def change_background(background: str) -> str: # pylint: disable=unused-argument
-    """
-    Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients etc.
-
-    Args:
-        background: str: The background color to change to. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients etc.
- """ # pylint: disable=line-too-long async def chat_node(state: AgentState, config: Optional[RunnableConfig] = None): """ @@ -52,7 +45,6 @@ async def chat_node(state: AgentState, config: Optional[RunnableConfig] = None): model_with_tools = model.bind_tools( [ *state.get("tools", []), - change_background, # your_tool_here ], From c6e3f34eb8fbef702ff10a8f97c828ae8c040bfb Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 15:05:26 -0300 Subject: [PATCH 21/34] Add tool Signed-off-by: Luis Valdes --- .../examples/python/agents/agentic_chat/agent.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py b/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py index 2c3419477..180167cb1 100644 --- a/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py +++ b/typescript-sdk/integrations/langgraph/examples/python/agents/agentic_chat/agent.py @@ -14,7 +14,6 @@ from langgraph.types import Command from langchain_core.tools import tool - class AgentState(MessagesState): """ State of our graph. @@ -22,6 +21,16 @@ class AgentState(MessagesState): tools: List[Any] = [] +@tool +def change_background(background: str) -> str: # pylint: disable=unused-argument + """ + Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc. + + Args: + background: str: The background color to change to. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc. + """ # pylint: disable=line-too-long + + async def chat_node(state: AgentState, config: Optional[RunnableConfig] = None): """ Standard chat node based on the ReAct design pattern. 
It handles: @@ -45,6 +54,7 @@ async def chat_node(state: AgentState, config: Optional[RunnableConfig] = None): model_with_tools = model.bind_tools( [ *state.get("tools", []), + change_background, # your_tool_here ], From 4df875439c153a422624f8bc8488e5b13da26349 Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 15:19:19 -0300 Subject: [PATCH 22/34] Change logic to get the background Signed-off-by: Luis Valdes --- .../dojo/e2e/featurePages/AgenticChatPage.ts | 129 +++++++++++++++--- 1 file changed, 110 insertions(+), 19 deletions(-) diff --git a/typescript-sdk/apps/dojo/e2e/featurePages/AgenticChatPage.ts b/typescript-sdk/apps/dojo/e2e/featurePages/AgenticChatPage.ts index 3bb6d34d4..01c02cb73 100644 --- a/typescript-sdk/apps/dojo/e2e/featurePages/AgenticChatPage.ts +++ b/typescript-sdk/apps/dojo/e2e/featurePages/AgenticChatPage.ts @@ -57,33 +57,92 @@ export class AgenticChatPage { async getBackground( property: "backgroundColor" | "backgroundImage" = "backgroundColor" ): Promise { - // Wait a bit for background to apply - await this.page.waitForTimeout(500); + // Wait for React to render and apply styles + await this.page.waitForTimeout(2000); - // Try multiple selectors for the background element + // Wait for the main container with background style to be present + await this.page.waitForSelector('.flex.justify-center.items-center.h-full.w-full', { + state: 'visible', + timeout: 10000 + }); + + // Try to get the background from the main container + const mainContainer = this.page.locator('.flex.justify-center.items-center.h-full.w-full').first(); + + try { + const backgroundValue = await mainContainer.evaluate((el) => { + // Get the inline style background value + const inlineBackground = el.style.background; + if (inlineBackground && inlineBackground !== '--copilot-kit-background-color') { + return inlineBackground; + } + + // Get computed style + const computedStyle = getComputedStyle(el); + const computedBackground = computedStyle.background; + const computedBackgroundColor = computedStyle.backgroundColor; + + // Check if it's a CSS custom property + if (inlineBackground === '--copilot-kit-background-color') { + // Try to resolve the CSS custom property + const customPropValue = computedStyle.getPropertyValue('--copilot-kit-background-color'); + if (customPropValue) { + return customPropValue; + } + } + + // Return computed values + if (computedBackground && computedBackground !== 'rgba(0, 0, 0, 0)' && computedBackground !== 'transparent') { + return computedBackground; + } + + if (computedBackgroundColor && computedBackgroundColor !== 'rgba(0, 0, 0, 0)' && computedBackgroundColor !== 'transparent') { + return computedBackgroundColor; + } + + return computedBackground || computedBackgroundColor; + }); + + console.log(`Main container background: ${backgroundValue}`); + + if (backgroundValue && backgroundValue !== 'rgba(0, 0, 0, 0)' && backgroundValue !== 'transparent') { + return backgroundValue; + } + } catch (error) { + console.log('Error getting background from main container:', error); + } + + // Fallback: try other selectors const selectors = [ 'div[style*="background"]', 'div[style*="background-color"]', - '.flex.justify-center.items-center.h-full.w-full', - 'div.flex.justify-center.items-center.h-full.w-full', - '[class*="bg-"]', - 'div[class*="background"]' + '.copilotKitWindow', + 'body' ]; for (const selector of selectors) { try { const element = this.page.locator(selector).first(); - if (await element.isVisible({ timeout: 1000 })) { + console.log(`Checking 
fallback selector: ${selector}`); + + if (await element.isVisible({ timeout: 5000 })) { const value = await element.evaluate( (el, prop) => { - // Check inline style first - if (el.style.background) return el.style.background; - if (el.style.backgroundColor) return el.style.backgroundColor; + const computedStyle = getComputedStyle(el); + const inlineStyle = el.style[prop as any]; + + // Prefer inline style + if (inlineStyle && inlineStyle !== 'rgba(0, 0, 0, 0)' && inlineStyle !== 'transparent') { + return inlineStyle; + } + // Then computed style - return getComputedStyle(el)[prop as any]; + const computedValue = computedStyle[prop as any]; + return computedValue; }, property ); + if (value && value !== "rgba(0, 0, 0, 0)" && value !== "transparent") { console.log(`[${selector}] ${property}: ${value}`); return value; @@ -94,13 +153,45 @@ export class AgenticChatPage { } } - // Fallback to original element - const value = await this.chatBackground.first().evaluate( - (el, prop) => getComputedStyle(el)[prop as any], - property - ); - console.log(`[Fallback] ${property}: ${value}`); - return value; + // Final fallback + const fallbackValue = await this.page.evaluate((prop) => { + return getComputedStyle(document.body)[prop as any]; + }, property); + + console.log(`[Final Fallback] ${property}: ${fallbackValue}`); + return fallbackValue; + } + + async waitForBackgroundChange(expectedBackground?: string, timeout: number = 10000): Promise { + const startTime = Date.now(); + + while (Date.now() - startTime < timeout) { + try { + const currentBackground = await this.getBackground(); + + // If we're looking for a specific background + if (expectedBackground) { + if (currentBackground.includes(expectedBackground) || + currentBackground === expectedBackground) { + return; + } + } else { + // Just wait for any non-default background + if (currentBackground !== 'oklch(1 0 0)' && + currentBackground !== 'rgba(0, 0, 0, 0)' && + currentBackground !== 'transparent' && + !currentBackground.includes('--copilot-kit-background-color')) { + return; + } + } + + await this.page.waitForTimeout(500); + } catch (error) { + await this.page.waitForTimeout(500); + } + } + + throw new Error(`Background did not change to expected value within ${timeout}ms`); } async getGradientButtonByName(name: string | RegExp) { From 05aa7795bfc8df3d3dc09908cc82d9622f2d2ddc Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 15:36:11 -0300 Subject: [PATCH 23/34] Add condition to wait for haiku lines equals to 3 Signed-off-by: Luis Valdes --- .../e2e/featurePages/ToolBaseGenUIPage.ts | 44 ++++++++++++------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/typescript-sdk/apps/dojo/e2e/featurePages/ToolBaseGenUIPage.ts b/typescript-sdk/apps/dojo/e2e/featurePages/ToolBaseGenUIPage.ts index 06a69e777..c0d86fb88 100644 --- a/typescript-sdk/apps/dojo/e2e/featurePages/ToolBaseGenUIPage.ts +++ b/typescript-sdk/apps/dojo/e2e/featurePages/ToolBaseGenUIPage.ts @@ -33,39 +33,51 @@ export class ToolBaseGenUIPage { } async extractChatHaikuContent(page: Page): Promise { - await page.waitForTimeout(3000); - await page.locator('[data-testid="haiku-card"]').first().waitFor({ state: 'visible' }); + // Wait for haiku cards to be visible + await page.waitForSelector('[data-testid="haiku-card"]', { state: 'visible' }); + const allHaikuCards = page.locator('[data-testid="haiku-card"]'); const cardCount = await allHaikuCards.count(); let chatHaikuContainer; let chatHaikuLines; + // Find the most recent haiku card with lines for (let 
cardIndex = cardCount - 1; cardIndex >= 0; cardIndex--) { chatHaikuContainer = allHaikuCards.nth(cardIndex); chatHaikuLines = chatHaikuContainer.locator('[data-testid="haiku-line"]'); - const linesCount = await chatHaikuLines.count(); - - if (linesCount > 0) { - try { - await chatHaikuLines.first().waitFor({ state: 'visible', timeout: 5000 }); - break; - } catch (error) { - continue; - } + + try { + // Wait for at least 3 haiku lines to be present in this card + await page.waitForFunction((cardIdx) => { + const cards = document.querySelectorAll('[data-testid="haiku-card"]'); + if (cards[cardIdx]) { + const lines = cards[cardIdx].querySelectorAll('[data-testid="haiku-line"]'); + return lines.length >= 3; + } + return false; + }, cardIndex, { timeout: 10000 }); + + // Verify the lines are visible + await chatHaikuLines.first().waitFor({ state: 'visible', timeout: 5000 }); + break; + } catch (error) { + continue; } } if (!chatHaikuLines) { - throw new Error('No haiku cards with visible lines found'); + throw new Error('No haiku cards with 3 visible lines found'); } const count = await chatHaikuLines.count(); const lines: string[] = []; - for (let i = 0; i < count; i++) { - const haikuLine = chatHaikuLines.nth(i); - const japaneseText = await haikuLine.locator('p').first().innerText(); - lines.push(japaneseText); + if (count > 0) { + for (let i = 0; i < count; i++) { + const haikuLine = chatHaikuLines.nth(i); + const japaneseText = await haikuLine.locator('p').first().innerText(); + lines.push(japaneseText); + } } const chatHaikuContent = lines.join('').replace(/\s/g, ''); From cd8eb55010c192b5c83da325a0f48f3d15610ca0 Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 15:41:37 -0300 Subject: [PATCH 24/34] Disable ADK tests when running cloud agents Signed-off-by: Luis Valdes --- .../dojo/e2e/tests/adkMiddlewareTests/agenticChatPage.spec.ts | 2 ++ .../e2e/tests/adkMiddlewareTests/humanInTheLoopPage.spec.ts | 3 +++ .../tests/adkMiddlewareTests/predictiveStateUpdatePage.spec.ts | 3 +++ .../dojo/e2e/tests/adkMiddlewareTests/sharedStatePage.spec.ts | 3 +++ .../e2e/tests/adkMiddlewareTests/toolBasedGenUIPage.spec.ts | 3 +++ 5 files changed, 14 insertions(+) diff --git a/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/agenticChatPage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/agenticChatPage.spec.ts index 74521ef2b..5bb827697 100644 --- a/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/agenticChatPage.spec.ts +++ b/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/agenticChatPage.spec.ts @@ -6,6 +6,8 @@ import { } from "../../test-isolation-helper"; import { AgenticChatPage } from "../../featurePages/AgenticChatPage"; +// Skip all tests in this file when CLOUD_AGENTS is set +test.skip(!!process.env.CLOUD_AGENTS, 'Skipping ADK Middleware tests when CLOUD_AGENTS is set'); test.describe("Agentic Chat Feature", () => { test("[ADK Middleware] Agentic Chat sends and receives a message", async ({ diff --git a/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/humanInTheLoopPage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/humanInTheLoopPage.spec.ts index 87c5b29af..453b0a932 100644 --- a/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/humanInTheLoopPage.spec.ts +++ b/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/humanInTheLoopPage.spec.ts @@ -1,6 +1,9 @@ import { test, expect, waitForAIResponse, retryOnAIFailure } from "../../test-isolation-helper"; import { HumanInLoopPage } from 
"../../pages/adkMiddlewarePages/HumanInLoopPage"; +// Skip all tests in this file when CLOUD_AGENTS is set +test.skip(!!process.env.CLOUD_AGENTS, 'Skipping ADK Middleware tests when CLOUD_AGENTS is set'); + test.describe("Human in the Loop Feature", () => { test("[ADK Middleware] should interact with the chat and perform steps", async ({ page, diff --git a/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/predictiveStateUpdatePage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/predictiveStateUpdatePage.spec.ts index 67bafee68..aed715db5 100644 --- a/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/predictiveStateUpdatePage.spec.ts +++ b/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/predictiveStateUpdatePage.spec.ts @@ -5,6 +5,9 @@ import { } from "../../test-isolation-helper"; import { PredictiveStateUpdatesPage } from "../../pages/adkMiddlewarePages/PredictiveStateUpdatesPage"; +// Skip all tests in this file when CLOUD_AGENTS is set +test.skip(!!process.env.CLOUD_AGENTS, 'Skipping ADK Middleware tests when CLOUD_AGENTS is set'); + test.describe("Predictive State Updates Feature", () => { test("[ADK Middleware] should interact with agent and approve asked changes", async ({ page, diff --git a/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/sharedStatePage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/sharedStatePage.spec.ts index 4a6cd11b6..93c7a456e 100644 --- a/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/sharedStatePage.spec.ts +++ b/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/sharedStatePage.spec.ts @@ -1,6 +1,9 @@ import { test, expect } from "@playwright/test"; import { SharedStatePage } from "../../featurePages/SharedStatePage"; +// Skip all tests in this file when CLOUD_AGENTS is set +test.skip(!!process.env.CLOUD_AGENTS, 'Skipping ADK Middleware tests when CLOUD_AGENTS is set'); + test.describe("Shared State Feature", () => { test("[ADK Middleware] should interact with the chat to get a recipe on prompt", async ({ page, diff --git a/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/toolBasedGenUIPage.spec.ts b/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/toolBasedGenUIPage.spec.ts index 15d3c768b..57966a4ed 100644 --- a/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/toolBasedGenUIPage.spec.ts +++ b/typescript-sdk/apps/dojo/e2e/tests/adkMiddlewareTests/toolBasedGenUIPage.spec.ts @@ -1,6 +1,9 @@ import { test, expect } from "@playwright/test"; import { ToolBaseGenUIPage } from "../../featurePages/ToolBaseGenUIPage"; +// Skip all tests in this file when CLOUD_AGENTS is set +test.skip(!!process.env.CLOUD_AGENTS, 'Skipping ADK Middleware tests when CLOUD_AGENTS is set'); + const pageURL = "/adk-middleware/feature/tool_based_generative_ui"; test.describe("Tool Based Generative UI Feature", () => { From 14490ed132872ff2414083283004bbdd9923209c Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 16:28:24 -0300 Subject: [PATCH 25/34] Add logic to wait for main content to be edited Signed-off-by: Luis Valdes --- .../e2e/featurePages/ToolBaseGenUIPage.ts | 41 +++++++++++++++++-- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/typescript-sdk/apps/dojo/e2e/featurePages/ToolBaseGenUIPage.ts b/typescript-sdk/apps/dojo/e2e/featurePages/ToolBaseGenUIPage.ts index c0d86fb88..a1d8dfdb9 100644 --- a/typescript-sdk/apps/dojo/e2e/featurePages/ToolBaseGenUIPage.ts +++ b/typescript-sdk/apps/dojo/e2e/featurePages/ToolBaseGenUIPage.ts @@ -112,9 +112,13 @@ export class 
ToolBaseGenUIPage { } async checkHaikuDisplay(page: Page): Promise<void> { + // Wait for both chat and main display to be fully loaded + await page.waitForTimeout(3000); + const chatHaikuContent = await this.extractChatHaikuContent(page); - await page.waitForTimeout(5000); + // Wait a bit more for main display to sync + await page.waitForTimeout(2000); const mainHaikuContent = await this.extractMainDisplayHaikuContent(page); @@ -123,14 +127,43 @@ export class ToolBaseGenUIPage { return; } + // Check if contents match exactly if (chatHaikuContent === mainHaikuContent) { expect(mainHaikuContent).toBe(chatHaikuContent); + return; + } + + // If they don't match, check if one is a substring of the other (partial loading) + if (mainHaikuContent.includes(chatHaikuContent) || chatHaikuContent.includes(mainHaikuContent)) { + console.log(`Content partially matches - Chat: "${chatHaikuContent}", Main: "${mainHaikuContent}"`); + + // Wait for content to stabilize and try again + await page.waitForTimeout(5000); + + const finalChatContent = await this.extractChatHaikuContent(page); + const finalMainContent = await this.extractMainDisplayHaikuContent(page); + + // Use the longer content as the expected result (more complete) + const expectedContent = finalChatContent.length >= finalMainContent.length ? finalChatContent : finalMainContent; + + expect(finalMainContent).toBe(expectedContent); + expect(finalChatContent).toBe(expectedContent); } else { - await page.waitForTimeout(3000); + // Contents are completely different - this might indicate an error + console.log(`Content mismatch - Chat: "${chatHaikuContent}", Main: "${mainHaikuContent}"`); + + // Wait longer and try one more time + await page.waitForTimeout(5000); + + const retryMainContent = await this.extractMainDisplayHaikuContent(page); + const retryChatContent = await this.extractChatHaikuContent(page); - const updatedMainContent = await this.extractMainDisplayHaikuContent(page); + // At least verify both have content + expect(retryChatContent.length).toBeGreaterThan(0); + expect(retryMainContent.length).toBeGreaterThan(0); - expect(updatedMainContent).toBe(chatHaikuContent); + // Try to match again + expect(retryMainContent).toBe(retryChatContent); } } } \ No newline at end of file From d4902643751c4db8e65f496e7a9b90ec8d09ddc7 Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 17:21:04 -0300 Subject: [PATCH 26/34] Add weatherTool Signed-off-by: Luis Valdes --- typescript-sdk/apps/dojo/src/mastra/index.ts | 80 +++++++++++++++++++- 1 file changed, 79 insertions(+), 1 deletion(-) diff --git a/typescript-sdk/apps/dojo/src/mastra/index.ts b/typescript-sdk/apps/dojo/src/mastra/index.ts index abc55418a..df718d80e 100644 --- a/typescript-sdk/apps/dojo/src/mastra/index.ts +++ b/typescript-sdk/apps/dojo/src/mastra/index.ts @@ -34,7 +34,7 @@ export const mastra = new Mastra({ Your primary function is to help users get weather details for specific locations. When responding: - Always ask for a location if none is provided - - If the location name isn’t in English, please translate it + - If the location name isn't in English, please translate it - If giving a location with multiple parts (e.g. "New York, NY"), use the most relevant part (e.g. 
"New York") - Include relevant details like humidity, wind conditions, and precipitation - Keep responses concise but informative @@ -53,6 +53,84 @@ export const mastra = new Mastra({ }, }, }), + tools: { + weatherTool: createTool({ + id: "weatherTool", + description: "Get current weather for a location", + inputSchema: z.object({ + location: z.string().describe("The location to get weather for"), + }), + outputSchema: z.string(), + execute: async ({ context }) => { + const { location } = context; + + try { + // Use OpenWeatherMap API or similar weather service + // For now, we'll use a free weather API (Open-Meteo) + const geocodeResponse = await fetch( + `https://geocoding-api.open-meteo.com/v1/search?name=${encodeURIComponent(location)}&count=1&language=en&format=json` + ); + + if (!geocodeResponse.ok) { + throw new Error(`Geocoding failed: ${geocodeResponse.status}`); + } + + const geocodeData = await geocodeResponse.json(); + + if (!geocodeData.results || geocodeData.results.length === 0) { + return `Sorry, I couldn't find weather data for "${location}". Please check the location name and try again.`; + } + + const { latitude, longitude, name, country } = geocodeData.results[0]; + + // Get weather data + const weatherResponse = await fetch( + `https://api.open-meteo.com/v1/forecast?latitude=${latitude}&longitude=${longitude}¤t=temperature_2m,relative_humidity_2m,wind_speed_10m,weather_code&timezone=auto` + ); + + if (!weatherResponse.ok) { + throw new Error(`Weather API failed: ${weatherResponse.status}`); + } + + const weatherData = await weatherResponse.json(); + + if (!weatherData.current) { + return `Sorry, I couldn't retrieve current weather data for "${location}". The weather service might be temporarily unavailable.`; + } + + const current = weatherData.current; + const temperature = current.temperature_2m; + const humidity = current.relative_humidity_2m; + const windSpeed = current.wind_speed_10m; + const weatherCode = current.weather_code; + + // Simple weather code mapping + const getWeatherCondition = (code: number): string => { + if (code === 0) return "Clear sky"; + if (code <= 3) return "Partly cloudy"; + if (code <= 48) return "Foggy"; + if (code <= 67) return "Rainy"; + if (code <= 77) return "Snowy"; + if (code <= 82) return "Rainy"; + if (code <= 86) return "Snowy"; + return "Stormy"; + }; + + const condition = getWeatherCondition(weatherCode); + + return `The current weather in ${name}, ${country} is as follows: +Temperature: ${temperature}°C +Humidity: ${humidity}% +Wind Speed: ${windSpeed} km/h +Conditions: ${condition}`; + + } catch (error) { + console.error("Weather tool error:", error); + return `I'm sorry, but I'm having trouble retrieving weather data for "${location}" at the moment. This could be due to a temporary service issue. 
Please try again later or check another weather source.`; + } + }, + }), + }, + }), shared_state: new Agent({ name: "shared_state", From d63923e454f8f254c066f36b6672efa80a801bb7 Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 17:53:27 -0300 Subject: [PATCH 27/34] Add logging Signed-off-by: Luis Valdes --- .../example/src/mastra/tools/weather-tool.ts | 67 +++++++++++++------ 1 file changed, 48 insertions(+), 19 deletions(-) diff --git a/typescript-sdk/integrations/mastra/example/src/mastra/tools/weather-tool.ts b/typescript-sdk/integrations/mastra/example/src/mastra/tools/weather-tool.ts index 5a0e9b16c..86685122e 100644 --- a/typescript-sdk/integrations/mastra/example/src/mastra/tools/weather-tool.ts +++ b/typescript-sdk/integrations/mastra/example/src/mastra/tools/weather-tool.ts @@ -41,30 +41,59 @@ export const weatherTool = createTool({ }); const getWeather = async (location: string) => { - const geocodingUrl = `https://geocoding-api.open-meteo.com/v1/search?name=${encodeURIComponent(location)}&count=1`; - const geocodingResponse = await fetch(geocodingUrl); - const geocodingData = (await geocodingResponse.json()) as GeocodingResponse; + try { + const geocodingUrl = `https://geocoding-api.open-meteo.com/v1/search?name=${encodeURIComponent(location)}&count=1&language=en&format=json`; + const geocodingResponse = await fetch(geocodingUrl); - if (!geocodingData.results?.[0]) { - throw new Error(`Location '${location}' not found`); - } + if (!geocodingResponse.ok) { + throw new Error(`Geocoding API failed with status: ${geocodingResponse.status}`); + } - const { latitude, longitude, name } = geocodingData.results[0]; + const geocodingData = (await geocodingResponse.json()) as GeocodingResponse; - const weatherUrl = `https://api.open-meteo.com/v1/forecast?latitude=${latitude}&longitude=${longitude}&current=temperature_2m,apparent_temperature,relative_humidity_2m,wind_speed_10m,wind_gusts_10m,weather_code`; + if (!geocodingData.results?.[0]) { + throw new Error(`Location '${location}' not found`); + } - const response = await fetch(weatherUrl); - const data = (await response.json()) as WeatherResponse; + const { latitude, longitude, name } = geocodingData.results[0]; - return { - temperature: data.current.temperature_2m, - feelsLike: data.current.apparent_temperature, - humidity: data.current.relative_humidity_2m, - windSpeed: data.current.wind_speed_10m, - windGust: data.current.wind_gusts_10m, - conditions: getWeatherCondition(data.current.weather_code), - location: name, - }; + const weatherUrl = `https://api.open-meteo.com/v1/forecast?latitude=${latitude}&longitude=${longitude}&current=temperature_2m,apparent_temperature,relative_humidity_2m,wind_speed_10m,wind_gusts_10m,weather_code&timezone=auto`; + + const response = await fetch(weatherUrl); + + if (!response.ok) { + throw new Error(`Weather API failed with status: ${response.status}`); + } + + const data = (await response.json()) as WeatherResponse; + + // Add validation to check if the response has the expected structure + if (!data || !data.current) { + console.error('Invalid weather API response:', JSON.stringify(data, null, 2)); + throw new Error(`Invalid weather data received for location '${location}'`); + } + + const current = data.current; + + // Validate that all required fields are present + if (current.temperature_2m === undefined || current.temperature_2m === null) { + console.error('Missing temperature data in response:', JSON.stringify(current, null, 2)); + throw new Error(`Temperature data not available for location 
'${location}'`); + } + + return { + temperature: current.temperature_2m, + feelsLike: current.apparent_temperature ?? current.temperature_2m, + humidity: current.relative_humidity_2m ?? 0, + windSpeed: current.wind_speed_10m ?? 0, + windGust: current.wind_gusts_10m ?? 0, + conditions: getWeatherCondition(current.weather_code ?? 0), + location: name, + }; + } catch (error) { + console.error(`Weather tool error for location '${location}':`, error); + throw new Error(`Failed to get weather data for '${location}': ${error instanceof Error ? error.message : 'Unknown error'}`); + } }; function getWeatherCondition(code: number): string { From d5f1bce0dae7a6433ce21b2380992a8ec3991c25 Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 18:29:38 -0300 Subject: [PATCH 28/34] chore: Install only chromium Signed-off-by: Luis Valdes --- typescript-sdk/apps/dojo/e2e/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/typescript-sdk/apps/dojo/e2e/package.json b/typescript-sdk/apps/dojo/e2e/package.json index 8ea54b975..ef79527c0 100644 --- a/typescript-sdk/apps/dojo/e2e/package.json +++ b/typescript-sdk/apps/dojo/e2e/package.json @@ -4,7 +4,7 @@ "private": true, "description": "Scheduled Playwright smoke tests for CopilotKit demo apps", "scripts": { - "postinstall": "playwright install --with-deps", + "postinstall": "playwright install --with-deps chromium", "test": "playwright test", "test:ui": "playwright test --ui", "report": "playwright show-report" From c07b23a6c4c751463b9344d1d3f19ff4c1f8b58a Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 20:01:30 -0300 Subject: [PATCH 29/34] fix: Add state Signed-off-by: Luis Valdes --- .../pydantic-ai/examples/server/api/agentic_chat.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py b/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py index c91a84ad2..b0fbddc19 100644 --- a/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py +++ b/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py @@ -2,12 +2,21 @@ from __future__ import annotations +from dataclasses import dataclass from datetime import datetime +from typing import Any from zoneinfo import ZoneInfo from pydantic_ai import Agent -agent = Agent('openai:gpt-4o-mini') + +@dataclass +class ChatState: + """State handler for the agentic chat agent.""" + state: dict[str, Any] + + +agent = Agent('openai:gpt-4o-mini', deps_type=ChatState) app = agent.to_ag_ui() From fd8be8cbd4cae826d5b1d7dcd3a44e577e8c6e5e Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 20:06:28 -0300 Subject: [PATCH 30/34] fix: Add state Signed-off-by: Luis Valdes --- .../pydantic-ai/examples/server/api/agentic_chat.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py b/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py index b0fbddc19..697e4d4ae 100644 --- a/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py +++ b/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py @@ -16,8 +16,15 @@ class ChatState: state: dict[str, Any] +# Create agent with proper dependency type agent = Agent('openai:gpt-4o-mini', deps_type=ChatState) -app = agent.to_ag_ui() + +# Create AG-UI app with default state +def create_deps() -> ChatState: + """Create 
default dependencies for the agent.""" + return ChatState(state={}) + +app = agent.to_ag_ui(deps=create_deps) @agent.tool_plain From 019563d6c018bfa0639f27ca38ebb005d52ef9f9 Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 20:31:07 -0300 Subject: [PATCH 31/34] Add deps Signed-off-by: Luis Valdes --- .../examples/server/api/agentic_chat.py | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py b/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py index 697e4d4ae..04e755614 100644 --- a/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py +++ b/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py @@ -7,24 +7,26 @@ from typing import Any from zoneinfo import ZoneInfo -from pydantic_ai import Agent +from pydantic_ai import Agent, RunDeps @dataclass -class ChatState: - """State handler for the agentic chat agent.""" - state: dict[str, Any] +class State: + pass -# Create agent with proper dependency type -agent = Agent('openai:gpt-4o-mini', deps_type=ChatState) +@dataclass +class Deps(RunDeps): + """Dependencies that implement StateHandler protocol.""" + state: State -# Create AG-UI app with default state -def create_deps() -> ChatState: - """Create default dependencies for the agent.""" - return ChatState(state={}) -app = agent.to_ag_ui(deps=create_deps) +# Create agent with proper dependency type +agent = Agent[str, Deps]('openai:gpt-4o-mini', deps_type=Deps) + +# Create AG-UI app with proper dataclass instance +deps_instance = Deps(state=State()) +app = agent.to_ag_ui(deps=deps_instance) @agent.tool_plain From baf5ab66a5ec0aedcb87aa51cc4b7324822ec30a Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 20:36:08 -0300 Subject: [PATCH 32/34] fix: Remove import Signed-off-by: Luis Valdes --- .../pydantic-ai/examples/server/api/agentic_chat.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py b/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py index 04e755614..70e999c4f 100644 --- a/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py +++ b/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py @@ -7,7 +7,7 @@ from typing import Any from zoneinfo import ZoneInfo -from pydantic_ai import Agent, RunDeps +from pydantic_ai import Agent @dataclass @@ -16,7 +16,7 @@ class State: @dataclass -class Deps(RunDeps): +class Deps """Dependencies that implement StateHandler protocol.""" state: State From ad03b68e943ef3b6959bd24725034229c1559f1a Mon Sep 17 00:00:00 2001 From: Luis Valdes Date: Thu, 18 Sep 2025 20:42:44 -0300 Subject: [PATCH 33/34] Fix Signed-off-by: Luis Valdes --- .../pydantic-ai/examples/server/api/agentic_chat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py b/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py index 70e999c4f..c2dedefc1 100644 --- a/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py +++ b/typescript-sdk/integrations/pydantic-ai/examples/server/api/agentic_chat.py @@ -16,7 +16,7 @@ class State: @dataclass -class Deps +class Deps: """Dependencies that implement StateHandler protocol.""" state: State From feba8aeaa509d50d8267b8d9b284d1148b0faaca Mon Sep 17 00:00:00 
2001 From: Luis Valdes Date: Fri, 19 Sep 2025 12:42:45 -0300 Subject: [PATCH 34/34] chore: Add json file Signed-off-by: Luis Valdes --- typescript-sdk/apps/dojo/src/files.json | 42 ++++++++++++------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/typescript-sdk/apps/dojo/src/files.json b/typescript-sdk/apps/dojo/src/files.json index 358055a4f..7ae4fe3d0 100644 --- a/typescript-sdk/apps/dojo/src/files.json +++ b/typescript-sdk/apps/dojo/src/files.json @@ -46,7 +46,7 @@ }, { "name": "agentic_chat.py", - "content": "\"\"\"Agentic Chat feature.\"\"\"\n\nfrom __future__ import annotations\n\nfrom datetime import datetime\nfrom zoneinfo import ZoneInfo\n\nfrom pydantic_ai import Agent\n\nagent = Agent('openai:gpt-4o-mini')\napp = agent.to_ag_ui()\n\n\n@agent.tool_plain\nasync def current_time(timezone: str = 'UTC') -> str:\n \"\"\"Get the current time in ISO format.\n\n Args:\n timezone: The timezone to use.\n\n Returns:\n The current time in ISO format string.\n \"\"\"\n tz: ZoneInfo = ZoneInfo(timezone)\n return datetime.now(tz=tz).isoformat()\n", + "content": "\"\"\"Agentic Chat feature.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom typing import Any\nfrom zoneinfo import ZoneInfo\n\nfrom pydantic_ai import Agent\n\n\n@dataclass\nclass State:\n pass\n\n\n@dataclass\nclass Deps:\n \"\"\"Dependencies that implement StateHandler protocol.\"\"\"\n state: State\n\n\n# Create agent with proper dependency type\nagent = Agent[str, Deps]('openai:gpt-4o-mini', deps_type=Deps)\n\n# Create AG-UI app with proper dataclass instance\ndeps_instance = Deps(state=State())\napp = agent.to_ag_ui(deps=deps_instance)\n\n\n@agent.tool_plain\nasync def current_time(timezone: str = 'UTC') -> str:\n \"\"\"Get the current time in ISO format.\n\n Args:\n timezone: The timezone to use.\n\n Returns:\n The current time in ISO format string.\n \"\"\"\n tz: ZoneInfo = ZoneInfo(timezone)\n return datetime.now(tz=tz).isoformat()\n", "language": "python", "type": "file" } @@ -488,7 +488,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. 
Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langchain_core.tools import tool\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n\n\n@tool\ndef change_background(background: str) -> str: # pylint: disable=unused-argument\n \"\"\"\n Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\n\n Args:\n background: str: The background color to change to. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\n \"\"\" # pylint: disable=line-too-long\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. 
Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n change_background,\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -520,7 +520,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. 
Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. \n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", 
chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. 
\n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -552,7 +552,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom 
langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in imperative form (i.e. Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any] = []\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if 
isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final response\n return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running 
in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in imperative form (i.e. Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any] = []\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages 
with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final response\n return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # 
Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -584,7 +584,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 
2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n \"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", + "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph 
import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n 
\"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", "language": "python", "type": "file" }, @@ -616,7 +616,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list 
of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n if \"tools\" not in state:\n state[\"tools\"] = []\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"],\n \"tools\": state[\"tools\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
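start_node above shares its freshly initialized state with the frontend by dispatching a custom event before routing on. A minimal sketch of that emission step in isolation, assuming the same manually_emit_intermediate_state event name this patch uses throughout:

from langchain_core.callbacks.manager import adispatch_custom_event
from langchain_core.runnables import RunnableConfig


async def emit_state(state: dict, config: RunnableConfig) -> None:
    """Push a mid-run state snapshot to the frontend stream without ending the node."""
    # Event name the dojo frontend subscribes to (taken from this patch)
    await adispatch_custom_event("manually_emit_intermediate_state", state, config=config)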
dont describe the recipe, just say what you did.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"recipe\",\n \"tool\": \"generate_recipe\",\n \"tool_argument\": \"recipe\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_recipe\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n # Check if args is already a dict or needs to be parsed\n tool_call_args = (tool_call[\"args\"]\n if isinstance(tool_call[\"args\"], dict)\n else json.loads(tool_call[\"args\"]))\n\n if tool_call[\"name\"] == \"generate_recipe\":\n # Update recipe state with tool_call_args\n recipe_data = tool_call_args[\"recipe\"]\n\n # If we have an existing recipe, update it\n if \"recipe\" in state and state[\"recipe\"] is not None:\n recipe = state[\"recipe\"]\n for key, value in recipe_data.items():\n if value is not None: # Only update fields that were provided\n recipe[key] = value\n else:\n # Create a new recipe\n recipe = {\n \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n \"ingredients\": recipe_data.get(\"ingredients\", []),\n \"instructions\": recipe_data.get(\"instructions\", [])\n }\n\n # Add tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Recipe generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Explicitly emit the updated state to ensure it's shared with frontend\n state[\"recipe\"] = recipe\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n # Return command with updated recipe\n return Command(\n goto=\"start_node\",\n update={\n \"messages\": messages,\n \"recipe\": recipe\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"recipe\": state[\"recipe\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n if \"tools\" not in state:\n state[\"tools\"] = []\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"],\n \"tools\": state[\"tools\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
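The generate_recipe handler that follows merges the model's recipe into the existing state field by field, treating None as "keep the previous value". The same rule as a standalone sketch; merge_recipe is a hypothetical name, not part of the patch:

from typing import Any, Dict, Optional


def merge_recipe(existing: Optional[Dict[str, Any]], update: Dict[str, Any]) -> Dict[str, Any]:
    """Overwrite only the fields the tool call actually provided."""
    if existing is None:
        return dict(update)
    merged = dict(existing)
    for key, value in update.items():
        if value is not None:  # None means "keep the previous value"
            merged[key] = value
    return merged


# A partial update leaves untouched fields intact:
before = {"cooking_time": "15 min", "changes": "initial draft"}
after = merge_recipe(before, {"cooking_time": "30 min", "changes": None})
assert after == {"cooking_time": "30 min", "changes": "initial draft"}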
don't describe the recipe, just say what you did.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the generate_recipe tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"recipe\",\n \"tool\": \"generate_recipe\",\n \"tool_argument\": \"recipe\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n generate_recipe\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n # Check if args is already a dict or needs to be parsed\n tool_call_args = (tool_call[\"args\"]\n if isinstance(tool_call[\"args\"], dict)\n else json.loads(tool_call[\"args\"]))\n\n if tool_call[\"name\"] == \"generate_recipe\":\n # Update recipe state with tool_call_args\n recipe_data = tool_call_args[\"recipe\"]\n\n # If we have an existing recipe, update it\n if \"recipe\" in state and state[\"recipe\"] is not None:\n recipe = state[\"recipe\"]\n for key, value in recipe_data.items():\n if value is not None: # Only update fields that were provided\n recipe[key] = value\n else:\n # Create a new recipe\n recipe = {\n \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n \"ingredients\": recipe_data.get(\"ingredients\", []),\n \"instructions\": recipe_data.get(\"instructions\", [])\n }\n\n # Add tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Recipe generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Explicitly emit the updated state to ensure it's shared with frontend\n state[\"recipe\"] = recipe\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n # Return command with updated recipe\n return Command(\n goto=\"start_node\",\n update={\n \"messages\": messages,\n \"recipe\": recipe\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"recipe\": state[\"recipe\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -648,7 +648,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n# This tool generates a haiku on the server.\n# The tool call will be streamed to the frontend as it is being generated.\nGENERATE_HAIKU_TOOL = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"generate_haiku\",\n \"description\": \"Generate a haiku in Japanese and its English translation\",\n \"parameters\": 
{\n \"type\": \"object\",\n \"properties\": {\n \"japanese\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"An array of three lines of the haiku in Japanese\"\n },\n \"english\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"An array of three lines of the haiku in English\"\n },\n \"image_names\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"Names of 3 relevant images from the provided list\"\n }\n },\n \"required\": [\"japanese\", \"english\", \"image_names\"]\n }\n }\n}\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n GENERATE_HAIKU_TOOL,\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -680,7 +680,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n model: str\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. 
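GENERATE_HAIKU_TOOL above is a raw OpenAI-style function schema rather than an @tool callable: binding the dict gives the model something to call while leaving execution entirely to the frontend. A minimal sketch of that binding, with the schema abbreviated and the variable names illustrative:

from langchain_openai import ChatOpenAI

# Same OpenAI function format as GENERATE_HAIKU_TOOL, trimmed for brevity
HAIKU_TOOL = {
    "type": "function",
    "function": {
        "name": "generate_haiku",
        "description": "Generate a haiku in Japanese and its English translation",
        "parameters": {
            "type": "object",
            "properties": {
                "japanese": {"type": "array", "items": {"type": "string"}},
                "english": {"type": "array", "items": {"type": "string"}},
            },
            "required": ["japanese", "english"],
        },
    },
}

# bind_tools accepts raw dict schemas alongside @tool callables; the model can
# emit the call, but nothing runs server-side: the frontend renders the result.
model = ChatOpenAI(model="gpt-4o").bind_tools([HAIKU_TOOL])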
It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n\n # 1. Define the model\n model = ChatOpenAI(model=\"o3\")\n if state[\"model\"] == \"Anthropic\":\n model = ChatAnthropic(\n model=\"claude-sonnet-4-20250514\",\n thinking={\"type\": \"enabled\", \"budget_tokens\": 2000}\n )\n elif state[\"model\"] == \"Gemini\":\n model = ChatGoogleGenerativeAI(model=\"gemini-2.5-pro\", thinking_budget=1024)\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()", + "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n model: str\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n\n # 1. 
Define the model\n model = ChatOpenAI(model=\"o3\")\n if state[\"model\"] == \"Anthropic\":\n model = ChatAnthropic(\n model=\"claude-sonnet-4-20250514\",\n thinking={\"type\": \"enabled\", \"budget_tokens\": 2000}\n )\n elif state[\"model\"] == \"Gemini\":\n model = ChatGoogleGenerativeAI(model=\"gemini-2.5-pro\", thinking_budget=1024)\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n # your_tool_here\n ],\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()", "language": "python", "type": "file" }, @@ -739,7 +739,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. 
Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langchain_core.tools import tool\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n\n\n@tool\ndef change_background(background: str) -> str: # pylint: disable=unused-argument\n \"\"\"\n Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients etc.\n\n Args:\n background: str: The background color to change to. Can be anything that the CSS background attribute accepts. Regular colors, linear or radial gradients etc.\n \"\"\" # pylint: disable=line-too-long\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n change_background,\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. 
Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" } @@ -765,7 +765,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. 
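change_background above is declared with @tool but left with an empty body on purpose: only the generated schema reaches the model, and the AG-UI frontend intercepts and executes the call in the browser. A minimal sketch of the same pattern; set_chat_background is a hypothetical example, not part of the patch:

from langchain_core.tools import tool


@tool
def set_chat_background(background: str) -> str:  # pylint: disable=unused-argument
    """Set the chat background. Accepts any CSS background value."""
    # Intentionally no body: the signature and docstring produce the schema
    # the model calls, and the frontend handles the actual effect.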
\n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. \n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nAn example demonstrating 
agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. 
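The predict_state metadata set just below maps one streaming tool argument onto one shared-state key, so partial tool-call chunks render in the frontend as they arrive. A sketch of the mapping shape; the setdefault guard is an added assumption for the fallback path, since a freshly constructed RunnableConfig has no "metadata" dict yet:

from langchain_core.runnables import RunnableConfig

config = RunnableConfig(recursion_limit=25)
# RunnableConfig is a TypedDict, so plain dict access works at runtime;
# setdefault avoids a KeyError when no caller-supplied config exists.
config.setdefault("metadata", {})["predict_state"] = [{
    "state_key": "steps",                         # shared-state field to fill
    "tool": "generate_task_steps_generative_ui",  # tool whose call streams
    "tool_argument": "steps",                     # argument mirrored into state
}]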
\n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" } @@ -791,7 +791,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom 
langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in imperative form (i.e. Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any] = []\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if 
isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final response\n return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running 
in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in imperative form (i.e. Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any] = []\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages 
with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final response\n return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # 
Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" } @@ -817,7 +817,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 
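The human-in-the-loop graph above pauses inside process_steps_node via interrupt() and resumes when the client replies. A minimal sketch of driving that round trip from Python, assuming the graph was compiled with the MemorySaver checkpointer shown above (interrupts require one) and LangGraph's Command(resume=...) API; the thread id and user message are made up:

    import asyncio
    from langgraph.types import Command

    async def demo():
        config = {"configurable": {"thread_id": "hitl-demo"}}

        # First pass: execution stops at interrupt({"steps": ...}) and the
        # pending payload is surfaced to the caller.
        await graph.ainvoke(
            {"messages": [{"role": "user", "content": "Plant a tree"}]},
            config,
        )

        # Resume: the value passed here becomes interrupt()'s return value
        # inside process_steps_node.
        result = await graph.ainvoke(
            Command(resume="All steps enabled, go ahead"), config
        )
        print(result["messages"][-1].content)

    asyncio.run(demo())
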
2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n \"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", + "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph 
import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n 
\"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", "language": "python", "type": "file" } @@ -843,7 +843,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list 
of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n if \"tools\" not in state:\n state[\"tools\"] = []\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"],\n \"tools\": state[\"tools\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
dont describe the recipe, just say what you did.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"recipe\",\n \"tool\": \"generate_recipe\",\n \"tool_argument\": \"recipe\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_recipe\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n # Check if args is already a dict or needs to be parsed\n tool_call_args = (tool_call[\"args\"]\n if isinstance(tool_call[\"args\"], dict)\n else json.loads(tool_call[\"args\"]))\n\n if tool_call[\"name\"] == \"generate_recipe\":\n # Update recipe state with tool_call_args\n recipe_data = tool_call_args[\"recipe\"]\n\n # If we have an existing recipe, update it\n if \"recipe\" in state and state[\"recipe\"] is not None:\n recipe = state[\"recipe\"]\n for key, value in recipe_data.items():\n if value is not None: # Only update fields that were provided\n recipe[key] = value\n else:\n # Create a new recipe\n recipe = {\n \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n \"ingredients\": recipe_data.get(\"ingredients\", []),\n \"instructions\": recipe_data.get(\"instructions\", [])\n }\n\n # Add tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Recipe generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Explicitly emit the updated state to ensure it's shared with frontend\n state[\"recipe\"] = recipe\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n # Return command with updated recipe\n return Command(\n goto=\"start_node\",\n update={\n \"messages\": messages,\n \"recipe\": recipe\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"recipe\": state[\"recipe\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n if \"tools\" not in state:\n state[\"tools\"] = []\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"],\n \"tools\": state[\"tools\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
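The shared-state agent streams the recipe to the UI in two complementary ways: the predict_state metadata mirrors the recipe argument of generate_recipe into shared state while the tool call is still streaming, and adispatch_custom_event pushes explicit snapshots. A minimal sketch of the explicit path, assuming the same event name the nodes above dispatch; the helper itself is illustrative, not part of the diff:

    from langchain_core.callbacks.manager import adispatch_custom_event

    async def emit_recipe_snapshot(state, config):
        # Any JSON-serializable payload can be dispatched; the runtime
        # forwards it to the frontend as an intermediate state snapshot.
        await adispatch_custom_event(
            "manually_emit_intermediate_state",
            {"recipe": state.get("recipe")},
            config=config,
        )
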
don't describe the recipe, just say what you did.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the generate_recipe tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"recipe\",\n \"tool\": \"generate_recipe\",\n \"tool_argument\": \"recipe\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n generate_recipe\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n # Handle dict or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n # Check if args is already a dict or needs to be parsed\n tool_call_args = (tool_call[\"args\"]\n if isinstance(tool_call[\"args\"], dict)\n else json.loads(tool_call[\"args\"]))\n\n if tool_call[\"name\"] == \"generate_recipe\":\n # Update recipe state with tool_call_args\n recipe_data = tool_call_args[\"recipe\"]\n\n # If we have an existing recipe, update it\n if \"recipe\" in state and state[\"recipe\"] is not None:\n recipe = state[\"recipe\"]\n for key, value in recipe_data.items():\n if value is not None: # Only update fields that were provided\n recipe[key] = value\n else:\n # Create a new recipe\n recipe = {\n \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n \"ingredients\": recipe_data.get(\"ingredients\", []),\n \"instructions\": recipe_data.get(\"instructions\", [])\n }\n\n # Add tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Recipe generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Explicitly emit the updated state to ensure it's shared with frontend\n state[\"recipe\"] = recipe\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n # Return command with updated recipe\n return Command(\n goto=\"start_node\",\n update={\n \"messages\": messages,\n \"recipe\": recipe\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"recipe\": state[\"recipe\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" } @@ -869,7 +869,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n# This tool generates a haiku on the server.\n# The tool call will be streamed to the frontend as it is being generated.\nGENERATE_HAIKU_TOOL = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"generate_haiku\",\n \"description\": \"Generate a haiku in Japanese and its English translation\",\n \"parameters\": 
{\n \"type\": \"object\",\n \"properties\": {\n \"japanese\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"An array of three lines of the haiku in Japanese\"\n },\n \"english\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"An array of three lines of the haiku in English\"\n },\n \"image_names\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"Names of 3 relevant images from the provided list\"\n }\n },\n \"required\": [\"japanese\", \"english\", \"image_names\"]\n }\n }\n}\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n GENERATE_HAIKU_TOOL,\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" } @@ -895,7 +895,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n model: str\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. 
It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n\n # 1. Define the model\n model = ChatOpenAI(model=\"o3\")\n if state[\"model\"] == \"Anthropic\":\n model = ChatAnthropic(\n model=\"claude-sonnet-4-20250514\",\n thinking={\"type\": \"enabled\", \"budget_tokens\": 2000}\n )\n elif state[\"model\"] == \"Gemini\":\n model = ChatGoogleGenerativeAI(model=\"gemini-2.5-pro\", thinking_budget=1024)\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()", + "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n model: str\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n\n # 1. 
Define the model\n model = ChatOpenAI(model=\"o3\")\n if state[\"model\"] == \"Anthropic\":\n model = ChatAnthropic(\n model=\"claude-sonnet-4-20250514\",\n thinking={\"type\": \"enabled\", \"budget_tokens\": 2000}\n )\n elif state[\"model\"] == \"Gemini\":\n model = ChatGoogleGenerativeAI(model=\"gemini-2.5-pro\", thinking_budget=1024)\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n # your_tool_here\n ],\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()", "language": "python", "type": "file" } @@ -947,7 +947,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. 
Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\n# Updated imports for LangGraph\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langchain_core.tools import tool\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any] = []\n\n\n@tool\ndef change_background(background: str) -> str: # pylint: disable=unused-argument\n \"\"\"\n Change the background color of the chat. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\n\n Args:\n background: str: The background color to change to. Can be anything that the CSS background attribute accepts. Regular colors, linear of radial gradients etc.\n \"\"\" # pylint: disable=line-too-long\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see: \n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n # 1. Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n change_background,\n # your_tool_here\n ],\n\n # 2.1 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. 
Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -979,7 +979,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. 
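Every agent in this patch gates its checkpointer on LANGGRAPH_FAST_API, and the flag is read at import time, when the module-level compile runs. A sketch of the two call-site consequences; the module name agent is hypothetical:

    import os

    # Must be set before the module is imported, since the graph is compiled
    # at module load.
    os.environ["LANGGRAPH_FAST_API"] = "true"

    from agent import graph  # hypothetical import of one of the files above

    # With MemorySaver attached, every invocation needs a thread_id so the
    # checkpointer can persist and resume the conversation.
    config = {"configurable": {"thread_id": "session-1"}}
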
\n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. \n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Return Command to route to simulate_task_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nAn example demonstrating 
agentic generative UI using LangGraph.\n\"\"\"\n\nimport asyncio\nfrom typing import List, Any, Optional, Annotated\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in gerund form\")\n status: str = Field(description=\"The status of the step, always 'pending'\")\n\n\n\n# This tool simulates performing a task on the server.\n# The tool call will be streamed to the frontend as it is being generated.\n@tool\ndef generate_task_steps_generative_ui(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in gerund form (i.e. Digging hole, opening door, ...).\n \"\"\"\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[dict] = []\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant assisting with any task. \n When asked to do something, you MUST call the function `generate_task_steps_generative_ui`\n that was provided to you.\n If you called the function, you MUST NOT repeat the steps in your next response to the user.\n Just give a very brief summary (one sentence) of what you did with some emojis. 
\n Always say you actually did the steps, not merely generated them.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the generate_task_steps_generative_ui tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"generate_task_steps_generative_ui\",\n \"tool_argument\": \"steps\",\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n generate_task_steps_generative_ui\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dict or object tool calls (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"generate_task_steps_generative_ui\":\n steps = [\n {\"description\": step[\"description\"], \"status\": step[\"status\"]}\n for step in tool_call[\"args\"][\"steps\"]\n ]\n\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Steps executed.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n state[\"steps\"] = steps\n\n # Simulate executing each step, then route back to start_node\n for i, _ in enumerate(steps):\n # simulate executing the step\n await asyncio.sleep(1)\n steps[i][\"status\"] = \"completed\"\n # Update the state with the completed step using config\n await adispatch_custom_event(\n \"manually_emit_state\",\n state,\n config=config,\n )\n\n return Command(\n goto='start_node',\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -1011,7 +1011,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom 
langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in imperative form (i.e. Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any] = []\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if 
isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final response\n return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running 
in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA LangGraph implementation of the human-in-the-loop agent.\n\"\"\"\n\nfrom typing import Dict, List, Any, Annotated, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command, interrupt\nfrom langgraph.graph import MessagesState\nfrom langchain_openai import ChatOpenAI\nfrom pydantic import BaseModel, Field\n\nclass Step(BaseModel):\n \"\"\"\n A step in a task.\n \"\"\"\n description: str = Field(description=\"The text of the step in imperative form\")\n status: str = Field(description=\"The status of the step, always 'enabled'\")\n\n@tool\ndef plan_execution_steps(\n steps: Annotated[ # pylint: disable=unused-argument\n List[Step],\n \"An array of 10 step objects, each containing text and status\"\n ]\n):\n \"\"\"\n Make up 10 steps (only a couple of words per step) that are required for a task.\n The step should be in imperative form (i.e. Dig hole, Open door, ...).\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n steps: List[Dict[str, str]] = []\n tools: List[Any] = []\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize steps list if not exists\n if \"steps\" not in state:\n state[\"steps\"] = []\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n # Return command to route to chat_node\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"steps\": state[\"steps\"],\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node where the agent processes messages and generates responses.\n If task steps are defined, the user can enable/disable them using interrupts.\n \"\"\"\n system_prompt = \"\"\"\n You are a helpful assistant that can perform any task.\n You MUST call the `plan_execution_steps` function when the user asks you to perform a task.\n Always make sure you will provide tasks based on the user query\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the plan_execution_steps tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"steps\",\n \"tool\": \"plan_execution_steps\",\n \"tool_argument\": \"steps\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n plan_execution_steps\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages 
with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls and len(response.tool_calls) > 0:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n if tool_call[\"name\"] == \"plan_execution_steps\":\n # Get the steps from the tool call\n steps_raw = tool_call[\"args\"][\"steps\"]\n\n # Set initial status to \"enabled\" for all steps\n steps_data = []\n\n # Handle different potential formats of steps data\n if isinstance(steps_raw, list):\n for step in steps_raw:\n if isinstance(step, dict) and \"description\" in step:\n steps_data.append({\n \"description\": step[\"description\"],\n \"status\": \"enabled\"\n })\n elif isinstance(step, str):\n steps_data.append({\n \"description\": step,\n \"status\": \"enabled\"\n })\n\n # If no steps were processed correctly, return to END with the updated messages\n if not steps_data:\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n # Update steps in state and emit to frontend\n state[\"steps\"] = steps_data\n\n # Add a tool response to satisfy OpenAI's requirements\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Task steps generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Move to the process_steps_node which will handle the interrupt and final response\n return Command(\n goto=\"process_steps_node\",\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n # If no tool calls or not plan_execution_steps, return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\nasync def process_steps_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This node handles the user interrupt for step customization and generates the final response.\n \"\"\"\n\n # Check if we already have a user_response in the state\n # This happens when the node restarts after an interrupt\n if \"user_response\" in state and state[\"user_response\"]:\n user_response = state[\"user_response\"]\n else:\n # Use LangGraph interrupt to get user input on steps\n # This will pause execution and wait for user input in the frontend\n user_response = interrupt({\"steps\": state[\"steps\"]})\n # Store the user response in state for when the node restarts\n state[\"user_response\"] = user_response\n\n # Generate the creative completion response\n final_prompt = \"\"\"\n Provide a textual description of how you are performing the task.\n If the user has disabled a step, you are not allowed to perform that step.\n However, you should find a creative workaround to perform the task, and if an essential step is disabled, you can even use\n some humor in the description of how you are performing the task.\n Don't just repeat a list of steps, come up with a creative but short description (3 sentences max) of how you are performing the task.\n \"\"\"\n\n final_response = await ChatOpenAI(model=\"gpt-4o\").ainvoke([\n SystemMessage(content=final_prompt),\n {\"role\": \"user\", \"content\": user_response}\n ], config)\n\n # Add the final response to messages\n messages = state[\"messages\"] + [final_response]\n\n # Clear the user_response from state to prepare for future interactions\n if \"user_response\" in state:\n state.pop(\"user_response\")\n\n # 
Return to END with the updated messages\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"steps\": state[\"steps\"],\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\n\n# Add nodes\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.add_node(\"process_steps_node\", process_steps_node)\n\n# Add edges\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"process_steps_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -1043,7 +1043,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 
2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n \"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", + "content": "\"\"\"\nA demo of predictive state updates using LangGraph.\n\"\"\"\n\nimport uuid\nfrom typing import List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langgraph.graph 
import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\nfrom langchain_openai import ChatOpenAI\n\n@tool\ndef write_document_local(document: str): # pylint: disable=unused-argument\n \"\"\"\n Write a document. Use markdown formatting to format the document.\n It's good to format the document extensively so it's easy to read.\n You can use all kinds of markdown.\n However, do not use italic or strike-through formatting, it's reserved for another purpose.\n You MUST write the full document, even when changing only a few words.\n When making edits to the document, try to make them minimal - do not change every word.\n Keep stories SHORT!\n \"\"\"\n return document\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the agent.\n \"\"\"\n document: Optional[str] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: AgentState, config: RunnableConfig): # pylint: disable=unused-argument\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n if \"tools\" not in state:\n state[\"tools\"] = []\n\n return Command(\n goto=\"chat_node\",\n update={\n \"tools\": state[\"tools\"]\n }\n )\n\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node.\n \"\"\"\n\n system_prompt = f\"\"\"\n You are a helpful assistant for writing documents.\n To write the document, you MUST use the write_document_local tool.\n You MUST write the full document, even when changing only a few words.\n When you wrote the document, DO NOT repeat it as a message.\n Just briefly summarize the changes you made. 2 sentences max.\n This is the current state of the document: ----\\n {state.get('document')}\\n-----\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o\")\n\n # Define config for the model with emit_intermediate_state to stream tool calls to frontend\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document_local tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"document\",\n \"tool\": \"write_document_local\",\n \"tool_argument\": \"document\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n write_document_local\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model to generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Extract any tool calls from the response\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n tool_call = response.tool_calls[0]\n\n # Handle tool_call as a dictionary or an object\n if isinstance(tool_call, dict):\n tool_call_id = tool_call[\"id\"]\n tool_call_name = tool_call[\"name\"]\n tool_call_args = tool_call[\"args\"]\n else:\n # Handle as an object (backward compatibility)\n tool_call_id = tool_call.id\n tool_call_name = tool_call.name\n tool_call_args = tool_call.args\n\n if tool_call_name == \"write_document_local\":\n # Add the tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Document written.\",\n \"tool_call_id\": tool_call_id\n }\n\n # Add confirmation tool call\n confirm_tool_call = {\n \"role\": \"assistant\",\n \"content\": \"\",\n 
\"tool_calls\": [{\n \"id\": str(uuid.uuid4()),\n \"function\": {\n \"name\": \"confirm_changes\",\n \"arguments\": \"{}\"\n }\n }]\n }\n\n messages = messages + [tool_response, confirm_tool_call]\n\n # Return Command to route to end\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"document\": tool_call_args[\"document\"]\n }\n )\n\n # If no tool was called, go to end\n return Command(\n goto=END,\n update={\n \"messages\": messages\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n\n", "language": "python", "type": "file" }, @@ -1075,7 +1075,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire 
list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n if \"tools\" not in state:\n state[\"tools\"] = []\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"],\n \"tools\": state[\"tools\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
dont describe the recipe, just say what you did.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the write_document tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"recipe\",\n \"tool\": \"generate_recipe\",\n \"tool_argument\": \"recipe\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n generate_recipe\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n # Handle dicts or object (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n # Check if args is already a dict or needs to be parsed\n tool_call_args = (tool_call[\"args\"]\n if isinstance(tool_call[\"args\"], dict)\n else json.loads(tool_call[\"args\"]))\n\n if tool_call[\"name\"] == \"generate_recipe\":\n # Update recipe state with tool_call_args\n recipe_data = tool_call_args[\"recipe\"]\n\n # If we have an existing recipe, update it\n if \"recipe\" in state and state[\"recipe\"] is not None:\n recipe = state[\"recipe\"]\n for key, value in recipe_data.items():\n if value is not None: # Only update fields that were provided\n recipe[key] = value\n else:\n # Create a new recipe\n recipe = {\n \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n \"ingredients\": recipe_data.get(\"ingredients\", []),\n \"instructions\": recipe_data.get(\"instructions\", [])\n }\n\n # Add tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Recipe generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Explicitly emit the updated state to ensure it's shared with frontend\n state[\"recipe\"] = recipe\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n # Return command with updated recipe\n return Command(\n goto=\"start_node\",\n update={\n \"messages\": messages,\n \"recipe\": recipe\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"recipe\": state[\"recipe\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nA demo of shared state between the agent and CopilotKit using LangGraph.\n\"\"\"\n\nimport json\nfrom enum import Enum\nfrom typing import Dict, List, Any, Optional\nimport os\n\n# LangGraph imports\nfrom pydantic import BaseModel, Field\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.callbacks.manager import adispatch_custom_event\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.tools import tool\nfrom langchain_openai import ChatOpenAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass SkillLevel(str, Enum):\n \"\"\"\n The level of skill required for the recipe.\n \"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\nclass SpecialPreferences(str, Enum):\n \"\"\"\n Special preferences for the recipe.\n \"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\nclass CookingTime(str, Enum):\n \"\"\"\n The cooking time of the recipe.\n \"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\nclass Ingredient(BaseModel):\n \"\"\"\n An ingredient.\n \"\"\"\n icon: str = Field(\n description=\"Icon: the actual emoji like 🥕\"\n )\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\nclass Recipe(BaseModel):\n \"\"\"\n A recipe.\n \"\"\"\n skill_level: SkillLevel = \\\n Field(description=\"The skill level required for the recipe\")\n special_preferences: List[SpecialPreferences] = \\\n Field(description=\"A list of special preferences for the recipe\")\n cooking_time: CookingTime = \\\n Field(description=\"The cooking time of the recipe\")\n ingredients: List[Ingredient] = \\\n Field(description=\n \"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon: the actual emoji like 🥕,\n name and amount.\n Like so: 🥕 Carrots (250g)\"\"\"\n )\n instructions: List[str] = \\\n Field(description=\n \"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = \\\n Field(description=\"A description of the changes made to the recipe\")\n\nclass GenerateRecipeArgs(BaseModel): # pylint: disable=missing-class-docstring\n recipe: Recipe\n\n@tool(args_schema=GenerateRecipeArgs)\ndef generate_recipe(recipe: Recipe): # pylint: disable=unused-argument\n \"\"\"\n Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. 
ALWAYS provide the entire recipe, not just the changes.\n \"\"\"\n\nclass AgentState(MessagesState):\n \"\"\"\n The state of the recipe.\n \"\"\"\n recipe: Optional[Dict[str, Any]] = None\n tools: List[Any] = []\n\n\nasync def start_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n This is the entry point for the flow.\n \"\"\"\n\n # Initialize recipe if not exists\n if \"recipe\" not in state or state[\"recipe\"] is None:\n state[\"recipe\"] = {\n \"skill_level\": SkillLevel.BEGINNER.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FIFTEEN_MIN.value,\n \"ingredients\": [{\"icon\": \"🍴\", \"name\": \"Sample Ingredient\", \"amount\": \"1 unit\"}],\n \"instructions\": [\"First step instruction\"]\n }\n if \"tools\" not in state:\n state[\"tools\"] = []\n # Emit the initial state to ensure it's properly shared with the frontend\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n return Command(\n goto=\"chat_node\",\n update={\n \"messages\": state[\"messages\"],\n \"recipe\": state[\"recipe\"],\n \"tools\": state[\"tools\"]\n }\n )\n\nasync def chat_node(state: Dict[str, Any], config: RunnableConfig):\n \"\"\"\n Standard chat node.\n \"\"\"\n # Create a safer serialization of the recipe\n recipe_json = \"No recipe yet\"\n if \"recipe\" in state and state[\"recipe\"] is not None:\n try:\n recipe_json = json.dumps(state[\"recipe\"], indent=2)\n except Exception as e: # pylint: disable=broad-exception-caught\n recipe_json = f\"Error serializing recipe: {str(e)}\"\n\n system_prompt = f\"\"\"You are a helpful assistant for creating recipes. \n This is the current state of the recipe: {recipe_json}\n You can improve the recipe by calling the generate_recipe tool.\n \n IMPORTANT:\n 1. Create a recipe using the existing ingredients and instructions. Make sure the recipe is complete.\n 2. For ingredients, append new ingredients to the existing ones.\n 3. For instructions, append new steps to the existing ones.\n 4. 'ingredients' is always an array of objects with 'icon', 'name', and 'amount' fields\n 5. 'instructions' is always an array of strings\n\n If you have just created or modified the recipe, just answer in one sentence what you did. 
don't describe the recipe, just say what you did.\n \"\"\"\n\n # Define the model\n model = ChatOpenAI(model=\"gpt-4o-mini\")\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # Use \"predict_state\" metadata to set up streaming for the generate_recipe tool\n config[\"metadata\"][\"predict_state\"] = [{\n \"state_key\": \"recipe\",\n \"tool\": \"generate_recipe\",\n \"tool_argument\": \"recipe\"\n }]\n\n # Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []),\n generate_recipe\n ],\n # Disable parallel tool calls to avoid race conditions\n parallel_tool_calls=False,\n )\n\n # Run the model and generate a response\n response = await model_with_tools.ainvoke([\n SystemMessage(content=system_prompt),\n *state[\"messages\"],\n ], config)\n\n # Update messages with the response\n messages = state[\"messages\"] + [response]\n\n # Handle tool calls\n if hasattr(response, \"tool_calls\") and response.tool_calls:\n # Handle dict or object tool calls (backward compatibility)\n tool_call = (response.tool_calls[0]\n if isinstance(response.tool_calls[0], dict)\n else vars(response.tool_calls[0]))\n\n # Check if args is already a dict or needs to be parsed\n tool_call_args = (tool_call[\"args\"]\n if isinstance(tool_call[\"args\"], dict)\n else json.loads(tool_call[\"args\"]))\n\n if tool_call[\"name\"] == \"generate_recipe\":\n # Update recipe state with tool_call_args\n recipe_data = tool_call_args[\"recipe\"]\n\n # If we have an existing recipe, update it\n if \"recipe\" in state and state[\"recipe\"] is not None:\n recipe = state[\"recipe\"]\n for key, value in recipe_data.items():\n if value is not None: # Only update fields that were provided\n recipe[key] = value\n else:\n # Create a new recipe\n recipe = {\n \"skill_level\": recipe_data.get(\"skill_level\", SkillLevel.BEGINNER.value),\n \"special_preferences\": recipe_data.get(\"special_preferences\", []),\n \"cooking_time\": recipe_data.get(\"cooking_time\", CookingTime.FIFTEEN_MIN.value),\n \"ingredients\": recipe_data.get(\"ingredients\", []),\n \"instructions\": recipe_data.get(\"instructions\", [])\n }\n\n # Add tool response to messages\n tool_response = {\n \"role\": \"tool\",\n \"content\": \"Recipe generated.\",\n \"tool_call_id\": tool_call[\"id\"]\n }\n\n messages = messages + [tool_response]\n\n # Explicitly emit the updated state to ensure it's shared with frontend\n state[\"recipe\"] = recipe\n await adispatch_custom_event(\n \"manually_emit_intermediate_state\",\n state,\n config=config,\n )\n\n # Return command with updated recipe\n return Command(\n goto=\"start_node\",\n update={\n \"messages\": messages,\n \"recipe\": recipe\n }\n )\n\n return Command(\n goto=END,\n update={\n \"messages\": messages,\n \"recipe\": state[\"recipe\"]\n }\n )\n\n\n# Define the graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"start_node\", start_node)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"start_node\")\nworkflow.add_edge(START, \"start_node\")\nworkflow.add_edge(\"start_node\", \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = 
MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" }, @@ -1107,7 +1107,7 @@ }, { "name": "agent.py", - "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", + "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n# This tool generates a haiku on the server.\n# The tool call will be streamed to the frontend as it is being generated.\nGENERATE_HAIKU_TOOL = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"generate_haiku\",\n \"description\": \"Generate a haiku in Japanese and its English translation\",\n \"parameters\": 
{\n \"type\": \"object\",\n \"properties\": {\n \"japanese\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"An array of three lines of the haiku in Japanese\"\n },\n \"english\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"An array of three lines of the haiku in English\"\n },\n \"image_names\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"Names of 3 relevant images from the provided list\"\n }\n },\n \"required\": [\"japanese\", \"english\", \"image_names\"]\n }\n }\n}\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any] = []\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n GENERATE_HAIKU_TOOL,\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", "language": "python", "type": "file" },