diff --git a/apps/dojo/scripts/generate-content-json.ts b/apps/dojo/scripts/generate-content-json.ts index 03872e9a6..f7b71d524 100644 --- a/apps/dojo/scripts/generate-content-json.ts +++ b/apps/dojo/scripts/generate-content-json.ts @@ -374,6 +374,21 @@ const agentFilesMapper: Record< {}, ); }, + "aws-strands": (agentKeys: string[]) => { + return agentKeys.reduce( + (acc, agentId) => ({ + ...acc, + [agentId]: [ + path.join( + __dirname, + integrationsFolderPath, + `/aws-strands/python/examples/server/api/${agentId}.py`, + ) + ], + }), + {}, + ); + }, "microsoft-agent-framework-python": (agentKeys: string[]) => { return agentKeys.reduce( (acc, agentId) => ({ @@ -425,6 +440,8 @@ async function runGenerateContent() { const agentFilePaths = agentFilesMapper[agentConfig.id]?.( agentConfig.agentKeys, ); + + console.log(agentConfig.id, agentFilePaths); if (!agentFilePaths) { continue; } diff --git a/apps/dojo/src/files.json b/apps/dojo/src/files.json index 946544043..64bf18b19 100644 --- a/apps/dojo/src/files.json +++ b/apps/dojo/src/files.json @@ -2422,5 +2422,135 @@ "language": "csharp", "type": "file" } + ], + "aws-strands::agentic_chat": [ + { + "name": "page.tsx", + "content": "\"use client\";\nimport React, { useState } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport {\n CopilotKit,\n useFrontendTool,\n} from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n const [background, setBackground] = useState(\"--copilot-kit-background-color\");\n\n useFrontendTool({\n name: \"change_background\",\n description:\n \"Change the background color of the chat. Can be anything that the CSS background attribute accepts. 
Regular colors, linear or radial gradients etc.\",\n parameters: [\n {\n name: \"background\",\n type: \"string\",\n description: \"The background. Prefer gradients. Only use when asked.\",\n },\n ],\n handler: ({ background }) => {\n setBackground(background);\n return {\n status: \"success\",\n message: `Background changed to ${background}`;\n };\n },\n });\n\n return (\n \n
\n \n
\n \n );\n};\n\nexport default AgenticChat;\n", + "language": "typescript", + "type": "file" + }, + { + "name": "style.css", + "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n \n.copilotKitChat {\n background-color: #fff !important;\n}\n ", + "language": "css", + "type": "file" + }, + { + "name": "README.mdx", + "content": "# 🤖 Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interacts with your UI\n by calling frontend functions\n3. **Seamless Integration**: Tools defined in the frontend and automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- 
When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", + "language": "markdown", + "type": "file" + }, + { + "name": "agentic_chat.py", + "content": "\"\"\"Agentic Chat example for AWS Strands.\n\nSimple conversational agent with change_background frontend tool.\n\"\"\"\nimport os\nfrom pathlib import Path\nfrom dotenv import load_dotenv\n\n# Suppress OpenTelemetry context warnings\nos.environ[\"OTEL_SDK_DISABLED\"] = \"true\"\nos.environ[\"OTEL_PYTHON_DISABLED_INSTRUMENTATIONS\"] = \"all\"\n\nfrom strands import Agent, tool\nfrom strands.models.gemini import GeminiModel\nfrom ag_ui_strands import StrandsAgent, create_strands_app\n\n# Load environment variables from .env file\nenv_path = Path(__file__).parent.parent.parent / '.env'\n\nload_dotenv(dotenv_path=env_path)\n\n# Debug: Print API key status (first 10 chars only for security)\napi_key = os.getenv(\"GOOGLE_API_KEY\", \"\")\n\n# Use Gemini model\nmodel = GeminiModel(\n client_args={\n \"api_key\": os.getenv(\"GOOGLE_API_KEY\", \"your-api-key-here\"),\n },\n model_id=\"gemini-2.5-flash\",\n params={\n \"temperature\": 0.7,\n \"max_output_tokens\": 2048,\n \"top_p\": 0.9,\n \"top_k\": 40\n }\n)\n\n\n# Define frontend tool - registered so LLM knows about it, but returns None\n# The actual execution happens on the frontend\n@tool\ndef change_background(background: str):\n \"\"\"\n Change the background color of the chat. Can be anything that the CSS background\n attribute accepts. Regular colors, linear or radial gradients etc.\n\n Args:\n background: The background color or gradient. Prefer gradients. 
Only use when asked.\n \"\"\"\n # Return None - frontend will handle the actual execution\n return None\n\nstrands_agent = Agent(\n model=model,\n tools=[change_background], # Register so LLM knows about it\n system_prompt=\"\"\"\n You are a helpful assistant.\n When the user greets you, always greet them back. Your greeting should always start with \"Hello\".\n Your greeting should also always ask (exact wording) \"how can I assist you?\"\n \"\"\",\n)\n\nagui_agent = StrandsAgent(\n agent=strands_agent,\n name=\"agentic_chat\",\n description=\"Conversational Strands agent with AG-UI streaming\",\n)\n\napp = create_strands_app(agui_agent, \"/\")\n", + "language": "python", + "type": "file" + } + ], + "aws-strands::backend_tool_rendering": [ + { + "name": "page.tsx", + "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCopilotAction } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\n\ninterface AgenticChatProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticChat: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\nconst Chat = () => {\n useCopilotAction({\n name: \"get_weather\",\n available: \"disabled\",\n parameters: [{ name: \"location\", type: \"string\", required: true }],\n render: ({ args, result, status }) => {\n if (status !== \"complete\") {\n return (\n
\n ⚙️ Retrieving weather...\n
\n );\n }\n\n const weatherResult: WeatherToolResult = {\n temperature: result?.temperature || 0,\n conditions: result?.conditions || \"clear\",\n humidity: result?.humidity || 0,\n windSpeed: result?.wind_speed || 0,\n feelsLike: result?.feels_like || result?.temperature || 0,\n };\n\n const themeColor = getThemeColor(weatherResult.conditions);\n\n return (\n \n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\ninterface WeatherToolResult {\n temperature: number;\n conditions: string;\n humidity: number;\n windSpeed: number;\n feelsLike: number;\n}\n\nfunction getThemeColor(conditions: string): string {\n const conditionLower = conditions.toLowerCase();\n if (conditionLower.includes(\"clear\") || conditionLower.includes(\"sunny\")) {\n return \"#667eea\";\n }\n if (conditionLower.includes(\"rain\") || conditionLower.includes(\"storm\")) {\n return \"#4A5568\";\n }\n if (conditionLower.includes(\"cloud\")) {\n return \"#718096\";\n }\n if (conditionLower.includes(\"snow\")) {\n return \"#63B3ED\";\n }\n return \"#764ba2\";\n}\n\nfunction WeatherCard({\n location,\n themeColor,\n result,\n status,\n}: {\n location?: string;\n themeColor: string;\n result: WeatherToolResult;\n status: \"inProgress\" | \"executing\" | \"complete\";\n}) {\n return (\n \n
\n
\n
\n

\n {location}\n

\n

Current Weather

\n
\n \n
\n\n
\n
\n {result.temperature}° C\n \n {\" / \"}\n {((result.temperature * 9) / 5 + 32).toFixed(1)}° F\n \n
\n
{result.conditions}
\n
\n\n
\n
\n
\n

Humidity

\n

{result.humidity}%

\n
\n
\n

Wind

\n

{result.windSpeed} mph

\n
\n
\n

Feels Like

\n

{result.feelsLike}°

\n
\n
\n
\n
\n \n );\n}\n\nfunction WeatherIcon({ conditions }: { conditions: string }) {\n if (!conditions) return null;\n\n if (conditions.toLowerCase().includes(\"clear\") || conditions.toLowerCase().includes(\"sunny\")) {\n return ;\n }\n\n if (\n conditions.toLowerCase().includes(\"rain\") ||\n conditions.toLowerCase().includes(\"drizzle\") ||\n conditions.toLowerCase().includes(\"snow\") ||\n conditions.toLowerCase().includes(\"thunderstorm\")\n ) {\n return ;\n }\n\n if (\n conditions.toLowerCase().includes(\"fog\") ||\n conditions.toLowerCase().includes(\"cloud\") ||\n conditions.toLowerCase().includes(\"overcast\")\n ) {\n return ;\n }\n\n return ;\n}\n\n// Simple sun icon for the weather card\nfunction SunIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction RainIcon() {\n return (\n \n {/* Cloud */}\n \n {/* Rain drops */}\n \n \n );\n}\n\nfunction CloudIcon() {\n return (\n \n \n \n );\n}\n\nexport default AgenticChat;\n", + "language": "typescript", + "type": "file" + }, + { + "name": "style.css", + "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", + "language": "css", + "type": "file" + }, + { + "name": "README.mdx", + "content": "# 🤖 Agentic Chat with Frontend Tools\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic chat** capabilities with **frontend\ntool integration**:\n\n1. **Natural Conversation**: Chat with your Copilot in a familiar chat interface\n2. **Frontend Tool Execution**: The Copilot can directly interacts with your UI\n by calling frontend functions\n3. 
**Seamless Integration**: Tools defined in the frontend and automatically\n discovered and made available to the agent\n\n## How to Interact\n\nTry asking your Copilot to:\n\n- \"Can you change the background color to something more vibrant?\"\n- \"Make the background a blue to purple gradient\"\n- \"Set the background to a sunset-themed gradient\"\n- \"Change it back to a simple light color\"\n\nYou can also chat about other topics - the agent will respond conversationally\nwhile having the ability to use your UI tools when appropriate.\n\n## ✨ Frontend Tool Integration in Action\n\n**What's happening technically:**\n\n- The React component defines a frontend function using `useCopilotAction`\n- CopilotKit automatically exposes this function to the agent\n- When you make a request, the agent determines whether to use the tool\n- The agent calls the function with the appropriate parameters\n- The UI immediately updates in response\n\n**What you'll see in this demo:**\n\n- The Copilot understands requests to change the background\n- It generates CSS values for colors and gradients\n- When it calls the tool, the background changes instantly\n- The agent provides a conversational response about the changes it made\n\nThis technique of exposing frontend functions to your Copilot can be extended to\nany UI manipulation you want to enable, from theme changes to data filtering,\nnavigation, or complex UI state management!\n", + "language": "markdown", + "type": "file" + }, + { + "name": "backend_tool_rendering.py", + "content": "\"\"\"Backend Tool Rendering example for AWS Strands.\n\nThis example shows an agent with backend tool rendering capabilities.\nThe change_background tool is registered here so the LLM knows about it,\nbut the actual execution happens on the frontend via useFrontendTool.\n\"\"\"\nimport os\nfrom pathlib import Path\nfrom dotenv import load_dotenv\n\n# Suppress OpenTelemetry context warnings from Strands SDK\nos.environ[\"OTEL_SDK_DISABLED\"] = 
\"true\"\nos.environ[\"OTEL_PYTHON_DISABLED_INSTRUMENTATIONS\"] = \"all\"\n\nfrom strands import Agent, tool\nfrom strands.models.gemini import GeminiModel\nfrom ag_ui_strands import StrandsAgent, create_strands_app\n\n# Load environment variables from .env file\nenv_path = Path(__file__).parent.parent.parent / '.env'\n\nload_dotenv(dotenv_path=env_path)\n\n# Use Gemini model\nmodel = GeminiModel(\n client_args={\n \"api_key\": os.getenv(\"GOOGLE_API_KEY\", \"your-api-key-here\"),\n },\n model_id=\"gemini-2.5-flash\",\n params={\n \"temperature\": 0.7,\n \"max_output_tokens\": 2048,\n \"top_p\": 0.9,\n \"top_k\": 40\n }\n)\n\n# Define backend tools for demonstration\n@tool\ndef render_chart(chart_type: str, data: str) -> dict:\n \"\"\"\n Render a chart with backend processing capabilities.\n \n Args:\n chart_type: Type of chart (bar, line, pie, etc.)\n data: Chart data in JSON format\n \n Returns:\n Chart data for frontend rendering\n \"\"\"\n return {\n \"chart_type\": chart_type,\n \"data\": data[:100],\n \"status\": \"rendered\"\n }\n\n@tool\ndef get_weather(location: str) -> dict:\n \"\"\"\n Get weather information for a location.\n \n Args:\n location: The location to get weather for\n \n Returns:\n Weather data with temperature, conditions, humidity, wind speed\n \"\"\"\n import random\n \n # Simulate different weather conditions\n conditions_list = [\"sunny\", \"cloudy\", \"rainy\", \"clear\", \"partly cloudy\"]\n \n return {\n \"temperature\": random.randint(60, 85),\n \"conditions\": random.choice(conditions_list),\n \"humidity\": random.randint(30, 80),\n \"wind_speed\": random.randint(5, 20),\n \"feels_like\": random.randint(58, 88)\n }\n\nstrands_agent = Agent(\n model=model,\n tools=[get_weather, render_chart],\n system_prompt=\"You are a helpful assistant with backend tool rendering capabilities. 
You can get weather information and render charts.\",\n)\n\nagui_agent = StrandsAgent(\n agent=strands_agent,\n name=\"backend_tool_rendering\",\n description=\"AWS Strands agent with backend tool rendering support\",\n)\n\napp = create_strands_app(agui_agent, \"/\")\n\n", + "language": "python", + "type": "file" + } + ], + "aws-strands::agentic_generative_ui": [ + { + "name": "page.tsx", + "content": "\"use client\";\nimport React from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { CopilotKit, useCoAgentStateRender } from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface AgenticGenerativeUIProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst AgenticGenerativeUI: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n return (\n \n \n \n );\n};\n\ninterface AgentState {\n steps: {\n description: string;\n status: \"pending\" | \"completed\";\n }[];\n}\n\nconst Chat = () => {\n const { theme } = useTheme();\n useCoAgentStateRender({\n name: \"agentic_generative_ui\",\n render: ({ state }) => {\n if (!state.steps || state.steps.length === 0) {\n return null;\n }\n\n const completedCount = state.steps.filter((step) => step.status === \"completed\").length;\n const progressPercentage = (completedCount / state.steps.length) * 100;\n\n return (\n
\n \n {/* Header */}\n
\n
\n

\n Task Progress\n

\n
\n {completedCount}/{state.steps.length} Complete\n
\n
\n\n {/* Progress Bar */}\n \n \n \n
\n
\n\n {/* Steps */}\n
\n {state.steps.map((step, index) => {\n const isCompleted = step.status === \"completed\";\n const isCurrentPending =\n step.status === \"pending\" &&\n index === state.steps.findIndex((s) => s.status === \"pending\");\n const isFuturePending = step.status === \"pending\" && !isCurrentPending;\n\n return (\n \n {/* Connector Line */}\n {index < state.steps.length - 1 && (\n \n )}\n\n {/* Status Icon */}\n \n {isCompleted ? (\n \n ) : isCurrentPending ? (\n \n ) : (\n \n )}\n
\n\n {/* Step Content */}\n
\n \n {step.description}\n
\n {isCurrentPending && (\n \n Processing...\n \n )}\n \n\n {/* Animated Background for Current Step */}\n {isCurrentPending && (\n \n )}\n \n );\n })}\n \n\n {/* Decorative Elements */}\n \n \n \n \n );\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\n// Enhanced Icons\nfunction CheckIcon() {\n return (\n \n \n \n );\n}\n\nfunction SpinnerIcon() {\n return (\n \n \n \n \n );\n}\n\nfunction ClockIcon({ theme }: { theme?: string }) {\n return (\n \n \n \n \n );\n}\n\nexport default AgenticGenerativeUI;\n", + "language": "typescript", + "type": "file" + }, + { + "name": "style.css", + "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", + "language": "css", + "type": "file" + }, + { + "name": "README.mdx", + "content": "# 🚀 Agentic Generative UI Task Executor\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **agentic generative UI** capabilities:\n\n1. **Real-time Status Updates**: The Copilot provides live feedback as it works\n through complex tasks\n2. **Long-running Task Execution**: See how agents can handle extended processes\n with continuous feedback\n3. 
**Dynamic UI Generation**: The interface updates in real-time to reflect the\n agent's progress\n\n## How to Interact\n\nSimply ask your Copilot to perform any moderately complex task:\n\n- \"Make me a sandwich\"\n- \"Plan a vacation to Japan\"\n- \"Create a weekly workout routine\"\n\nThe Copilot will break down the task into steps and begin \"executing\" them,\nproviding real-time status updates as it progresses.\n\n## ✨ Agentic Generative UI in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and creates a detailed execution plan\n- Each step is processed sequentially with realistic timing\n- Status updates are streamed to the frontend using CopilotKit's streaming\n capabilities\n- The UI dynamically renders these updates without page refreshes\n- The entire flow is managed by the agent, requiring no manual intervention\n\n**What you'll see in this demo:**\n\n- The Copilot breaks your task into logical steps\n- A status indicator shows the current progress\n- Each step is highlighted as it's being executed\n- Detailed status messages explain what's happening at each moment\n- Upon completion, you receive a summary of the task execution\n\nThis pattern of providing real-time progress for long-running tasks is perfect\nfor scenarios where users benefit from transparency into complex processes -\nfrom data analysis to content creation, system configurations, or multi-stage\nworkflows!\n", + "language": "markdown", + "type": "file" + }, + { + "name": "agentic_generative_ui.py", + "content": "\"\"\"Agentic Generative UI example for AWS Strands.\n\nDemonstrates streaming agent state updates to the frontend for real-time UI rendering.\n\"\"\"\nimport json\nimport os\nimport asyncio\nimport random\nimport uuid\nfrom typing import List, Dict, Any, Annotated\nfrom pathlib import Path\nfrom dotenv import load_dotenv\nfrom pydantic import BaseModel, Field\n\nfrom strands import Agent, tool\nfrom strands.models.gemini import GeminiModel\nfrom 
ag_ui.core import (\n EventType,\n StateSnapshotEvent,\n StateDeltaEvent,\n TextMessageStartEvent,\n TextMessageContentEvent,\n TextMessageEndEvent,\n MessagesSnapshotEvent,\n AssistantMessage,\n)\nfrom ag_ui_strands import (\n StrandsAgent,\n create_strands_app,\n StrandsAgentConfig,\n ToolBehavior,\n PredictStateMapping,\n)\n\n# Suppress OpenTelemetry warnings\nos.environ[\"OTEL_SDK_DISABLED\"] = \"true\"\nos.environ[\"OTEL_PYTHON_DISABLED_INSTRUMENTATIONS\"] = \"all\"\n\n# Load environment variables\nenv_path = Path(__file__).parent.parent.parent / '.env'\nload_dotenv(dotenv_path=env_path)\n\n# Use Gemini model\nmodel = GeminiModel(\n client_args={\n \"api_key\": os.getenv(\"GOOGLE_API_KEY\", \"your-api-key-here\"),\n },\n model_id=\"gemini-2.5-flash\",\n params={\n \"temperature\": 0.3,\n \"max_output_tokens\": 1024,\n \"top_p\": 0.9,\n \"top_k\": 40\n }\n)\n\n\nclass TaskStep(BaseModel):\n \"\"\"Represents a single UI step.\"\"\"\n\n description: str = Field(description=\"Gerund phrase describing the action, e.g. 
'Sketching layout'\")\n status: str = Field(description=\"Must be 'pending' when proposed\", default=\"pending\")\n\n\n@tool\ndef plan_task_steps(\n task: str,\n context: str = \"\",\n steps: Annotated[List[Any], Field(description=\"4-6 pending steps in gerund form\")] = None,\n) -> Dict[str, Any]:\n \"\"\"\n Plan the concrete steps required to accomplish a task.\n\n Args:\n task: Brief description of what the user wants to achieve.\n context: Optional additional instructions or constraints from the user.\n steps: Ordered list of pending steps in gerund form.\n\n Returns:\n JSON payload with the task summary and proposed steps.\n \"\"\"\n normalized_steps = _normalize_steps(steps) if steps else []\n if not normalized_steps:\n normalized_steps = _fallback_steps(task or \"the task\", context)\n\n return {\n \"task\": task,\n \"context\": context,\n \"steps\": normalized_steps,\n }\n\n\ndef _normalize_steps(raw_steps: Any) -> List[Dict[str, str]]:\n if not isinstance(raw_steps, list):\n return []\n normalized = []\n for step in raw_steps:\n if isinstance(step, TaskStep):\n normalized.append(step.model_dump())\n elif isinstance(step, dict) and \"description\" in step:\n normalized.append(\n {\n \"description\": str(step[\"description\"]),\n \"status\": step.get(\"status\") or \"pending\",\n }\n )\n elif isinstance(step, str) and step.strip():\n normalized.append({\"description\": step.strip(), \"status\": \"pending\"})\n return normalized\n\n\ndef _fallback_steps(task: str, context: str) -> List[Dict[str, str]]:\n \"\"\"Create a simple deterministic plan when the model forgets to provide steps.\"\"\"\n count = 6\n for token in context.split():\n if token.isdigit():\n count = max(4, min(10, int(token)))\n break\n\n templates = [\n \"Clarifying goals for {task}\",\n \"Gathering resources for {task}\",\n \"Preparing workspace for {task}\",\n \"Executing core work on {task}\",\n \"Reviewing results for {task}\",\n \"Wrapping up {task}\",\n \"Documenting learnings from 
{task}\",\n \"Celebrating completion of {task}\",\n ]\n\n plan = []\n for i in range(count):\n template = templates[i % len(templates)]\n plan.append(\n {\n \"description\": template.format(task=task).strip().capitalize(),\n \"status\": \"pending\",\n }\n )\n return plan\n\n\nasync def steps_state_from_result(context):\n result = context.result_data or {}\n steps = _normalize_steps(result.get(\"steps\"))\n if not steps:\n return None\n return {\"steps\": steps}\n\n\nasync def simulate_progress(context):\n \"\"\"Emit incremental state updates to mimic backend work.\"\"\"\n result = context.result_data or {}\n steps = _normalize_steps(result.get(\"steps\"))\n if not steps:\n return\n\n working_steps = [dict(step) for step in steps]\n\n # Initial snapshot (all pending)\n yield StateSnapshotEvent(\n type=EventType.STATE_SNAPSHOT,\n snapshot={\"steps\": working_steps},\n )\n\n for index, _ in enumerate(working_steps):\n # Mark current step as in_progress then completed\n await asyncio.sleep(random.uniform(0.3, 0.8))\n working_steps[index][\"status\"] = \"in_progress\"\n yield StateDeltaEvent(\n type=EventType.STATE_DELTA,\n delta=[\n {\n \"op\": \"replace\",\n \"path\": f\"/steps/{index}/status\",\n \"value\": \"in_progress\",\n }\n ],\n )\n\n await asyncio.sleep(random.uniform(0.4, 1.0))\n working_steps[index][\"status\"] = \"completed\"\n yield StateDeltaEvent(\n type=EventType.STATE_DELTA,\n delta=[\n {\n \"op\": \"replace\",\n \"path\": f\"/steps/{index}/status\",\n \"value\": \"completed\",\n }\n ],\n )\n\n yield StateSnapshotEvent(\n type=EventType.STATE_SNAPSHOT,\n snapshot={\"steps\": working_steps},\n )\n\n # Emit a lightweight assistant confirmation so the UI always shows completion text\n summary = result.get(\"task\") or \"your task\"\n message_id = str(uuid.uuid4())\n text = f\"The plan for {summary} has been completed successfully.\"\n\n yield TextMessageStartEvent(\n type=EventType.TEXT_MESSAGE_START,\n message_id=message_id,\n role=\"assistant\",\n )\n 
yield TextMessageContentEvent(\n type=EventType.TEXT_MESSAGE_CONTENT,\n message_id=message_id,\n delta=text + \" ✅\",\n )\n yield TextMessageEndEvent(\n type=EventType.TEXT_MESSAGE_END,\n message_id=message_id,\n )\n\n # Persist the summary in the timeline so the UI keeps it\n assistant_msg = AssistantMessage(\n id=message_id,\n role=\"assistant\",\n content=text,\n )\n yield MessagesSnapshotEvent(\n type=EventType.MESSAGES_SNAPSHOT,\n messages=list(context.input_data.messages) + [assistant_msg],\n )\n\n\ndef build_state_context(input_data, user_message: str) -> str:\n \"\"\"Augment the user message with existing plan context to discourage replanning.\"\"\"\n state = getattr(input_data, \"state\", {}) or {}\n steps = state.get(\"steps\")\n if steps:\n steps_json = json.dumps(steps, indent=2)\n return (\n \"A plan is already in progress. NEVER call plan_task_steps again unless the user explicitly \"\n \"asks to restart. Discuss progress or ask clarifying questions instead.\\n\\n\"\n f\"Current steps:\\n{steps_json}\\n\\nUser: {user_message}\"\n )\n return user_message\n\n\ngenerative_ui_config = StrandsAgentConfig(\n state_context_builder=build_state_context,\n tool_behaviors={\n \"plan_task_steps\": ToolBehavior(\n predict_state=[\n PredictStateMapping(\n state_key=\"steps\",\n tool=\"plan_task_steps\",\n tool_argument=\"steps\",\n )\n ],\n state_from_result=steps_state_from_result,\n custom_result_handler=simulate_progress,\n stop_streaming_after_result=True,\n )\n }\n)\n\n\nsystem_prompt = \"\"\"\nYou are an energetic project assistant who decomposes user goals into action plans.\n\nPlanning rules:\n1. When the user asks for help with a task or making a plan, call `plan_task_steps` exactly once to create the plan.\n2. Do NOT call `plan_task_steps` again unless the user explicitly says to restart or discard the plan (or moves on to a new task).\n3. 
Generate 4-6 concise steps in gerund form (e.g., “Setting up repo”, “Testing prototype”) and leave their status as \"pending\".\n4. After the tool call, send a short confirmation (<= 2 sentences) plus one emoji describing what you planned.\n5. If the user is just chatting or reviewing progress, respond conversationally and DO NOT call the tool.\n6. If a plan already exists, reference the current steps and ask follow-up questions instead of creating a new plan, unless instructed otherwise.\n\"\"\"\n\n\nstrands_agent = Agent(\n model=model,\n tools=[plan_task_steps],\n system_prompt=system_prompt,\n)\n\nagui_agent = StrandsAgent(\n agent=strands_agent,\n name=\"agentic_generative_ui\",\n description=\"AWS Strands agent with generative UI and state streaming\",\n config=generative_ui_config,\n)\n\napp = create_strands_app(agui_agent, \"/\")\n", + "language": "python", + "type": "file" + } + ], + "aws-strands::shared_state": [ + { + "name": "page.tsx", + "content": "\"use client\";\nimport { CopilotKit, useCoAgent, useCopilotChat } from \"@copilotkit/react-core\";\nimport { CopilotChat, CopilotSidebar } from \"@copilotkit/react-ui\";\nimport React, { useState, useEffect, useRef } from \"react\";\nimport { Role, TextMessage } from \"@copilotkit/runtime-client-gql\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport { useMobileView } from \"@/utils/use-mobile-view\";\nimport { useMobileChat } from \"@/utils/use-mobile-chat\";\nimport { useURLParams } from \"@/contexts/url-params-context\";\n\ninterface SharedStateProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nexport default function SharedState({ params }: SharedStateProps) {\n const { integrationId } = React.use(params);\n const { isMobile } = useMobileView();\n const { chatDefaultOpen } = useURLParams();\n const defaultChatHeight = 50;\n const { isChatOpen, setChatHeight, setIsChatOpen, isDragging, chatHeight, handleDragStart } =\n useMobileChat(defaultChatHeight);\n\n 
const chatTitle = \"AI Recipe Assistant\";\n const chatDescription = \"Ask me to craft recipes\";\n const initialLabel = \"Hi 👋 How can I help with your recipe?\";\n\n return (\n \n
\n \n {isMobile ? (\n <>\n {/* Chat Toggle Button */}\n
\n
\n {\n if (!isChatOpen) {\n setChatHeight(defaultChatHeight); // Reset to good default when opening\n }\n setIsChatOpen(!isChatOpen);\n }}\n >\n
\n
\n
{chatTitle}
\n
{chatDescription}
\n
\n
\n \n \n \n \n
\n
\n \n\n {/* Pull-Up Chat Container */}\n \n {/* Drag Handle Bar */}\n \n
\n \n\n {/* Chat Header */}\n
\n
\n
\n

{chatTitle}

\n
\n setIsChatOpen(false)}\n className=\"p-2 hover:bg-gray-100 rounded-full transition-colors\"\n >\n \n \n \n \n
\n
\n\n {/* Chat Content - Flexible container for messages and input */}\n
\n \n
\n \n\n {/* Backdrop */}\n {isChatOpen && (\n
setIsChatOpen(false)} />\n )}\n \n ) : (\n \n )}\n
\n \n );\n}\n\nenum SkillLevel {\n BEGINNER = \"Beginner\",\n INTERMEDIATE = \"Intermediate\",\n ADVANCED = \"Advanced\",\n}\n\nenum CookingTime {\n FiveMin = \"5 min\",\n FifteenMin = \"15 min\",\n ThirtyMin = \"30 min\",\n FortyFiveMin = \"45 min\",\n SixtyPlusMin = \"60+ min\",\n}\n\nconst cookingTimeValues = [\n { label: CookingTime.FiveMin, value: 0 },\n { label: CookingTime.FifteenMin, value: 1 },\n { label: CookingTime.ThirtyMin, value: 2 },\n { label: CookingTime.FortyFiveMin, value: 3 },\n { label: CookingTime.SixtyPlusMin, value: 4 },\n];\n\nenum SpecialPreferences {\n HighProtein = \"High Protein\",\n LowCarb = \"Low Carb\",\n Spicy = \"Spicy\",\n BudgetFriendly = \"Budget-Friendly\",\n OnePotMeal = \"One-Pot Meal\",\n Vegetarian = \"Vegetarian\",\n Vegan = \"Vegan\",\n}\n\ninterface Ingredient {\n icon: string;\n name: string;\n amount: string;\n}\n\ninterface Recipe {\n title: string;\n skill_level: SkillLevel;\n cooking_time: CookingTime;\n special_preferences: string[];\n ingredients: Ingredient[];\n instructions: string[];\n}\n\ninterface RecipeAgentState {\n recipe: Recipe;\n}\n\nconst INITIAL_STATE: RecipeAgentState = {\n recipe: {\n title: \"Make Your Recipe\",\n skill_level: SkillLevel.INTERMEDIATE,\n cooking_time: CookingTime.FortyFiveMin,\n special_preferences: [],\n ingredients: [\n { icon: \"🥕\", name: \"Carrots\", amount: \"3 large, grated\" },\n { icon: \"🌾\", name: \"All-Purpose Flour\", amount: \"2 cups\" },\n ],\n instructions: [\"Preheat oven to 350°F (175°C)\"],\n },\n};\n\nfunction Recipe() {\n const { isMobile } = useMobileView();\n const { state: agentState, setState: setAgentState } = useCoAgent({\n name: \"shared_state\",\n initialState: INITIAL_STATE,\n });\n\n const [recipe, setRecipe] = useState(INITIAL_STATE.recipe);\n const { appendMessage, isLoading } = useCopilotChat();\n const [editingInstructionIndex, setEditingInstructionIndex] = useState(null);\n const newInstructionRef = useRef(null);\n\n const updateRecipe = 
(partialRecipe: Partial) => {\n setAgentState({\n ...agentState,\n recipe: {\n ...recipe,\n ...partialRecipe,\n },\n });\n setRecipe({\n ...recipe,\n ...partialRecipe,\n });\n };\n\n const newRecipeState = { ...recipe };\n const newChangedKeys = [];\n const changedKeysRef = useRef([]);\n\n for (const key in recipe) {\n if (\n agentState &&\n agentState.recipe &&\n (agentState.recipe as any)[key] !== undefined &&\n (agentState.recipe as any)[key] !== null\n ) {\n let agentValue = (agentState.recipe as any)[key];\n const recipeValue = (recipe as any)[key];\n\n // Check if agentValue is a string and replace \\n with actual newlines\n if (typeof agentValue === \"string\") {\n agentValue = agentValue.replace(/\\\\n/g, \"\\n\");\n }\n\n if (JSON.stringify(agentValue) !== JSON.stringify(recipeValue)) {\n (newRecipeState as any)[key] = agentValue;\n newChangedKeys.push(key);\n }\n }\n }\n\n if (newChangedKeys.length > 0) {\n changedKeysRef.current = newChangedKeys;\n } else if (!isLoading) {\n changedKeysRef.current = [];\n }\n\n useEffect(() => {\n setRecipe(newRecipeState);\n }, [JSON.stringify(newRecipeState)]);\n\n const handleTitleChange = (event: React.ChangeEvent) => {\n updateRecipe({\n title: event.target.value,\n });\n };\n\n const handleSkillLevelChange = (event: React.ChangeEvent) => {\n updateRecipe({\n skill_level: event.target.value as SkillLevel,\n });\n };\n\n const handleDietaryChange = (preference: string, checked: boolean) => {\n if (checked) {\n updateRecipe({\n special_preferences: [...recipe.special_preferences, preference],\n });\n } else {\n updateRecipe({\n special_preferences: recipe.special_preferences.filter((p) => p !== preference),\n });\n }\n };\n\n const handleCookingTimeChange = (event: React.ChangeEvent) => {\n updateRecipe({\n cooking_time: cookingTimeValues[Number(event.target.value)].label,\n });\n };\n\n const addIngredient = () => {\n // Pick a random food emoji from our valid list\n updateRecipe({\n ingredients: 
[...recipe.ingredients, { icon: \"🍴\", name: \"\", amount: \"\" }],\n });\n };\n\n const updateIngredient = (index: number, field: keyof Ingredient, value: string) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients[index] = {\n ...updatedIngredients[index],\n [field]: value,\n };\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const removeIngredient = (index: number) => {\n const updatedIngredients = [...recipe.ingredients];\n updatedIngredients.splice(index, 1);\n updateRecipe({ ingredients: updatedIngredients });\n };\n\n const addInstruction = () => {\n const newIndex = recipe.instructions.length;\n updateRecipe({\n instructions: [...recipe.instructions, \"\"],\n });\n // Set the new instruction as the editing one\n setEditingInstructionIndex(newIndex);\n\n // Focus the new instruction after render\n setTimeout(() => {\n const textareas = document.querySelectorAll(\".instructions-container textarea\");\n const newTextarea = textareas[textareas.length - 1] as HTMLTextAreaElement;\n if (newTextarea) {\n newTextarea.focus();\n }\n }, 50);\n };\n\n const updateInstruction = (index: number, value: string) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions[index] = value;\n updateRecipe({ instructions: updatedInstructions });\n };\n\n const removeInstruction = (index: number) => {\n const updatedInstructions = [...recipe.instructions];\n updatedInstructions.splice(index, 1);\n updateRecipe({ instructions: updatedInstructions });\n };\n\n // Simplified icon handler that defaults to a fork/knife for any problematic icons\n const getProperIcon = (icon: string | undefined): string => {\n // If icon is undefined return the default\n if (!icon) {\n return \"🍴\";\n }\n\n return icon;\n };\n\n return (\n \n {/* Recipe Title */}\n
\n \n\n
\n
\n 🕒\n t.label === recipe.cooking_time)?.value || 3}\n onChange={handleCookingTimeChange}\n style={{\n backgroundImage:\n \"url(\\\"data:image/svg+xml;charset=UTF-8,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' fill='none' stroke='%23555' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3e%3cpolyline points='6 9 12 15 18 9'%3e%3c/polyline%3e%3c/svg%3e\\\")\",\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"right 0px center\",\n backgroundSize: \"12px\",\n appearance: \"none\",\n WebkitAppearance: \"none\",\n }}\n >\n {cookingTimeValues.map((time) => (\n \n ))}\n \n
\n\n
\n 🏆\n \n {Object.values(SkillLevel).map((level) => (\n \n ))}\n \n
\n
\n
\n\n {/* Dietary Preferences */}\n
\n {changedKeysRef.current.includes(\"special_preferences\") && }\n

Dietary Preferences

\n
\n {Object.values(SpecialPreferences).map((option) => (\n \n ))}\n
\n
\n\n {/* Ingredients */}\n
\n {changedKeysRef.current.includes(\"ingredients\") && }\n
\n

Ingredients

\n \n + Add Ingredient\n \n
\n
\n {recipe.ingredients.map((ingredient, index) => (\n
\n
{getProperIcon(ingredient.icon)}
\n
\n updateIngredient(index, \"name\", e.target.value)}\n placeholder=\"Ingredient name\"\n className=\"ingredient-name-input\"\n />\n updateIngredient(index, \"amount\", e.target.value)}\n placeholder=\"Amount\"\n className=\"ingredient-amount-input\"\n />\n
\n removeIngredient(index)}\n aria-label=\"Remove ingredient\"\n >\n ×\n \n
\n ))}\n
\n
\n\n {/* Instructions */}\n
\n {changedKeysRef.current.includes(\"instructions\") && }\n
\n

Instructions

\n \n
\n
\n {recipe.instructions.map((instruction, index) => (\n
\n {/* Number Circle */}\n
{index + 1}
\n\n {/* Vertical Line */}\n {index < recipe.instructions.length - 1 &&
}\n\n {/* Instruction Content */}\n setEditingInstructionIndex(index)}\n >\n updateInstruction(index, e.target.value)}\n placeholder={!instruction ? \"Enter cooking instruction...\" : \"\"}\n onFocus={() => setEditingInstructionIndex(index)}\n onBlur={(e) => {\n // Only blur if clicking outside this instruction\n if (!e.relatedTarget || !e.currentTarget.contains(e.relatedTarget as Node)) {\n setEditingInstructionIndex(null);\n }\n }}\n />\n\n {/* Delete Button (only visible on hover) */}\n {\n e.stopPropagation(); // Prevent triggering parent onClick\n removeInstruction(index);\n }}\n aria-label=\"Remove instruction\"\n >\n ×\n \n
\n
\n ))}\n
\n
\n\n {/* Improve with AI Button */}\n
\n {\n if (!isLoading) {\n appendMessage(\n new TextMessage({\n content: \"Improve the recipe\",\n role: Role.User,\n }),\n );\n }\n }}\n disabled={isLoading}\n >\n {isLoading ? \"Please Wait...\" : \"Improve with AI\"}\n \n
\n \n );\n}\n\nfunction Ping() {\n return (\n \n \n \n \n );\n}\n", + "language": "typescript", + "type": "file" + }, + { + "name": "style.css", + "content": ".copilotKitWindow {\n box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);\n}\n\n.copilotKitHeader {\n border-top-left-radius: 5px !important;\n background-color: #fff;\n color: #000;\n border-bottom: 0px;\n}\n\n/* Recipe App Styles */\n.app-container {\n min-height: 100vh;\n width: 100%;\n display: flex;\n align-items: center;\n justify-content: center;\n background-size: cover;\n background-position: center;\n background-repeat: no-repeat;\n background-attachment: fixed;\n position: relative;\n overflow: auto;\n}\n\n.recipe-card {\n background-color: rgba(255, 255, 255, 0.97);\n border-radius: 16px;\n box-shadow: 0 15px 30px rgba(0, 0, 0, 0.25), 0 5px 15px rgba(0, 0, 0, 0.15);\n width: 100%;\n max-width: 750px;\n margin: 20px auto;\n padding: 14px 32px;\n position: relative;\n z-index: 1;\n backdrop-filter: blur(5px);\n border: 1px solid rgba(255, 255, 255, 0.3);\n transition: transform 0.2s ease, box-shadow 0.2s ease;\n animation: fadeIn 0.5s ease-out forwards;\n box-sizing: border-box;\n overflow: hidden;\n}\n\n.recipe-card:hover {\n transform: translateY(-5px);\n box-shadow: 0 20px 40px rgba(0, 0, 0, 0.3), 0 10px 20px rgba(0, 0, 0, 0.2);\n}\n\n/* Recipe Header */\n.recipe-header {\n margin-bottom: 24px;\n}\n\n.recipe-title-input {\n width: 100%;\n font-size: 24px;\n font-weight: bold;\n border: none;\n outline: none;\n padding: 8px 0;\n margin-bottom: 0px;\n}\n\n.recipe-meta {\n display: flex;\n align-items: center;\n gap: 20px;\n margin-top: 5px;\n margin-bottom: 14px;\n}\n\n.meta-item {\n display: flex;\n align-items: center;\n gap: 8px;\n color: #555;\n}\n\n.meta-icon {\n font-size: 20px;\n color: #777;\n}\n\n.meta-text {\n font-size: 15px;\n}\n\n/* Recipe Meta Selects */\n.meta-item select {\n border: none;\n background: transparent;\n font-size: 15px;\n color: #555;\n cursor: pointer;\n outline: none;\n 
padding-right: 18px;\n transition: color 0.2s, transform 0.1s;\n font-weight: 500;\n}\n\n.meta-item select:hover,\n.meta-item select:focus {\n color: #FF5722;\n}\n\n.meta-item select:active {\n transform: scale(0.98);\n}\n\n.meta-item select option {\n color: #333;\n background-color: white;\n font-weight: normal;\n padding: 8px;\n}\n\n/* Section Container */\n.section-container {\n margin-bottom: 20px;\n position: relative;\n width: 100%;\n}\n\n.section-title {\n font-size: 20px;\n font-weight: 700;\n margin-bottom: 20px;\n color: #333;\n position: relative;\n display: inline-block;\n}\n\n.section-title:after {\n content: \"\";\n position: absolute;\n bottom: -8px;\n left: 0;\n width: 40px;\n height: 3px;\n background-color: #ff7043;\n border-radius: 3px;\n}\n\n/* Dietary Preferences */\n.dietary-options {\n display: flex;\n flex-wrap: wrap;\n gap: 10px 16px;\n margin-bottom: 16px;\n width: 100%;\n}\n\n.dietary-option {\n display: flex;\n align-items: center;\n gap: 6px;\n font-size: 14px;\n cursor: pointer;\n margin-bottom: 4px;\n}\n\n.dietary-option input {\n cursor: pointer;\n}\n\n/* Ingredients */\n.ingredients-container {\n display: flex;\n flex-wrap: wrap;\n gap: 10px;\n margin-bottom: 15px;\n width: 100%;\n box-sizing: border-box;\n}\n\n.ingredient-card {\n display: flex;\n align-items: center;\n background-color: rgba(255, 255, 255, 0.9);\n border-radius: 12px;\n padding: 12px;\n margin-bottom: 10px;\n box-shadow: 0 4px 10px rgba(0, 0, 0, 0.08);\n position: relative;\n transition: all 0.2s ease;\n border: 1px solid rgba(240, 240, 240, 0.8);\n width: calc(33.333% - 7px);\n box-sizing: border-box;\n}\n\n.ingredient-card:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 15px rgba(0, 0, 0, 0.12);\n}\n\n.ingredient-card .remove-button {\n position: absolute;\n right: 10px;\n top: 10px;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 24px;\n height: 24px;\n line-height: 
1;\n}\n\n.ingredient-card:hover .remove-button {\n display: block;\n}\n\n.ingredient-icon {\n font-size: 24px;\n margin-right: 12px;\n display: flex;\n align-items: center;\n justify-content: center;\n width: 40px;\n height: 40px;\n background-color: #f7f7f7;\n border-radius: 50%;\n flex-shrink: 0;\n}\n\n.ingredient-content {\n flex: 1;\n display: flex;\n flex-direction: column;\n gap: 3px;\n min-width: 0;\n}\n\n.ingredient-name-input,\n.ingredient-amount-input {\n border: none;\n background: transparent;\n outline: none;\n width: 100%;\n padding: 0;\n text-overflow: ellipsis;\n overflow: hidden;\n white-space: nowrap;\n}\n\n.ingredient-name-input {\n font-weight: 500;\n font-size: 14px;\n}\n\n.ingredient-amount-input {\n font-size: 13px;\n color: #666;\n}\n\n.ingredient-name-input::placeholder,\n.ingredient-amount-input::placeholder {\n color: #aaa;\n}\n\n.remove-button {\n background: none;\n border: none;\n color: #999;\n font-size: 20px;\n cursor: pointer;\n padding: 0;\n width: 28px;\n height: 28px;\n display: flex;\n align-items: center;\n justify-content: center;\n margin-left: 10px;\n}\n\n.remove-button:hover {\n color: #FF5722;\n}\n\n/* Instructions */\n.instructions-container {\n display: flex;\n flex-direction: column;\n gap: 6px;\n position: relative;\n margin-bottom: 12px;\n width: 100%;\n}\n\n.instruction-item {\n position: relative;\n display: flex;\n width: 100%;\n box-sizing: border-box;\n margin-bottom: 8px;\n align-items: flex-start;\n}\n\n.instruction-number {\n display: flex;\n align-items: center;\n justify-content: center;\n min-width: 26px;\n height: 26px;\n background-color: #ff7043;\n color: white;\n border-radius: 50%;\n font-weight: 600;\n flex-shrink: 0;\n box-shadow: 0 2px 4px rgba(255, 112, 67, 0.3);\n z-index: 1;\n font-size: 13px;\n margin-top: 2px;\n}\n\n.instruction-line {\n position: absolute;\n left: 13px; /* Half of the number circle width */\n top: 22px;\n bottom: -18px;\n width: 2px;\n background: linear-gradient(to bottom, 
#ff7043 60%, rgba(255, 112, 67, 0.4));\n z-index: 0;\n}\n\n.instruction-content {\n background-color: white;\n border-radius: 10px;\n padding: 10px 14px;\n margin-left: 12px;\n flex-grow: 1;\n transition: all 0.2s ease;\n box-shadow: 0 2px 6px rgba(0, 0, 0, 0.08);\n border: 1px solid rgba(240, 240, 240, 0.8);\n position: relative;\n width: calc(100% - 38px);\n box-sizing: border-box;\n display: flex;\n align-items: center;\n}\n\n.instruction-content-editing {\n background-color: #fff9f6;\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12), 0 0 0 2px rgba(255, 112, 67, 0.2);\n}\n\n.instruction-content:hover {\n transform: translateY(-2px);\n box-shadow: 0 6px 16px rgba(0, 0, 0, 0.12);\n}\n\n.instruction-textarea {\n width: 100%;\n background: transparent;\n border: none;\n resize: vertical;\n font-family: inherit;\n font-size: 14px;\n line-height: 1.4;\n min-height: 20px;\n outline: none;\n padding: 0;\n margin: 0;\n}\n\n.instruction-delete-btn {\n position: absolute;\n background: none;\n border: none;\n color: #ccc;\n font-size: 16px;\n cursor: pointer;\n display: none;\n padding: 0;\n width: 20px;\n height: 20px;\n line-height: 1;\n top: 50%;\n transform: translateY(-50%);\n right: 8px;\n}\n\n.instruction-content:hover .instruction-delete-btn {\n display: flex;\n align-items: center;\n justify-content: center;\n}\n\n/* Action Button */\n.action-container {\n display: flex;\n justify-content: center;\n margin-top: 40px;\n padding-bottom: 20px;\n position: relative;\n}\n\n.improve-button {\n background-color: #ff7043;\n border: none;\n color: white;\n border-radius: 30px;\n font-size: 18px;\n font-weight: 600;\n padding: 14px 28px;\n cursor: pointer;\n transition: all 0.3s ease;\n box-shadow: 0 4px 15px rgba(255, 112, 67, 0.4);\n display: flex;\n align-items: center;\n justify-content: center;\n text-align: center;\n position: relative;\n min-width: 180px;\n}\n\n.improve-button:hover {\n background-color: #ff5722;\n transform: translateY(-2px);\n box-shadow: 0 8px 20px 
rgba(255, 112, 67, 0.5);\n}\n\n.improve-button.loading {\n background-color: #ff7043;\n opacity: 0.8;\n cursor: not-allowed;\n padding-left: 42px; /* Reduced padding to bring text closer to icon */\n padding-right: 22px; /* Balance the button */\n justify-content: flex-start; /* Left align text for better alignment with icon */\n}\n\n.improve-button.loading:after {\n content: \"\"; /* Add space between icon and text */\n display: inline-block;\n width: 8px; /* Width of the space */\n}\n\n.improve-button:before {\n content: \"\";\n background-image: url(\"data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='white' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpath d='M12 2v4M12 18v4M4.93 4.93l2.83 2.83M16.24 16.24l2.83 2.83M2 12h4M18 12h4M4.93 19.07l2.83-2.83M16.24 7.76l2.83-2.83'/%3E%3C/svg%3E\");\n width: 20px; /* Slightly smaller icon */\n height: 20px;\n background-repeat: no-repeat;\n background-size: contain;\n position: absolute;\n left: 16px; /* Slightly adjusted */\n top: 50%;\n transform: translateY(-50%);\n display: none;\n}\n\n.improve-button.loading:before {\n display: block;\n animation: spin 1.5s linear infinite;\n}\n\n@keyframes spin {\n 0% { transform: translateY(-50%) rotate(0deg); }\n 100% { transform: translateY(-50%) rotate(360deg); }\n}\n\n/* Ping Animation */\n.ping-animation {\n position: absolute;\n display: flex;\n width: 12px;\n height: 12px;\n top: 0;\n right: 0;\n}\n\n.ping-circle {\n position: absolute;\n display: inline-flex;\n width: 100%;\n height: 100%;\n border-radius: 50%;\n background-color: #38BDF8;\n opacity: 0.75;\n animation: ping 1.5s cubic-bezier(0, 0, 0.2, 1) infinite;\n}\n\n.ping-dot {\n position: relative;\n display: inline-flex;\n width: 12px;\n height: 12px;\n border-radius: 50%;\n background-color: #0EA5E9;\n}\n\n@keyframes ping {\n 75%, 100% {\n transform: scale(2);\n opacity: 0;\n }\n}\n\n/* Instruction hover effects 
*/\n.instruction-item:hover .instruction-delete-btn {\n display: flex !important;\n}\n\n/* Add some subtle animations */\n@keyframes fadeIn {\n from { opacity: 0; transform: translateY(20px); }\n to { opacity: 1; transform: translateY(0); }\n}\n\n/* Better center alignment for the recipe card */\n.recipe-card-container {\n display: flex;\n justify-content: center;\n width: 100%;\n position: relative;\n z-index: 1;\n margin: 0 auto;\n box-sizing: border-box;\n}\n\n/* Add Buttons */\n.add-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 8px;\n padding: 10px 16px;\n cursor: pointer;\n font-weight: 500;\n display: inline-block;\n font-size: 14px;\n margin-bottom: 0;\n}\n\n.add-step-button {\n background-color: transparent;\n color: #FF5722;\n border: 1px dashed #FF5722;\n border-radius: 6px;\n padding: 6px 12px;\n cursor: pointer;\n font-weight: 500;\n font-size: 13px;\n}\n\n/* Section Headers */\n.section-header {\n display: flex;\n justify-content: space-between;\n align-items: center;\n margin-bottom: 12px;\n}", + "language": "css", + "type": "file" + }, + { + "name": "README.mdx", + "content": "# 🍳 Shared State Recipe Creator\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **shared state** functionality - a powerful\nfeature that enables bidirectional data flow between:\n\n1. **Frontend → Agent**: UI controls update the agent's context in real-time\n2. **Agent → Frontend**: The Copilot's recipe creations instantly update the UI\n components\n\nIt's like having a cooking buddy who not only listens to what you want but also\nupdates your recipe card as you chat - no refresh needed! 
✨\n\n## How to Interact\n\nMix and match any of these parameters (or none at all - it's up to you!):\n\n- **Skill Level**: Beginner to expert 👨‍🍳\n- **Cooking Time**: Quick meals or slow cooking ⏱️\n- **Special Preferences**: Dietary needs, flavor profiles, health goals 🥗\n- **Ingredients**: Items you want to include 🧅🥩🍄\n- **Instructions**: Any specific steps\n\nThen chat with your Copilot chef with prompts like:\n\n- \"I'm a beginner cook. Can you make me a quick dinner?\"\n- \"I need something spicy with chicken that takes under 30 minutes!\"\n\n## ✨ Shared State Magic in Action\n\n**What's happening technically:**\n\n- The UI and Copilot agent share the same state object (**Agent State = UI\n State**)\n- Changes from either side automatically update the other\n- Neither side needs to manually request updates from the other\n\n**What you'll see in this demo:**\n\n- Set cooking time to 20 minutes in the UI and watch the Copilot immediately\n respect your time constraint\n- Add ingredients through the UI and see them appear in your recipe\n- When the Copilot suggests new ingredients, watch them automatically appear in\n the UI ingredients list\n- Change your skill level and see how the Copilot adapts its instructions in\n real-time\n\nThis synchronized state creates a seamless experience where the agent always has\nyour current preferences, and any updates to the recipe are instantly reflected\nin both places.\n\nThis shared state pattern can be applied to any application where you want your\nUI and Copilot to work together in perfect harmony!\n", + "language": "markdown", + "type": "file" + }, + { + "name": "shared_state.py", + "content": "\"\"\"Shared State Agent - Recipe collaboration between agent and UI.\"\"\"\n# Force reload - no tools version\nimport os\nimport json\nfrom typing import Dict, Any, List\nfrom enum import Enum\nfrom pydantic import BaseModel, Field\nfrom strands import Agent, tool\nfrom strands.models.gemini import GeminiModel\nfrom 
ag_ui_strands import StrandsAgent, create_strands_app, StrandsAgentConfig, ToolBehavior\n\n\nclass SkillLevel(str, Enum):\n \"\"\"The level of skill required for the recipe.\"\"\"\n BEGINNER = \"Beginner\"\n INTERMEDIATE = \"Intermediate\"\n ADVANCED = \"Advanced\"\n\n\nclass SpecialPreferences(str, Enum):\n \"\"\"Special preferences for the recipe.\"\"\"\n HIGH_PROTEIN = \"High Protein\"\n LOW_CARB = \"Low Carb\"\n SPICY = \"Spicy\"\n BUDGET_FRIENDLY = \"Budget-Friendly\"\n ONE_POT_MEAL = \"One-Pot Meal\"\n VEGETARIAN = \"Vegetarian\"\n VEGAN = \"Vegan\"\n\n\nclass CookingTime(str, Enum):\n \"\"\"The cooking time of the recipe.\"\"\"\n FIVE_MIN = \"5 min\"\n FIFTEEN_MIN = \"15 min\"\n THIRTY_MIN = \"30 min\"\n FORTY_FIVE_MIN = \"45 min\"\n SIXTY_PLUS_MIN = \"60+ min\"\n\n\nclass Ingredient(BaseModel):\n \"\"\"An ingredient.\"\"\"\n icon: str = Field(description=\"Icon: the actual emoji like 🥕\")\n name: str = Field(description=\"The name of the ingredient\")\n amount: str = Field(description=\"The amount of the ingredient\")\n\n\nclass Recipe(BaseModel):\n \"\"\"A recipe.\"\"\"\n title: str = Field(description=\"The title of the recipe\", default=\"Make Your Recipe\")\n skill_level: str = Field(description=\"The skill level required for the recipe\")\n special_preferences: List[str] = Field(description=\"A list of special preferences for the recipe\")\n cooking_time: str = Field(description=\"The cooking time of the recipe\")\n ingredients: List[Dict[str, str]] = Field(\n description=\"\"\"Entire list of ingredients for the recipe, including the new ingredients\n and the ones that are already in the recipe: Icon (emoji like 🥕), name and amount.\n Like so: {\\\"icon\\\": \\\"🥕\\\", \\\"name\\\": \\\"Carrots\\\", \\\"amount\\\": \\\"250g\\\"}\"\"\"\n )\n instructions: List[str] = Field(\n description=\"\"\"Entire list of instructions for the recipe,\n including the new instructions and the ones that are already there\"\"\"\n )\n changes: str = Field(description=\"A 
description of the changes made to the recipe\", default=\"\")\n\n\n@tool\ndef generate_recipe(recipe: Recipe):\n \"\"\"Using the existing (if any) ingredients and instructions, proceed with the recipe to finish it.\n Make sure the recipe is complete. ALWAYS provide the entire recipe, not just the changes.\n \n Args:\n recipe: The complete updated recipe with all fields\n \"\"\"\n # Return success message - the recipe data is captured from tool arguments\n return \"Recipe updated successfully\"\n\n\n# Initialize the recipe state\nINITIAL_RECIPE_STATE = {\n \"title\": \"Make Your Recipe\",\n \"skill_level\": SkillLevel.INTERMEDIATE.value,\n \"special_preferences\": [],\n \"cooking_time\": CookingTime.FORTY_FIVE_MIN.value,\n \"ingredients\": [\n {\"icon\": \"🥕\", \"name\": \"Carrots\", \"amount\": \"3 large, grated\"},\n {\"icon\": \"🌾\", \"name\": \"All-Purpose Flour\", \"amount\": \"2 cups\"},\n ],\n \"instructions\": [\"Preheat oven to 350°F (175°C)\"],\n \"changes\": \"\"\n}\n\n\ndef build_recipe_prompt(input_data, user_message: str) -> str:\n \"\"\"Inject the current recipe state into the prompt.\"\"\"\n state_dict = getattr(input_data, \"state\", None)\n if isinstance(state_dict, dict) and \"recipe\" in state_dict:\n recipe_json = json.dumps(state_dict[\"recipe\"], indent=2)\n return (\n f\"Current recipe state:\\n{recipe_json}\\n\\n\"\n f\"User request: {user_message}\\n\\n\"\n \"Please update the recipe by calling the registered tool.\"\n )\n return user_message\n\n\nasync def recipe_state_from_args(context):\n \"\"\"Emit recipe snapshot as soon as tool arguments are available.\"\"\"\n try:\n tool_input = context.tool_input\n if isinstance(tool_input, str):\n tool_input = json.loads(tool_input)\n recipe_data = tool_input.get(\"recipe\", tool_input)\n return {\"recipe\": recipe_data}\n except Exception:\n return None\n\n\nasync def recipe_state_from_result(context):\n \"\"\"Update recipe state based on tool result payload.\"\"\"\n if 
isinstance(context.result_data, dict):\n return {\"recipe\": context.result_data}\n return None\n\n\nshared_state_config = StrandsAgentConfig(\n state_context_builder=build_recipe_prompt,\n tool_behaviors={\n \"generate_recipe\": ToolBehavior(\n skip_messages_snapshot=True,\n state_from_args=recipe_state_from_args,\n state_from_result=recipe_state_from_result,\n )\n },\n)\n\n\n# Create the Strands agent\nmodel = GeminiModel(\n client_args={\n \"api_key\": os.getenv(\"GOOGLE_API_KEY\", \"your-api-key-here\"),\n },\n model_id=\"gemini-2.5-flash\",\n params={\n \"temperature\": 0.7,\n \"max_output_tokens\": 2048,\n \"top_p\": 0.9,\n \"top_k\": 40\n }\n)\n\nsystem_prompt = \"\"\"You are a helpful recipe assistant. When asked to improve or modify a recipe:\n\n1. Call the generate_recipe tool ONCE with the COMPLETE updated recipe\n2. Include ALL fields: title, skill_level, special_preferences, cooking_time, ingredients, instructions, and changes\n3. After calling the tool, respond to the user with a brief confirmation of what you changed (1-2 sentences)\n4. Do NOT call the tool multiple times in a row\n5. 
Keep existing elements that aren't being changed\n\nBe creative and helpful!\"\"\"\n\nstrands_agent = Agent(\n model=model,\n system_prompt=system_prompt,\n tools=[generate_recipe] # Tool to update recipe state\n)\n\n# Create the AG-UI Strands agent wrapper\nagent = StrandsAgent(\n agent=strands_agent,\n name=\"shared_state\",\n description=\"A recipe assistant that collaborates with you to create amazing recipes\",\n config=shared_state_config,\n)\n\n# Create the FastAPI app\napp = create_strands_app(agent)\n\n", + "language": "python", + "type": "file" + } + ], + "aws-strands::human_in_the_loop": [ + { + "name": "page.tsx", + "content": "\"use client\";\nimport React, { useState, useEffect } from \"react\";\nimport \"@copilotkit/react-ui/styles.css\";\nimport \"./style.css\";\nimport {\n CopilotKit,\n useHumanInTheLoop,\n useLangGraphInterrupt,\n} from \"@copilotkit/react-core\";\nimport { CopilotChat } from \"@copilotkit/react-ui\";\nimport { useTheme } from \"next-themes\";\n\ninterface HumanInTheLoopProps {\n params: Promise<{\n integrationId: string;\n }>;\n}\n\nconst HumanInTheLoop: React.FC = ({ params }) => {\n const { integrationId } = React.use(params);\n\n return (\n \n \n \n );\n};\n\ninterface Step {\n description: string;\n status: \"disabled\" | \"enabled\" | \"executing\";\n}\n\n// Shared UI Components\nconst StepContainer = ({ theme, children }: { theme?: string; children: React.ReactNode }) => (\n
\n \n {children}\n
\n \n);\n\nconst StepHeader = ({\n theme,\n enabledCount,\n totalCount,\n status,\n showStatus = false,\n}: {\n theme?: string;\n enabledCount: number;\n totalCount: number;\n status?: string;\n showStatus?: boolean;\n}) => (\n
\n
\n

\n Select Steps\n

\n
\n
\n {enabledCount}/{totalCount} Selected\n
\n {showStatus && (\n \n {status === \"executing\" ? \"Ready\" : \"Waiting\"}\n
\n )}\n
\n
\n\n \n 0 ? (enabledCount / totalCount) * 100 : 0}%` }}\n />\n \n \n);\n\nconst StepItem = ({\n step,\n theme,\n status,\n onToggle,\n disabled = false,\n}: {\n step: { description: string; status: string };\n theme?: string;\n status?: string;\n onToggle: () => void;\n disabled?: boolean;\n}) => (\n \n \n \n);\n\nconst ActionButton = ({\n variant,\n theme,\n disabled,\n onClick,\n children,\n}: {\n variant: \"primary\" | \"secondary\" | \"success\" | \"danger\";\n theme?: string;\n disabled?: boolean;\n onClick: () => void;\n children: React.ReactNode;\n}) => {\n const baseClasses = \"px-6 py-3 rounded-lg font-semibold transition-all duration-200\";\n const enabledClasses = \"hover:scale-105 shadow-md hover:shadow-lg\";\n const disabledClasses = \"opacity-50 cursor-not-allowed\";\n\n const variantClasses = {\n primary:\n \"bg-gradient-to-r from-purple-500 to-purple-700 hover:from-purple-600 hover:to-purple-800 text-white shadow-lg hover:shadow-xl\",\n secondary:\n theme === \"dark\"\n ? 
\"bg-slate-700 hover:bg-slate-600 text-white border border-slate-600 hover:border-slate-500\"\n : \"bg-gray-100 hover:bg-gray-200 text-gray-800 border border-gray-300 hover:border-gray-400\",\n success:\n \"bg-gradient-to-r from-green-500 to-emerald-600 hover:from-green-600 hover:to-emerald-700 text-white shadow-lg hover:shadow-xl\",\n danger:\n \"bg-gradient-to-r from-red-500 to-red-600 hover:from-red-600 hover:to-red-700 text-white shadow-lg hover:shadow-xl\",\n };\n\n return (\n \n {children}\n \n );\n};\n\nconst DecorativeElements = ({\n theme,\n variant = \"default\",\n}: {\n theme?: string;\n variant?: \"default\" | \"success\" | \"danger\";\n}) => (\n <>\n \n \n \n);\nconst InterruptHumanInTheLoop: React.FC<{\n event: { value: { steps: Step[] } };\n resolve: (value: string) => void;\n}> = ({ event, resolve }) => {\n const { theme } = useTheme();\n\n // Parse and initialize steps data\n let initialSteps: Step[] = [];\n if (event.value && event.value.steps && Array.isArray(event.value.steps)) {\n initialSteps = event.value.steps.map((step: any) => ({\n description: typeof step === \"string\" ? step : step.description || \"\",\n status: typeof step === \"object\" && step.status ? step.status : \"enabled\",\n }));\n }\n\n const [localSteps, setLocalSteps] = useState(initialSteps);\n const enabledCount = localSteps.filter((step) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handlePerformSteps = () => {\n const selectedSteps = localSteps\n .filter((step) => step.status === \"enabled\")\n .map((step) => step.description);\n resolve(\"The user selected the following steps: \" + selectedSteps.join(\", \"));\n };\n\n return (\n \n \n\n
\n {localSteps.map((step, index) => (\n handleStepToggle(index)}\n />\n ))}\n
\n\n
\n \n \n Perform Steps\n \n {enabledCount}\n \n \n
\n\n \n
\n );\n};\n\nconst Chat = ({ integrationId }: { integrationId: string }) => {\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // This hook won't do anything for other integrations.\n useLangGraphInterrupt({\n render: ({ event, resolve }) => ,\n });\n useHumanInTheLoop({\n name: \"generate_task_steps\",\n description: \"Generates a list of steps for the user to perform\",\n parameters: [\n {\n name: \"steps\",\n type: \"object[]\",\n attributes: [\n {\n name: \"description\",\n type: \"string\",\n },\n {\n name: \"status\",\n type: \"string\",\n enum: [\"enabled\", \"disabled\", \"executing\"],\n },\n ],\n },\n ],\n // Langgraph uses it's own hook to handle human-in-the-loop interactions via langgraph interrupts,\n // so don't use this action for langgraph integration.\n available: [\"langgraph\", \"langgraph-fastapi\", \"langgraph-typescript\"].includes(integrationId)\n ? \"disabled\"\n : \"enabled\",\n render: ({ args, respond, status }) => {\n return ;\n },\n });\n\n return (\n
\n
\n \n
\n
\n );\n};\n\nconst StepsFeedback = ({ args, respond, status }: { args: any; respond: any; status: any }) => {\n const { theme } = useTheme();\n const [localSteps, setLocalSteps] = useState([]);\n const [accepted, setAccepted] = useState(null);\n\n useEffect(() => {\n if (status === \"executing\" && localSteps.length === 0) {\n setLocalSteps(args.steps);\n }\n }, [status, args.steps, localSteps]);\n\n if (args.steps === undefined || args.steps.length === 0) {\n return <>;\n }\n\n const steps = localSteps.length > 0 ? localSteps : args.steps;\n const enabledCount = steps.filter((step: any) => step.status === \"enabled\").length;\n\n const handleStepToggle = (index: number) => {\n setLocalSteps((prevSteps) =>\n prevSteps.map((step, i) =>\n i === index\n ? { ...step, status: step.status === \"enabled\" ? \"disabled\" : \"enabled\" }\n : step,\n ),\n );\n };\n\n const handleReject = () => {\n if (respond) {\n setAccepted(false);\n respond({ accepted: false });\n }\n };\n\n const handleConfirm = () => {\n if (respond) {\n setAccepted(true);\n respond({ accepted: true, steps: localSteps.filter((step) => step.status === \"enabled\") });\n }\n };\n\n return (\n \n \n\n
\n {steps.map((step: any, index: any) => (\n handleStepToggle(index)}\n disabled={status !== \"executing\"}\n />\n ))}\n
\n\n {/* Action Buttons - Different logic from InterruptHumanInTheLoop */}\n {accepted === null && (\n
\n \n \n Reject\n \n \n \n Confirm\n \n {enabledCount}\n \n \n
\n )}\n\n {/* Result State - Unique to StepsFeedback */}\n {accepted !== null && (\n
\n \n {accepted ? \"✓\" : \"✗\"}\n {accepted ? \"Accepted\" : \"Rejected\"}\n
\n \n )}\n\n \n
\n );\n};\n\nexport default HumanInTheLoop;\n", + "language": "typescript", + "type": "file" + }, + { + "name": "style.css", + "content": ".copilotKitInput {\n border-bottom-left-radius: 0.75rem;\n border-bottom-right-radius: 0.75rem;\n border-top-left-radius: 0.75rem;\n border-top-right-radius: 0.75rem;\n border: 1px solid var(--copilot-kit-separator-color) !important;\n}\n\n.copilotKitChat {\n background-color: #fff !important;\n}\n", + "language": "css", + "type": "file" + }, + { + "name": "README.mdx", + "content": "# 🤝 Human-in-the-Loop Task Planner\n\n## What This Demo Shows\n\nThis demo showcases CopilotKit's **human-in-the-loop** capabilities:\n\n1. **Collaborative Planning**: The Copilot generates task steps and lets you\n decide which ones to perform\n2. **Interactive Decision Making**: Select or deselect steps to customize the\n execution plan\n3. **Adaptive Responses**: The Copilot adapts its execution based on your\n choices, even handling missing steps\n\n## How to Interact\n\nTry these steps to experience the demo:\n\n1. Ask your Copilot to help with a task, such as:\n\n - \"Make me a sandwich\"\n - \"Plan a weekend trip\"\n - \"Organize a birthday party\"\n - \"Start a garden\"\n\n2. Review the suggested steps provided by your Copilot\n\n3. Select or deselect steps using the checkboxes to customize the plan\n\n - Try removing essential steps to see how the Copilot adapts!\n\n4. 
Click \"Execute Plan\" to see the outcome based on your selections\n\n## ✨ Human-in-the-Loop Magic in Action\n\n**What's happening technically:**\n\n- The agent analyzes your request and breaks it down into logical steps\n- These steps are presented to you through a dynamic UI component\n- Your selections are captured as user input\n- The agent considers your choices when executing the plan\n- The agent adapts to missing steps with creative problem-solving\n\n**What you'll see in this demo:**\n\n- The Copilot provides a detailed, step-by-step plan for your task\n- You have complete control over which steps to include\n- If you remove essential steps, the Copilot provides entertaining and creative\n workarounds\n- The final execution reflects your choices, showing how human input shapes the\n outcome\n- Each response is tailored to your specific selections\n\nThis human-in-the-loop pattern creates a powerful collaborative experience where\nboth human judgment and AI capabilities work together to achieve better results\nthan either could alone!\n", + "language": "markdown", + "type": "file" + }, + { + "name": "human_in_the_loop.py", + "content": "\"\"\"Human in the Loop example for AWS Strands.\n\nThis example demonstrates how to create a Strands agent with a generate_task_steps tool\nfor human-in-the-loop interactions, where users can review and approve task steps before execution.\n\"\"\"\nimport os\nfrom pathlib import Path\nfrom typing import List, Literal\nfrom dotenv import load_dotenv\nfrom pydantic import BaseModel, Field\n\n# Suppress OpenTelemetry context warnings\nos.environ[\"OTEL_SDK_DISABLED\"] = \"true\"\nos.environ[\"OTEL_PYTHON_DISABLED_INSTRUMENTATIONS\"] = \"all\"\n\nfrom strands import Agent, tool\nfrom strands.models.gemini import GeminiModel\nfrom ag_ui_strands import StrandsAgent, create_strands_app\n\n# Load environment variables from .env file\nenv_path = Path(__file__).parent.parent.parent / '.env'\nload_dotenv(dotenv_path=env_path)\n\n# Use 
Gemini model\nmodel = GeminiModel(\n client_args={\n \"api_key\": os.getenv(\"GOOGLE_API_KEY\", \"your-api-key-here\"),\n },\n model_id=\"gemini-2.5-flash\",\n params={\n \"temperature\": 0.7,\n \"max_output_tokens\": 2048,\n \"top_p\": 0.9,\n \"top_k\": 40\n }\n)\n\n\nclass Step(BaseModel):\n \"\"\"A single step in a task plan.\"\"\"\n\n description: str = Field(\n ...,\n description=\"A brief description of the step in imperative form\",\n optional=False\n )\n status: Literal[\"enabled\", \"disabled\"] = Field(\n default=\"enabled\",\n description=\"The status of the step\",\n optional=False,\n )\n\n\n@tool\ndef generate_task_steps(\n steps: List[Step],\n) -> str:\n \"\"\"Generate a list of steps for the user to review and approve.\n\n This tool creates a task plan that will be displayed to the user for review.\n The user can enable/disable steps before confirming execution.\n The user can approve or disapprove the plan. That result will come back to you as a json object\n - when disapproved: `{ accepted: false }`\n - when approved: `{ accepted: true, steps: [{{steps that are approved}}] }`\n\n Note that the approved list of steps comes back, it may not be the entire list.\n\n Args:\n steps: A list of 10 step objects, each containing a description and status.\n Each step should be brief (a few words) and in imperative form\n (e.g., \"Dig hole\", \"Open door\", \"Mix ingredients\").\n\n Returns:\n A confirmation message.\n \"\"\"\n return f\"Generated {len(steps)} steps for user review\"\n\n\nstrands_agent = Agent(\n model=model,\n tools=[generate_task_steps],\n system_prompt=\"\"\"You are a task planning assistant specialized in creating clear, actionable step-by-step plans.\n\n**Your Primary Role:**\n- Break down any user request into exactly 10 clear, actionable steps\n- Generate steps that require human review and approval\n- Execute only human-approved steps\n\n**When a user requests help with a task:**\n1. 
ALWAYS use the `generate_task_steps` tool to create a breakdown (default to 10 steps unless told otherwise)\n2. Each step must be:\n - Brief (only a few words)\n - In imperative form (e.g., \"Dig hole\", \"Open door\", \"Mix ingredients\")\n - Clear and actionable\n - Logically ordered from start to finish\n3. Set all steps to \"enabled\" status initially\n4. After the user reviews the plan:\n - If accepted: Briefly confirm the plan (only include the approved steps) and proceed (don't repeat the steps). Do not ask for more clarifying information.\n - If rejected: Ask what they'd like to change (don't call generate_task_steps again until they provide input)\n5. When the user accepts the plan, \"execute\" the plan by repeating the approved steps in order as if you have just done them. Then let the user know you have completed the plan.\n - example: if the user accepts the steps \"Dig hole\", \"Open door\", \"Mix ingredients\", you would respond with \"Digging hole... Opening door... Mixing ingredients...\"\n\n**Important:**\n- NEVER call `generate_task_steps` twice in a row without user input\n- NEVER repeat the list of steps in your response after calling the tool\n- DO provide a brief, creative summary of how you would execute the approved steps\n\"\"\",\n)\n\nagui_agent = StrandsAgent(\n agent=strands_agent,\n name=\"human_in_the_loop\",\n description=\"AWS Strands agent with human-in-the-loop task planning\",\n)\n\napp = create_strands_app(agui_agent, \"/\")\n", + "language": "python", + "type": "file" + } ] } \ No newline at end of file