Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions apps/dojo/src/agents.ts
Original file line number Diff line number Diff line change
Expand Up @@ -341,6 +341,34 @@ export const agentsIntegrations: AgentIntegrationConfig[] = [
};
},
},
{
  id: "microsoft-agent-framework-python",
  agents: async () => {
    // Each dojo feature is served by the Python backend under a route of the
    // same name; build the HttpAgent for a given feature route.
    const agentFor = (feature: string) =>
      new HttpAgent({ url: `${envVars.agentFrameworkPythonUrl}/${feature}` });

    return {
      agentic_chat: agentFor("agentic_chat"),
      backend_tool_rendering: agentFor("backend_tool_rendering"),
      human_in_the_loop: agentFor("human_in_the_loop"),
      agentic_generative_ui: agentFor("agentic_generative_ui"),
      shared_state: agentFor("shared_state"),
      tool_based_generative_ui: agentFor("tool_based_generative_ui"),
      predictive_state_updates: agentFor("predictive_state_updates"),
    };
  },
},
{
id: "a2a",
agents: async () => {
Expand Down
2 changes: 2 additions & 0 deletions apps/dojo/src/env.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ type envVars = {
crewAiUrl: string;
pydanticAIUrl: string;
adkMiddlewareUrl: string;
agentFrameworkPythonUrl: string;
a2aMiddlewareBuildingsManagementUrl: string;
a2aMiddlewareFinanceUrl: string;
a2aMiddlewareItUrl: string;
Expand Down Expand Up @@ -39,6 +40,7 @@ export default function getEnvVars(): envVars {
crewAiUrl: process.env.CREW_AI_URL || 'http://localhost:9002',
pydanticAIUrl: process.env.PYDANTIC_AI_URL || 'http://localhost:9000',
adkMiddlewareUrl: process.env.ADK_MIDDLEWARE_URL || 'http://localhost:8000',
agentFrameworkPythonUrl: process.env.AGENT_FRAMEWORK_PYTHON_URL || 'http://localhost:8888',
springAiUrl: process.env.SPRING_AI_URL || 'http://localhost:8080',
a2aMiddlewareBuildingsManagementUrl: process.env.A2A_MIDDLEWARE_BUILDINGS_MANAGEMENT_URL || 'http://localhost:9001',
a2aMiddlewareFinanceUrl: process.env.A2A_MIDDLEWARE_FINANCE_URL || 'http://localhost:9002',
Expand Down
2 changes: 1 addition & 1 deletion apps/dojo/src/files.json
Original file line number Diff line number Diff line change
Expand Up @@ -1641,7 +1641,7 @@
},
{
"name": "tool_based_generative_ui.py",
"content": "\"\"\"\nAn example demonstrating tool-based generative UI.\n\"\"\"\n\nfrom crewai.flow.flow import Flow, start\nfrom litellm import completion\nfrom ..sdk import copilotkit_stream, CopilotKitState\n\n\n# This tool generates a haiku on the server.\n# The tool call will be streamed to the frontend as it is being generated.\nGENERATE_HAIKU_TOOL = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"generate_haiku\",\n \"description\": \"Generate a haiku in Japanese and its English translation\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"japanese\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"An array of three lines of the haiku in Japanese\"\n },\n \"english\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"An array of three lines of the haiku in English\"\n },\n \"image_names\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"Names of 3 relevant images from the provided list\"\n }\n },\n \"required\": [\"japanese\", \"english\", \"image_names\"]\n }\n }\n}\n\n\nclass ToolBasedGenerativeUIFlow(Flow[CopilotKitState]):\n \"\"\"\n A flow that demonstrates tool-based generative UI.\n \"\"\"\n\n @start()\n async def chat(self):\n \"\"\"\n The main function handling chat and tool calls.\n \"\"\"\n system_prompt = \"You assist the user in generating a haiku. When generating a haiku using the 'generate_haiku' tool, you MUST also select exactly 3 image filenames from the following list that are most relevant to the haiku's content or theme. Return the filenames in the 'image_names' parameter. Dont provide the relavent image names in your final response to the user. \"\n\n\n # 1. 
Run the model and stream the response\n # Note: In order to stream the response, wrap the completion call in\n # copilotkit_stream and set stream=True.\n response = await copilotkit_stream(\n completion(\n\n # 1.1 Specify the model to use\n model=\"openai/gpt-4o\",\n messages=[\n {\n \"role\": \"system\", \n \"content\": system_prompt\n },\n *self.state.messages\n ],\n\n # 1.2 Bind the available tools to the model\n tools=[ GENERATE_HAIKU_TOOL ],\n\n # 1.3 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n stream=True\n )\n )\n message = response.choices[0].message\n\n # 2. Append the message to the messages in state\n self.state.messages.append(message)\n\n # 3. If there are tool calls, append a tool message to the messages in state\n if message.tool_calls:\n self.state.messages.append(\n {\n \"tool_call_id\": message.tool_calls[0].id,\n \"role\": \"tool\",\n \"content\": \"Haiku generated.\"\n }\n )\n",
"content": "\"\"\"\nAn example demonstrating tool-based generative UI.\n\"\"\"\n\nfrom crewai.flow.flow import Flow, start\nfrom litellm import completion\nfrom ag_ui.core import MessagesSnapshotEvent, EventType\nfrom ..sdk import copilotkit_stream, CopilotKitState\n\n\n# This tool generates a haiku on the server.\n# The tool call will be streamed to the frontend as it is being generated.\nGENERATE_HAIKU_TOOL = {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"generate_haiku\",\n \"description\": \"Generate a haiku in Japanese and its English translation\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"japanese\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"An array of three lines of the haiku in Japanese\"\n },\n \"english\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"An array of three lines of the haiku in English\"\n },\n \"image_names\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\"\n },\n \"description\": \"Names of 3 relevant images from the provided list\"\n }\n },\n \"required\": [\"japanese\", \"english\", \"image_names\"]\n }\n }\n}\n\n\nclass ToolBasedGenerativeUIFlow(Flow[CopilotKitState]):\n \"\"\"\n A flow that demonstrates tool-based generative UI.\n \"\"\"\n\n @start()\n async def chat(self):\n \"\"\"\n The main function handling chat and tool calls.\n \"\"\"\n system_prompt = \"You assist the user in generating a haiku. When generating a haiku using the 'generate_haiku' tool, you MUST also select exactly 3 image filenames from the following list that are most relevant to the haiku's content or theme. Return the filenames in the 'image_names' parameter. Dont provide the relavent image names in your final response to the user. \"\n\n\n # 1. 
Run the model and stream the response\n # Note: In order to stream the response, wrap the completion call in\n # copilotkit_stream and set stream=True.\n response = await copilotkit_stream(\n completion(\n\n # 1.1 Specify the model to use\n model=\"openai/gpt-4o\",\n messages=[\n {\n \"role\": \"system\", \n \"content\": system_prompt\n },\n *self.state.messages\n ],\n\n # 1.2 Bind the available tools to the model\n tools=[ GENERATE_HAIKU_TOOL ],\n\n # 1.3 Disable parallel tool calls to avoid race conditions,\n # enable this for faster performance if you want to manage\n # the complexity of running tool calls in parallel.\n parallel_tool_calls=False,\n stream=True\n )\n )\n message = response.choices[0].message\n\n # 2. Append the message to the messages in state\n self.state.messages.append(message)\n\n # 3. If there are tool calls, append a tool message to the messages in state\n if message.tool_calls:\n self.state.messages.append(\n {\n \"tool_call_id\": message.tool_calls[0].id,\n \"role\": \"tool\",\n \"content\": \"Haiku generated.\"\n }\n )\n \n # 4. Emit MessagesSnapshotEvent to notify frontend about tool result\n yield MessagesSnapshotEvent(\n type=EventType.MESSAGES_SNAPSHOT,\n messages=self.state.messages\n )\n",
"language": "python",
"type": "file"
}
Expand Down
13 changes: 13 additions & 0 deletions apps/dojo/src/menu.ts
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,19 @@ export const menuIntegrations: MenuIntegrationConfig[] = [
"tool_based_generative_ui",
],
},
{
// Menu entry for the Microsoft Agent Framework (Python) integration.
id: "microsoft-agent-framework-python",
name: "Microsoft Agent Framework (Python)",
// Feature routes shown in the dojo menu; each must match an endpoint
// registered for this integration id in agents.ts.
features: [
"agentic_chat",
"backend_tool_rendering",
"human_in_the_loop",
"agentic_generative_ui",
"predictive_state_updates",
"shared_state",
"tool_based_generative_ui",
],
},
{
id: "a2a",
name: "A2A",
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Environment variables for Microsoft Agent Framework examples

# Azure OpenAI Configuration
AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/
AZURE_OPENAI_API_KEY=your_azure_openai_api_key_here
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=your_deployment_name

# OpenAI Configuration (alternative to Azure OpenAI)
# OPENAI_API_KEY=your_openai_api_key_here
# OPENAI_CHAT_MODEL_ID=your_chat_model_id_here

# Server Configuration
PORT=8888
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# Virtual environments
venv/
ENV/
env/
.venv

# Poetry
poetry.lock

# Environment variables
.env
.env.local

# IDEs
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
Thumbs.db

# Logs
*.log
119 changes: 119 additions & 0 deletions integrations/microsoft-agent-framework/python/examples/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,119 @@
# Microsoft Agent Framework AG-UI Integration

This directory contains examples for using the Microsoft Agent Framework with the AG-UI protocol in the Dojo application.

## Prerequisites

- Python 3.10 or higher
- [uv](https://docs.astral.sh/uv/) for dependency management
- An OpenAI API key or Azure OpenAI endpoint

## Installation

1. Install dependencies:

```bash
cd integrations/microsoft-agent-framework/python/examples
uv sync
```

2. Create a `.env` file based on `.env.example`:

```bash
cp .env.example .env
```

3. Add your API credentials to `.env`:

```bash
# For OpenAI
OPENAI_API_KEY=your_api_key_here
OPENAI_CHAT_MODEL_ID=your_model_here

# Or for Azure OpenAI
AZURE_OPENAI_ENDPOINT=your_endpoint_here
AZURE_OPENAI_API_KEY=your_api_key_here
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=your_deployment_here
```

## Running the Examples

### 1. Start the Backend Server

In the examples directory, start the Dojo backend server:

```bash
cd integrations/microsoft-agent-framework/python/examples
uv run dev
```

The server will start on `http://localhost:8888` by default.

### 2. Start the Dojo Frontend

In a separate terminal, start the Dojo web application:

```bash
cd apps/dojo
pnpm dev
```

The Dojo frontend will be available at `http://localhost:3000`.

### 3. Connect to Your Agent

1. Open `http://localhost:3000` in your browser
2. Configure the server URL to `http://localhost:8888`
3. Select "Microsoft Agent Framework (Python)" from the dropdown
4. Start exploring the samples

## Available Endpoints

The server exposes the following example agents demonstrating all 7 AG-UI features:

- `/agentic_chat` - Basic conversational agent with tool calling (Feature 1: Agentic Chat)
- `/backend_tool_rendering` - Agent demonstrating backend tool rendering (Feature 2: Backend Tool Rendering)
- `/human_in_the_loop` - Agent with human-in-the-loop workflows (Feature 3: Human in the Loop)
- `/agentic_generative_ui` - Agent that breaks down tasks into steps with streaming updates (Feature 4: Agentic Generative UI)
- `/tool_based_generative_ui` - Agent that generates custom UI components (Feature 5: Tool-based Generative UI)
- `/shared_state` - Agent with bidirectional state synchronization (Feature 6: Shared State)
- `/predictive_state_updates` - Agent with predictive state updates during tool execution (Feature 7: Predictive State Updates)

## Project Structure

```
examples/
├── agents/
│ ├── agentic_chat/ # Feature 1: Basic chat agent
│ ├── backend_tool_rendering/ # Feature 2: Backend tool rendering
│ ├── human_in_the_loop/ # Feature 3: Human-in-the-loop
│ ├── agentic_generative_ui/ # Feature 4: Streaming state updates
│ ├── tool_based_generative_ui/ # Feature 5: Custom UI components
│ ├── shared_state/ # Feature 6: Bidirectional state sync
│ ├── predictive_state_updates/ # Feature 7: Predictive state updates
│ └── dojo.py # FastAPI application setup
├── pyproject.toml # Dependencies and scripts
├── .env.example # Environment variable template
└── README.md # This file
```

## Development

To add a new example agent:

1. Create a new directory under `agents/`
2. Add an `agent.py` file with your agent implementation
3. Import and register it in `agents/dojo.py`

## Dependencies

This integration uses:

- `agent-framework-ag-ui` - Microsoft Agent Framework AG-UI adapter
- `fastapi` - Web framework for the server
- `uvicorn` - ASGI server
- `python-dotenv` - Environment variable management

## License

MIT
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Agent package."""
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
"""Microsoft Agent Framework Python Dojo Example Server.

This provides a FastAPI application that demonstrates how to use the
Microsoft Agent Framework with the AG-UI protocol. It includes examples for
each of the AG-UI dojo features:
- Agentic Chat
- Human in the Loop
- Backend Tool Rendering
- Agentic Generative UI
- Tool-based Generative UI
- Shared State
- Predictive State Updates

All agent implementations are from the agent-framework-ag-ui package examples.
Reference: https://github.com/microsoft/agent-framework/tree/main/python/packages/ag-ui/examples/agents
"""

import os
import uvicorn
from fastapi import FastAPI
from dotenv import load_dotenv

from agent_framework_ag_ui import add_agent_framework_fastapi_endpoint
from agent_framework_ag_ui_examples.agents import (
simple_agent,
weather_agent,
human_in_the_loop_agent,
task_steps_agent_wrapped,
ui_generator_agent,
recipe_agent,
document_writer_agent,
)

# Load API credentials and server settings from .env before the app is built.
load_dotenv()

app = FastAPI(title="Microsoft Agent Framework Python Dojo")

# Route -> example agent, one pair per AG-UI dojo feature. Registration order
# matches the feature numbering in the module docstring.
_FEATURE_ROUTES = [
    ("/agentic_chat", simple_agent),                        # Agentic Chat
    ("/backend_tool_rendering", weather_agent),             # Backend Tool Rendering
    ("/human_in_the_loop", human_in_the_loop_agent),        # Human in the Loop
    ("/agentic_generative_ui", task_steps_agent_wrapped),   # Agentic Generative UI
    ("/tool_based_generative_ui", ui_generator_agent),      # Tool-based Generative UI
    ("/shared_state", recipe_agent),                        # Shared State
    ("/predictive_state_updates", document_writer_agent),   # Predictive State Updates
]

for _route, _agent in _FEATURE_ROUTES:
    add_agent_framework_fastapi_endpoint(app, _agent, _route)


def main():
    """Entry point: serve the dojo FastAPI app with uvicorn.

    The listening port comes from the PORT environment variable
    (default 8888); the server binds to all interfaces.
    """
    server_port = int(os.environ.get("PORT", "8888"))
    uvicorn.run(app, host="0.0.0.0", port=server_port)


if __name__ == "__main__":
main()
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
[project]
name = "microsoft-agent-framework-dojo"
version = "0.1.0"
description = "Microsoft Agent Framework Python examples for AG-UI Dojo"
authors = [{ name = "Microsoft Agent Framework Team" }]
readme = "README.md"
requires-python = ">=3.10"
# NOTE(review): agents/dojo.py imports fastapi and uvicorn directly, but neither
# is declared here — presumably they arrive transitively via agent-framework-ag-ui.
# Consider declaring them explicitly so the project does not rely on transitive deps.
dependencies = [
    # AG-UI adapter for the Microsoft Agent Framework; also provides the
    # example agents imported by agents/dojo.py.
    "agent-framework-ag-ui>=1.0.0b251106.post1",
    "agent-framework-core>=1.0.0b251105",
    # Used by agents/dojo.py (load_dotenv) to read .env at startup.
    "python-dotenv>=1.0.0",
]

[tool.hatch.metadata]
allow-direct-references = true

[project.scripts]
# `uv run dev` starts the FastAPI server (agents/dojo.py:main).
dev = "agents.dojo:main"

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.build.targets.wheel]
packages = ["agents"]
Loading