Skip to content
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions ag2-agents/payment-approval/.env.example
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
OPENAI_API_KEY=your-openai-api-key-here

# Optional
LLM_MODEL=gpt-4o-mini
OPENAI_BASE_URL=https://api.openai.com/v1
26 changes: 26 additions & 0 deletions ag2-agents/payment-approval/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# AG2 Two-Agent Payment Approval

Demonstrates AG2's `human_input_mode="ALWAYS"` pattern as an approval gate
before triggering a Skyfire payment — the first example in this repo requiring
explicit user confirmation before a financial action.

Two agents collaborate:
- **researcher** — investigates the recipient and produces a risk assessment
- **payment_executor** — presents the assessment, pauses for human confirmation,
then executes or aborts based on the response

## Key AG2 Features

- **`human_input_mode="ALWAYS"`** — executor pauses before every response; human
types "yes" to proceed or "no" to abort — no custom routing logic needed
- **Two-agent `initiate_chat`** — researcher hands off to executor via the
natural conversation flow; the shared message history carries the assessment

## Quick Start

```bash
cd ag2-agents/payment-approval
pip install -r requirements.txt
cp .env.example .env
python main.py
```
70 changes: 70 additions & 0 deletions ag2-agents/payment-approval/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
"""
AG2 two-agent payment approval with human-in-the-loop gate.

researcher — investigates the recipient and produces a risk assessment
executor — presents the assessment and waits for explicit human confirmation
before executing the Skyfire payment

human_input_mode="ALWAYS" on executor is the approval gate: the agent
pauses before every response so the human can type "yes" to proceed or
"no" to abort. No custom routing logic required.
"""
import os
from dotenv import load_dotenv
from autogen import ConversableAgent, LLMConfig

load_dotenv()

# Shared LLM configuration for both agents. cache_seed=None disables AG2's
# response caching so every run actually hits the model.
_model_entry = {
    "model": os.getenv("LLM_MODEL", "gpt-4o-mini"),
    "api_key": os.environ["OPENAI_API_KEY"],  # required — fail fast when missing
    "base_url": os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1"),
}
llm_config = LLMConfig(_model_entry, temperature=0.2, cache_seed=None)

def _assessment_done(msg: dict) -> bool:
    # The researcher's turn ends once its sentinel phrase appears.
    return "ASSESSMENT COMPLETE" in (msg.get("content") or "")


# Investigates the recipient and produces a risk assessment for the executor.
researcher = ConversableAgent(
    name="researcher",
    system_message=(
        "You are a payment risk analyst. Investigate the payment recipient using available "
        "tools: check their Fetch.ai address history, reputation, and any known flags. "
        "Produce a concise risk assessment with a clear recommendation (proceed / do not proceed). "
        "End your assessment with ASSESSMENT COMPLETE."
    ),
    llm_config=llm_config,
    is_termination_msg=_assessment_done,
)

def _payment_chat_over(msg: dict) -> bool:
    # The executor's conversation ends once it emits the TERMINATE sentinel.
    return "TERMINATE" in (msg.get("content") or "")


# Presents the assessment and holds the payment until a human confirms.
executor = ConversableAgent(
    name="payment_executor",
    system_message=(
        "You handle payment execution. Present the researcher's risk assessment clearly, "
        "state the exact payment details (recipient, amount, reason), then ask the human "
        "to confirm. If the human approves, call the skyfire_send tool to execute the payment. "
        "If the human declines, acknowledge and terminate. End with TERMINATE."
    ),
    llm_config=llm_config,
    # The approval gate: AG2 pauses before every executor response so the
    # human can type yes/no at the console.
    human_input_mode="ALWAYS",
    is_termination_msg=_payment_chat_over,
)


def run_payment_approval(recipient: str, amount: float, reason: str) -> None:
    """Run the researcher → executor approval conversation for one payment.

    The researcher investigates ``recipient`` and hands its assessment to the
    executor via the shared chat history; the executor then pauses for
    explicit human confirmation before acting.
    """
    request = (
        f"Payment request: {amount} USDC to {recipient} — reason: '{reason}'. "
        f"Investigate the recipient and produce a risk assessment."
    )
    # max_turns bounds the back-and-forth so a confused chat cannot loop forever.
    researcher.initiate_chat(executor, message=request, max_turns=6)


if __name__ == "__main__":
    # Demo payment: a small USDC transfer that still walks the full
    # research → human-approval → execute/abort flow.
    demo_request = {
        "recipient": "alice.fetch",
        "amount": 50.0,
        "reason": "research report delivery",
    }
    run_payment_approval(**demo_request)
2 changes: 2 additions & 0 deletions ag2-agents/payment-approval/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
ag2[openai]>=0.11.0
python-dotenv>=1.0.0
9 changes: 9 additions & 0 deletions ag2-agents/payment-approval/tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
"""
Ensure payment-approval/ is on sys.path so 'import main' works in tests.
"""
import sys
import os

parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if parent not in sys.path:
sys.path.insert(0, parent)
23 changes: 23 additions & 0 deletions ag2-agents/payment-approval/tests/test_payment.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import os
# Provide a dummy key BEFORE any test imports 'main' — main.py reads
# OPENAI_API_KEY at import time and would raise KeyError otherwise.
os.environ.setdefault("OPENAI_API_KEY", "test-key")

def test_agents_instantiate():
    """Both agents import and carry their expected names — no chat started."""
    import main

    assert main.researcher.name == "researcher"
    assert main.executor.name == "payment_executor"

def test_executor_human_input_mode():
    """The approval gate hinges on human_input_mode staying ALWAYS."""
    import main

    assert main.executor.human_input_mode == "ALWAYS"

def test_researcher_termination_condition():
    """Researcher terminates only on its ASSESSMENT COMPLETE sentinel."""
    import main

    done = main.researcher._is_termination_msg
    assert done({"content": "Risk: low. ASSESSMENT COMPLETE"}) is True
    assert done({"content": "Still investigating..."}) is False

def test_executor_termination_condition():
    """Executor terminates only once TERMINATE appears in its message."""
    import main

    finished = main.executor._is_termination_msg
    assert finished({"content": "Payment aborted. TERMINATE"}) is True
    assert finished({"content": "Please confirm."}) is False
12 changes: 12 additions & 0 deletions ag2-agents/research-synthesis-team/.env.example
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
OPENAI_API_KEY=your-openai-api-key-here
AGENTVERSE_API_KEY=your-agentverse-api-key-here

# Optional
LLM_MODEL=gpt-4o-mini
OPENAI_BASE_URL=https://api.openai.com/v1
AGENT_PORT=8008
AGENTVERSE_URL=https://agentverse.ai

# Leave empty to use DuckDuckGo search (default, no API key needed).
# Set to a Fetch.ai MCP gateway URL to use MCP tools instead.
MCP_SERVER_URL=
14 changes: 14 additions & 0 deletions ag2-agents/research-synthesis-team/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
FROM python:3.11-slim

WORKDIR /app

# Install build deps and Python requirements BEFORE copying the source tree:
# with COPY . . first, any source edit invalidated the pip layer and forced a
# full reinstall on every build. --no-install-recommends keeps the image lean.
COPY requirements.txt .

RUN apt-get update && apt-get install -y --no-install-recommends gcc \
    && pip install --no-cache-dir --upgrade pip \
    && pip install --no-cache-dir -r requirements.txt \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

COPY . .

ENV PYTHONUNBUFFERED=1
EXPOSE 8008
CMD ["python", "main.py"]
44 changes: 44 additions & 0 deletions ag2-agents/research-synthesis-team/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# AG2 Research Synthesis Team

![ag2](https://img.shields.io/badge/ag2-00ADD8) ![uagents](https://img.shields.io/badge/uagents-4A90E2) ![a2a](https://img.shields.io/badge/a2a-000000) ![innovationlab](https://img.shields.io/badge/innovationlab-3D8BD3)

A multi-agent research pipeline using [AG2](https://github.com/ag2ai/ag2) (formerly AutoGen)
integrated with the Fetch.ai uAgents ecosystem via the A2A protocol.

## Architecture

Four specialists collaborate under GroupChat with LLM-driven speaker selection, wrapped as
an A2A executor and exposed as a discoverable agent on Agentverse.

```
User / ASI:One / other uAgent
SingleA2AAdapter (port 8008) → Agentverse
AG2ResearchExecutor (A2A AgentExecutor)
GroupChat (AG2)
├── web_researcher — DuckDuckGo search, gathers sources
├── financial_analyst — market data, metrics, trends
├── tech_analyst — technical feasibility, risks
└── synthesizer — final structured report
```

## Quick Start

```bash
pip install -r requirements.txt
cp .env.example .env # add OPENAI_API_KEY and AGENTVERSE_API_KEY
python main.py
```

No additional API keys needed for search — DuckDuckGo is used by default.

To use a Fetch.ai MCP gateway instead, set `MCP_SERVER_URL` in `.env`.

## AG2 Features Demonstrated

- **`GroupChat` with `speaker_selection_method="auto"`** — LLM-driven dynamic speaker selection
- **`DuckDuckGoSearchTool`** — built-in web search, no API key required
- **Native MCP client** — optional override via `MCP_SERVER_URL` for richer tool access
- **`A2A AgentExecutor`** — same integration pattern used by other examples in this repo
47 changes: 47 additions & 0 deletions ag2-agents/research-synthesis-team/agent_executor.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
"""
Wraps the AG2 GroupChat workflow as an A2A AgentExecutor
for use with SingleA2AAdapter (Pattern B).
"""
from a2a.server.agent_execution import AgentExecutor, RequestContext
from a2a.server.events import EventQueue
from a2a.types import Part, TextPart
from a2a.utils import new_agent_text_message
from autogen import LLMConfig
from typing_extensions import override

from workflow import run_research


class AG2ResearchExecutor(AgentExecutor):
    """A2A-compatible executor wrapping the AG2 GroupChat workflow."""

    def __init__(self, llm_config: LLMConfig, mcp_url: str | None = None):
        # mcp_url is optional: None means the workflow uses its default tooling.
        self.llm_config = llm_config
        self.mcp_url = mcp_url

    @staticmethod
    def _first_text(context: RequestContext) -> str:
        """Return the first text part of the incoming A2A message, or ''."""
        for part in context.message.parts:
            if isinstance(part, Part) and isinstance(part.root, TextPart):
                return part.root.text
        return ""

    @override
    async def execute(self, context: RequestContext, event_queue: EventQueue) -> None:
        query = self._first_text(context)

        if not query:
            await event_queue.enqueue_event(
                new_agent_text_message("Error: No message content received.")
            )
            return

        try:
            report = await run_research(query, self.llm_config, self.mcp_url)
            await event_queue.enqueue_event(new_agent_text_message(report))
        except Exception as e:
            # Surface failures to the caller rather than dying silently.
            await event_queue.enqueue_event(
                new_agent_text_message(f"Research failed: {e}")
            )

    @override
    async def cancel(self, context: RequestContext, event_queue: EventQueue) -> None:
        raise Exception("Cancel not supported for this agent executor.")
84 changes: 84 additions & 0 deletions ag2-agents/research-synthesis-team/agents.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
"""
AG2 (formerly AutoGen) research synthesis team.
Four specialists collaborate under GroupChat with LLM-driven speaker selection.
"""
from autogen import AssistantAgent, LLMConfig


def build_agents(llm_config: LLMConfig) -> list[AssistantAgent]:
    """Build the four-specialist research team sharing one LLM config.

    Returns [web_researcher, financial_analyst, tech_analyst, synthesizer];
    only the synthesizer carries a termination check — its TERMINATE sentinel
    ends the group chat.
    """
    researcher_msg = (
        "You are a web research specialist with access to search tools.\n\n"
        "WORKFLOW:\n"
        "1. Use the search tool to find relevant sources on the assigned topic.\n"
        "2. Run at least 2-3 different search queries to cover the topic broadly.\n"
        "3. Compile your findings into a structured response.\n\n"
        "OUTPUT FORMAT (mandatory):\n"
        "- Provide a markdown table of sources found:\n"
        " | # | Title | URL | Key Finding |\n"
        "- Minimum 5 sources with direct links.\n"
        "- Below the table, write a 200-word summary of the most important findings.\n"
        "- Flag any conflicting information between sources."
    )
    finance_msg = (
        "You are a financial analyst. Analyse market data, trends, and economic "
        "implications of the research topic using the sources provided by the "
        "web researcher.\n\n"
        "OUTPUT FORMAT (mandatory):\n"
        "- Provide a financial summary table:\n"
        " | Metric | Value | Source | Trend |\n"
        "- Include: market size, growth rate, key players, funding, and revenue "
        " figures where available. Use 'N/A' when data is unavailable.\n"
        "- Write a 150-word analysis of financial risks and opportunities.\n"
        "- Be quantitative — use numbers, percentages, and dollar amounts."
    )
    tech_msg = (
        "You are a technology analyst. Evaluate the technical landscape of the "
        "research topic using the sources provided by the web researcher.\n\n"
        "OUTPUT FORMAT (mandatory):\n"
        "- Provide a technology assessment table:\n"
        " | Technology/Component | Maturity | Adoption | Risk Level | Notes |\n"
        "- Maturity values: Emerging, Growing, Mature, Declining.\n"
        "- Risk Level: Low, Medium, High.\n"
        "- Write a 150-word analysis covering: technical feasibility, innovation "
        " potential, and key technical challenges.\n"
        "- Identify the top 3 technical risks with mitigation strategies."
    )
    synth_msg = (
        "You are a synthesis expert. Once all specialists have contributed, "
        "produce a final structured report combining all perspectives.\n\n"
        "MANDATORY REPORT STRUCTURE:\n"
        "## Executive Summary\n"
        "3-5 bullet points covering the most important findings.\n\n"
        "## Research Findings\n"
        "Key sources and discoveries from the web researcher.\n\n"
        "## Financial Analysis\n"
        "Market data, financial metrics, and economic outlook.\n\n"
        "## Technical Analysis\n"
        "Technology landscape, maturity assessment, and risks.\n\n"
        "## Conclusions & Recommendations\n"
        "3-5 actionable recommendations ranked by priority.\n\n"
        "## Sources\n"
        "Consolidated list of all sources cited, as [Title](URL).\n\n"
        "RULES:\n"
        "- Do NOT repeat raw data from specialists — synthesize and add insight.\n"
        "- Total report length: 500-800 words.\n"
        "- End your response with TERMINATE."
    )

    # The three plain specialists share an identical construction pattern.
    team = [
        AssistantAgent(name=agent_name, system_message=message, llm_config=llm_config)
        for agent_name, message in (
            ("web_researcher", researcher_msg),
            ("financial_analyst", finance_msg),
            ("tech_analyst", tech_msg),
        )
    ]
    # The synthesizer additionally terminates the chat on its sentinel.
    team.append(
        AssistantAgent(
            name="synthesizer",
            system_message=synth_msg,
            llm_config=llm_config,
            is_termination_msg=lambda m: "TERMINATE" in (m.get("content") or ""),
        )
    )
    return team
43 changes: 43 additions & 0 deletions ag2-agents/research-synthesis-team/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
"""
Fetch.ai uAgent exposing the AG2 research team via A2A protocol (Pattern B).
Discoverable on Agentverse; callable from ASI:One or other uAgents.
"""
import os
from dotenv import load_dotenv
from uagents_adapter import SingleA2AAdapter
from autogen import LLMConfig

from agent_executor import AG2ResearchExecutor

load_dotenv()

# One shared LLM config for the whole GroupChat; cache_seed=None disables
# AG2's response caching so repeated queries are re-answered fresh.
_model_entry = {
    "model": os.getenv("LLM_MODEL", "gpt-4o-mini"),
    "api_key": os.getenv("OPENAI_API_KEY", ""),
    "base_url": os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1"),
}
llm_config = LLMConfig(_model_entry, temperature=0.3, cache_seed=None)

# Wrap the AG2 GroupChat behind the A2A executor interface, then expose it
# on Agentverse through a single-agent adapter.
executor = AG2ResearchExecutor(
    llm_config=llm_config,
    mcp_url=os.getenv("MCP_SERVER_URL"),  # optional: Fetch.ai MCP gateway
)

_description = (
    "Multi-agent research team using AG2 (formerly AutoGen). "
    "Four specialists (web researcher, financial analyst, tech analyst, synthesizer) "
    "collaborate to produce comprehensive research reports on any topic."
)

adapter = SingleA2AAdapter(
    agent_executor=executor,
    name="AG2 Research Synthesis Team",
    description=_description,
    port=int(os.getenv("AGENT_PORT", "8008")),
    agentverse_url=os.getenv("AGENTVERSE_URL", "https://agentverse.ai"),
    mailbox_api_key=os.getenv("AGENTVERSE_API_KEY", ""),
)

if __name__ == "__main__":
    adapter.run()
6 changes: 6 additions & 0 deletions ag2-agents/research-synthesis-team/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
ag2[openai,mcp,duckduckgo]>=0.11.0
a2a-sdk
mcp>=1.0.0
uagents>=0.20.0
uagents-adapter>=0.4.0
python-dotenv>=1.0.0
Loading