10 changes: 6 additions & 4 deletions examples/basic/mcp_basic_agent/main.py
@@ -46,12 +46,13 @@
# or loaded from mcp_agent.config.yaml/mcp_agent.secrets.yaml
app = MCPApp(name="mcp_basic_agent") # settings=settings)


@app.tool()
async def example_usage()->str:
async def example_usage() -> str:
"""
An example function/tool that uses an agent with access to the fetch and filesystem
mcp servers. The agent will read the contents of mcp_agent.config.yaml, print the
first 2 paragraphs of the mcp homepage, and summarize the paragraphs into a tweet.
An example function/tool that uses an agent with access to the fetch and filesystem
mcp servers. The agent will read the contents of mcp_agent.config.yaml, print the
first 2 paragraphs of the mcp homepage, and summarize the paragraphs into a tweet.
The example uses both OpenAI, Anthropic, and simulates a multi-turn conversation.
"""
async with app.run() as agent_app:
@@ -113,6 +114,7 @@ async def example_usage()->str:

return result


async def display_token_summary(app_ctx: MCPApp, agent: Agent | None = None):
"""Display comprehensive token usage summary using app/agent convenience APIs."""
summary: TokenSummary = await app_ctx.get_token_summary()
34 changes: 20 additions & 14 deletions examples/basic/mcp_model_selector/main.py
@@ -10,8 +10,9 @@
app = MCPApp(name="llm_selector")
model_selector = ModelSelector()


@app.tool
async def example_usage()->str:
async def example_usage() -> str:
"""
An example function/tool that demonstrates MCP's ModelPreferences type
to select a model based on speed, cost, and intelligence priorities.
@@ -31,7 +32,7 @@ async def example_usage()->str:
"Smartest OpenAI model:",
data={"model_preferences": model_preferences, "model": model},
)
result+="Smartest OpenAI model: " + model.name
result += "Smartest OpenAI model: " + model.name

model_preferences = ModelPreferences(
costPriority=0.25, speedPriority=0.25, intelligencePriority=0.5
@@ -44,7 +45,7 @@ async def example_usage()->str:
"Most balanced OpenAI model:",
data={"model_preferences": model_preferences, "model": model},
)
result+="\nMost balanced OpenAI model: " + model.name
result += "\nMost balanced OpenAI model: " + model.name

model_preferences = ModelPreferences(
costPriority=0.3, speedPriority=0.6, intelligencePriority=0.1
@@ -57,7 +58,7 @@ async def example_usage()->str:
"Fastest and cheapest OpenAI model:",
data={"model_preferences": model_preferences, "model": model},
)
result+="\nFastest and cheapest OpenAI model: " + model.name
result += "\nFastest and cheapest OpenAI model: " + model.name

model_preferences = ModelPreferences(
costPriority=0.1, speedPriority=0.1, intelligencePriority=0.8
@@ -70,7 +71,7 @@ async def example_usage()->str:
"Smartest Anthropic model:",
data={"model_preferences": model_preferences, "model": model},
)
result+="\nSmartest Anthropic model: " + model.name
result += "\nSmartest Anthropic model: " + model.name

model_preferences = ModelPreferences(
costPriority=0.8, speedPriority=0.1, intelligencePriority=0.1
@@ -83,7 +84,7 @@ async def example_usage()->str:
"Cheapest Anthropic model:",
data={"model_preferences": model_preferences, "model": model},
)
result+="\nCheapest Anthropic model: " + model.name
result += "\nCheapest Anthropic model: " + model.name

model_preferences = ModelPreferences(
costPriority=0.1,
@@ -101,7 +102,7 @@ async def example_usage()->str:
"Select fastest model between gpt-4o/mini/sonnet/haiku:",
data={"model_preferences": model_preferences, "model": model},
)
result+="\nSelect fastest model between gpt-4o/mini/sonnet/haiku: " + model.name
result += "\nSelect fastest model between gpt-4o/mini/sonnet/haiku: " + model.name

model_preferences = ModelPreferences(
costPriority=0.15,
@@ -119,7 +120,7 @@ async def example_usage()->str:
"Most balanced model between gpt-4o/mini/sonnet/haiku:",
data={"model_preferences": model_preferences, "model": model},
)
result+="\nMost balanced model between gpt-4o/mini/sonnet/haiku: " + model.name
result += "\nMost balanced model between gpt-4o/mini/sonnet/haiku: " + model.name

# Examples showcasing new filtering capabilities
print("\n[bold cyan]Testing new filtering capabilities:[/bold cyan]")
@@ -139,7 +140,7 @@ async def example_usage()->str:
"context_window": model.context_window,
},
)
result+="\nBest model with context window >100k tokens: " + model.name
result += "\nBest model with context window >100k tokens: " + model.name

# Example 2: Models with tool calling support
model_preferences = ModelPreferences(
@@ -156,7 +157,7 @@ async def example_usage()->str:
"tool_calling": model.tool_calling,
},
)
result+="\nBest model with tool calling support: " + model.name
result += "\nBest model with tool calling support: " + model.name

# Example 3: Models with structured outputs (JSON mode)
model_preferences = ModelPreferences(
@@ -173,7 +174,7 @@ async def example_usage()->str:
"structured_outputs": model.structured_outputs,
},
)
result+="\nBest model with structured outputs support: " + model.name
result += "\nBest model with structured outputs support: " + model.name

# Example 4: Models with medium context window (50k-150k tokens) and tool calling
model_preferences = ModelPreferences(
@@ -194,7 +195,9 @@ async def example_usage()->str:
"tool_calling": model.tool_calling,
},
)
result+="\nBest model with 50k-150k context window and tool calling: " + model.name
result += (
"\nBest model with 50k-150k context window and tool calling: " + model.name
)

# Example 5: Fast models with both tool calling and structured outputs
model_preferences = ModelPreferences(
@@ -213,9 +216,12 @@ async def example_usage()->str:
"speed": model.metrics.speed.tokens_per_second,
},
)
result+="\nFastest model with both tool calling and structured outputs: " + model.name
result += (
"\nFastest model with both tool calling and structured outputs: " + model.name
)

return result

return result

if __name__ == "__main__":
import time
51 changes: 27 additions & 24 deletions src/mcp_agent/cli/cloud/commands/app/workflows/main.py
@@ -5,11 +5,9 @@

import typer
from rich.console import Group
from rich.padding import Padding
from rich.panel import Panel
from rich.prompt import Prompt
from rich.syntax import Syntax
from rich.table import Table
from rich.text import Text

from mcp_agent.cli.auth import load_api_key_credentials
@@ -245,16 +243,12 @@ def get_start_time(run: WorkflowRun):
reverse=True,
)

table = Table(title="Workflow Runs", show_lines=False, border_style="blue")
table.add_column("Name", style="white", overflow="fold")
table.add_column("Workflow ID", style="bold cyan", no_wrap=True)
table.add_column("Run ID", style="blue", overflow="fold")
table.add_column("Status", overflow="fold")
table.add_column("Start Time", style="magenta", overflow="fold")
table.add_column("End Time", style="yellow", overflow="fold")
console.print(f"\n[bold blue] Workflow Runs ({len(sorted_runs)})[/bold blue]")

for i, run in enumerate(sorted_runs):
if i > 0:
console.print()

for idx, run in enumerate(sorted_runs):
is_last_row = idx == len(sorted_runs) - 1
start = getattr(run.temporal, "start_time", None)
start_str = (
datetime.fromtimestamp(start).strftime("%Y-%m-%d %H:%M:%S")
@@ -271,22 +265,31 @@ def get_start_time(run: WorkflowRun):

status = run.status.lower()
if status == "completed":
status_text = f"[green]{status}[/green]"
elif status == "error":
status_text = f"[red]{status}[/red]"
status_text = f"[green]🟢 {status}[/green]"
elif status == "error" or status == "failed":
status_text = f"[red]🔴 {status}[/red]"
elif status == "running":
status_text = f"[yellow]🔄 {status}[/yellow]"
else:
status_text = status

table.add_row(
run.name or "-",
run.temporal.workflow_id if run.temporal else "N/A",
Padding(run.id, (0, 0, 0 if is_last_row else 1, 0)),
status_text,
start_str,
end_str,
status_text = f"❓ {status}"

console.print(
f"[bold cyan]{run.name or 'Unnamed Workflow'}[/bold cyan] {status_text}"
)
console.print(f" Run ID: {run.id}")

if run.temporal and run.temporal.workflow_id:
console.print(f" Workflow ID: {run.temporal.workflow_id}")

console.print(f" Started: {start_str}")
if end_str != "N/A":
console.print(f" Completed: {end_str}")

console.print(table)
# Show execution time if available
if hasattr(run.temporal, "execution_time") and run.temporal.execution_time:
duration = end - start if (start and end) else None
if duration:
console.print(f" Duration: {duration:.2f}s")

except Exception as e:
print_error(f"Error fetching workflow runs: {str(e)}")
11 changes: 7 additions & 4 deletions src/mcp_agent/cli/cloud/commands/logger/tail/main.py
@@ -17,7 +17,10 @@

from mcp_agent.cli.exceptions import CLIError
from mcp_agent.cli.auth import load_credentials, UserCredentials
from mcp_agent.cli.cloud.commands.utils import setup_authenticated_client, resolve_server
from mcp_agent.cli.cloud.commands.utils import (
setup_authenticated_client,
resolve_server,
)
from mcp_agent.cli.core.api_client import UnauthenticatedError
from mcp_agent.cli.utils.ux import print_error
from mcp_agent.cli.mcp_app.api_client import MCPApp, MCPAppConfiguration
@@ -135,7 +138,7 @@ def tail_logs(

client = setup_authenticated_client()
server = resolve_server(client, app_identifier)

try:
if follow:
asyncio.run(
@@ -183,7 +186,7 @@ async def _fetch_logs(
"""Fetch logs one-time via HTTP API."""

# Extract app_id and config_id from the server object
if hasattr(server, 'appId'): # MCPApp
if hasattr(server, "appId"): # MCPApp
app_id = server.appId
config_id = None
else: # MCPAppConfiguration
@@ -264,7 +267,7 @@ async def _stream_logs(
# Get server URL directly from the server object
if not server.appServerInfo or not server.appServerInfo.serverUrl:
raise CLIError("Server URL not available - server may not be deployed")

server_url = server.appServerInfo.serverUrl

parsed = urlparse(server_url)
22 changes: 20 additions & 2 deletions src/mcp_agent/cli/mcp_app/mcp_client.py
@@ -1,3 +1,4 @@
import ast
Collaborator: What is this used for?

Member Author: For the ast.literal_eval deserialization (stringified python case) below
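A minimal sketch (not part of the PR) of the stringified-Python case this reply refers to: json.loads rejects a Python-repr'd dict with single quotes, while ast.literal_eval parses it without executing arbitrary code. The raw string below is a made-up example.

import ast
import json

# A Python repr of a dict uses single quotes, so it is not valid JSON
raw = "{'kind': 'text', 'value': 'hello'}"

try:
    parsed = json.loads(raw)  # raises json.JSONDecodeError on the single quotes
except json.JSONDecodeError:
    parsed = ast.literal_eval(raw)  # evaluates Python literals only, no code execution

print(parsed["value"])  # -> hello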

import asyncio
import json
from contextlib import asynccontextmanager
@@ -62,10 +63,13 @@ class WorkflowRunState(BaseModel):
class WorkflowRunResult(BaseModel):
"""The result of a workflow run."""

kind: str
"""The kind/type of result returned by the workflow run."""

value: str
"""The value returned by the workflow run, if any."""

metadata: dict
metadata: Optional[dict[str, Any]] = None
"""Metadata associated with the workflow run result."""

start_time: Optional[float] = None
@@ -99,6 +103,9 @@ class WorkflowRunTemporal(BaseModel):
close_time: Optional[float] = None
"""The time when the workflow run completed."""

execution_time: Optional[float] = None
"""The total time taken for the workflow run."""


class WorkflowRun(BaseModel):
"""An execution instance of a workflow definition."""
@@ -187,9 +194,20 @@ async def list_workflow_runs(self) -> ListWorkflowRunsResult:
# Assuming the content is a JSON string representing a WorkflowRun item dict
try:
run_data = json.loads(item.text)
if "result" in run_data and isinstance(run_data["result"], str):
try:
# Could be stringified python dict instead of valid JSON
run_data["result"] = ast.literal_eval(run_data["result"])
except (ValueError, SyntaxError) as e:
try:
run_data["result"] = json.loads(run_data["result"])
except json.JSONDecodeError:
raise ValueError(
f"Invalid workflow run result data: {e}"
) from e
Comment on lines +197 to +207

🛠️ Refactor suggestion

Fix deserialization + normalize result shape for Pydantic.
Great fallback to literal_eval→json. Add a final normalization so result is always a dict with kind/value to avoid ValidationError when servers return plain strings.

                     run_data = json.loads(item.text)
                     if "result" in run_data and isinstance(run_data["result"], str):
                         try:
                             # Could be stringified python dict instead of valid JSON
                             run_data["result"] = ast.literal_eval(run_data["result"])
                         except (ValueError, SyntaxError) as e:
                             try:
                                 run_data["result"] = json.loads(run_data["result"])
                             except json.JSONDecodeError:
                                 raise ValueError(
                                     f"Invalid workflow run result data: {e}"
                                 ) from e
+                    # Normalize result for model compatibility
+                    if "result" in run_data:
+                        res = run_data["result"]
+                        if isinstance(res, str):
+                            run_data["result"] = {"kind": "text", "value": res}
+                        elif isinstance(res, dict):
+                            run_data["result"].setdefault("kind", "text")
@@
-                except json.JSONDecodeError as e:
-                    raise ValueError(f"Invalid workflow run data: {e}") from e
+                except json.JSONDecodeError as e:
+                    raise ValueError(f"Invalid workflow run data: {e}") from e

Also applies to: 210-210
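
A rough sketch of why the normalization matters, using minimal stand-in models (assumed shapes, not the actual classes in mcp_client.py): a bare string result fails Pydantic validation, while a normalized dict with kind/value passes.

from typing import Any, Optional
from pydantic import BaseModel, ValidationError

# Stand-in models with assumed shapes, for illustration only
class WorkflowRunResult(BaseModel):
    kind: str
    value: str
    metadata: Optional[dict[str, Any]] = None

class WorkflowRun(BaseModel):
    id: str
    result: Optional[WorkflowRunResult] = None

run_data = {"id": "run-123", "result": "plain text returned by a server"}

try:
    WorkflowRun(**run_data)  # a bare string cannot populate WorkflowRunResult
except ValidationError:
    run_data["result"] = {"kind": "text", "value": run_data["result"]}
    print(WorkflowRun(**run_data))  # validates after normalization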

runs.append(WorkflowRun(**run_data))
except json.JSONDecodeError as e:
raise ValueError(f"Invalid workflow run data: {e}")
raise ValueError(f"Invalid workflow run data: {e}") from e

return ListWorkflowRunsResult(workflow_runs=runs)
