diff --git a/examples/basic/mcp_basic_agent/main.py b/examples/basic/mcp_basic_agent/main.py
index c7626733e..2d65bf724 100644
--- a/examples/basic/mcp_basic_agent/main.py
+++ b/examples/basic/mcp_basic_agent/main.py
@@ -46,12 +46,13 @@
 # or loaded from mcp_agent.config.yaml/mcp_agent.secrets.yaml
 app = MCPApp(name="mcp_basic_agent") # settings=settings)
+
 @app.tool()
-async def example_usage()->str:
+async def example_usage() -> str:
     """
-    An example function/tool that uses an agent with access to the fetch and filesystem
-    mcp servers. The agent will read the contents of mcp_agent.config.yaml, print the
-    first 2 paragraphs of the mcp homepage, and summarize the paragraphs into a tweet.
+    An example function/tool that uses an agent with access to the fetch and filesystem
+    mcp servers. The agent will read the contents of mcp_agent.config.yaml, print the
+    first 2 paragraphs of the mcp homepage, and summarize the paragraphs into a tweet.
     The example uses both OpenAI, Anthropic, and simulates a multi-turn conversation.
     """
     async with app.run() as agent_app:
@@ -113,6 +114,7 @@ async def example_usage()->str:

     return result

+
 async def display_token_summary(app_ctx: MCPApp, agent: Agent | None = None):
     """Display comprehensive token usage summary using app/agent convenience APIs."""
     summary: TokenSummary = await app_ctx.get_token_summary()
diff --git a/examples/basic/mcp_model_selector/main.py b/examples/basic/mcp_model_selector/main.py
index 7c272851c..3289cc847 100644
--- a/examples/basic/mcp_model_selector/main.py
+++ b/examples/basic/mcp_model_selector/main.py
@@ -10,8 +10,9 @@
 app = MCPApp(name="llm_selector")
 model_selector = ModelSelector()
+
 @app.tool
-async def example_usage()->str:
+async def example_usage() -> str:
     """
     An example function/tool that demonstrates MCP's ModelPreferences type to select a model
     based on speed, cost, and intelligence priorities.
@@ -31,7 +32,7 @@ async def example_usage()->str:
         "Smartest OpenAI model:",
         data={"model_preferences": model_preferences, "model": model},
     )
-    result+="Smartest OpenAI model: " + model.name
+    result += "Smartest OpenAI model: " + model.name

     model_preferences = ModelPreferences(
         costPriority=0.25, speedPriority=0.25, intelligencePriority=0.5
@@ -44,7 +45,7 @@ async def example_usage()->str:
         "Most balanced OpenAI model:",
         data={"model_preferences": model_preferences, "model": model},
     )
-    result+="\nMost balanced OpenAI model: " + model.name
+    result += "\nMost balanced OpenAI model: " + model.name

     model_preferences = ModelPreferences(
         costPriority=0.3, speedPriority=0.6, intelligencePriority=0.1
@@ -57,7 +58,7 @@ async def example_usage()->str:
         "Fastest and cheapest OpenAI model:",
         data={"model_preferences": model_preferences, "model": model},
     )
-    result+="\nFastest and cheapest OpenAI model: " + model.name
+    result += "\nFastest and cheapest OpenAI model: " + model.name

     model_preferences = ModelPreferences(
         costPriority=0.1, speedPriority=0.1, intelligencePriority=0.8
@@ -70,7 +71,7 @@ async def example_usage()->str:
         "Smartest Anthropic model:",
         data={"model_preferences": model_preferences, "model": model},
    )
-    result+="\nSmartest Anthropic model: " + model.name
+    result += "\nSmartest Anthropic model: " + model.name

     model_preferences = ModelPreferences(
         costPriority=0.8, speedPriority=0.1, intelligencePriority=0.1
@@ -83,7 +84,7 @@ async def example_usage()->str:
         "Cheapest Anthropic model:",
         data={"model_preferences": model_preferences, "model": model},
     )
-    result+="\nCheapest Anthropic model: " + model.name
+    result += "\nCheapest Anthropic model: " + model.name

     model_preferences = ModelPreferences(
         costPriority=0.1,
@@ -101,7 +102,7 @@ async def example_usage()->str:
         "Select fastest model between gpt-4o/mini/sonnet/haiku:",
         data={"model_preferences": model_preferences, "model": model},
     )
-    result+="\nSelect fastest model between gpt-4o/mini/sonnet/haiku: " + model.name
+    result += "\nSelect fastest model between gpt-4o/mini/sonnet/haiku: " + model.name

     model_preferences = ModelPreferences(
         costPriority=0.15,
@@ -119,7 +120,7 @@ async def example_usage()->str:
         "Most balanced model between gpt-4o/mini/sonnet/haiku:",
         data={"model_preferences": model_preferences, "model": model},
     )
-    result+="\nMost balanced model between gpt-4o/mini/sonnet/haiku: " + model.name
+    result += "\nMost balanced model between gpt-4o/mini/sonnet/haiku: " + model.name

     # Examples showcasing new filtering capabilities
     print("\n[bold cyan]Testing new filtering capabilities:[/bold cyan]")
@@ -139,7 +140,7 @@ async def example_usage()->str:
             "context_window": model.context_window,
         },
     )
-    result+="\nBest model with context window >100k tokens: " + model.name
+    result += "\nBest model with context window >100k tokens: " + model.name

     # Example 2: Models with tool calling support
     model_preferences = ModelPreferences(
@@ -156,7 +157,7 @@ async def example_usage()->str:
             "tool_calling": model.tool_calling,
         },
     )
-    result+="\nBest model with tool calling support: " + model.name
+    result += "\nBest model with tool calling support: " + model.name

     # Example 3: Models with structured outputs (JSON mode)
     model_preferences = ModelPreferences(
@@ -173,7 +174,7 @@ async def example_usage()->str:
             "structured_outputs": model.structured_outputs,
         },
     )
-    result+="\nBest model with structured outputs support: " + model.name
+    result += "\nBest model with structured outputs support: " + model.name

     # Example 4: Models with medium context window (50k-150k tokens) and tool calling
     model_preferences = ModelPreferences(
@@ -194,7 +195,9 @@ async def example_usage()->str:
             "tool_calling": model.tool_calling,
         },
     )
-    result+="\nBest model with 50k-150k context window and tool calling: " + model.name
+    result += (
+        "\nBest model with 50k-150k context window and tool calling: " + model.name
+    )

     # Example 5: Fast models with both tool calling and structured outputs
     model_preferences = ModelPreferences(
@@ -213,9 +216,12 @@ async def example_usage()->str:
             "speed": model.metrics.speed.tokens_per_second,
         },
     )
-    result+="\nFastest model with both tool calling and structured outputs: " + model.name
+    result += (
+        "\nFastest model with both tool calling and structured outputs: " + model.name
+    )
+
+    return result

-    return result

 if __name__ == "__main__":
     import time
diff --git a/src/mcp_agent/cli/cloud/commands/app/workflows/main.py b/src/mcp_agent/cli/cloud/commands/app/workflows/main.py
index 456981383..135d82984 100644
--- a/src/mcp_agent/cli/cloud/commands/app/workflows/main.py
+++ b/src/mcp_agent/cli/cloud/commands/app/workflows/main.py
@@ -5,11 +5,9 @@
 import typer
 from rich.console import Group
-from rich.padding import Padding
 from rich.panel import Panel
 from rich.prompt import Prompt
 from rich.syntax import Syntax
-from rich.table import Table
 from rich.text import Text

 from mcp_agent.cli.auth import load_api_key_credentials
@@ -245,16 +243,12 @@ def get_start_time(run: WorkflowRun):
             reverse=True,
         )

-        table = Table(title="Workflow Runs", show_lines=False, border_style="blue")
-        table.add_column("Name", style="white", overflow="fold")
-        table.add_column("Workflow ID", style="bold cyan", no_wrap=True)
-        table.add_column("Run ID", style="blue", overflow="fold")
-        table.add_column("Status", overflow="fold")
-        table.add_column("Start Time", style="magenta", overflow="fold")
-        table.add_column("End Time", style="yellow", overflow="fold")
+        console.print(f"\n[bold blue] Workflow Runs ({len(sorted_runs)})[/bold blue]")
+
+        for i, run in enumerate(sorted_runs):
+            if i > 0:
+                console.print()

-        for idx, run in enumerate(sorted_runs):
-            is_last_row = idx == len(sorted_runs) - 1
             start = getattr(run.temporal, "start_time", None)
             start_str = (
                 datetime.fromtimestamp(start).strftime("%Y-%m-%d %H:%M:%S")
@@ -271,22 +265,31 @@ def get_start_time(run: WorkflowRun):
             status = run.status.lower()
             if status == "completed":
-                status_text = f"[green]{status}[/green]"
-            elif status == "error":
-                status_text = f"[red]{status}[/red]"
+                status_text = f"[green]🟢 {status}[/green]"
+            elif status == "error" or status == "failed":
+                status_text = f"[red]🔴 {status}[/red]"
+            elif status == "running":
+                status_text = f"[yellow]🔄 {status}[/yellow]"
             else:
-                status_text = status
-
-            table.add_row(
-                run.name or "-",
-                run.temporal.workflow_id if run.temporal else "N/A",
-                Padding(run.id, (0, 0, 0 if is_last_row else 1, 0)),
-                status_text,
-                start_str,
-                end_str,
+                status_text = f"❓ {status}"
+
+            console.print(
+                f"[bold cyan]{run.name or 'Unnamed Workflow'}[/bold cyan] {status_text}"
             )
+            console.print(f" Run ID: {run.id}")
+
+            if run.temporal and run.temporal.workflow_id:
+                console.print(f" Workflow ID: {run.temporal.workflow_id}")
+
+            console.print(f" Started: {start_str}")
+            if end_str != "N/A":
+                console.print(f" Completed: {end_str}")

-        console.print(table)
+            # Show execution time if available
+            if hasattr(run.temporal, "execution_time") and run.temporal.execution_time:
+                duration = end - start if (start and end) else None
+                if duration:
+                    console.print(f" Duration: {duration:.2f}s")

     except Exception as e:
         print_error(f"Error fetching workflow runs: {str(e)}")
diff --git a/src/mcp_agent/cli/cloud/commands/logger/tail/main.py b/src/mcp_agent/cli/cloud/commands/logger/tail/main.py
index 8389946d4..57b3cda09 100644
--- a/src/mcp_agent/cli/cloud/commands/logger/tail/main.py
+++ b/src/mcp_agent/cli/cloud/commands/logger/tail/main.py
@@ -17,7 +17,10 @@
 from mcp_agent.cli.exceptions import CLIError
 from mcp_agent.cli.auth import load_credentials, UserCredentials
-from mcp_agent.cli.cloud.commands.utils import setup_authenticated_client, resolve_server
+from mcp_agent.cli.cloud.commands.utils import (
+    setup_authenticated_client,
+    resolve_server,
+)
 from mcp_agent.cli.core.api_client import UnauthenticatedError
 from mcp_agent.cli.utils.ux import print_error
 from mcp_agent.cli.mcp_app.api_client import MCPApp, MCPAppConfiguration
@@ -135,7 +138,7 @@ def tail_logs(

     client = setup_authenticated_client()
     server = resolve_server(client, app_identifier)
-    
+
     try:
         if follow:
             asyncio.run(
@@ -183,7 +186,7 @@ async def _fetch_logs(
     """Fetch logs one-time via HTTP API."""

     # Extract app_id and config_id from the server object
-    if hasattr(server, 'appId'):  # MCPApp
+    if hasattr(server, "appId"):  # MCPApp
         app_id = server.appId
         config_id = None
     else:  # MCPAppConfiguration
@@ -264,7 +267,7 @@ async def _stream_logs(
     # Get server URL directly from the server object
     if not server.appServerInfo or not server.appServerInfo.serverUrl:
         raise CLIError("Server URL not available - server may not be deployed")
-    
+
     server_url = server.appServerInfo.serverUrl

     parsed = urlparse(server_url)
diff --git a/src/mcp_agent/cli/mcp_app/mcp_client.py b/src/mcp_agent/cli/mcp_app/mcp_client.py
index aacb4b35c..883fcab1b 100644
--- a/src/mcp_agent/cli/mcp_app/mcp_client.py
+++ b/src/mcp_agent/cli/mcp_app/mcp_client.py
@@ -1,3 +1,4 @@
+import ast
 import asyncio
 import json
 from contextlib import asynccontextmanager
@@ -62,10 +63,13 @@ class WorkflowRunState(BaseModel):
 class WorkflowRunResult(BaseModel):
     """The result of a workflow run."""

+    kind: str
+    """The kind/type of result returned by the workflow run."""
+
     value: str
     """The value returned by the workflow run, if any."""

-    metadata: dict
+    metadata: Optional[dict[str, Any]] = None
     """Metadata associated with the workflow run result."""

     start_time: Optional[float] = None
@@ -99,6 +103,9 @@ class WorkflowRunTemporal(BaseModel):
     close_time: Optional[float] = None
     """The time when the workflow run completed."""

+    execution_time: Optional[float] = None
+    """The total time taken for the workflow run."""
+

 class WorkflowRun(BaseModel):
     """An execution instance of a workflow definition."""
@@ -187,9 +194,20 @@ async def list_workflow_runs(self) -> ListWorkflowRunsResult:
                 # Assuming the content is a JSON string representing a WorkflowRun item dict
                 try:
                     run_data = json.loads(item.text)
+                    if "result" in run_data and isinstance(run_data["result"], str):
+                        try:
+                            # Could be stringified python dict instead of valid JSON
+                            run_data["result"] = ast.literal_eval(run_data["result"])
+                        except (ValueError, SyntaxError) as e:
+                            try:
+                                run_data["result"] = json.loads(run_data["result"])
+                            except json.JSONDecodeError:
+                                raise ValueError(
+                                    f"Invalid workflow run result data: {e}"
+                                ) from e
                     runs.append(WorkflowRun(**run_data))
                 except json.JSONDecodeError as e:
-                    raise ValueError(f"Invalid workflow run data: {e}")
+                    raise ValueError(f"Invalid workflow run data: {e}") from e

         return ListWorkflowRunsResult(workflow_runs=runs)