
Commit acd9347

Preparing for MCP-Agent Cloud
- rename file to main.py
- remove description from FastMCP
1 parent 130672d · commit acd9347

File tree

  • examples/mcp_agent_server/asyncio

1 file changed: +1 -118 lines


examples/mcp_agent_server/asyncio/basic_agent_server.py renamed to examples/mcp_agent_server/asyncio/main.py

Lines changed: 1 addition & 118 deletions
@@ -28,7 +28,7 @@
 
 # Note: This is purely optional:
 # if not provided, a default FastMCP server will be created by MCPApp using create_mcp_server_for_app()
-mcp = FastMCP(name="basic_agent_server", description="My basic agent server example.")
+mcp = FastMCP(name="basic_agent_server")
 
 # Define the MCPApp instance. The server created for this app will advertise the
 # MCP logging capability and forward structured logs upstream to connected clients.
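The comment in the hunk above notes that passing a FastMCP instance is purely optional. A minimal sketch of that fallback path follows; only create_mcp_server_for_app() is named in the diff itself, so the import paths and the MCPApp constructor usage are assumptions about the rest of this example, not something this commit shows.

# Sketch of the optional path the comment above describes: create no
# FastMCP instance yourself and build the server from the app instead.
# Import paths and MCPApp signature are assumptions; only
# create_mcp_server_for_app() is named in the diff.
from mcp_agent.app import MCPApp
from mcp_agent.server.app_server import create_mcp_server_for_app

app = MCPApp(name="basic_agent_server")   # assumed constructor usage
mcp = create_mcp_server_for_app(app)      # the default server the comment refers to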
@@ -250,123 +250,6 @@ async def grade_story_async(story: str, app_ctx: Optional[AppContext] = None) ->
     return result
 
 
-# Add custom tool to get token usage for a workflow
-@mcp.tool(
-    name="get_token_usage",
-    structured_output=True,
-    description="""
-    Get detailed token usage information for a specific workflow run.
-
-    This provides a comprehensive breakdown of token usage including:
-    - Total tokens used across all LLM calls within the workflow
-    - Breakdown by model provider and specific models
-    - Hierarchical usage tree showing usage at each level (workflow -> agent -> llm)
-    - Total cost estimate based on model pricing
-
-    Args:
-        workflow_id: Optional workflow ID (if multiple workflows have the same name)
-        run_id: Optional ID of the workflow run to get token usage for
-        workflow_name: Optional name of the workflow (used as fallback)
-
-    Returns:
-        Detailed token usage information for the specific workflow run
-    """,
-)
-async def get_workflow_token_usage(
-    workflow_id: str | None = None,
-    run_id: str | None = None,
-    workflow_name: str | None = None,
-) -> Dict[str, Any]:
-    """Get token usage information for a specific workflow run."""
-    context = app.context
-
-    if not context.token_counter:
-        return {
-            "error": "Token counter not available",
-            "message": "Token tracking is not enabled for this application",
-        }
-
-    # Find the specific workflow node
-    workflow_node = await context.token_counter.get_workflow_node(
-        name=workflow_name, workflow_id=workflow_id, run_id=run_id
-    )
-
-    if not workflow_node:
-        return {
-            "error": "Workflow not found",
-            "message": f"Could not find workflow with run_id='{run_id}'",
-        }
-
-    # Get the aggregated usage for this workflow
-    workflow_usage = workflow_node.aggregate_usage()
-
-    # Calculate cost for this workflow
-    workflow_cost = context.token_counter._calculate_node_cost(workflow_node)
-
-    # Build the response
-    result = {
-        "workflow": {
-            "name": workflow_node.name,
-            "run_id": workflow_node.metadata.get("run_id"),
-            "workflow_id": workflow_node.metadata.get("workflow_id"),
-        },
-        "usage": {
-            "input_tokens": workflow_usage.input_tokens,
-            "output_tokens": workflow_usage.output_tokens,
-            "total_tokens": workflow_usage.total_tokens,
-        },
-        "cost": round(workflow_cost, 4),
-        "model_breakdown": {},
-        "usage_tree": workflow_node.to_dict(),
-    }
-
-    # Get model breakdown for this workflow
-    model_usage = {}
-
-    def collect_model_usage(node: TokenNode):
-        """Recursively collect model usage from a node tree"""
-        if node.usage.model_name:
-            model_name = node.usage.model_name
-            provider = node.usage.model_info.provider if node.usage.model_info else None
-
-            # Use tuple as key to handle same model from different providers
-            model_key = (model_name, provider)
-
-            if model_key not in model_usage:
-                model_usage[model_key] = {
-                    "model_name": model_name,
-                    "provider": provider,
-                    "input_tokens": 0,
-                    "output_tokens": 0,
-                    "total_tokens": 0,
-                }
-
-            model_usage[model_key]["input_tokens"] += node.usage.input_tokens
-            model_usage[model_key]["output_tokens"] += node.usage.output_tokens
-            model_usage[model_key]["total_tokens"] += node.usage.total_tokens
-
-        for child in node.children:
-            collect_model_usage(child)
-
-    collect_model_usage(workflow_node)
-
-    # Calculate costs for each model and format for output
-    for (model_name, provider), usage in model_usage.items():
-        cost = context.token_counter.calculate_cost(
-            model_name, usage["input_tokens"], usage["output_tokens"], provider
-        )
-
-        # Create display key with provider info if available
-        display_key = f"{model_name} ({provider})" if provider else model_name
-
-        result["model_breakdown"][display_key] = {
-            **usage,
-            "cost": round(cost, 4),
-        }
-
-    return result
-
-
 async def main():
     parser = argparse.ArgumentParser()
     parser.add_argument(
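Since this commit removes the custom get_token_usage tool from the server, here is a rough sketch of how a similar summary could still be computed in-process. It reuses only the calls visible in the removed code above (get_workflow_node, aggregate_usage, to_dict); the helper name and the idea of passing the MCPApp instance in as a parameter are assumptions, not part of the example.

# Rough sketch: a plain helper that reproduces the core of the deleted
# tool's summary. Only APIs that appear in the removed code are used.
# `app` is assumed to be the MCPApp instance defined in main.py.
from typing import Any, Dict


async def summarize_workflow_tokens(app, run_id: str | None = None) -> Dict[str, Any]:
    context = app.context
    if not context.token_counter:
        return {"error": "Token counter not available"}

    # Same lookup the removed tool performed
    node = await context.token_counter.get_workflow_node(run_id=run_id)
    if not node:
        return {"error": "Workflow not found"}

    usage = node.aggregate_usage()
    return {
        "workflow": node.name,
        "input_tokens": usage.input_tokens,
        "output_tokens": usage.output_tokens,
        "total_tokens": usage.total_tokens,
        "usage_tree": node.to_dict(),
    }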
