diff --git a/examples/workflows/workflow_intent_classifier/README.md b/examples/workflows/workflow_intent_classifier/README.md
index d09333940..dd5ee58de 100644
--- a/examples/workflows/workflow_intent_classifier/README.md
+++ b/examples/workflows/workflow_intent_classifier/README.md
@@ -51,3 +51,65 @@ Run your MCP Agent app:
 ```bash
 uv run main.py
 ```
+
+## `4` [Beta] Deploy to the cloud
+
+### `a.` Log in to [MCP Agent Cloud](https://docs.mcp-agent.com/cloud/overview)
+
+```bash
+uv run mcp-agent login
+```
+
+### `b.` Update your `mcp_agent.secrets.yaml` to mark your developer secrets (keys)
+
+```yaml
+openai:
+  api_key: !developer_secret
+# Other secrets as needed
+```
+
+### `c.` Deploy your agent with a single command
+```bash
+uv run mcp-agent deploy workflow-intent-classifier
+```
+
+### `d.` Connect to your deployed agent as an MCP server through any MCP client
+
+#### Claude Desktop Integration
+
+Configure Claude Desktop to access your agent servers by updating your `~/.claude-desktop/config.json`:
+
+```json
+"my-agent-server": {
+  "command": "/path/to/npx",
+  "args": [
+    "mcp-remote",
+    "https://[your-agent-server-id].deployments.mcp-agent-cloud.lastmileai.dev/sse",
+    "--header",
+    "Authorization: Bearer ${BEARER_TOKEN}"
+  ],
+  "env": {
+    "BEARER_TOKEN": "your-mcp-agent-cloud-api-token"
+  }
+}
+```
+
+#### MCP Inspector
+
+Use MCP Inspector to explore and test your agent servers:
+
+```bash
+npx @modelcontextprotocol/inspector
+```
+
+Make sure to fill out the following settings:
+
+| Setting | Value |
+|---|---|
+| *Transport Type* | *SSE* |
+| *SSE* | *https://[your-agent-server-id].deployments.mcp-agent-cloud.lastmileai.dev/sse* |
+| *Header Name* | *Authorization* |
+| *Bearer Token* | *your-mcp-agent-cloud-api-token* |
+
+> [!TIP]
+> In the Configuration, change the request timeout to a longer time period. Since your agents are making LLM calls, they are expected to take longer than simple API calls.
diff --git a/examples/workflows/workflow_intent_classifier/main.py b/examples/workflows/workflow_intent_classifier/main.py index c43ad4631..5203c071b 100644 --- a/examples/workflows/workflow_intent_classifier/main.py +++ b/examples/workflows/workflow_intent_classifier/main.py @@ -12,8 +12,15 @@ app = MCPApp(name="intent_classifier") - -async def example_usage(): +@app.tool +async def example_usage() -> str: + ''' + This is an example function/tool call that uses the intent classification workflow. + It uses both the OpenAI embedding intent classifier and the OpenAI LLM intent classifier. + ''' + + results = "" + async with app.run() as intent_app: logger = intent_app.logger context = intent_app.context @@ -35,12 +42,13 @@ async def example_usage(): context=context, ) - results = await embedding_intent_classifier.classify( + output = await embedding_intent_classifier.classify( request="Hello, how are you?", top_k=1, ) - logger.info("Embedding-based Intent classification results:", data=results) + logger.info("Embedding-based Intent classification results:", data=output) + results = "Embedding-based Intent classification results: " + ", ".join(r.intent for r in output) llm_intent_classifier = OpenAILLMIntentClassifier( intents=[ @@ -58,13 +66,15 @@ async def example_usage(): context=context, ) - results = await llm_intent_classifier.classify( + output = await llm_intent_classifier.classify( request="Hello, how are you?", top_k=1, ) - logger.info("LLM-based Intent classification results:", data=results) + logger.info("LLM-based Intent classification results:", data=output) + results += "LLM-based Intent classification results: " + ", ".join(r.intent for r in output) + + return results if __name__ == "__main__": import time