Skip to content

Commit 0d1e9bf

Browse files
authored
Add prompt.ask cancel handling (#457)
1 parent a3ba378 commit 0d1e9bf

File tree

5 files changed

+55
-38
lines changed

5 files changed

+55
-38
lines changed

examples/basic/mcp_basic_agent/main.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -46,12 +46,13 @@
4646
# or loaded from mcp_agent.config.yaml/mcp_agent.secrets.yaml
4747
app = MCPApp(name="mcp_basic_agent") # settings=settings)
4848

49+
4950
@app.tool()
50-
async def example_usage()->str:
51+
async def example_usage() -> str:
5152
"""
52-
An example function/tool that uses an agent with access to the fetch and filesystem
53-
mcp servers. The agent will read the contents of mcp_agent.config.yaml, print the
54-
first 2 paragraphs of the mcp homepage, and summarize the paragraphs into a tweet.
53+
An example function/tool that uses an agent with access to the fetch and filesystem
54+
mcp servers. The agent will read the contents of mcp_agent.config.yaml, print the
55+
first 2 paragraphs of the mcp homepage, and summarize the paragraphs into a tweet.
5556
The example uses both OpenAI, Anthropic, and simulates a multi-turn conversation.
5657
"""
5758
async with app.run() as agent_app:
@@ -113,6 +114,7 @@ async def example_usage()->str:
113114

114115
return result
115116

117+
116118
async def display_token_summary(app_ctx: MCPApp, agent: Agent | None = None):
117119
"""Display comprehensive token usage summary using app/agent convenience APIs."""
118120
summary: TokenSummary = await app_ctx.get_token_summary()

examples/basic/mcp_model_selector/main.py

Lines changed: 20 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,9 @@
1010
app = MCPApp(name="llm_selector")
1111
model_selector = ModelSelector()
1212

13+
1314
@app.tool
14-
async def example_usage()->str:
15+
async def example_usage() -> str:
1516
"""
1617
An example function/tool that demonstrates MCP's ModelPreferences type
1718
to select a model based on speed, cost, and intelligence priorities.
@@ -31,7 +32,7 @@ async def example_usage()->str:
3132
"Smartest OpenAI model:",
3233
data={"model_preferences": model_preferences, "model": model},
3334
)
34-
result+="Smartest OpenAI model: " + model.name
35+
result += "Smartest OpenAI model: " + model.name
3536

3637
model_preferences = ModelPreferences(
3738
costPriority=0.25, speedPriority=0.25, intelligencePriority=0.5
@@ -44,7 +45,7 @@ async def example_usage()->str:
4445
"Most balanced OpenAI model:",
4546
data={"model_preferences": model_preferences, "model": model},
4647
)
47-
result+="\nMost balanced OpenAI model: " + model.name
48+
result += "\nMost balanced OpenAI model: " + model.name
4849

4950
model_preferences = ModelPreferences(
5051
costPriority=0.3, speedPriority=0.6, intelligencePriority=0.1
@@ -57,7 +58,7 @@ async def example_usage()->str:
5758
"Fastest and cheapest OpenAI model:",
5859
data={"model_preferences": model_preferences, "model": model},
5960
)
60-
result+="\nFastest and cheapest OpenAI model: " + model.name
61+
result += "\nFastest and cheapest OpenAI model: " + model.name
6162

6263
model_preferences = ModelPreferences(
6364
costPriority=0.1, speedPriority=0.1, intelligencePriority=0.8
@@ -70,7 +71,7 @@ async def example_usage()->str:
7071
"Smartest Anthropic model:",
7172
data={"model_preferences": model_preferences, "model": model},
7273
)
73-
result+="\nSmartest Anthropic model: " + model.name
74+
result += "\nSmartest Anthropic model: " + model.name
7475

7576
model_preferences = ModelPreferences(
7677
costPriority=0.8, speedPriority=0.1, intelligencePriority=0.1
@@ -83,7 +84,7 @@ async def example_usage()->str:
8384
"Cheapest Anthropic model:",
8485
data={"model_preferences": model_preferences, "model": model},
8586
)
86-
result+="\nCheapest Anthropic model: " + model.name
87+
result += "\nCheapest Anthropic model: " + model.name
8788

8889
model_preferences = ModelPreferences(
8990
costPriority=0.1,
@@ -101,7 +102,7 @@ async def example_usage()->str:
101102
"Select fastest model between gpt-4o/mini/sonnet/haiku:",
102103
data={"model_preferences": model_preferences, "model": model},
103104
)
104-
result+="\nSelect fastest model between gpt-4o/mini/sonnet/haiku: " + model.name
105+
result += "\nSelect fastest model between gpt-4o/mini/sonnet/haiku: " + model.name
105106

106107
model_preferences = ModelPreferences(
107108
costPriority=0.15,
@@ -119,7 +120,7 @@ async def example_usage()->str:
119120
"Most balanced model between gpt-4o/mini/sonnet/haiku:",
120121
data={"model_preferences": model_preferences, "model": model},
121122
)
122-
result+="\nMost balanced model between gpt-4o/mini/sonnet/haiku: " + model.name
123+
result += "\nMost balanced model between gpt-4o/mini/sonnet/haiku: " + model.name
123124

124125
# Examples showcasing new filtering capabilities
125126
print("\n[bold cyan]Testing new filtering capabilities:[/bold cyan]")
@@ -139,7 +140,7 @@ async def example_usage()->str:
139140
"context_window": model.context_window,
140141
},
141142
)
142-
result+="\nBest model with context window >100k tokens: " + model.name
143+
result += "\nBest model with context window >100k tokens: " + model.name
143144

144145
# Example 2: Models with tool calling support
145146
model_preferences = ModelPreferences(
@@ -156,7 +157,7 @@ async def example_usage()->str:
156157
"tool_calling": model.tool_calling,
157158
},
158159
)
159-
result+="\nBest model with tool calling support: " + model.name
160+
result += "\nBest model with tool calling support: " + model.name
160161

161162
# Example 3: Models with structured outputs (JSON mode)
162163
model_preferences = ModelPreferences(
@@ -173,7 +174,7 @@ async def example_usage()->str:
173174
"structured_outputs": model.structured_outputs,
174175
},
175176
)
176-
result+="\nBest model with structured outputs support: " + model.name
177+
result += "\nBest model with structured outputs support: " + model.name
177178

178179
# Example 4: Models with medium context window (50k-150k tokens) and tool calling
179180
model_preferences = ModelPreferences(
@@ -194,7 +195,9 @@ async def example_usage()->str:
194195
"tool_calling": model.tool_calling,
195196
},
196197
)
197-
result+="\nBest model with 50k-150k context window and tool calling: " + model.name
198+
result += (
199+
"\nBest model with 50k-150k context window and tool calling: " + model.name
200+
)
198201

199202
# Example 5: Fast models with both tool calling and structured outputs
200203
model_preferences = ModelPreferences(
@@ -213,9 +216,12 @@ async def example_usage()->str:
213216
"speed": model.metrics.speed.tokens_per_second,
214217
},
215218
)
216-
result+="\nFastest model with both tool calling and structured outputs: " + model.name
219+
result += (
220+
"\nFastest model with both tool calling and structured outputs: " + model.name
221+
)
222+
223+
return result
217224

218-
return result
219225

220226
if __name__ == "__main__":
221227
import time

src/mcp_agent/cli/cloud/commands/app/status/main.py

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -140,12 +140,15 @@ async def print_mcp_server_details(server_url: str, api_key: str) -> None:
140140
console.print(f"[cyan]{key}[/cyan]: {description}")
141141

142142
if sys.stdout.isatty():
143-
choice = Prompt.ask(
144-
"\nWhat would you like to display?",
145-
choices=list(choices.keys()),
146-
default="0",
147-
show_choices=False,
148-
)
143+
try:
144+
choice = Prompt.ask(
145+
"\nWhat would you like to display?",
146+
choices=list(choices.keys()),
147+
default="0",
148+
show_choices=False,
149+
)
150+
except (EOFError, KeyboardInterrupt):
151+
return
149152
else:
150153
console.print("Choosing 0 (Show All)")
151154
choice = "0"

src/mcp_agent/cli/cloud/commands/app/workflows/main.py

Lines changed: 13 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -118,17 +118,20 @@ async def print_mcp_server_workflow_details(server_url: str, api_key: str) -> No
118118
for key, description in choices.items():
119119
console.print(f"[cyan]{key}[/cyan]: {description}")
120120

121-
choice = Prompt.ask(
122-
"\nWhat would you like to display?",
123-
choices=list(choices.keys()),
124-
default="0",
125-
show_choices=False,
126-
)
121+
try:
122+
choice = Prompt.ask(
123+
"\nWhat would you like to display?",
124+
choices=list(choices.keys()),
125+
default="0",
126+
show_choices=False,
127+
)
127128

128-
if choice in ["0", "1"]:
129-
await print_workflows_list(mcp_client_session)
130-
if choice in ["0", "2"]:
131-
await print_runs_list(mcp_client_session)
129+
if choice in ["0", "1"]:
130+
await print_workflows_list(mcp_client_session)
131+
if choice in ["0", "2"]:
132+
await print_runs_list(mcp_client_session)
133+
except (EOFError, KeyboardInterrupt):
134+
return
132135

133136
except Exception as e:
134137
raise CLIError(

src/mcp_agent/cli/cloud/commands/logger/tail/main.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,10 @@
1717

1818
from mcp_agent.cli.exceptions import CLIError
1919
from mcp_agent.cli.auth import load_credentials, UserCredentials
20-
from mcp_agent.cli.cloud.commands.utils import setup_authenticated_client, resolve_server
20+
from mcp_agent.cli.cloud.commands.utils import (
21+
setup_authenticated_client,
22+
resolve_server,
23+
)
2124
from mcp_agent.cli.core.api_client import UnauthenticatedError
2225
from mcp_agent.cli.utils.ux import print_error
2326
from mcp_agent.cli.mcp_app.api_client import MCPApp, MCPAppConfiguration
@@ -135,7 +138,7 @@ def tail_logs(
135138

136139
client = setup_authenticated_client()
137140
server = resolve_server(client, app_identifier)
138-
141+
139142
try:
140143
if follow:
141144
asyncio.run(
@@ -183,7 +186,7 @@ async def _fetch_logs(
183186
"""Fetch logs one-time via HTTP API."""
184187

185188
# Extract app_id and config_id from the server object
186-
if hasattr(server, 'appId'): # MCPApp
189+
if hasattr(server, "appId"): # MCPApp
187190
app_id = server.appId
188191
config_id = None
189192
else: # MCPAppConfiguration
@@ -264,7 +267,7 @@ async def _stream_logs(
264267
# Get server URL directly from the server object
265268
if not server.appServerInfo or not server.appServerInfo.serverUrl:
266269
raise CLIError("Server URL not available - server may not be deployed")
267-
270+
268271
server_url = server.appServerInfo.serverUrl
269272

270273
parsed = urlparse(server_url)

0 commit comments

Comments (0)