forked from modelcontextprotocol/rust-sdk
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathprompt_stdio.rs
More file actions
423 lines (388 loc) · 14.2 KB
/
prompt_stdio.rs
File metadata and controls
423 lines (388 loc) · 14.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
//! MCP Server demonstrating prompt functionality
//!
//! This example shows how to use prompt macros to create an MCP server
//! that provides various types of prompts to LLM clients.
//!
//! Run with MCP Inspector:
//! ```bash
//! npx @modelcontextprotocol/inspector cargo run -p mcp-server-examples --example servers_prompt_stdio
//! ```
use std::sync::Arc;
use anyhow::Result;
use rmcp::{
ErrorData as McpError, RoleServer, ServerHandler, ServiceExt,
handler::server::{router::prompt::PromptRouter, wrapper::Parameters},
model::*,
prompt, prompt_handler, prompt_router,
schemars::JsonSchema,
service::RequestContext,
transport::stdio,
};
use serde::{Deserialize, Serialize};
use tokio::sync::RwLock;
use tracing_subscriber::{self, EnvFilter};
/// Arguments for the code review prompt
///
/// Deserialized from the client's prompt arguments; the `schemars`
/// descriptions are surfaced to clients as the argument schema.
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
#[schemars(description = "Code review parameters")]
pub struct CodeReviewArgs {
    /// e.g. "rust" or "python"; interpolated verbatim into the reviewer persona.
    #[schemars(description = "Programming language of the code")]
    pub language: String,
    /// Path (or snippet identifier) echoed back in the generated messages.
    #[schemars(description = "Path to the file or code snippet")]
    pub file_path: String,
    /// Optional; the handler defaults to ["correctness", "performance"] when omitted.
    #[schemars(description = "Focus areas for the review")]
    pub focus_areas: Option<Vec<String>>,
}
/// Arguments for the data analysis prompt
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
#[schemars(description = "Data analysis parameters")]
pub struct DataAnalysisArgs {
    /// Free-form label such as "csv" or "json"; not validated by the handler.
    #[schemars(description = "Type of data: 'csv', 'json', 'logs', etc.")]
    pub data_type: String,
    /// Analysis kind; interpolated verbatim into the generated messages.
    #[schemars(description = "What kind of analysis to perform")]
    pub analysis_type: String,
    /// Optional; the handler falls back to "General analysis requested" when omitted.
    #[schemars(description = "Additional context about the data")]
    pub context: Option<String>,
}
/// Arguments for the writing assistant prompt
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
#[schemars(description = "Writing assistant parameters")]
pub struct WritingAssistantArgs {
    /// Kind of deliverable, e.g. "email" or "documentation".
    #[schemars(description = "Type of content: 'email', 'documentation', 'blog', etc.")]
    pub content_type: String,
    /// Who the content is written for; interpolated verbatim.
    #[schemars(description = "Target audience")]
    pub audience: String,
    /// Optional; the handler defaults to "professional" when omitted.
    #[schemars(description = "Writing tone: 'formal', 'casual', 'technical', etc.")]
    pub tone: Option<String>,
    /// Each point produces an extra user/assistant turn in the generated prompt.
    #[schemars(description = "Key points to cover")]
    pub key_points: Vec<String>,
}
/// Arguments for the debug assistant prompt
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
#[schemars(description = "Debug assistant parameters")]
pub struct DebugAssistantArgs {
    /// Error text to diagnose; truncated to 50 chars in the result description.
    #[schemars(description = "Error message or symptom")]
    pub error_message: String,
    /// Must be non-empty — the handler rejects an empty stack with `invalid_params`.
    #[schemars(description = "Technology stack involved")]
    pub stack: Vec<String>,
    /// Optional attempted fixes; when present and non-empty, adds extra turns.
    #[schemars(description = "Steps already tried")]
    pub tried_solutions: Option<Vec<String>>,
}
/// Simple prompt server demonstrating various prompt patterns
#[derive(Clone)]
pub struct PromptServer {
    /// Stores user preferences that can be used in prompts
    // Arc<RwLock<..>> so cloned server handles all share one preference store.
    user_preferences: Arc<RwLock<UserPreferences>>,
    // Router built by the #[prompt_router] macro; dispatches prompt requests by name.
    prompt_router: PromptRouter<PromptServer>,
}
// Per-user settings that prompt handlers interpolate into generated messages.
#[derive(Debug, Clone)]
struct UserPreferences {
    // Natural language for explanations (e.g. "English"); used by learning_path.
    preferred_language: String,
    // Skill tier such as "intermediate"; used by code_review and learning_path.
    expertise_level: String,
}
impl Default for UserPreferences {
fn default() -> Self {
Self {
preferred_language: "English".to_string(),
expertise_level: "intermediate".to_string(),
}
}
}
impl PromptServer {
pub fn new() -> Self {
Self {
user_preferences: Arc::new(RwLock::new(UserPreferences::default())),
prompt_router: Self::prompt_router(),
}
}
}
impl Default for PromptServer {
fn default() -> Self {
Self::new()
}
}
// The #[prompt_router] macro scans the #[prompt]-annotated methods below and
// generates Self::prompt_router(); code is left untouched here because the
// macro depends on these exact signatures and attributes.
#[prompt_router]
impl PromptServer {
    /// Simple greeting prompt without parameters
    #[prompt(
        name = "greeting",
        description = "A simple greeting prompt to start conversations"
    )]
    async fn greeting(&self) -> Vec<PromptMessage> {
        // Fixed two-turn exchange: no arguments, no server state, no failure path.
        vec![
            PromptMessage::new_text(
                PromptMessageRole::User,
                "Hello! I'd like to start our conversation.",
            ),
            PromptMessage::new_text(
                PromptMessageRole::Assistant,
                "Hello! I'm here to help. What would you like to discuss today?",
            ),
        ]
    }

    /// Code review prompt with typed parameters
    ///
    /// Combines caller-supplied arguments with the stored user preferences
    /// and returns a full `GetPromptResult` (description + messages).
    #[prompt(
        name = "code_review",
        description = "Structured code review with language-specific best practices"
    )]
    async fn code_review(
        &self,
        Parameters(args): Parameters<CodeReviewArgs>,
    ) -> Result<GetPromptResult, McpError> {
        // Read shared preferences to tailor the reviewer persona.
        let prefs = self.user_preferences.read().await;
        // Default focus areas when the caller supplies none.
        let focus_areas = args
            .focus_areas
            .unwrap_or_else(|| vec!["correctness".to_string(), "performance".to_string()]);
        let messages = vec![
            PromptMessage::new_text(
                PromptMessageRole::Assistant,
                format!(
                    "You are an expert {} code reviewer. The user's expertise level is {}.",
                    args.language, prefs.expertise_level
                ),
            ),
            PromptMessage::new_text(
                PromptMessageRole::User,
                format!(
                    "Please review the {} code at '{}'. Focus on: {}",
                    args.language,
                    args.file_path,
                    focus_areas.join(", ")
                ),
            ),
            PromptMessage::new_text(
                PromptMessageRole::Assistant,
                format!(
                    "I'll review your {} code focusing on {}. Let me analyze the code at '{}'...",
                    args.language,
                    focus_areas.join(" and "),
                    args.file_path
                ),
            ),
        ];
        Ok(GetPromptResult {
            description: Some(format!(
                "Code review for {} file focusing on {}",
                args.language,
                focus_areas.join(", ")
            )),
            messages,
        })
    }

    /// Data analysis prompt demonstrating context usage
    ///
    /// Shows how a handler can accept the per-request `RequestContext`
    /// in addition to typed parameters.
    #[prompt(
        name = "data_analysis",
        description = "Analyze data with context-aware suggestions"
    )]
    async fn data_analysis(
        &self,
        Parameters(args): Parameters<DataAnalysisArgs>,
        ctx: RequestContext<RoleServer>,
    ) -> Result<Vec<PromptMessage>, McpError> {
        // Log the request for debugging
        tracing::info!("Data analysis prompt requested for {} data", args.data_type);
        // Could use ctx to check for capabilities or metadata
        let _request_id = &ctx.id;
        // Fall back to a generic context string when the caller gave none.
        let context = args
            .context
            .unwrap_or_else(|| "General analysis requested".to_string());
        Ok(vec![
            PromptMessage::new_text(
                PromptMessageRole::User,
                format!(
                    "I have {} data that needs {} analysis. Context: {}",
                    args.data_type, args.analysis_type, context
                ),
            ),
            PromptMessage::new_text(
                PromptMessageRole::Assistant,
                format!(
                    "I'll help you analyze your {} data using {} techniques. Based on your context, \
                     I'll focus on providing actionable insights.",
                    args.data_type, args.analysis_type
                ),
            ),
        ])
    }

    /// Writing assistant with multiple conversation turns
    ///
    /// Returns `GetPromptResult` directly (no `Result`), demonstrating the
    /// infallible-handler form; one user/assistant pair is appended per key point.
    #[prompt(
        name = "writing_assistant",
        description = "Multi-turn writing assistance with style guidance"
    )]
    async fn writing_assistant(
        &self,
        Parameters(args): Parameters<WritingAssistantArgs>,
    ) -> GetPromptResult {
        // Tone is optional; default to "professional".
        let tone = args.tone.unwrap_or_else(|| "professional".to_string());
        let mut messages = vec![
            PromptMessage::new_text(
                PromptMessageRole::Assistant,
                format!(
                    "You are a writing assistant helping create {} content for {}. \
                     Use a {} tone.",
                    args.content_type, args.audience, tone
                ),
            ),
            PromptMessage::new_text(
                PromptMessageRole::User,
                format!(
                    "I need help writing {} for {}. Key points to cover: {}",
                    args.content_type,
                    args.audience,
                    args.key_points.join(", ")
                ),
            ),
            PromptMessage::new_text(
                PromptMessageRole::Assistant,
                "I'll help you create that content. Let me structure it based on your key points.",
            ),
        ];
        // Add a message for each key point
        for (i, point) in args.key_points.iter().enumerate() {
            messages.push(PromptMessage::new_text(
                PromptMessageRole::User,
                format!("For point {}: {}, what would you suggest?", i + 1, point),
            ));
            messages.push(PromptMessage::new_text(
                PromptMessageRole::Assistant,
                format!("For '{}', I recommend...", point),
            ));
        }
        GetPromptResult {
            description: Some(format!(
                "Writing {} for {} audience with {} tone",
                args.content_type, args.audience, tone
            )),
            messages,
        }
    }

    /// Debug assistant demonstrating error handling patterns
    ///
    /// Validates input up front (empty stack -> `invalid_params`) and
    /// conditionally extends the conversation with already-tried solutions.
    #[prompt(
        name = "debug_assistant",
        description = "Interactive debugging help with solution tracking"
    )]
    async fn debug_assistant(
        &self,
        Parameters(args): Parameters<DebugAssistantArgs>,
    ) -> Result<GetPromptResult, McpError> {
        // Guard: a debugging prompt without a stack is unusable — reject early.
        if args.stack.is_empty() {
            return Err(McpError::invalid_params(
                "Technology stack cannot be empty",
                None,
            ));
        }
        let mut messages = vec![
            PromptMessage::new_text(
                PromptMessageRole::Assistant,
                format!(
                    "You are a debugging expert for {}. Help diagnose and fix issues.",
                    args.stack.join(", ")
                ),
            ),
            PromptMessage::new_text(
                PromptMessageRole::User,
                format!(
                    "I'm encountering this error: {}\nStack: {}",
                    args.error_message,
                    args.stack.join(", ")
                ),
            ),
        ];
        // Add tried solutions if any
        if let Some(tried) = args.tried_solutions
            && !tried.is_empty()
        {
            messages.push(PromptMessage::new_text(
                PromptMessageRole::User,
                format!("I've already tried: {}", tried.join(", ")),
            ));
            messages.push(PromptMessage::new_text(
                PromptMessageRole::Assistant,
                "I see you've already attempted some solutions. Let me suggest different approaches.",
            ));
        }
        messages.push(PromptMessage::new_text(
            PromptMessageRole::Assistant,
            "Let's debug this systematically. First, let me understand the error context better.",
        ));
        Ok(GetPromptResult {
            description: Some(format!(
                "Debugging {} error in {}",
                // Keep the description short: first 50 chars of the error message.
                args.error_message.chars().take(50).collect::<String>(),
                // stack is non-empty here (checked above), so first() always yields a value.
                args.stack.first().map(|s| s.as_str()).unwrap_or("unknown")
            )),
            messages,
        })
    }

    /// Learning path prompt that uses server state
    ///
    /// Takes no client arguments; everything comes from the shared
    /// `user_preferences` store.
    #[prompt(
        name = "learning_path",
        description = "Generate a personalized learning path based on user preferences"
    )]
    async fn learning_path(&self) -> Result<Vec<PromptMessage>, McpError> {
        let prefs = self.user_preferences.read().await;
        Ok(vec![
            PromptMessage::new_text(
                PromptMessageRole::Assistant,
                format!(
                    "Create a learning path for someone at {} level who prefers {} language explanations.",
                    prefs.expertise_level, prefs.preferred_language
                ),
            ),
            PromptMessage::new_text(
                PromptMessageRole::User,
                "What should I learn next to improve my programming skills?",
            ),
            PromptMessage::new_text(
                PromptMessageRole::Assistant,
                format!(
                    "Based on your {} expertise level, I recommend the following learning path...",
                    prefs.expertise_level
                ),
            ),
        ])
    }
}
// NOTE(review): #[prompt_handler] presumably generates the prompt list/get
// trait methods from the router — generated code is not visible here.
#[prompt_handler]
impl ServerHandler for PromptServer {
    /// Report server identity, capabilities, and usage instructions to clients.
    fn get_info(&self) -> ServerInfo {
        ServerInfo {
            // Advertise only the prompts capability; no tools/resources here.
            capabilities: ServerCapabilities::builder().enable_prompts().build(),
            server_info: Implementation::from_build_env(),
            instructions: Some(
                "This server provides various prompt templates for code review, data analysis, \
                 writing assistance, debugging help, and personalized learning paths. \
                 All prompts are designed to provide structured, context-aware assistance."
                    .to_string(),
            ),
            ..Default::default()
        }
    }
}
/// Entry point: set up logging, print a usage banner, and serve MCP over stdio.
///
/// IMPORTANT: with the stdio transport, stdout carries the JSON-RPC protocol
/// stream and must contain nothing else. The original code wrote both the
/// banner (`println!`) and tracing output (fmt's default writer) to stdout,
/// which corrupts the session for stdio clients such as the MCP Inspector.
/// Both are routed to stderr here.
#[tokio::main]
async fn main() -> Result<()> {
    // Initialize tracing — explicitly to stderr so logs never mix into the
    // protocol stream on stdout.
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env().add_directive(tracing::Level::INFO.into()))
        .with_writer(std::io::stderr)
        .init();

    // Human-readable banner goes to stderr for the same reason.
    eprintln!("MCP Prompt Server Example");
    eprintln!("=======================");
    eprintln!();
    eprintln!("This server demonstrates various prompt patterns:");
    eprintln!("- Simple prompts without parameters");
    eprintln!("- Prompts with typed parameters");
    eprintln!("- Prompts using server state");
    eprintln!("- Multi-turn conversation prompts");
    eprintln!("- Error handling in prompts");
    eprintln!();
    eprintln!("To test with MCP Inspector:");
    eprintln!(
        "npx @modelcontextprotocol/inspector cargo run -p mcp-server-examples --example servers_prompt_stdio"
    );
    eprintln!();

    // Serve over stdin/stdout and block until the client disconnects.
    let server = PromptServer::new();
    let service = server.serve(stdio()).await.inspect_err(|e| {
        tracing::error!("Server error: {:?}", e);
    })?;
    service.waiting().await?;
    Ok(())
}