Commit b381781

TOOL-519: Implement basic MCP client POC
TOOL-519: Improve client start functions and rework errors.
TOOL-519: Implement mock LLM client prototype and conversation state.
TOOL-519: Add mock prompt message.
TOOL-519: Finalize MCP client MOCK POC.
TOOL-519: fix cargo warnings.
TOOL-519: Minor tweaks.
1 parent 119e817 commit b381781

14 files changed: +577 −210 lines

application/apps/indexer/mcp/Cargo.toml

Lines changed: 11 additions & 5 deletions
@@ -4,20 +4,26 @@ version = "0.1.0"
 edition = "2024"
 
 [lints]
-workspace = true
+workspace = true
 
 [dependencies]
+serde.workspace = true
+anyhow.workspace = true
 axum = { version = "0.7", features = ["macros"] }
 rmcp = { version = "0.11", features = [
+    "client",
+    "macros",
+    "reqwest",
+    "schemars",
     "server",
     "transport-io",
-    "schemars",
-    "transport-streamable-http-server-session",
+    "transport-streamable-http-client-reqwest",
     "transport-streamable-http-server",
+    "transport-streamable-http-server-session",
 ] }
 schemars = "1.1"
 tokio.workspace = true
 tokio-util.workspace = true
 log.workspace = true
-serde.workspace = true
-anyhow.workspace = true
+reqwest = "0.12.25"
+thiserror.workspace = true

application/apps/indexer/mcp/src/client/conversation.rs

Lines changed: 59 additions & 0 deletions

// Simple model for storing the state of a chat conversation inside the MCP client

use rmcp::{
    model::Content,
    serde_json::{Map, Value},
};

#[derive(Debug)]
pub enum ChatMessage {
    ClientToLlm(ClientToLlm),
    LlmToClient(LlmToClient),
}

#[derive(Debug)]
pub enum ClientToLlm {
    SystemPrompt { content: String },
    UserPrompt { content: String },
    ToolResult { content: Vec<Content> },
}

#[derive(Debug)]
pub enum LlmToClient {
    ToolCall {
        tool_name: String,
        arguments: Option<Map<String, Value>>,
    },
    System {
        message: String,
    },
    FinalResponse {
        content: String,
    },
}

pub struct Conversation {
    chat_messages: Vec<ChatMessage>,
    // TODO:[MCP] keep track of steps?
    // TODO:[MCP] conversation ID=?
}

impl Conversation {
    /// Create a new conversation with an initial system prompt
    ///
    /// # Arguments
    /// * `system_prompt`: The system prompt to initialize the conversation with
    pub fn new(system_prompt: String) -> Self {
        Self {
            chat_messages: vec![ChatMessage::ClientToLlm(ClientToLlm::SystemPrompt {
                content: system_prompt,
            })],
        }
    }

    pub fn chat_messages(&self) -> &[ChatMessage] {
        &self.chat_messages
    }

    pub fn add_chat_message(&mut self, message: ChatMessage) {
        self.chat_messages.push(message);
    }
}
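
A minimal usage sketch of this model (hypothetical, not part of the commit; the prompt strings are invented for illustration): the client seeds the conversation with a system prompt, appends the user's prompt, and can then inspect the history.

let mut conversation = Conversation::new("You are chipmunk's log-analysis assistant.".to_string());
conversation.add_chat_message(ChatMessage::ClientToLlm(ClientToLlm::UserPrompt {
    content: "show all ERROR lines".to_string(),
}));
// The history now holds the system prompt plus the user prompt.
assert_eq!(conversation.chat_messages().len(), 2);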

application/apps/indexer/mcp/src/client/llm/mock.rs

Lines changed: 79 additions & 0 deletions

// A mock LLM client for testing purposes.
// It simulates LLM behavior without making actual API calls and without an HTTP client.
// The logic for processing messages is as follows:
// - User prompts received from the chipmunk core cause the mock LLM to emit a ToolCall message:
//   LlmToClient::ToolCall ("apply_search_filter") with the prompt content as filters
// - ToolResult messages are answered with a FinalResponse message containing the tool result

use log::warn;
use rmcp::serde_json::{self, json};
use tokio::time::{Duration, sleep};

use crate::client::conversation::LlmToClient;
use crate::{
    client::{
        conversation::{ChatMessage, ClientToLlm, Conversation},
        llm::LlmClient,
    },
    types::{McpError, SearchFilter},
};

pub struct MockLlmClient;

// Abstraction over LLM clients via the LlmClient trait
impl LlmClient for MockLlmClient {
    async fn process(&self, conversation: &Conversation) -> Result<LlmToClient, McpError> {
        MockLlmClient::process(self, conversation).await
    }
}

impl MockLlmClient {
    // Process a conversation and take the appropriate action based on its last message.
    // For the mock client, the logic is hardcoded to simulate LLM behaviour.
    pub async fn process(&self, conversation: &Conversation) -> Result<LlmToClient, McpError> {
        warn!(
            "🟢 Mock LLM client processing message: {:?}",
            conversation.chat_messages().last()
        );
        match conversation.chat_messages().last() {
            Some(ChatMessage::ClientToLlm(message)) => match message {
                ClientToLlm::SystemPrompt { .. } => Err(McpError::Generic {
                    message: "Mock LLM client received a system prompt; nothing to do".into(),
                }),
                ClientToLlm::UserPrompt { content } => {
                    // Simulate LLM reasoning duration
                    warn!("⏰ Mock LLM client waits 5s ...");
                    sleep(Duration::from_secs(5)).await;

                    let filters = vec![SearchFilter {
                        value: content.clone(),
                        is_regex: false,
                        ignore_case: true,
                        is_word: false,
                    }];

                    let arguments: Option<serde_json::Map<String, serde_json::Value>> =
                        json!({ "filters": filters }).as_object().cloned();

                    // Return a ToolCall
                    Ok(LlmToClient::ToolCall {
                        tool_name: "apply_search_filter".into(),
                        arguments,
                    })
                }
                ClientToLlm::ToolResult { content } => {
                    // Simulate LLM reasoning duration
                    warn!("⏰ Mock LLM client waits 5s ...");
                    sleep(Duration::from_secs(5)).await;

                    // Return a FinalResponse
                    Ok(LlmToClient::FinalResponse {
                        content: format!("Final LLM Answer: Tool result {:?}", content),
                    })
                }
            },
            _ => Err(McpError::Generic {
                message: "Mock LLM client received unsupported request".into(),
            }),
        }
    }
}
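
Putting the pieces together, a driver loop over this mock could look roughly as follows. This is a hypothetical sketch: `run_tool` is an invented stand-in for the real MCP tool invocation and is not part of this commit.

async fn demo(mut conversation: Conversation) -> Result<String, McpError> {
    let llm = MockLlmClient;
    loop {
        match llm.process(&conversation).await? {
            LlmToClient::ToolCall { tool_name, arguments } => {
                // Assumed helper: executes the tool via MCP and returns Vec<Content>.
                let content = run_tool(&tool_name, arguments).await?;
                conversation.add_chat_message(ChatMessage::ClientToLlm(
                    ClientToLlm::ToolResult { content },
                ));
            }
            LlmToClient::System { message } => log::warn!("LLM system message: {message}"),
            LlmToClient::FinalResponse { content } => return Ok(content),
        }
    }
}

With the mock's hardcoded logic, a conversation ending in a user prompt yields one ToolCall, the appended ToolResult yields a FinalResponse, and the loop terminates after two iterations.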

application/apps/indexer/mcp/src/client/llm.rs

Lines changed: 57 additions & 0 deletions

use crate::{
    client::conversation::{Conversation, LlmToClient},
    types::McpError,
};

pub mod mock;

/// Configuration for creating an LLM client
#[derive(Debug, Clone)]
pub enum LlmConfig {
    Mock,
    // Other LLM configurations can be added here. They may need additional parameters such as:
    // - API keys
    // - model names
    // - temperature settings
    // - feature flags, etc.
    // E.g.:
    // OpenAi { api_key: String, model: String },
}

// Trait that all LLM clients must implement.
// Note: an `async fn` in a public trait triggers this warning:
//   use of `async fn` in public traits is discouraged as auto trait bounds cannot be specified
//   note: you can suppress this lint if you plan to use the trait only in your own code, or do not
//   care about auto traits like `Send` on the `Future`
//   note: `#[warn(async_fn_in_trait)]` on by default
// We suppress the warning for now, as all LLM clients are internal to the MCP client module.
#[allow(async_fn_in_trait)]
pub trait LlmClient {
    async fn process(&self, conversation: &Conversation) -> Result<LlmToClient, McpError>;
}

// LLM wrapper struct for abstracting over different LLM clients
pub struct Llm<C: LlmClient> {
    client: C,
}

// Implementation of the LLM wrapper
impl<C: LlmClient> Llm<C> {
    pub fn new(client: C) -> Self {
        Self { client }
    }

    pub async fn process(&self, conversation: &Conversation) -> Result<LlmToClient, McpError> {
        self.client.process(conversation).await
    }
}

// Implementation of LLM creation from configuration for the mock client
// TODO:[MCP] Can this be moved to the client modules? Via trait?
impl Llm<mock::MockLlmClient> {
    pub fn from_config(config: LlmConfig) -> Self {
        match config {
            LlmConfig::Mock => Llm::new(mock::MockLlmClient),
        }
    }
}
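
With the wrapper in place, a client can be constructed from configuration and driven through the uniform process interface (a short sketch using only names defined in this commit):

let llm = Llm::from_config(LlmConfig::Mock);
let conversation = Conversation::new("system prompt".to_string());
// Per the mock's hardcoded logic, a conversation that ends with a system
// prompt is answered with an error.
assert!(llm.process(&conversation).await.is_err());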

application/apps/indexer/mcp/src/client/messages.rs

Lines changed: 8 additions & 2 deletions
@@ -2,10 +2,16 @@
 #[derive(Debug)]
 pub enum McpClientToChipmunk {
     Response { response: String },
+    // TODO:[MCP] add other message types as needed. E.g.:
+    // If the LLM wants to use a tool it should be approved by the user:
+    // ToolApprovalRequest { tool_name, arguments }
 }
 
-/// Messages chipmunk to the MCP client
+/// Messages from chipmunk to the MCP client
 #[derive(Debug, Clone)]
 pub enum McpChipmunkToClient {
-    Prompt { prompt: String },
+    UserPrompt { prompt: String },
+    // TODO:[MCP] add other message types as needed. E.g.:
+    // SystemPrompt { prompt: String },
+    // ToolApprovalResponse { bool }
 }
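
These two enums imply a bidirectional handoff between the chipmunk core and the MCP client. One plausible wiring, sketched here with tokio mpsc channels (the channel setup and names are assumptions, not part of this commit):

use tokio::sync::mpsc;

let (to_client_tx, mut to_client_rx) = mpsc::channel::<McpChipmunkToClient>(16);
let (to_core_tx, mut to_core_rx) = mpsc::channel::<McpClientToChipmunk>(16);

// Core side: submit a user prompt to the MCP client.
to_client_tx
    .send(McpChipmunkToClient::UserPrompt { prompt: "ERROR".to_string() })
    .await
    .expect("MCP client channel closed");

// Client side: consume the prompt and answer with a response.
if let Some(McpChipmunkToClient::UserPrompt { prompt }) = to_client_rx.recv().await {
    to_core_tx
        .send(McpClientToChipmunk::Response { response: format!("handled: {prompt}") })
        .await
        .expect("chipmunk core channel closed");
}

// Core side: receive the response.
if let Some(McpClientToChipmunk::Response { response }) = to_core_rx.recv().await {
    println!("{response}");
}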
