Commit f65526f

TOOL-519: Implement basic MCP client POC
TOOL-519: Improve client start functions and rework errors.
TOOL-519: Implement mock LLM client prototype and conversation state.
TOOL-519: Add mock prompt message.
TOOL-519: Finalize MCP client MOCK POC.
TOOL-519: Fix cargo warnings.
TOOL-519: Minor tweaks.
1 parent 119e817 commit f65526f

14 files changed: +596 −210 lines

application/apps/indexer/mcp/Cargo.toml

Lines changed: 11 additions & 5 deletions
@@ -4,20 +4,26 @@ version = "0.1.0"
 edition = "2024"

 [lints]
-workspace = true
+workspace = true

 [dependencies]
+serde.workspace = true
+anyhow.workspace = true
 axum = { version = "0.7", features = ["macros"] }
 rmcp = { version = "0.11", features = [
+    "client",
+    "macros",
+    "reqwest",
+    "schemars",
     "server",
     "transport-io",
-    "schemars",
-    "transport-streamable-http-server-session",
+    "transport-streamable-http-client-reqwest",
     "transport-streamable-http-server",
+    "transport-streamable-http-server-session",
 ] }
 schemars = "1.1"
 tokio.workspace = true
 tokio-util.workspace = true
 log.workspace = true
-serde.workspace = true
-anyhow.workspace = true
+reqwest = "0.12.25"
+thiserror.workspace = true
Lines changed: 59 additions & 0 deletions
@@ -0,0 +1,59 @@
+// Simple model for storing the state of a chat conversation inside the MCP client
+
+use rmcp::{
+    model::Content,
+    serde_json::{Map, Value},
+};
+
+#[derive(Debug)]
+pub enum ChatMessage {
+    ClientToLlm(ClientToLlm),
+    LlmToClient(LlmToClient),
+}
+
+#[derive(Debug)]
+pub enum ClientToLlm {
+    SystemPrompt { content: String },
+    UserPrompt { content: String },
+    ToolResult { content: Vec<Content> },
+}
+
+#[derive(Debug)]
+pub enum LlmToClient {
+    ToolCall {
+        tool_name: String,
+        arguments: Option<Map<String, Value>>,
+    },
+    System {
+        message: String,
+    },
+    FinalResponse {
+        content: String,
+    },
+}
+pub struct Conversation {
+    chat_messages: Vec<ChatMessage>,
+    // TODO:[MCP] keep track of steps?
+    // TODO:[MCP] conversation ID=?
+}
+
+impl Conversation {
+    /// Create a new conversation with an initial system prompt
+    /// # Arguments
+    /// * `system_prompt`: The system prompt to initialize the conversation with
+    pub fn new(system_prompt: String) -> Self {
+        Self {
+            chat_messages: vec![ChatMessage::ClientToLlm(ClientToLlm::SystemPrompt {
+                content: system_prompt,
+            })],
+        }
+    }
+
+    pub fn chat_messages(&self) -> &[ChatMessage] {
+        &self.chat_messages
+    }
+
+    pub fn add_chat_message(&mut self, message: ChatMessage) {
+        self.chat_messages.push(message);
+    }
+}
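For orientation, a minimal usage sketch of the conversation model added above (the client::conversation module): a conversation starts from a system prompt, every later exchange is appended via add_chat_message, and chat_messages() exposes the history for the LLM client to inspect. The snippet is illustrative only and not part of the commit.

// Hypothetical usage sketch, not part of this commit.
use crate::client::conversation::{ChatMessage, ClientToLlm, Conversation};

fn example_conversation() -> Conversation {
    let mut conversation =
        Conversation::new("You are the chipmunk log-analysis assistant.".to_string());

    // A prompt coming from the chipmunk core is appended as a client-to-LLM message.
    conversation.add_chat_message(ChatMessage::ClientToLlm(ClientToLlm::UserPrompt {
        content: "show all ERROR lines".to_string(),
    }));

    // The history now holds the system prompt plus the user prompt.
    assert_eq!(conversation.chat_messages().len(), 2);
    conversation
}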
Lines changed: 81 additions & 0 deletions
@@ -0,0 +1,81 @@
+// A mock LLM client for testing purposes.
+// It simulates LLM behavior without making actual API calls / without an HTTP client.
+// The logic for processing messages is as follows:
+// - User prompts received from the chipmunk core will cause the mock LLM to emit a ToolCall message:
+//   LlmToClient::ToolCall ("apply_search_filter") with the prompt content as filters
+// - ToolResult messages will be answered with a FinalResponse message containing the tool result
+
+use log::warn;
+use rmcp::serde_json::{self, json};
+use tokio::time::{Duration, sleep};
+
+use crate::client::conversation::LlmToClient;
+use crate::{
+    client::{
+        conversation::{ChatMessage, ClientToLlm, Conversation},
+        llm::LlmClient,
+    },
+    types::{McpError, SearchFilter},
+};
+pub struct MockLlmClient;
+
+// Abstraction of LLM clients using the LlmClient trait
+impl LlmClient for MockLlmClient {
+    /// Process a conversation by taking appropriate action based on the last message and return an LLM response.
+    async fn respond(&self, conversation: &Conversation) -> Result<LlmToClient, McpError> {
+        MockLlmClient::respond(self, conversation).await
+    }
+}
+
+impl MockLlmClient {
+    /// Process a conversation, take appropriate action based on the last message in the conversation,
+    /// and return an LLM response.
+    /// For the mock client, we have hardcoded logic to simulate LLM behaviour.
+    pub async fn respond(&self, conversation: &Conversation) -> Result<LlmToClient, McpError> {
+        warn!(
+            "🟢 Mock LLM client processing message: {:?}",
+            conversation.chat_messages().last()
+        );
+        match conversation.chat_messages().last() {
+            Some(ChatMessage::ClientToLlm(message)) => match message {
+                ClientToLlm::SystemPrompt { .. } => Err(McpError::Generic {
+                    message: "Mock LLM client received a system prompt; nothing to do".into(),
+                }),
+                ClientToLlm::UserPrompt { content } => {
+                    // Simulate LLM reasoning duration
+                    warn!("⏰ Mock LLM client waits 5s ...");
+                    sleep(Duration::from_secs(5)).await;
+
+                    let filters = vec![SearchFilter {
+                        value: content.clone(),
+                        is_regex: false,
+                        ignore_case: true,
+                        is_word: false,
+                    }];
+
+                    let arguments: Option<serde_json::Map<String, serde_json::Value>> =
+                        json!({ "filters": filters }).as_object().cloned();
+
+                    // Return a ToolCall
+                    Ok(LlmToClient::ToolCall {
+                        tool_name: "apply_search_filter".into(),
+                        arguments,
+                    })
+                }
+                ClientToLlm::ToolResult { content } => {
+                    // Simulate LLM reasoning duration
+                    warn!("⏰ Mock LLM client waits 5s ...");
+                    sleep(Duration::from_secs(5)).await;
+
+                    // Return a FinalResponse
+                    Ok(LlmToClient::FinalResponse {
+                        content: format!("Final LLM Response: Tool result {:?}", content),
+                    })
+                }
+            },
+            _ => Err(McpError::Generic {
+                message: "Mock LLM client received unsupported request".into(),
+            }),
+        }
+    }
+}
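The mock client above (client::llm::mock) implies a simple request/response loop: a UserPrompt yields a ToolCall for apply_search_filter, and the ToolResult fed back by the MCP client yields a FinalResponse. A hedged sketch of such a driver loop follows; the types are those added by this commit, while the loop itself and the empty tool result are assumptions about how the MCP client could use the mock, not part of the commit.

// Hypothetical driver loop for the mock client; the surrounding plumbing is assumed.
use crate::client::conversation::{ChatMessage, ClientToLlm, Conversation, LlmToClient};
use crate::client::llm::mock::MockLlmClient;
use crate::types::McpError;

async fn drive_mock(user_prompt: String) -> Result<String, McpError> {
    let llm = MockLlmClient;
    let mut conversation = Conversation::new("system prompt".to_string());
    conversation.add_chat_message(ChatMessage::ClientToLlm(ClientToLlm::UserPrompt {
        content: user_prompt,
    }));

    loop {
        match llm.respond(&conversation).await? {
            LlmToClient::ToolCall { tool_name, arguments } => {
                // In the real client the tool would be invoked over MCP here;
                // this sketch just records the call and an empty tool result.
                conversation.add_chat_message(ChatMessage::LlmToClient(LlmToClient::ToolCall {
                    tool_name,
                    arguments,
                }));
                conversation.add_chat_message(ChatMessage::ClientToLlm(ClientToLlm::ToolResult {
                    content: Vec::new(),
                }));
            }
            LlmToClient::System { message } => {
                conversation.add_chat_message(ChatMessage::LlmToClient(LlmToClient::System {
                    message,
                }));
            }
            LlmToClient::FinalResponse { content } => return Ok(content),
        }
    }
}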
Lines changed: 61 additions & 0 deletions
@@ -0,0 +1,61 @@
+// LLM client abstraction layer
+use crate::{
+    client::conversation::{Conversation, LlmToClient},
+    types::McpError,
+};
+
+pub mod mock;
+
+/// Configuration used for the creation of different LLM clients
+#[derive(Debug, Clone)]
+pub enum LlmConfig {
+    Mock,
+    // Other LLM configurations can be added here. They may need other parameters like:
+    // - API keys
+    // - Model names
+    // - temperature settings
+    // - feature flags, etc.
+    // E.g.:
+    // OpenAi { api_key: String, model: String },
+}
+
+// LLM client trait that LLM clients must implement
+// Note: this causes the following warning:
+//   use of `async fn` in public traits is discouraged as auto trait bounds cannot be specified
+//   note: you can suppress this lint if you plan to use the trait only in your own code, or do not care about auto traits like `Send` on the `Future`
+//   note: `#[warn(async_fn_in_trait)]` on by default
+//   note: `#[warn(async_fn_in_trait)]` on by default
+// We suppress the warning for now as all LLM clients are internal to the MCP client module.
+// Alternatively we would need to use the async-trait crate.
+#[allow(async_fn_in_trait)]
+pub trait LlmClient {
+    /// Process a conversation by taking appropriate action based on the last message and return an LLM response.
+    async fn respond(&self, conversation: &Conversation) -> Result<LlmToClient, McpError>;
+}
+
+// LLM client abstraction wrapper providing a facade for different LLM client implementations
+pub struct Llm<C: LlmClient> {
+    client: C,
+}
+
+// LLM client abstraction wrapper implementation
+impl<C: LlmClient> Llm<C> {
+    pub fn new(client: C) -> Self {
+        Self { client }
+    }
+
+    /// Forward processing of a conversation to the underlying LLM client implementation
+    pub async fn process(&self, conversation: &Conversation) -> Result<LlmToClient, McpError> {
+        self.client.respond(conversation).await
+    }
+}
+
+// Implementation of LLM creation from configuration for the mock client
+// TODO:[MCP] Can this be moved to the client modules? Via trait?
+impl Llm<mock::MockLlmClient> {
+    pub fn from_config(config: LlmConfig) -> Self {
+        match config {
+            LlmConfig::Mock => Llm::new(mock::MockLlmClient),
+        }
+    }
+}
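A brief call-site sketch for the facade above (client::llm): the config selects the backing client, and process forwards the conversation to it. The snippet assumes a caller inside the mcp crate; only the Mock configuration exists at this stage, and the code is not part of the commit.

// Hypothetical call site for the Llm facade; assumed, not part of this commit.
use crate::client::conversation::{ChatMessage, ClientToLlm, Conversation, LlmToClient};
use crate::client::llm::{Llm, LlmConfig};
use crate::types::McpError;

async fn ask_mock() -> Result<LlmToClient, McpError> {
    let llm = Llm::from_config(LlmConfig::Mock);

    let mut conversation = Conversation::new("system prompt".to_string());
    conversation.add_chat_message(ChatMessage::ClientToLlm(ClientToLlm::UserPrompt {
        content: "filter for warnings".to_string(),
    }));

    // With the mock backend this resolves to a ToolCall for "apply_search_filter".
    llm.process(&conversation).await
}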

application/apps/indexer/mcp/src/client/messages.rs

Lines changed: 5 additions & 2 deletions
@@ -2,10 +2,13 @@
 #[derive(Debug)]
 pub enum McpClientToChipmunk {
     Response { response: String },
+    // TODO:[MCP] add other message types as needed. E.g.:
 }

-/// Messages chipmunk to the MCP client
+/// Messages from chipmunk to the MCP client
 #[derive(Debug, Clone)]
 pub enum McpChipmunkToClient {
-    Prompt { prompt: String },
+    UserPrompt { prompt: String },
+    // TODO:[MCP] add other message types as needed. E.g.:
+    // SystemPrompt { prompt: String },
 }
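As an illustration of how the renamed UserPrompt variant might be consumed on the MCP client side, a hypothetical handler (assumed, not part of the commit) could translate it into a conversation entry before the LLM is asked for a response.

// Hypothetical handler; the function and its call site are assumptions.
use crate::client::conversation::{ChatMessage, ClientToLlm, Conversation};
use crate::client::messages::McpChipmunkToClient;

fn on_chipmunk_message(conversation: &mut Conversation, msg: McpChipmunkToClient) {
    match msg {
        McpChipmunkToClient::UserPrompt { prompt } => {
            conversation.add_chat_message(ChatMessage::ClientToLlm(ClientToLlm::UserPrompt {
                content: prompt,
            }));
        }
    }
}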
