using ModelContextProtocol.Protocol.Types;
using ModelContextProtocol.Server;
using System.ComponentModel;

namespace EverythingServer.Tools;

// Exposes a "sampleLLM" tool that demonstrates MCP sampling: the server asks
// the connected client to generate a completion with the client's own LLM.
[McpServerToolType]
public class SampleLlmTool(IMcpServer server)
{
    private readonly IMcpServer _server = server ?? throw new ArgumentNullException(nameof(server));

    [McpServerTool("sampleLLM"), Description("Samples from an LLM using MCP's sampling feature")]
    public async Task<string> SampleLLM(
        [Description("The prompt to send to the LLM")] string prompt,
        [Description("Maximum number of tokens to generate")] int maxTokens,
        CancellationToken cancellationToken)
    {
        var samplingParams = CreateRequestSamplingParams(prompt ?? string.Empty, "sampleLLM", maxTokens);

        // Round-trip to the client: sampling/createMessage is serviced by the
        // client's LLM, so the result depends on whatever model it exposes.
        var sampleResult = await _server.RequestSamplingAsync(samplingParams, cancellationToken);

        return $"LLM sampling result: {sampleResult.Content.Text}";
    }

    // Builds the sampling request: a single user message plus the sampling
    // options (system prompt, token budget, temperature, context inclusion).
    private static CreateMessageRequestParams CreateRequestSamplingParams(string context, string uri, int maxTokens = 100)
    {
        return new CreateMessageRequestParams()
        {
            Messages = [new SamplingMessage()
            {
                Role = Role.User,
                Content = new Content()
                {
                    Type = "text",
                    Text = $"Resource {uri} context: {context}"
                }
            }],
            SystemPrompt = "You are a helpful test server.",
            MaxTokens = maxTokens,
            Temperature = 0.7f,
            IncludeContext = ContextInclusion.ThisServer
        };
    }
}
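// For reference, a minimal host sketch of how this tool type might be wired
// up when configuring the server. This is an illustrative assumption, not
// part of this commit: it presumes the SDK's hosting extensions
// (AddMcpServer, WithStdioServerTransport, WithTools<T>) and a
// Microsoft.Extensions.Hosting generic host.
//
// using EverythingServer.Tools;
// using Microsoft.Extensions.DependencyInjection;
// using Microsoft.Extensions.Hosting;
//
// var builder = Host.CreateApplicationBuilder(args);
//
// builder.Services
//     .AddMcpServer()
//     .WithStdioServerTransport()
//     .WithTools<SampleLlmTool>(); // registers the [McpServerToolType] class above
//
// await builder.Build().RunAsync();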