diff --git a/.cursor/rules/cli.mdc b/.cursor/rules/cli.mdc
index adb1927..e90b3de 100644
--- a/.cursor/rules/cli.mdc
+++ b/.cursor/rules/cli.mdc
@@ -4,40 +4,52 @@ globs: **/Program.cs, **/CLI/**/*.cs, **/CommandLine/**/*.cs
---
# CLI Tool Implementation Rules
-This rule provides guidance for implementing the Command Line Interface (CLI) for the .NET Codebase Context Server.
+This rule provides guidance for implementing the Command Line Interface (CLI) for the .NET Codebase Context Server, with specific optimizations for Claude 3.7 Sonnet integration.
-## User Experience
-- Create a simple, intuitive command-line interface
-- Provide clear help text and usage examples
-- Implement proper error messages for invalid inputs
-- Display progress information for long-running operations
+## AI-Assisted Development
+- Use descriptive XML comments that explain the "why" behind implementation choices
+- Include example usage for each command in code comments
+- Structure commands into logical groups with consistent naming patterns
+- Ensure each command has a clear, single responsibility
+- Add CLI output examples in comments to help Claude understand expected behavior
## Command Structure
-- Use a consistent command structure and naming
-- Implement proper argument parsing and validation
-- Support both short and long option formats
-- Provide sensible default values where appropriate
-
-## Configuration
-- Implement a clean configuration loading mechanism
-- Support both command-line arguments and config files
-- Validate all configuration values before use
-- Provide clear error messages for invalid configurations
-
-## Packaging
-- Configure the project as a .NET global tool
-- Set up proper NuGet packaging
-- Include all necessary dependencies
-- Ensure cross-platform compatibility
+- Use verb-noun pattern for all commands (e.g., `list-projects`, `search-code`)
+- Implement consistent parameter naming across similar commands
+- Support both short and long option formats for all parameters
+- Include help text that explains parameter relationships and constraints
+- Group related commands into command sets with shared context
## Error Handling
-- Implement proper exception handling
-- Provide meaningful error messages to users
-- Include appropriate exit codes for different error conditions
-- Log errors for troubleshooting purposes
+- Return structured JSON responses with error codes and descriptive messages
+- Include targeted troubleshooting guidance in error messages
+- Implement detailed logging with contextual information
+- Add correlation IDs to associate related operations for troubleshooting
+- Handle graceful shutdown for CTRL+C and other interrupts
+
+## User Experience
+- Provide colorized, formatted output with clear visual hierarchy
+- Implement progress indicators for long-running operations
+- Support interactive mode for complex operations
+- Add "did you mean?" suggestions for common command typos
+- Include completion scripts for bash/zsh/PowerShell
+
+## Documentation
+- Create example-driven documentation with realistic scenarios
+- Document each command with purpose, parameters, examples, and limitations
+- Include troubleshooting sections for common issues
+- Add visual guides showing command relationships and workflows
+- Provide explicit mapping between CLI commands and MCP protocol capabilities
+
+## AI Integration Features
+- Add machine-readable JSON output option for all commands
+- Support template-based rendering of results for custom output formats
+- Include metadata in command responses to aid in AI understanding
+- Add descriptive response schemas for Claude to better understand outputs
## Security
-- Validate all user inputs
+- Validate all user inputs with clear error messages
- Implement proper access control for file operations
+- Use least-privilege principles for all operations
- Never expose sensitive information in logs or output
-- Handle graceful shutdown for CTRL+C and other interrupts
\ No newline at end of file
+- Support credential management with secure storage
\ No newline at end of file
diff --git a/.cursor/rules/dotnet.mdc b/.cursor/rules/dotnet.mdc
index 5d64c60..113eb44 100644
--- a/.cursor/rules/dotnet.mdc
+++ b/.cursor/rules/dotnet.mdc
@@ -4,54 +4,66 @@ globs: *.cs, *.csproj, *.sln
---
# .NET Codebase Context MCP Server Development Rules
-This rule applies to all .NET code files in the project and provides guidance for implementing the MCP server.
+This rule applies to all .NET code files in the project and provides guidance for implementing the MCP server with specific optimizations for Claude 3.7 Sonnet integration.
-## Code Style and Conventions
-- Follow standard C# coding conventions and .NET design patterns
-- Use nullable reference types throughout the codebase
-- Prefer async/await for I/O operations
-- Use meaningful variable and method names that clearly indicate purpose
-- Include XML documentation comments for public APIs
+## Code Documentation for AI Assistance
+- Include comprehensive XML documentation for all public and protected members
+- Add code examples within XML comments for complex functionality
+- Document "why" behind implementation decisions, not just "what" the code does
+- Use consistent terminology across all documentation
+- Include parameter validation logic in method documentation
+- Specify explicit return value descriptions and possible exceptions
+- Add class/interface relationship diagrams in code comments for complex hierarchies
+
+## Code Organization
+- Keep files under 300 lines when possible for better AI comprehension
+- Use partial classes to logically organize larger components
+- Follow consistent naming conventions for related patterns
+- Group related functionality into clearly named namespaces
+- Create dedicated classes for cross-cutting concerns
+- Use explicit rather than implicit patterns to improve AI code understanding
+- Prefer composition over inheritance for better AI reasoning
## Implementation Guidelines
-- Prioritize simplicity and readability over premature optimization
-- Implement core functionality first before adding advanced features
-- Follow the component structure outlined in the design document
-- Use dependency injection for better testability
-- Implement proper error handling and logging
+- Prioritize readability over clever or overly compact code
+- Implement one complete feature at a time
+- Use strongly typed models for all data exchanges
+- Create dedicated DTOs for MCP protocol interactions
+- Add comprehensive input validation with descriptive error messages
+- Implement self-explanatory function and variable names
+- Avoid magic numbers and strings; use constants with descriptive names
## Testing Guidelines
-- All tests generated by AI must include the trait: `[Trait("Category", "AI_Generated")]`
-- Write unit tests for all public APIs
-- Use descriptive test names following the pattern `MethodName_Scenario_ExpectedResult`
-- Ensure tests are isolated and don't depend on external resources
-- Include both positive and negative test cases
-
-## Project Structure
-- Keep the core MCP server logic separate from the CLI tool
-- Organize code by feature/component rather than by type
-- Follow standard .NET project organization:
- - `src/` for source code
- - `tests/` for test projects
- - `docs/` for documentation
- - `samples/` for example usage
-
-## Development Workflow
-- Implement one complete component at a time
-- Test each component before moving to the next
-- Focus on the critical path first:
- 1. Project Scanner/Indexer
- 2. Core MCP Tools
- 3. Server Host
- 4. CLI Interface
+- Write unit tests for all public APIs with descriptive test names
+- Create integration tests for MCP protocol interactions
+- Add performance tests for critical path operations
+- Include contract tests for MCP protocol compliance
+- Cover edge cases and failure scenarios in tests
+- Document test coverage goals and gaps
+
+## Semantic Search Optimization
+- Document embedding generation strategy and parameters
+- Implement configurable chunking strategies for different document types
+- Add explicit tokenization examples for code files
+- Create dedicated indexes for different code artifacts (methods, classes, etc.)
+- Implement relevance scoring tuned for code understanding
+- Include heuristics for determining code boundaries
+- Add context preservation mechanisms between code chunks
## Performance Considerations
-- Be mindful of memory usage when scanning large codebases
-- Consider streaming large files rather than loading them entirely in memory
-- Implement caching where appropriate to avoid redundant file operations
-
-## Security Guidelines
-- Implement proper validation for all file paths
-- Restrict access to sensitive files and directories
-- Provide clear logging of all operations for transparency
-- Never execute code from the codebase being analyzed
\ No newline at end of file
+- Implement efficient large codebase scanning with incremental updates
+- Use caching with clear invalidation strategies
+- Add benchmarks for critical operations
+- Implement background processing for long-running operations
+- Add memory usage monitoring and adaptive throttling
+- Include performance characteristics documentation
+- Support incremental updates to minimize reprocessing
+
+## Claude 3.7 Sonnet-Specific Guidelines
+- Structure code to optimize for Claude's context window (200K tokens)
+- Include focused example outputs for all tools
+- Add explicit relationship maps between components
+- Implement consistent state management with clear transitions
+- Document all assumptions about AI capabilities and limitations
+- Include failure recovery mechanisms for interrupted AI interactions
+- Structure code to facilitate accurate citation by Claude
\ No newline at end of file
diff --git a/README.md b/README.md
index d0a8ed3..c954728 100644
--- a/README.md
+++ b/README.md
@@ -76,6 +76,8 @@ Now Cursor AI can understand your codebase! Try asking it questions like:
- "List all .csproj files in this directory"
- "Show me the contents of this file"
- "What's the current base directory for file operations?"
+- "Help me think through the authentication system design"
+- "Document my reasoning about this architectural decision"
## 📚 Documentation
@@ -93,6 +95,12 @@ Now Cursor AI can understand your codebase! Try asking it questions like:
- 📖 **File Content Access**: Read source files with safety checks and size limits
- 🛡️ **Security**: Built-in safeguards for sensitive files and directory access
- 🎯 **Pattern Management**: Flexible ignore patterns for controlling file access
+- 💭 **Structured Thinking**: Document and validate reasoning about complex operations
+ - 🧩 **AI-Optimized Reasoning**: Based on [Anthropic's research](https://www.anthropic.com/engineering/claude-think-tool) on improving LLM problem-solving
+ - 📋 **Task Planning**: Break down complex problems into manageable steps
+ - ✅ **Policy Compliance**: Verify solutions against project guidelines
+ - 🔄 **Tool Output Analysis**: Process results from other tools before taking next actions
+ - 📝 **Decision Documentation**: Maintain audit trails of architectural choices
## Building from Source
diff --git a/docs/tool-reference.md b/docs/tool-reference.md
index 2d636df..d697e2d 100644
--- a/docs/tool-reference.md
+++ b/docs/tool-reference.md
@@ -245,6 +245,151 @@ Simple health check endpoint that returns a greeting message.
dotnet run --project src/NetContextClient/NetContextClient.csproj -- hello
```
+### `think`
+Provides a space for structured thinking during complex operations, allowing AI models to reason about actions, verify compliance with rules, and plan next steps without making any state changes.
+
+**Parameters:**
+- `--thought` (required): The thought or reasoning to process
+
+**Example:**
+```bash
+dotnet run --project src/NetContextClient/NetContextClient.csproj -- think --thought "Planning to refactor the authentication module to use JWT tokens"
+```
+
+**Output Example:**
+```json
+{
+ "thought": "Planning to refactor the authentication module to use JWT tokens",
+ "message": "Thought processed successfully",
+ "category": "Refactoring",
+ "timestamp": "2024-03-21T14:30:00.000Z",
+  "characterCount": 64
+}
+```
+
+**When to Use:**
+The think tool is particularly valuable when you want Claude to:
+1. Break down complex problems into manageable steps
+2. Analyze tool outputs before taking further actions
+3. Verify compliance with project policies and rules
+4. Plan multi-step operations that require careful consideration
+5. Document reasoning about architectural decisions
+
+> 📚 **Learn More**: This implementation is based on Anthropic's research on improving Claude's performance with a dedicated thinking space. [Read their detailed blog post](https://www.anthropic.com/engineering/claude-think-tool) for more insights.
+
+**Best Practice Examples:**
+
+1. **Multi-step Task Planning**
+```
+Before implementing a new feature, use the think tool to:
+- List all required functionality
+- Identify affected components and files
+- Plan the implementation sequence
+- Consider potential edge cases
+- Outline test scenarios
+
+Example: When adding authentication, think through: user flow, security requirements, error states, and affected API endpoints.
+```
+
+2. **Policy Compliance Verification**
+```
+When evaluating solutions against project guidelines, use the think tool to:
+- List all relevant policies
+- Check each policy requirement against the solution
+- Identify any compliance gaps
+- Document justifications for approach
+- Flag areas needing further review
+
+Example: "Before implementing this database change, let me verify it meets our data security policies..."
+```
+
+3. **Tool Output Analysis**
+```
+After receiving complex tool outputs (like search results or code analysis), use the think tool to:
+- Summarize key findings
+- Identify patterns across results
+- Connect information from different sources
+- Determine next investigation steps
+- Validate assumptions based on collected data
+
+Example: "After searching the codebase for auth-related files, I've found these patterns..."
+```
+
+4. **Architectural Decision Documentation**
+```
+When making architectural choices, use the think tool to:
+- Document decision criteria
+- Compare alternative approaches
+- List pros and cons of each option
+- Justify the chosen solution
+- Note implications for future development
+
+Example: "Considering three approaches for the caching layer: in-memory, Redis, or database..."
+```
+
+**Features:**
+- Automatic thought categorization:
+ - Refactoring: Code restructuring and improvements
+ - Security: Security-related considerations
+ - Performance: Optimization and performance improvements
+ - Testing: Testing and debugging thoughts
+ - Architecture: Design and architectural decisions
+ - General: Other uncategorized thoughts
+- Detailed metadata including timestamps and character counts
+- Content validation for potentially harmful patterns
+- Support for Unicode characters and emoji
+- Automatic log rotation for debugging logs
+
+**Limitations and Considerations:**
+- The tool is stateless - it doesn't persist thoughts between invocations
+- No state changes are made to the codebase
+- Maximum thought length is 32KB (32,768 characters)
+- Potentially harmful content is automatically rejected
+- Timestamps are in ISO 8601 format (UTC)
+- Log files are automatically rotated at 5MB
+
+**Logging Configuration:**
+Enable thought logging by setting the environment variable:
+```bash
+# PowerShell
+$env:NETCONTEXT_LOG_THOUGHTS="true"
+
+# Bash
+export NETCONTEXT_LOG_THOUGHTS="true"
+```
+
+Logs are stored in:
+- Location: `[AppDirectory]/logs/thoughts.log`
+- Format: `[Timestamp] JSON-formatted-thought-data`
+- Rotation: Automatic at 5MB with timestamp-based archiving
+
+**Integration with AI Workflows:**
+When working with Claude, the think tool can be used to:
+- Document decision-making processes
+- Create structured plans for complex refactoring
+- Validate approaches against project guidelines
+- Break down large tasks into smaller, manageable steps
+- Maintain a clear record of reasoning in the conversation history
+- Track thought patterns through categorization
+
+**Error Handling:**
+The tool returns error responses in the following cases:
+```json
+{
+  "error": "Error: Missing required parameter 'thought'"
+}
+```
+```json
+{
+ "error": "Error: Thought exceeds maximum length of 32768 characters"
+}
+```
+```json
+{
+ "error": "Error: Thought contains invalid content"
+}
+```
+
## Default Ignore Patterns
The following patterns are ignored by default to protect sensitive information:
diff --git a/src/NetContextClient/Models/ThinkResponse.cs b/src/NetContextClient/Models/ThinkResponse.cs
new file mode 100644
index 0000000..93e8b59
--- /dev/null
+++ b/src/NetContextClient/Models/ThinkResponse.cs
@@ -0,0 +1,22 @@
+namespace NetContextClient.Models;
+
+///
+/// Response model for the think tool.
+///
+public class ThinkResponse
+{
+ ///
+ /// The thought that was processed.
+ ///
+ public string Thought { get; set; } = string.Empty;
+
+ ///
+ /// A confirmation message about the thought processing.
+ ///
+ public string Message { get; set; } = string.Empty;
+
+ ///
+ /// Error message if something went wrong.
+ ///
+ public string? Error { get; set; }
+}
\ No newline at end of file
diff --git a/src/NetContextClient/Program.cs b/src/NetContextClient/Program.cs
index 093379a..5b93df1 100644
--- a/src/NetContextClient/Program.cs
+++ b/src/NetContextClient/Program.cs
@@ -3,7 +3,10 @@
using ModelContextProtocol.Protocol.Transport;
using NetContextClient.Models;
using System.CommandLine;
+using System.Text.Encodings.Web;
using System.Text.Json;
+using System.Text.Json.Serialization;
+using System.Text.Unicode;
///
/// Command-line interface for the .NET Context Client, which interacts with the MCP server
@@ -18,6 +21,21 @@
///
class Program
{
+ ///
+ /// Default JSON serializer options used for response deserialization.
+ ///
+ private static readonly JsonSerializerOptions DefaultJsonOptions = new()
+ {
+ WriteIndented = true,
+ PropertyNameCaseInsensitive = true,
+ AllowTrailingCommas = true,
+ ReadCommentHandling = JsonCommentHandling.Skip,
+ PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
+ DictionaryKeyPolicy = JsonNamingPolicy.CamelCase,
+ DefaultIgnoreCondition = System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull,
+ Encoder = JavaScriptEncoder.UnsafeRelaxedJsonEscaping
+ };
+
///
/// Entry point for the command-line interface. Sets up the MCP client and defines
/// the command structure using System.CommandLine.
@@ -703,6 +721,38 @@ static async Task Main(string[] args)
}
});
+ // Think command
+ var thinkCommand = new Command("think", "Process a thought without making any state changes");
+ var thoughtOption = new Option("--thought", "The thought to process") { IsRequired = true };
+ thinkCommand.AddOption(thoughtOption);
+ thinkCommand.SetHandler(async (string thought) =>
+ {
+ try
+ {
+ var result = await client.CallToolAsync("think", new() { ["thought"] = thought });
+ var jsonText = result.Content.First(c => c.Type == "text").Text;
+ if (jsonText != null)
+ {
+ var response = JsonSerializer.Deserialize(jsonText, DefaultJsonOptions);
+
+ if (!string.IsNullOrEmpty(response?.Error))
+ {
+ await Console.Error.WriteLineAsync($"Error: {response.Error}");
+ return;
+ }
+
+ await Console.Out.WriteLineAsync("Processed thought:");
+ await Console.Out.WriteLineAsync($" {response?.Thought}");
+ await Console.Out.WriteLineAsync($"\n{response?.Message}");
+ }
+ }
+ catch (Exception ex)
+ {
+ await Console.Error.WriteLineAsync($"Error: {ex.Message}");
+ Environment.Exit(1);
+ }
+ }, thoughtOption);
+
rootCommand.AddCommand(helloCommand);
rootCommand.AddCommand(setBaseDirCommand);
rootCommand.AddCommand(getBaseDirCommand);
@@ -720,6 +770,7 @@ static async Task Main(string[] args)
rootCommand.AddCommand(getStateFileLocationCommand);
rootCommand.AddCommand(semanticSearchCommand);
rootCommand.AddCommand(analyzePackagesCommand);
+ rootCommand.AddCommand(thinkCommand);
return await rootCommand.InvokeAsync(args);
}
diff --git a/src/NetContextServer/Tools/ThinkTools.cs b/src/NetContextServer/Tools/ThinkTools.cs
new file mode 100644
index 0000000..f0f92cc
--- /dev/null
+++ b/src/NetContextServer/Tools/ThinkTools.cs
@@ -0,0 +1,182 @@
+using ModelContextProtocol.Server;
+using System.ComponentModel;
+using System.Text.Encodings.Web;
+using System.Text.Json;
+using System.Text.Json.Serialization;
+using System.Text.RegularExpressions;
+
+namespace NetContextServer.Tools;
+
+///
+/// Provides a tool for structured thinking during complex operations, allowing the model
+/// to reason about its actions without making state changes.
+///
+[McpToolType]
+public static class ThinkTools
+{
+ ///
+ /// Default JSON serializer options used for think tool output.
+ ///
+ private static readonly JsonSerializerOptions DefaultJsonOptions = new()
+ {
+ WriteIndented = true,
+ PropertyNameCaseInsensitive = true,
+ AllowTrailingCommas = true,
+ ReadCommentHandling = JsonCommentHandling.Skip,
+ PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
+ DictionaryKeyPolicy = JsonNamingPolicy.CamelCase,
+ DefaultIgnoreCondition = System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull,
+ Encoder = JavaScriptEncoder.UnsafeRelaxedJsonEscaping
+ };
+
+ ///
+ /// Maximum allowed length for thought parameter to prevent memory issues.
+ ///
+ private const int MaxThoughtLength = 32768; // 32KB
+
+ ///
+ /// Maximum log file size before rotation (5MB).
+ ///
+ private const int MaxLogFileSize = 5 * 1024 * 1024;
+
+ ///
+ /// Standard error messages.
+ ///
+ private static readonly string ErrorMissingThought = "Error: Missing required parameter 'thought'";
+ private static readonly string ErrorThoughtTooLarge = $"Error: Thought exceeds maximum length of {MaxThoughtLength} characters";
+ private static readonly string ErrorInvalidContent = "Error: Thought contains invalid content";
+
+ ///
+ /// Pattern to detect potentially harmful content.
+ ///
+ private static readonly Regex InvalidContentPattern = new(
+ @"(?i)(exec\s+\{|system\s*\(|eval\s*\(|