diff --git a/.changeset/grumpy-dogs-rest.md b/.changeset/grumpy-dogs-rest.md new file mode 100644 index 000000000000..5bd5b5def1a4 --- /dev/null +++ b/.changeset/grumpy-dogs-rest.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +add a button to fix mermaid syntax errors by calling the LLM diff --git a/.changeset/large-olives-wink.md b/.changeset/large-olives-wink.md new file mode 100644 index 000000000000..a3b4486f1ef3 --- /dev/null +++ b/.changeset/large-olives-wink.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Fix model search being prefilled in dropdown to prevent confusion in available models diff --git a/.changeset/stale-rivers-travel.md b/.changeset/stale-rivers-travel.md new file mode 100644 index 000000000000..ba8047b2c82e --- /dev/null +++ b/.changeset/stale-rivers-travel.md @@ -0,0 +1,5 @@ +--- +roo-code: minor +--- + +Add copy prompt button to task actions. Based on [@vultrnerd's feedback](https://github.com/Kilo-Org/kilocode/discussions/850). diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 5bfc08f80ff2..44626273b579 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -53,6 +53,16 @@ body: validations: required: true + - type: textarea + id: roo-code-tasks + attributes: + label: Roo Code Task Links (Optional) + description: | + If you have any publicly shared task links that demonstrate the issue, please paste them here. + This helps maintainers understand the context. + Example: https://app.roocode.com/share/task-id + placeholder: Paste your Roo Code share links here, one per line + - type: textarea id: steps attributes: @@ -85,4 +95,4 @@ body: attributes: label: 📄 Relevant Logs or Errors (Optional) description: Paste API logs, terminal output, or errors here. Use triple backticks (```) for code formatting. - render: shell \ No newline at end of file + render: shell diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 85263d881352..4863f9ffa61c 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -57,6 +57,16 @@ body: label: Additional context (optional) description: Mockups, screenshots, links, user quotes, or other relevant information that supports your proposal. + - type: textarea + id: roo-code-tasks + attributes: + label: Roo Code Task Links (Optional) + description: | + If you used Roo Code to explore this feature request or develop solutions, share the public task links here. + This helps maintainers understand the context and any exploration you've done. + Example: https://app.roocode.com/share/task-id + placeholder: Paste your Roo Code share links here, one per line + - type: checkboxes id: checklist attributes: diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 765c70614c80..e83e44cd66d4 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -12,6 +12,14 @@ Before submitting your PR, please ensure: Closes: # +### Roo Code Task Context (Optional) + + + ### Description @@ -36,7 +35,7 @@ - Extract API specifications from code implementations + Extract API specifications from code. 
@@ -46,10 +45,8 @@ - HTTP method - Route path - - Path parameters - - Query parameters - - Request body schema - - Response schemas + - Path/query parameters + - Request/response schemas - Status codes @@ -58,9 +55,8 @@ type\s+(Query|Mutation|Subscription)\s*{[^}]+}|@(Query|Mutation|Resolver) ]]> - - Schema types + - Schema and input types - Resolvers - - Input types - Return types - Field arguments @@ -70,15 +66,15 @@ type\s+(Query|Mutation|Subscription)\s*{[^}]+}|@(Query|Mutation|Resolver) - Map all dependencies and integration points + Map dependencies and integration points. - Import statements and require calls - Package.json dependencies + Import/require statements + package.json dependencies External API calls - Database connections + DB connections Message queue integrations - File system operations + Filesystem operations @@ -102,31 +98,22 @@ type\s+(Query|Mutation|Subscription)\s*{[^}]+}|@(Query|Mutation|Resolver) - Extract data models, schemas, and type definitions + Extract data models, schemas, and type definitions. - - interface definitions - - type aliases - - class declarations - - enum definitions + - interfaces, types, classes, enums - - Schema definitions - - Migration files - - Model definitions (ORM) - - SQL CREATE statements + - Schema definitions, migration files, ORM models - - JSON Schema - - Joi/Yup schemas - - Validation decorators - - Custom validators + - JSON Schema, Joi/Yup/Zod schemas, validation decorators @@ -147,37 +134,33 @@ type\s+(Query|Mutation|Subscription)\s*{[^}]+}|@(Query|Mutation|Resolver) - Identify and document business rules and logic + Identify and document business rules. - Complex conditional statements + Complex conditionals Calculation functions Validation rules State machines - Business-specific constants - Domain-specific algorithms + Domain-specific constants and algorithms - Why the logic exists (business requirement) - When the logic applies (conditions) - What the logic does (transformation) - Edge cases and exceptions - Business impact of changes + Why logic exists (business need) + When logic applies (conditions) + What logic does (transformation) + Edge cases + Impact of changes - Document error handling strategies and recovery mechanisms + Document error handling and recovery. - Try-catch blocks and error boundaries - Custom error classes and types + try/catch blocks, error boundaries + Custom error classes Error codes and messages - Logging strategies - Fallback mechanisms - Retry logic - Circuit breakers + Logging, fallbacks, retries, circuit breakers @@ -196,81 +179,68 @@ type\s+(Query|Mutation|Subscription)\s*{[^}]+}|@(Query|Mutation|Resolver) - Identify security measures and potential vulnerabilities + Identify security measures and vulnerabilities. - - JWT implementation - - Session management - - OAuth flows - - API key handling + - JWT, sessions, OAuth, API keys - - Role-based access control - - Permission checks - - Resource ownership validation - - Access control lists + - RBAC, permission checks, ownership validation - - Encryption usage - - Hashing algorithms - - Sensitive data handling - - PII protection + - Encryption, hashing, sensitive data handling - - Input sanitization - - SQL injection prevention - - XSS protection - - CSRF tokens - + - Sanitization, SQLi/XSS/CSRF prevention + - Identify performance characteristics and optimization opportunities + Identify performance factors and optimization opportunities. 
- Database query patterns (N+1 queries) + DB query patterns (N+1) Caching strategies - Async/await usage + Async usage Batch processing Resource pooling Memory management Algorithm complexity - Time complexity of algorithms - Space complexity - Database query counts + Time/space complexity + DB query counts API response times - Memory usage patterns - Concurrent request handling + Memory usage + Concurrency handling - Analyze test coverage and quality + Analyze test coverage. - + __tests__, *.test.ts, *.spec.ts - Function-level coverage + Function coverage - + integration/, e2e/ - Feature workflow coverage + Workflow coverage - + api-tests/, *.api.test.ts Endpoint coverage @@ -293,20 +263,16 @@ type\s+(Query|Mutation|Subscription)\s*{[^}]+}|@(Query|Mutation|Resolver) - Extract all configuration options and their impacts + Extract configuration options and their impacts. - Environment variables (.env files) - Configuration files (config.json, settings.yml) - Command-line arguments - Feature flags - Build-time constants + .env files, config files, CLI args, feature flags Default values - Valid value ranges - Impact on behavior - Dependencies between configs + Valid values + Behavior impact + Config dependencies Security implications @@ -315,40 +281,29 @@ type\s+(Query|Mutation|Subscription)\s*{[^}]+}|@(Query|Mutation|Resolver) - Map complete user workflows through the feature + Map user workflows through the feature. - Identify user entry points (UI, API, CLI) - Trace user actions through the system - Document decision points and branches - Map data transformations at each step - Identify exit points and outcomes + Identify entry points (UI, API, CLI). + Trace user actions. + Document decision points. + Map data transformations. + Identify outcomes. - User flow diagrams - Step-by-step procedures - Decision trees - State transition diagrams + Flow diagrams, procedures, decision trees, state diagrams. - Document how the feature integrates with other systems + Document integration with other systems. - Synchronous API calls - Asynchronous messaging - Event-driven interactions - Batch processing - Real-time streaming + Sync API calls, async messaging, events, batch processing, streaming. - Integration protocols and formats - Authentication mechanisms - Error handling and retries - Data transformation requirements - SLA and performance expectations + Protocols, auth, error handling, data transforms, SLAs. @@ -356,10 +311,7 @@ type\s+(Query|Mutation|Subscription)\s*{[^}]+}|@(Query|Mutation|Resolver) - Package.json engines field - README compatibility sections - Migration guides - Breaking change documentation + package.json, READMEs, migration guides, breaking changes docs. @@ -372,16 +324,10 @@ type\s+(Query|Mutation|Subscription)\s*{[^}]+}|@(Query|Mutation|Resolver) - @deprecated annotations - TODO: deprecate comments - Legacy code markers - Migration warnings + @deprecated, TODO comments, legacy code markers. - Deprecation date - Removal timeline - Migration path - Alternative solutions + Deprecation date, removal timeline, migration path, alternatives. @@ -389,21 +335,17 @@ type\s+(Query|Mutation|Subscription)\s*{[^}]+}|@(Query|Mutation|Resolver) - All public APIs documented - Examples provided for complex features - Error scenarios covered - Configuration options explained - Security considerations addressed + Public APIs documented. + Examples for complex features. + Error scenarios covered. + Config options explained. + Security addressed. 
- Cyclomatic complexity - Code duplication - Test coverage percentage - Documentation coverage - Technical debt indicators + Cyclomatic complexity, code duplication, test coverage, doc coverage, tech debt. diff --git a/.roo/rules-docs-extractor/4_tool_usage_guide.xml b/.roo/rules-docs-extractor/4_tool_usage_guide.xml index a94fdfc0d8d7..d746141daa93 100644 --- a/.roo/rules-docs-extractor/4_tool_usage_guide.xml +++ b/.roo/rules-docs-extractor/4_tool_usage_guide.xml @@ -1,16 +1,15 @@ - Specific guidance on using tools effectively for comprehensive documentation extraction, - with emphasis on gathering complete information across all aspects of a feature. + Guidance on using tools for documentation extraction. codebase_search - Initial discovery of feature-related code + Initial code discovery. - Finding feature entry points + Find feature entry points authentication login user session JWT token @@ -18,7 +17,7 @@ ]]> - Locating business logic + Find business logic calculate pricing discount tax invoice billing @@ -26,7 +25,7 @@ ]]> - Finding configuration + Find configuration config settings environment variables .env process.env @@ -38,11 +37,11 @@ list_code_definition_names - Understanding code structure and organization + Understand code structure. - Use on directories containing core feature logic - Analyze both implementation and test directories - Look for patterns in naming conventions + Use on core feature directories. + Analyze implementation and test directories. + Look for naming patterns. @@ -53,12 +52,12 @@ read_file - Deep analysis of specific implementations + Analyze specific implementations. - Read main feature files first - Follow imports to understand dependencies - Read test files to understand expected behavior - Examine configuration and type definition files + Read main feature files. + Follow imports to find dependencies. + Read test files for expected behavior. + Examine config and type definition files. @@ -85,10 +84,10 @@ search_files - Finding specific patterns and implementations + Find specific patterns. - Find all API endpoints + Find API endpoints src @@ -97,7 +96,7 @@ ]]> - Find error handling patterns + Find error handling src @@ -106,7 +105,7 @@ ]]> - Find configuration usage + Find config usage src @@ -120,14 +119,14 @@ - Create the final documentation file when generating new documentation from scratch. - This tool is NOT used when reviewing a user-provided document section. In that scenario, feedback is provided directly in the chat. + Create documentation file for new docs. + Not used for reviews. Feedback for reviews is provided in chat. DOCS-TEMP-[feature-name].md - Use descriptive feature names in filename - Include table of contents with anchors - Use consistent markdown formatting - Include code examples with syntax highlighting + Use descriptive feature name in filename. + Include table of contents. + Use consistent Markdown formatting. + Include syntax-highlighted code examples. @@ -149,32 +148,32 @@ The authentication system provides secure user authentication using JWT tokens.. - Clarify requirements when multiple interpretations exist + Clarify ambiguous requirements. - Multiple features with similar names exist - Documentation depth needs clarification - Target audience priorities need definition + Multiple features have similar names. + Documentation depth is unclear. + Audience priorities are undefined. -Which aspects of the authentication system should I focus on? +Which authentication aspects should be the focus? 
-Complete authentication flow including JWT tokens, session management, and OAuth integration -Only the JWT token implementation and validation -OAuth2 integration with external providers -Password reset and account recovery workflows +The complete flow (JWT, sessions, OAuth). +Only JWT implementation and validation. +Only OAuth2 integration. +Password reset and recovery workflows. ]]> -What level of technical detail should the documentation include? +What level of technical detail is needed? -High-level overview suitable for all audiences -Detailed technical implementation for developers -API reference with code examples -Complete coverage for all audience types +High-level overview for all audiences. +Detailed developer implementation. +API reference with code examples. +Full coverage for all audiences. ]]> @@ -183,21 +182,21 @@ The authentication system provides secure user authentication using JWT tokens.. - + - Systematic approach to finding all files related to a feature + Find all files related to a feature. - Start with semantic search + Start with semantic search. -feature implementation main logic core functionality +feature implementation main logic ]]> - List directory structure + List directory structure. src/features @@ -206,7 +205,7 @@ The authentication system provides secure user authentication using JWT tokens.. ]]> - Find related tests + Find related tests. src @@ -216,7 +215,7 @@ The authentication system provides secure user authentication using JWT tokens.. ]]> - Locate configuration files + Find config files. . @@ -230,14 +229,14 @@ The authentication system provides secure user authentication using JWT tokens.. - Follow import chains to understand all dependencies + Follow import chains to map dependencies. - Read main feature file - Extract all imports - Read each imported file - Recursively analyze their imports - Build dependency graph + Read main file. + Extract all imports. + Read each imported file. + Recursively analyze imports. + Build dependency graph. @@ -256,35 +255,31 @@ The authentication system provides secure user authentication using JWT tokens.. - Extract complete API documentation from code + Extract API documentation from code. - Route definitions - Request/response schemas - Authentication requirements - Rate limiting rules - Error responses + Route definitions, request/response schemas, auth requirements, rate limiting, error responses. - Find all route files - Extract route definitions - Find associated controllers - Analyze request validation - Document response formats + Find route files. + Extract route definitions. + Find controllers. + Analyze request validation. + Document response formats. - Use tests to understand expected behavior + Use tests to document expected behavior. - Tests show real usage examples - Test descriptions explain functionality - Edge cases are often tested - Expected outputs are documented + Tests provide usage examples. + Test descriptions explain functionality. + Tests cover edge cases. + Tests document expected outputs. @@ -354,44 +349,31 @@ config\.(\w+)\.(\w+) - - Organize output for easy navigation + + Organize output for navigation. - - Clear hierarchy with numbered sections - - Consistent heading levels - - Table of contents with links - - Cross-references between sections + - Clear hierarchy, consistent headings, ToC with links, cross-references. - Include relevant code examples + Include relevant code examples. 
- - Use syntax highlighting - - Show both request and response - - Include error cases - - Provide language-specific examples + - Use syntax highlighting, show request/response, include error cases. - - Suggest where diagrams would help + + Suggest diagrams where helpful. - - Architecture diagrams - - Sequence diagrams - - Data flow diagrams - - State machines + - Architecture, sequence, data flow, state machine diagrams. - - Always include important metadata + + Include important metadata. - - Version compatibility - - Last updated date - - Feature status (stable/beta/deprecated) - - Performance characteristics - - Security considerations + - Version compatibility, last updated, status, performance, security. diff --git a/.roo/rules-docs-extractor/5_complete_extraction_examples.xml b/.roo/rules-docs-extractor/5_complete_extraction_examples.xml index 6248a9587fbb..3975b5b26275 100644 --- a/.roo/rules-docs-extractor/5_complete_extraction_examples.xml +++ b/.roo/rules-docs-extractor/5_complete_extraction_examples.xml @@ -1,48 +1,42 @@ - Complete examples demonstrating the full documentation extraction workflow - for different types of features and components. + Examples of the documentation extraction workflow. - Extract comprehensive documentation for a JWT-based authentication system - including login, logout, token refresh, and role-based access control. + Extract documentation for a JWT-based authentication system, including login, token refresh, and RBAC. - Initial feature discovery + Discover feature authentication JWT login logout token refresh auth middleware ]]> - - Auth controller files - - JWT service implementations - - Middleware functions - - User model with auth fields - - Auth route definitions + - Auth controllers, services, middleware, models, and routes. - Analyze authentication structure + Analyze structure src/auth ]]> - - Identify main classes and functions - - Map authentication flow - - Find token generation/validation logic + - Identify main classes/functions. + - Map auth flow. + - Find token generation/validation logic. - Read core implementation files + Read core files @@ -75,16 +69,12 @@ ]]> - - POST /auth/login - - POST /auth/logout - - POST /auth/refresh - - GET /auth/profile - - POST /auth/register + - POST /auth/login, POST /auth/logout, POST /auth/refresh, GET /auth/profile, POST /auth/register - Document configuration + Extract configuration src @@ -94,7 +84,7 @@ - Analyze security measures + Analyze security src/auth @@ -104,7 +94,7 @@ - Generate documentation + Generate docs DOCS-TEMP-authentication-system.md @@ -552,24 +542,23 @@ DEBUG=auth:* npm start - Start with semantic search to find all related files - Read multiple files together for context - Extract API documentation from route definitions - Use tests to understand expected behavior - Document security measures comprehensively - Include troubleshooting based on common errors + Use semantic search to find related files. + Read multiple files for context. + Extract API docs from route definitions. + Use tests to understand behavior. + Document security measures. + Include troubleshooting for common errors. - Extract documentation for database models, relationships, migrations, - and data access patterns. + Extract documentation for database models, relationships, and migrations. 
- Find database-related files + Find DB files database schema model entity migration table column relationship @@ -578,7 +567,7 @@ DEBUG=auth:* npm start - Analyze model definitions + Analyze models src/models diff --git a/.roo/rules-docs-extractor/6_communication_guidelines.xml b/.roo/rules-docs-extractor/6_communication_guidelines.xml index aed30f40945c..908b1fcfb6e4 100644 --- a/.roo/rules-docs-extractor/6_communication_guidelines.xml +++ b/.roo/rules-docs-extractor/6_communication_guidelines.xml @@ -1,90 +1,87 @@ - Guidelines for communicating with users and formatting documentation output - during the extraction process. + Guidelines for user communication and output formatting. - - Users will specify what they want documented in their initial message - Start working immediately based on their request - Only ask for clarification if genuinely ambiguous - + + Act on the user's request immediately. + Only ask for clarification if the request is ambiguous. + - + - Multiple features with identical names found - Request is genuinely ambiguous (rare) - User explicitly asks for options + Multiple features with similar names are found. + The request is ambiguous. + The user explicitly asks for options. -I found multiple authentication systems. Which one should I document? +Found multiple auth systems. Which to document? -JWT-based authentication system (src/auth/jwt/*) +JWT-based system (src/auth/jwt/*) OAuth2 integration (src/auth/oauth/*) -Basic authentication middleware (src/middleware/basic-auth.ts) -All authentication features comprehensively +Basic auth middleware (src/middleware/basic-auth.ts) +All of them ]]> - + - Starting major analysis phase - Completed significant extraction - Found unexpected complexity - Discovered related features + Starting a major analysis phase. + Extraction is complete. + Unexpected complexity is found. - + - Alert user to potential security concerns found during analysis + Alert user to security concerns found during analysis. - Note deprecated features that need migration documentation + Note deprecated features needing migration docs. - - Highlight areas where code lacks inline documentation + + Highlight code that lacks inline documentation. - Warn about intricate dependency chains affecting the feature + Warn about complex dependency chains. - + @@ -92,18 +89,14 @@ This feedback can be copied and pasted for your documentation team. - - Use # for main title only - Use ## for major sections - Use ### for subsections - Use #### sparingly for minor subsections - Never skip heading levels - + + Use # for main title, ## for major sections, ### for subsections. + Never skip heading levels. + - Always specify language for syntax highlighting - Use appropriate language identifiers (typescript, javascript, json, yaml, bash) - Include file paths as comments when relevant + Always specify language for syntax highlighting (e.g., typescript, json, bash). + Include file paths as comments where relevant. - Use tables for structured data like configurations - Include headers with proper alignment - Keep cell content concise + Use tables for structured data like configs. + Include headers and align columns. + Keep cell content brief. - Use bullet points for unordered lists - Use numbers for sequential steps - Nest lists with proper indentation - Keep list items parallel in structure + Use bullets for unordered lists, numbers for sequential steps. + Keep list items parallel in structure. 
[Link text](#section-anchor) - Use lowercase, hyphenated anchors - Test all internal links + Use lowercase, hyphenated anchors. Test all links. [Link text](https://example.com) - Use HTTPS when available - Link to official documentation + Use HTTPS. Link to official docs. `path/to/file.ts` - Use relative paths from project root - Use backticks for inline file references + Use relative paths from project root, in backticks. @@ -160,15 +148,15 @@ export class AuthService { > ⚠️ **Warning**: [message] - Security concerns, breaking changes, deprecations + Security, breaking changes, deprecations. > 📝 **Note**: [message] - Important information, clarifications + Important info, clarifications. > 💡 **Tip**: [message] - Best practices, optimization suggestions + Best practices, optimizations. @@ -186,138 +174,110 @@ Status: Stable - - Be conversational and approachable - Use active voice and "you" to address the reader - Lead with benefits, not features - Use concrete examples and scenarios - Keep paragraphs short and scannable - Avoid unnecessary technical details - - - - Write as if explaining to a colleague who isn't technical - Use analogies and comparisons to familiar concepts - Focus on "what" and "why" before "how" - Include practical examples users can relate to - Address common concerns and questions directly - - - - - Friendly, helpful, encouraging - Plain language, minimal jargon - Real-world scenarios, before/after comparisons - Problem → Solution → Benefits → How to use + + Be direct, not conversational. + Use active voice. + Lead with benefits. + Use concrete examples. + Keep paragraphs short. + Avoid unnecessary technical details. + + + + + Technical and direct. + Standard programming terms. + Code snippets, implementation details. - - - Technical when needed, but still approachable - Use standard programming terminology - Include code snippets and implementation details - - - - Friendly, instructional, step-by-step - Avoid technical jargon, explain concepts simply - Use screenshots and real-world scenarios + + Instructional, step-by-step. + Simple language, no jargon. + Screenshots, real-world scenarios. - - - Professional, operational focus - Use IT/DevOps terminology - Include command-line examples and configurations + + Operational focus. + IT/DevOps terms. + CLI examples, configs. - - - Business-oriented, value-focused - Use business terminology, avoid implementation details - Include metrics, ROI, and business benefits - - + - Summary of what was documented - Key findings or insights - File location and name - Suggestions for next steps (if applicable) + Summary of documented feature. + Key findings. + File location. + Next step suggestions (if applicable). - + - I couldn't find a feature matching "[feature name]". Here are some similar features I found: + Could not find a feature matching "[feature name]". Similar features found: - [List similar features] - Would you like me to document one of these instead? + Document one of these instead? - + - The code for [feature] has limited inline documentation. I'll extract what I can from: - - Code structure and naming - - Test files - - Related documentation - - Usage patterns + Code for [feature] has limited inline documentation. Extracting from code structure, tests, and usage patterns. - This feature is quite complex with [X] components. 
Would you like me to: - - Document everything comprehensively (may result in a large document) - - Focus on the core functionality - - Split into multiple documentation files + This feature is complex. Choose documentation scope: + - Document comprehensively + - Focus on core functionality + - Split into multiple documents - + - All sections have content (no placeholders) - Code examples are syntactically correct - Links and cross-references work - Tables are properly formatted - Version information is included - File naming follows convention + No placeholder content remains. + Code examples are correct. + Links and cross-references work. + Tables are formatted correctly. + Version info is included. + Filename follows conventions. \ No newline at end of file diff --git a/.roo/rules-docs-extractor/7_user_friendly_examples.xml b/.roo/rules-docs-extractor/7_user_friendly_examples.xml index 9de359a62ad1..6b94e88de609 100644 --- a/.roo/rules-docs-extractor/7_user_friendly_examples.xml +++ b/.roo/rules-docs-extractor/7_user_friendly_examples.xml @@ -1,93 +1,87 @@ - Examples and patterns for creating documentation that prioritizes user experience - and practical understanding over technical completeness. + Examples for creating user-focused, practical documentation. - - The concurrent file read feature uses parallel processing to read multiple files. - Read multiple files at once, saving time and reducing interruptions. + + The concurrent file read feature uses parallel processing. + Read multiple files at once, reducing interruptions. - This feature improves efficiency. - Instead of approving 10 file reads one by one, approve them all at once and get your answer faster. + This improves efficiency. + Instead of approving 10 file reads one-by-one, approve them all at once. - - The feature uses a thread pool with configurable concurrency limits to process file I/O operations. - Roo can read up to 100 files at once (you can change this limit in settings). + + The feature uses a thread pool with configurable concurrency limits. + Roo reads up to 100 files at once (changeable in settings). - + Users must configure the concurrent file read limit parameter. - You can adjust how many files Roo reads at once in the settings. + Adjust how many files Roo reads at once in settings. - + - + - + - + @@ -95,160 +89,130 @@ You can customize this feature in Roo's settings: - - - - + - - + + The system imposes a hard limit of 100 concurrent operations. - Roo can handle up to 100 files at once - more than enough for most projects! + Roo handles up to 100 files at once. - + Error: Maximum concurrency threshold exceeded. - Oops! That's too many files at once. Try lowering the file limit in settings. + Too many files requested. Lower the file limit in settings. - + Reduces API call overhead through request batching. - Get answers faster by reading all the files Roo needs in one go. + Get answers faster by reading all needed files at once. - + - - - Error messages: ⚠️ - Tips: 💡 - Important notes: 📝 + + + Error: ⚠️ + Tip: 💡 + Note: 📝 Security: 🔒 - - - - For emphasis on key points - For settings names, file paths, or commands - For important callouts or warnings - - + + + + For emphasis + For settings, file paths, or commands + For callouts or warnings + + - Concurrent File Reads Documentation + Concurrent File Reads Doc - Does it start with benefits, not features? - Are technical terms explained or avoided? - Does it use "you" to address the reader? - Are there practical examples or scenarios? 
- Is the tone conversational and friendly? + Does it start with benefits? + Are technical terms avoided? + Is the tone direct? + Are there practical examples? Are sections short and scannable? - Does it answer common user questions? - Is help easily accessible? + Does it answer user questions? + Is help accessible? \ No newline at end of file diff --git a/.roo/rules-issue-fixer-orchestrator/10_pr_template_format.xml b/.roo/rules-issue-fixer-orchestrator/10_pr_template_format.xml new file mode 100644 index 000000000000..d16eb51424f5 --- /dev/null +++ b/.roo/rules-issue-fixer-orchestrator/10_pr_template_format.xml @@ -0,0 +1,213 @@ + + + This document defines the format for PR messages that are saved to the temp folder + before creating a pull request. The PR message is saved in two formats: + 1. JSON format in pr_summary.json (for programmatic use) + 2. Markdown format in pr_message.md (for manual PR creation) + + The PR message must follow the exact Roo Code contribution template. + + + + + The pr_summary.json file contains the PR title and body in a structured format + that can be easily parsed by scripts and the GitHub CLI. + + + { + "title": "fix: [description] (#[issue-number])", + "body": "[Full markdown body as described below]", + "issue_number": 123, + "repo_owner": "owner", + "repo_name": "repo", + "base_branch": "main", + "head_branch": "fix/issue-123-description" + } + + + + + + The pr_message.md file contains the complete PR message in a format that can be + directly copied and pasted when creating a PR manually. + + + PR Title: [title from JSON] + + --- + + [Full PR body from JSON] + + + + + + The PR body must follow this exact Roo Code PR template with all required sections. + + + + + + The GitHub issue number being fixed + Optional Roo Code task links (remove section if not applicable) + + Summary of changes and implementation details. Should include: + - Key implementation details + - Design choices or trade-offs made + - Specific areas reviewers should focus on + + + Detailed testing steps including: + - Unit tests added/modified + - Manual testing steps performed + - How reviewers can reproduce tests + - Testing environment details + + + For UI changes: before/after screenshots or video + For non-UI changes: "N/A - No UI changes" + + + Check appropriate box: + - "- [x] No documentation updates are required." OR + - "- [x] Yes, documentation updates are required. [describe updates]" + + + Any additional context, or remove entire section if not needed + + User's Discord username for contact + + + + + pr_summary.json + .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_summary.json + Structured data for programmatic PR creation + + + pr_message.md + .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_message.md + Human-readable format for manual PR creation + + + + + + Always save both formats when preparing a PR to give users flexibility + in how they create the pull request. + + + The pr_message.md file should be self-contained and ready to copy/paste + without any additional formatting needed. + + + Include all sections in the template, maintaining the exact format + and HTML comments as shown. + + + Pre-check all checklist items that can be verified programmatically. + Leave documentation checkbox unchecked for user to decide. + + + For sections that don't apply, use appropriate placeholder text + rather than removing the section entirely. 
+ + + + + + If translations were added during the issue fix, include details in the + Description section about which languages were updated. + + + \ No newline at end of file diff --git a/.roo/rules-issue-fixer-orchestrator/1_Workflow.xml b/.roo/rules-issue-fixer-orchestrator/1_Workflow.xml new file mode 100644 index 000000000000..e41d30bbda67 --- /dev/null +++ b/.roo/rules-issue-fixer-orchestrator/1_Workflow.xml @@ -0,0 +1,874 @@ + + + Initialize Task Context + + The user will provide a GitHub issue URL. + + 1. **Parse URL**: Extract the `owner`, `repo`, and `issue_number`. + 2. **Create Task Directory**: Create a dedicated directory to store all context for this task. Use a unique identifier for the directory name, like the task ID. For example: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/`. + + mkdir -p .roo/temp/issue-fixer-orchestrator/[TASK_ID] + + 3. **Retrieve Issue Details**: Fetch the issue details and its comments as a single JSON object. + + gh issue view [issue_number] --repo [owner]/[repo] --json number,title,body,state,labels,assignees,milestone,createdAt,updatedAt,closedAt,author,comments > .roo/temp/issue-fixer-orchestrator/[TASK_ID]/issue_context.json + + 4. **Handle Auth Errors**: If the `gh` command fails with an authentication error, prompt the user to log in. + + GitHub CLI is not authenticated. Please run 'gh auth login' in your terminal, then let me know when you're ready to continue. + + I've authenticated, please continue + + + 5. **Confirm Context**: Inform the user that the context has been saved. + + + + + Delegate: Analyze Requirements & Explore Codebase + + Launch a subtask in `architect` mode to perform a detailed analysis of the issue and the codebase. The subtask will be responsible for identifying affected files and creating an implementation plan. + + The context file `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/issue_context.json` will be the input for this subtask. The subtask should write its findings (the implementation plan) to a new file: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/implementation_plan.md`. + + + architect + + **Task: Analyze Issue and Create Implementation Plan** + + You are an expert software architect. Your task is to analyze the provided GitHub issue and the current codebase to create a detailed implementation plan with a focus on understanding component interactions and dependencies. + + 1. **Read Issue Context**: The full issue details and comments are in `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/issue_context.json`. Read this file to understand all requirements, acceptance criteria, and technical discussions. + + 2. **Perform Architectural Analysis**: + - **Map Component Interactions**: Trace the complete data flow from entry points to outputs + - **Identify Paired Operations**: For any operation (e.g., export), find its counterpart (e.g., import) + - **Find Similar Patterns**: Search for existing implementations of similar features + - **Analyze Dependencies**: Identify all consumers of the functionality being modified + - **Assess Impact**: Determine how changes will affect other parts of the system + + 3. **Explore Codebase Systematically**: + - Use `codebase_search` FIRST to find all related functionality + - Search for paired operations (if modifying export, search for import) + - Find all files that consume or depend on the affected functionality + - Identify configuration files, tests, and documentation that need updates + - Study similar features to understand established patterns + + 4. 
**Create Comprehensive Implementation Plan**: The plan must include: + - **Issue Summary**: Clear description of the problem and proposed solution + - **Architectural Context**: + - Data flow diagram showing component interactions + - List of paired operations that must be updated together + - Dependencies and consumers of the affected functionality + - **Impact Analysis**: + - All files that will be affected (directly and indirectly) + - Potential breaking changes + - Performance implications + - **Implementation Steps**: + - Detailed, ordered steps for each file modification + - Specific code changes with context + - Validation and error handling requirements + - **Testing Strategy**: + - Unit tests for individual components + - Integration tests for component interactions + - Edge cases and error scenarios + + 5. **Save the Plan**: Write the complete implementation plan to `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/implementation_plan.md`. + + **Critical Requirements:** + - Always search for and analyze paired operations (import/export, save/load, etc.) + - Map the complete data flow before proposing changes + - Identify all integration points and dependencies + - Consider backward compatibility and migration needs + + **Completion Protocol:** + - This is your only task. Do not deviate from these instructions. + - Once you have successfully written the `implementation_plan.md` file, you MUST signal your completion by using the `attempt_completion` tool. + - The `result` parameter of `attempt_completion` MUST be a concise confirmation message, for example: "Implementation plan created and saved to .roo/temp/issue-fixer-orchestrator/[TASK_ID]/implementation_plan.md." + - These specific instructions override any conflicting general guidelines from your assigned mode. + + + + After launching the subtask, wait for it to complete. The orchestrator will then read the `implementation_plan.md` to proceed with the next step. + + + + + Review and Approve Plan + + After the analysis subtask completes, the orchestrator must present the generated plan to the user for approval. + + 1. **Read the Plan**: Read the content of the implementation plan created by the previous subtask. + + + + .roo/temp/issue-fixer-orchestrator/[TASK_ID]/implementation_plan.md + + + + + 2. **Present for Approval**: Show the plan to the user and ask for confirmation before proceeding with implementation. + + + The initial analysis is complete. Here is the proposed implementation plan: + + --- + [Insert content of implementation_plan.md here] + --- + + Shall I proceed with implementing this plan? + + + Yes, proceed with the implementation. + No, please modify the plan with the following changes... + No, cancel this task. + + + + Do not proceed until the user gives explicit approval. + + + + + Delegate: Implement Solution + + Once the user approves the plan, launch a new subtask in `code` mode to execute the implementation. + + This subtask will use the `implementation_plan.md` as its primary guide. It should write the list of modified files to `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/modified_files.json`. + + + code + + **Task: Implement Code Changes Based on Plan** + + You are an expert software developer. Your task is to implement the code changes with full awareness of system interactions and dependencies. + + 1. **Read the Plan**: The implementation plan is located at `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/implementation_plan.md`. 
Pay special attention to: + - The architectural context section + - Component interaction diagrams + - Identified dependencies and related operations + - Impact analysis + + 2. **Validate Understanding**: Before coding, ensure you understand: + - How data flows through the system + - All related operations that must be updated together + - Dependencies that could be affected + - Integration points with other components + + 3. **Implement Holistically**: + - **Update Related Operations Together**: If modifying one operation, update all related operations + - **Maintain Consistency**: Ensure data structures, validation, and error handling are consistent + - **Consider Side Effects**: Account for how changes propagate through the system + - **Follow Existing Patterns**: Use established patterns from similar features + + 4. **Implement Tests**: + - Write tests that verify component interactions + - Test related operations together + - Include edge cases and error scenarios + - Verify data consistency across operations + + 5. **Track Modified Files**: As you modify or create files, keep a running list. + + 6. **Save Modified Files List**: After all changes are implemented and tested, save the list of all file paths you created or modified to `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/modified_files.json`. The format should be a JSON array of strings. + Example: `["src/components/NewFeature.tsx", "src/__tests__/NewFeature.spec.ts"]` + + **Critical Reminders:** + - Never implement changes in isolation - consider the full system impact + - Always update related operations together to maintain consistency + - Test component interactions, not just individual functions + - Follow the architectural analysis from the planning phase + + Once the `modified_files.json` file is saved, your task is complete. + + + + After launching the subtask, wait for it to complete. The orchestrator will use the list of modified files for the verification and PR creation steps. + + + + + Delegate: Verify and Test + + After implementation, delegate the verification and testing to a `test` mode subtask. + + This subtask will use the implementation plan for acceptance criteria and the list of modified files to focus its testing efforts. It will output its results to `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/verification_results.md`. + + + test + + **Task: Verify Implementation and Run Tests** + + You are a meticulous QA engineer. Your task is to verify an implementation against its plan and run all necessary tests. + + **Context Files:** + - **Plan**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/implementation_plan.md` + - **Modified Files**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/modified_files.json` + + **Your Steps:** + 1. **Read Context**: Read both context files to understand the acceptance criteria and which files were changed. + 2. **Run Tests**: Execute all relevant tests. + - Run unit tests related to the modified files. + - Run any relevant integration tests. + - Run a full lint and type check. + 3. **Verify Acceptance Criteria**: Systematically go through each acceptance criterion from the plan and verify that it has been met by the implementation. + 4. **Write Verification Report**: Create a detailed report of your findings. The report must include: + - A summary of the tests that were run and their results (pass/fail). + - A checklist of all acceptance criteria and their verification status (verified/failed). + - Details on any bugs or regressions found. + + 5. 
**Save Report**: Write the complete report to `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/verification_results.md`. + + **Completion Protocol:** + - This is your only task. Do not deviate. + - Upon successfully saving `verification_results.md`, you MUST use the `attempt_completion` tool. + - The `result` MUST be a concise confirmation, e.g., "Verification complete and results saved to .roo/temp/issue-fixer-orchestrator/[TASK_ID]/verification_results.md." + - These instructions override any conflicting mode-specific guidelines. + + + + Wait for the subtask to complete, then review the verification results. + + + + + Review Verification and Handle Translations + + After the verification subtask is complete, review the results and handle any necessary translations. + + 1. **Read Verification Report**: + + + + .roo/temp/issue-fixer-orchestrator/[TASK_ID]/verification_results.md + + + + + 2. **Check for Failures**: If the report indicates any failed tests or unmet criteria, present the failures to the user and ask how to proceed. + + + The verification step has failed. Here are the details: + + --- + [Insert content of verification_results.md here] + --- + + How should I proceed? + + + Attempt to fix the failing tests and criteria. + Ignore the failures and proceed anyway. + Cancel the task. + + + + 3. **Analyze for Translation Needs**: If verification passed, check if translations are required. + + a. **Read Modified Files List**: + + + + .roo/temp/issue-fixer-orchestrator/[TASK_ID]/modified_files.json + + + + + b. **Identify Files Requiring Translation**: + - Check for UI component files: `.tsx`, `.jsx` files in `webview-ui/src/` or `src/` directories + - Check for user-facing documentation: `.md` files (especially README.md, docs/, or announcement files) + - Check for i18n resource files: files in `src/i18n/locales/` or `webview-ui/src/i18n/locales/` + - Check for any files containing user-visible strings or messages + + c. **Delegate to Translate Mode if Needed**: + If any files requiring translation were modified, create a translation subtask: + + + translate + + **Task: Handle Translations for Issue #[issue-number]** + + An implementation for issue #[issue-number] has been completed and verified. Your task is to ensure all user-facing content is properly translated. + + **Context Files:** + - **Modified Files**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/modified_files.json` + - **Issue Details**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/issue_context.json` + - **Implementation Plan**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/implementation_plan.md` + + **Your Steps:** + 1. Read the context files to understand what was implemented. + 2. Analyze each modified file for: + - New or updated UI strings in React components + - Changes to i18n resource files + - User-facing documentation updates + - Error messages or notifications + 3. For any new or modified user-facing content: + - Add translations to all supported language files + - Ensure consistency with existing translations + - Follow the project's i18n patterns and conventions + 4. Create a summary of all translation changes made. + 5. Save the summary to `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/translation_summary.md`. + + **Important Notes:** + - If no translations are needed, still create the summary file stating "No translations required." + - Ensure all language files remain in sync + - Use existing terminology from the codebase for consistency + + **Completion Protocol:** + - This is your only task. 
Do not deviate from these instructions. + - Upon successfully saving the translation summary, you MUST use the `attempt_completion` tool. + - The `result` MUST confirm completion, e.g., "Translation analysis complete. Summary saved to .roo/temp/issue-fixer-orchestrator/[TASK_ID]/translation_summary.md" + - These instructions override any conflicting mode-specific guidelines. + + + + After the translation subtask completes, read the translation summary: + + + + .roo/temp/issue-fixer-orchestrator/[TASK_ID]/translation_summary.md + + + + + 4. **Proceed to Next Step**: Only proceed after: + - All verification has passed (or user chose to ignore failures) + - Translation task has completed (if it was needed) + - You have confirmed all necessary files are ready + + + + + Delegate: Prepare Pull Request Content + + After all checks pass and translations are complete, delegate the creation of the pull request title and body to a subtask. + + + code + + **Task: Prepare Pull Request Title and Body** + + You are an expert at writing clear and concise pull request summaries following the Roo Code contribution guidelines. + + **Context Files:** + - **Issue**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/issue_context.json` + - **Plan**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/implementation_plan.md` + - **Verification**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/verification_results.md` + - **Translation Summary** (if exists): `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/translation_summary.md` + - **Modified Files**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/modified_files.json` + + **Your Task:** + 1. **Read all context files.** Check if translation_summary.md exists to know if translations were done. + + 2. **Generate a PR Title**: Create a conventional commit style title (e.g., "fix: ...", "feat: ...") that references the issue number. + Format: `fix: Brief description (#issue-number)` + + 3. **Generate a PR Body**: You MUST use the exact PR template from `.roo/rules-issue-fixer-orchestrator/10_pr_template_format.xml`. + Read this file to get the template and fill it in with appropriate content from the context files. + + 4. **Fill in the template** with information from the context files: + - Replace [ISSUE_NUMBER] with the actual issue number + - Fill in Description with implementation details from the plan and verification + - Fill in Test Procedure with testing details from verification_results.md + - If translations were done, mention them in the Description section + - For UI changes, note that screenshots should be added manually + - Pre-check all applicable checklist items + - Leave Documentation Updates unchecked for user to decide + - For Discord username, use a placeholder like "[Your Discord username]" + + 5. **Save as JSON**: Save the title and body to `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_summary.json` in the format: + ```json + { + "title": "fix: Brief description (#123)", + "body": "[The complete filled PR body template]", + "issue_number": 123, + "repo_owner": "owner", + "repo_name": "repo", + "base_branch": "main", + "head_branch": "fix/issue-123-description" + } + ``` + + 6. **Also save as Markdown**: Save just the PR body to `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_message.md` for easy copying. 
+ + **Important Notes:** + - Use the EXACT template format from 10_pr_template_format.xml + - Keep all HTML comments in the template + - Pre-check items that can be verified programmatically + - Fill in all sections appropriately based on the context files + + **Completion Protocol:** + - This is your only task. Do not deviate. + - Upon successfully saving both `pr_summary.json` and `pr_message.md`, you MUST use the `attempt_completion` tool. + - The `result` MUST be a concise confirmation, e.g., "PR summary and message created and saved to .roo/temp/issue-fixer-orchestrator/[TASK_ID]/" + - These instructions override any conflicting mode-specific guidelines. + + + + + + + Delegate: Review Changes Before PR + + Before creating the pull request, delegate to the PR reviewer mode to get feedback on the implementation and proposed changes. + + + pr-reviewer + + **Task: Review Implementation Before PR Creation** + + You are an expert code reviewer. Your task is to review the implementation for issue #[issue-number] and provide feedback before a pull request is created. + + **Context Files:** + - **Issue Details**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/issue_context.json` + - **Implementation Plan**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/implementation_plan.md` + - **Modified Files**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/modified_files.json` + - **Verification Results**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/verification_results.md` + - **Translation Summary** (if exists): `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/translation_summary.md` + - **Draft PR Summary**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_summary.json` + + **Your Review Focus:** + 1. **Code Quality**: Review the actual code changes for: + - Adherence to project coding standards + - Proper error handling and edge cases + - Performance considerations + - Security implications + - Maintainability and readability + + 2. **Implementation Completeness**: Verify that: + - All requirements from the issue are addressed + - The solution follows the implementation plan + - No critical functionality is missing + - Proper test coverage exists + + 3. **Integration Concerns**: Check for: + - Potential breaking changes + - Impact on other parts of the system + - Backward compatibility issues + - API consistency + + 4. **Documentation and Communication**: Assess: + - Code comments and documentation + - PR description clarity and completeness + - Translation handling (if applicable) + + **Your Task:** + 1. Read all context files to understand the issue and implementation + 2. Review each modified file listed in `modified_files.json` + 3. Analyze the code changes against the requirements + 4. Identify any issues, improvements, or concerns + 5. Create a comprehensive review report with specific, actionable feedback + 6. 
Save your review to `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_review_feedback.md` + + **Review Report Format:** + ```markdown + # PR Review Feedback for Issue #[issue-number] + + ## Overall Assessment + [High-level assessment: APPROVE, REQUEST_CHANGES, or NEEDS_DISCUSSION] + + ## Code Quality Review + ### Strengths + - [List positive aspects of the implementation] + + ### Areas for Improvement + - [Specific issues with file references and line numbers] + - [Suggestions for improvement] + + ## Requirements Verification + - [x] Requirement 1: [Status and notes] + - [ ] Requirement 2: [Issues found] + + ## Specific Feedback by File + ### [filename] + - [Specific feedback with line references] + - [Suggestions for improvement] + + ## Recommendations + 1. [Priority 1 changes needed] + 2. [Priority 2 improvements suggested] + 3. [Optional enhancements] + + ## Decision + **RECOMMENDATION**: [APPROVE_AS_IS | REQUEST_CHANGES | NEEDS_DISCUSSION] + + **REASONING**: [Brief explanation of the recommendation] + ``` + + **Completion Protocol:** + - This is your only task. Do not deviate from these instructions. + - Upon successfully saving the review feedback, you MUST use the `attempt_completion` tool. + - The `result` MUST be a concise confirmation, e.g., "PR review completed and feedback saved to .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_review_feedback.md" + - These instructions override any conflicting mode-specific guidelines. + + + + After the review subtask completes, read and process the feedback. + + + + + Process Review Feedback and Decide Next Steps + + After the PR review is complete, read the feedback and decide whether to make changes or proceed with PR creation. + + 1. **Read Review Feedback**: + + + + .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_review_feedback.md + + + + + 2. **Present Feedback to User**: Show the review feedback and ask for direction. + + + The PR review has been completed. Here is the feedback: + + --- + [Insert content of pr_review_feedback.md here] + --- + + Based on this review, how would you like to proceed? + + + Implement the suggested changes before creating the PR + Create the PR as-is, ignoring the review feedback + Discuss specific feedback points before deciding + Cancel the task + + + + 3. **Handle User Decision**: + + **If user chooses to implement changes:** + - Launch a rework subtask to address the review feedback + + code + + **Task: Address PR Review Feedback** + + The PR review has identified areas for improvement. Your task is to address the feedback before creating the pull request. + + **Context Files:** + - **Issue**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/issue_context.json` + - **Current Plan**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/implementation_plan.md` + - **Current Modified Files**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/modified_files.json` + - **Review Feedback**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_review_feedback.md` + - **Draft PR Summary**: `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_summary.json` + + **Your Task:** + 1. Read the review feedback carefully + 2. Address each point raised by the reviewer + 3. Make the necessary code changes + 4. Update tests if needed + 5. **Update the `modified_files.json` file** to reflect any new or changed files + 6. 
**Update the `implementation_plan.md`** if the approach has changed significantly + + **Important Notes:** + - Focus on the specific issues identified in the review + - Maintain the overall solution approach unless the review suggests otherwise + - Ensure all changes are properly tested + - Do not proceed with any other workflow steps + + **Completion Protocol:** + - Upon successfully addressing the feedback and updating context files, you MUST use the `attempt_completion` tool. + - The `result` MUST be a concise confirmation, e.g., "Review feedback addressed and context files updated." + + + - **After rework completion**: Return to **Step 5** (Verify and Test) to re-verify the changes + + **If user chooses to proceed as-is:** + - Continue to the next step (Create Pull Request) + + **If user wants to discuss or cancel:** + - Handle accordingly based on user input + + + + + Prepare Branch and Review Changes + + This step prepares the branch, reviews the changes, and gets user confirmation before committing. + + 1. Read Issue Context and PR Summary: + - Read issue context from .roo/temp/issue-fixer-orchestrator/[TASK_ID]/issue_context.json + - Read PR summary from .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_summary.json + + 2. Create Branch: + Extract issue number from context and create appropriate branch: + + + # Extract issue number from context + ISSUE_NUM=$(cat .roo/temp/issue-fixer-orchestrator/[TASK_ID]/issue_context.json | jq -r '.number') + # Determine branch type based on labels or title + BRANCH_NAME="fix/issue-${ISSUE_NUM}-solution" + git checkout -b $BRANCH_NAME + + + + 3. Review Files to be Committed: + a. Read the modified files list: + + .roo/temp/issue-fixer-orchestrator/[TASK_ID]/modified_files.json + + + b. Check git status to ensure only intended files are staged: + + git status --porcelain + + + c. Stage only the files from modified_files.json: + + + # Stage only the files we actually modified + cat .roo/temp/issue-fixer-orchestrator/[TASK_ID]/modified_files.json | jq -r '.[]' | while read file; do + if [ -f "$file" ]; then + git add "$file" + fi + done + # Show what will be committed + git status --short + + + + 4. Generate and Save Merge Diff: + + + # Generate diff of staged changes + git diff --cached > .roo/temp/issue-fixer-orchestrator/[TASK_ID]/staged_changes.diff + # Count lines in diff + LINE_COUNT=$(wc -l < .roo/temp/issue-fixer-orchestrator/[TASK_ID]/staged_changes.diff) + echo "Diff contains $LINE_COUNT lines" + + # If diff is too large, create a summary instead + if [ $LINE_COUNT -gt 2000 ]; then + echo "Creating summary due to large diff size..." + git diff --cached --stat > .roo/temp/issue-fixer-orchestrator/[TASK_ID]/staged_changes_summary.txt + git diff --cached --name-status >> .roo/temp/issue-fixer-orchestrator/[TASK_ID]/staged_changes_summary.txt + echo -e "\n\nNote: Full diff exceeds 2000 lines. Showing summary only." >> .roo/temp/issue-fixer-orchestrator/[TASK_ID]/staged_changes_summary.txt + fi + + + + 5. 
Save PR Message to File: + Read the PR summary and save the formatted PR message: + + + # Extract title and body from pr_summary.json + PR_TITLE=$(cat .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_summary.json | jq -r '.title') + PR_BODY=$(cat .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_summary.json | jq -r '.body') + + # Save PR message to file + echo "PR Title: $PR_TITLE" > .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_message.md + echo -e "\n---\n" >> .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_message.md + echo "$PR_BODY" >> .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_message.md + + echo "PR message saved to .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_message.md" + + + + 6. Present Changes for User Confirmation: + Read the diff (or summary) and PR message, then ask for confirmation: + + a. Read the appropriate diff file: + + .roo/temp/issue-fixer-orchestrator/[TASK_ID]/staged_changes.diff + + OR (if summary exists): + + .roo/temp/issue-fixer-orchestrator/[TASK_ID]/staged_changes_summary.txt + + + b. Read the PR message: + + .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_message.md + + + c. Ask for user confirmation: + + + I've prepared the changes for commit. Here's what will be included: + + **Files to be committed:** + [List files from modified_files.json] + + **Changes summary:** + [Include diff or summary content] + + **Proposed PR message:** + [Include PR message content] + + How would you like to proceed? + + + Looks good, go ahead and commit the changes + I tested the changes, and something is wrong - let me explain... + I still need to test the changes manually before committing + I want to modify the PR message before proceeding + Cancel this task + + + + 7. Handle User Decision: + Based on user response: + + **If "Looks good, go ahead and commit":** + - Proceed to commit and push: + + + # Get PR title for commit message + PR_TITLE=$(cat .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_summary.json | jq -r '.title') + git commit -m "$PR_TITLE" + git push -u origin $(git branch --show-current) + + + - Continue to next step + + **If "something is wrong":** + - Ask for details about the issue + - Launch a fix subtask in code mode to address the problems + - Return to Step 5 (Verify and Test) after fixes + + **If "need to test manually":** + - Inform user that changes are staged but not committed + - Provide instructions for testing + - Wait for user to confirm testing is complete + + **If "modify PR message":** + - Ask for the new PR title and body + - Update pr_summary.json with new content + - Return to step 5 of this workflow + + **If "cancel":** + - Clean up staged changes and branch + - End workflow + + + + + Finalize and Create PR + + After user confirmation and successful commit, finalize the process. + + 1. Verify Push Success: + + + # Verify the branch was pushed + git branch -r | grep $(git branch --show-current) + # Get the remote URL for PR creation + git remote get-url origin + + + + 2. Present Final PR Creation Options: + + + The changes have been committed and pushed to the branch. The PR message has been saved to: + `.roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_message.md` + + You can now: + 1. Create the PR manually using the saved message + 2. Let me create the PR automatically + 3. Make additional changes before creating the PR + + What would you like to do? + + + Create the PR automatically using the saved message + I'll create the PR manually - just show me the branch name + I want to make more changes first + + + + 3. 
Handle Final Decision: + + **If "Create PR automatically":** + - Read PR details and create: + + + # Extract PR details + PR_TITLE=$(cat .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_summary.json | jq -r '.title') + PR_BODY=$(cat .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_summary.json | jq -r '.body') + ISSUE_NUM=$(cat .roo/temp/issue-fixer-orchestrator/[TASK_ID]/issue_context.json | jq -r '.number') + + # Create PR + gh pr create --title "$PR_TITLE" --body "$PR_BODY" --base main + + # Get PR number and link to issue + PR_NUM=$(gh pr list --head $(git branch --show-current) --json number -q '.[0].number') + gh issue comment $ISSUE_NUM --body "PR #$PR_NUM has been created to address this issue." + + + + **If "Create manually":** + - Show branch name and location of PR message: + + + echo "Branch name: $(git branch --show-current)" + echo "PR message saved at: .roo/temp/issue-fixer-orchestrator/[TASK_ID]/pr_message.md" + echo "You can copy the PR message from the file above when creating the PR." + + + + **If "Make more changes":** + - Inform user they can continue working on the branch + - Provide the task directory location for reference + + 4. Cleanup: + Ask if user wants to clean up the temporary files: + + + Would you like me to clean up the temporary task files now, or keep them for reference? + + + Yes, clean up the temporary files + No, keep the files for now + + + + If yes, clean up: + + rm -rf .roo/temp/issue-fixer-orchestrator/[TASK_ID] + + + + + + Monitor PR (Optional) + + If a PR was created automatically, offer to monitor its status. + + 1. Check if PR exists: + + + # Check if there's a PR for the current branch + BRANCH=$(git branch --show-current) + gh pr list --head $BRANCH --json number,state,checks + + + + 2. If PR exists, offer monitoring: + + + Would you like me to monitor the PR checks and CI status? + + + Yes, monitor the PR checks + No, I'll check it myself + + + + 3. If user wants monitoring: + + + PR_NUM=$(gh pr list --head $(git branch --show-current) --json number -q '.[0].number') + echo "Monitoring PR #$PR_NUM checks..." + gh pr checks $PR_NUM --watch + + + + This concludes the orchestration workflow. + + + \ No newline at end of file diff --git a/.roo/rules-issue-fixer-orchestrator/2_best_practices.xml b/.roo/rules-issue-fixer-orchestrator/2_best_practices.xml new file mode 100644 index 000000000000..839d862fb854 --- /dev/null +++ b/.roo/rules-issue-fixer-orchestrator/2_best_practices.xml @@ -0,0 +1,120 @@ + + - Always read the entire issue and all comments before starting + - Follow the project's coding standards and patterns + - Focus exclusively on addressing the issue's requirements. + - Make minimal, high-quality changes for bug fixes. The goal is a narrow, targeted fix, not a one-line hack. 
+ - Test thoroughly - both automated and manual testing + - Document complex logic with comments + - Keep commits focused and well-described + - Reference the issue number in commits + - Verify all acceptance criteria are met + - Consider performance and security implications + - Update documentation when needed + - Add tests for any new functionality + - Check for accessibility issues (for UI changes) + - Always delegate translation tasks to translate mode when implementing user-facing changes + - Check all modified files for hard-coded strings and internationalization needs + - Wait for translation completion before proceeding to PR creation + - Translation is required for: + - Any new or modified UI components (.tsx, .jsx files) + - User-facing documentation changes (.md files) + - Error messages and notifications + - Any strings visible to end users + - The translate mode will handle: + - Adding translations to all supported language files + - Ensuring consistency with existing terminology + - Maintaining sync across all language resources + + + Always verify files before committing + + - Review git status to ensure only intended files are staged + - Stage only files listed in modified_files.json + - Never commit unrelated changes or temporary files + - Always get user confirmation before committing + + + + - Save full diff to staged_changes.diff for review + - If diff exceeds 2000 lines, create a summary instead + - Summary should include file stats and change types + - Always inform user when showing summary vs full diff + + + + - Save PR message in both JSON and Markdown formats + - pr_summary.json for programmatic use + - pr_message.md for manual PR creation + - Include all standard template sections + - Make PR message self-contained and ready to use + + + + - Always ask for confirmation with clear options + - First option should be "Looks good, go ahead" + - Provide options for testing and issue reporting + - Allow PR message modification before proceeding + - Handle each user response appropriately + + + + - All delegated tasks must save outputs to .roo/temp/issue-fixer-orchestrator/[TASK_ID]/ + - Keep all context files until user confirms cleanup + - Offer cleanup option after PR creation + - Never delete files without user permission + + + + Always use `codebase_search` FIRST to understand the codebase structure and find all related files before using other tools like `read_file`. + + + Critical: Understand Component Interactions + + Map the complete data flow from input to output + Identify ALL paired operations (import/export, save/load, encode/decode) + Find all consumers and dependencies of the affected code + Trace how data transformations occur throughout the system + Understand error propagation and handling patterns + + + + + Investigation Checklist for Bug Fixes + Search for the specific error message or broken functionality. + Find all relevant error handling and logging statements. + Locate related test files to understand expected behavior. + Identify all dependencies and import/export patterns for the affected code. + Find similar, working patterns in the codebase to use as a reference. + **CRITICAL**: For any operation being fixed, find and analyze its paired operations + Trace the complete data flow to understand all affected components + + + + Investigation Checklist for New Features + Search for any similar existing features to use as a blueprint. + Find potential integration points (e.g., API routes, UI component registries). 
+ Locate relevant configuration files that may need to be updated. + Identify common patterns, components, and utilities that should be reused. + **CRITICAL**: Design paired operations together (e.g., both import AND export) + Map all data transformations and state changes + Identify all downstream consumers of the new functionality + + + + Always Implement Paired Operations Together + + When fixing export, ALWAYS check and update import + When modifying save, ALWAYS verify load handles the changes + When changing serialization, ALWAYS update deserialization + When updating create, consider read/update/delete operations + + + Paired operations must maintain consistency. Changes to one without the other leads to data corruption, import failures, or broken functionality. + + + + + Always read multiple related files together to understand the full context. Never assume a change is isolated - trace its impact through the entire system. + + + \ No newline at end of file diff --git a/.roo/rules-issue-fixer-orchestrator/3_common_patterns.xml b/.roo/rules-issue-fixer-orchestrator/3_common_patterns.xml new file mode 100644 index 000000000000..97bba7a9e4e2 --- /dev/null +++ b/.roo/rules-issue-fixer-orchestrator/3_common_patterns.xml @@ -0,0 +1,38 @@ + + + 1. Reproduce the issue + 2. Identify root cause + 3. Implement minimal fix + 4. Add regression test + 5. Verify fix works + 6. Check for side effects + + + + 1. Understand all requirements + 2. Design the solution + 3. Implement incrementally + 4. Test each component + 5. Integrate components + 6. Verify acceptance criteria + 7. Add comprehensive tests + 8. Update documentation + + + + 1. Review git status to identify all changes + 2. Stage only files from modified_files.json + 3. Generate diff for review (full or summary based on size) + 4. Create PR message and save to temp directory + 5. Present changes to user for confirmation + 6. Handle user response: + - If approved: commit and proceed to PR options + - If issues found: return to implementation + - If manual testing needed: wait for user + 7. After commit, offer PR creation options: + - Create PR automatically + - Save PR message for manual creation + - Skip PR creation + 8. Optionally monitor PR and offer cleanup + + \ No newline at end of file diff --git a/.roo/rules-issue-fixer-orchestrator/4_github_cli_usage.xml b/.roo/rules-issue-fixer-orchestrator/4_github_cli_usage.xml new file mode 100644 index 000000000000..e12fb06a5b41 --- /dev/null +++ b/.roo/rules-issue-fixer-orchestrator/4_github_cli_usage.xml @@ -0,0 +1,221 @@ + + + This mode uses the GitHub CLI (gh) for all GitHub operations. + The mode assumes the user has gh installed and authenticated. If authentication errors occur, + the mode will prompt the user to authenticate. + + Users must provide full GitHub issue URLs (e.g., https://github.com/owner/repo/issues/123) + so the mode can extract the repository information dynamically. 
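+
+ As a rough illustration only (the mode may extract these values however it likes), the three parts can be pulled out of such a URL with standard shell tools:
+
+ # Illustrative sketch: split an issue URL into owner, repo, and issue number.
+ URL="https://github.com/octocat/hello-world/issues/123"
+ OWNER=$(echo "$URL" | cut -d'/' -f4)
+ REPO=$(echo "$URL" | cut -d'/' -f5)
+ ISSUE_NUM=$(echo "$URL" | cut -d'/' -f7)
+ echo "owner=$OWNER repo=$REPO issue=$ISSUE_NUM"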
+ + + + https://github.com/[owner]/[repo]/issues/[number] + + - Owner: The organization or username + - Repo: The repository name + - Number: The issue number + + + + + Assume authenticated, handle errors gracefully + Only check authentication if a gh command fails with auth error + + - "gh: Not authenticated" + - "HTTP 401" + - "HTTP 403: Resource not accessible" + + + + + + Retrieve the issue details at the start + Always use first to get the full issue content + gh issue view [issue-number] --repo [owner]/[repo] --json number,title,body,state,labels,assignees,milestone,createdAt,updatedAt,closedAt,author + + + gh issue view 123 --repo octocat/hello-world --json number,title,body,state,labels,assignees,milestone,createdAt,updatedAt,closedAt,author + + + + + + Get additional context and requirements from issue comments + Always use after viewing issue to see full discussion + gh issue view [issue-number] --repo [owner]/[repo] --comments + + + gh issue view 123 --repo octocat/hello-world --comments + + + + + + + Find recent changes to affected files + Use during codebase exploration + gh api repos/[owner]/[repo]/commits?path=[file-path]&per_page=10 + + + gh api repos/octocat/hello-world/commits?path=src/api/index.ts&per_page=10 --jq '.[].sha + " " + .[].commit.message' + + + + + + Search for code patterns on GitHub + Use to supplement local codebase_search + gh search code "[search-query]" --repo [owner]/[repo] + + + gh search code "function handleError" --repo octocat/hello-world --limit 10 + + + + + + + + Add progress updates or ask questions on issues + Use if clarification needed or to show progress + gh issue comment [issue-number] --repo [owner]/[repo] --body "[comment]" + + + gh issue comment 123 --repo octocat/hello-world --body "Working on this issue. Found the root cause in the theme detection logic." + + + + + + Find related or similar PRs + Use to understand similar changes + gh pr list --repo [owner]/[repo] --search "[search-terms]" + + + gh pr list --repo octocat/hello-world --search "dark theme" --limit 10 + + + + + + View the diff of a pull request + Use to understand changes in a PR + gh pr diff [pr-number] --repo [owner]/[repo] + + + gh pr diff 456 --repo octocat/hello-world + + + + + + + + Create a pull request + Use in step 11 after user approval + + - Target the repository from the provided URL + - Use "main" as the base branch unless specified otherwise + - Include issue number in PR title + - Use --maintainer-can-modify flag + + gh pr create --repo [owner]/[repo] --base main --title "[title]" --body "[body]" --maintainer-can-modify + + + gh pr create --repo octocat/hello-world --base main --title "fix: Resolve dark theme button visibility (#123)" --body "## Description + +Fixes #123 + +[Full PR description]" --maintainer-can-modify + + + + If working from a fork, ensure the fork is set as the remote and push the branch there first. + The gh CLI will automatically handle the fork workflow. 
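+
+ A minimal sketch of that fork flow, assuming a remote named "fork" already points at the user's fork (the remote name here is an assumption, not a requirement):
+
+ # Push the current branch to the fork, then open the PR against the upstream repository.
+ BRANCH=$(git branch --show-current)
+ git push -u fork "$BRANCH"
+ gh pr create --repo [owner]/[repo] --base main --title "[title]" --body "[body]" --maintainer-can-modify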
+ + + + + Fork the repository if user doesn't have push access + Use if user needs to work from a fork + gh repo fork [owner]/[repo] --clone + + + gh repo fork octocat/hello-world --clone + + + + + + Monitor CI/CD checks on a pull request + Use after creating PR to ensure checks pass + gh pr checks [pr-number] --repo [owner]/[repo] --watch + + + gh pr checks 789 --repo octocat/hello-world --watch + + + + + + + + Access GitHub API directly for advanced operations + Use when specific gh commands don't provide needed functionality + + + + gh api repos/[owner]/[repo] --jq '.default_branch' + + + + + gh api repos/[owner]/[repo]/contents/README.md --jq '.content' | base64 -d + + + + + gh api repos/[owner]/[repo]/actions/runs --jq '.workflow_runs[0:5] | .[] | .id, .status, .conclusion' + + + + + + Check GitHub Actions workflow status + Use to monitor CI/CD pipeline + gh run list --repo [owner]/[repo] --limit 5 + + + gh run list --repo octocat/hello-world --limit 5 + + + + + + + + gh: Not authenticated. Run 'gh auth login' to authenticate. + + Ask user to authenticate: + + GitHub CLI is not authenticated. Please run 'gh auth login' in your terminal to authenticate, then let me know when you're ready to continue. + + I've authenticated, please continue + I need help with authentication + Let's use a different approach + + + + + + + HTTP 403: Resource not accessible by integration + + Check if working from a fork is needed: + + gh repo fork [owner]/[repo] --clone + + + + + \ No newline at end of file diff --git a/.roo/rules-issue-fixer-orchestrator/5_pull_request_workflow.xml b/.roo/rules-issue-fixer-orchestrator/5_pull_request_workflow.xml new file mode 100644 index 000000000000..041fa0347cb7 --- /dev/null +++ b/.roo/rules-issue-fixer-orchestrator/5_pull_request_workflow.xml @@ -0,0 +1,106 @@ + + + 1. Ensure all changes are committed with proper message format + 2. Push to appropriate branch (fork or direct) + 3. Prepare comprehensive PR description + 4. Get user approval before creating PR + 5. Extract owner and repo from the provided GitHub URL + + + + - Bug fixes: "fix: [description] (#[issue-number])" + - Features: "feat: [description] (#[issue-number])" + - Follow conventional commit format + + + A comprehensive PR description is critical. 
The subtask responsible for preparing the PR content should generate a body that includes the following markdown structure: + + ```markdown + ## Description + + Fixes #[issue number] + + [Detailed description of what was changed and why] + + ## Changes Made + + - [Specific change 1 with file references] + - [Specific change 2 with technical details] + - [Any refactoring or cleanup done] + + ## Testing + + - [x] All existing tests pass + - [x] Added tests for [specific functionality] + - [x] Manual testing completed: + - [Specific manual test 1] + - [Specific manual test 2] + + ## Translations + + [If translations were added/updated] + - [x] All user-facing strings have been translated + - [x] Updated language files: [list of languages] + - [x] Translations reviewed for consistency + + [If no translations needed] + - No user-facing string changes in this PR + + ## Verification of Acceptance Criteria + + [For each criterion from the issue, show it's met] + - [x] Criterion 1: [How it's verified] + - [x] Criterion 2: [How it's verified] + + ## Checklist + + - [x] Code follows project style guidelines + - [x] Self-review completed + - [x] Comments added for complex logic + - [x] Documentation updated (if needed) + - [x] No breaking changes (or documented if any) + - [x] Accessibility checked (for UI changes) + - [x] Translations added/updated (for UI changes) + + ## Screenshots/Demo (if applicable) + + [Add before/after screenshots for UI changes] + [Add terminal output for CLI changes] + ``` + + + + Use a consistent format for branch names. + + - Bug fixes: `fix/issue-[number]-[brief-description]` + - Features: `feat/issue-[number]-[brief-description]` + + + + + Use GitHub CLI to create the pull request: + + gh pr create --repo [owner]/[repo] --base main --title "[title]" --body "[description]" --maintainer-can-modify + + + If working from a fork, ensure you've forked first: + + gh repo fork [owner]/[repo] --clone + + + The gh CLI automatically handles fork workflows. + + + + 1. Comment on original issue with PR link: + + gh issue comment [issue-number] --repo [owner]/[repo] --body "PR #[pr-number] has been created to address this issue: [PR URL]" + + 2. Inform user of successful creation + 3. Provide next steps and tracking info + 4. 
Monitor PR checks: + + gh pr checks [pr-number] --repo [owner]/[repo] --watch + + + \ No newline at end of file diff --git a/.roo/rules-issue-fixer-orchestrator/6_testing_guidelines.xml b/.roo/rules-issue-fixer-orchestrator/6_testing_guidelines.xml new file mode 100644 index 000000000000..721a89f2b94f --- /dev/null +++ b/.roo/rules-issue-fixer-orchestrator/6_testing_guidelines.xml @@ -0,0 +1,10 @@ + + - Always run existing tests before making changes (baseline) + - Add tests for any new functionality + - Add regression tests for bug fixes + - Test edge cases and error conditions + - Run the full test suite before completing + - For UI changes, test in multiple themes + - Verify accessibility (keyboard navigation, screen readers) + - Test performance impact for large operations + \ No newline at end of file diff --git a/.roo/rules-issue-fixer-orchestrator/7_communication_style.xml b/.roo/rules-issue-fixer-orchestrator/7_communication_style.xml new file mode 100644 index 000000000000..898269cc0c8b --- /dev/null +++ b/.roo/rules-issue-fixer-orchestrator/7_communication_style.xml @@ -0,0 +1,27 @@ + + - Be clear about what you're doing at each step + - Explain technical decisions and trade-offs + - Ask for clarification if requirements are ambiguous + - Provide regular progress updates for complex issues + - Summarize changes clearly for non-technical stakeholders + - Use issue numbers and links for reference + - Inform the user when delegating to translate mode + - Include translation status in progress updates + - Mention in PR description if translations were added + + + - Clearly list all files that will be committed + - Explain when showing a summary vs full diff (>2000 lines) + - Provide file statistics for large diffs + - Mention that PR message has been saved to temp directory + - Offer clear options for user to proceed or report issues + + + + - Confirm successful commit with commit hash + - Explain PR creation options clearly + - Mention that PR message is saved and ready to use + - Provide path to PR message file for manual creation + - Offer cleanup option after PR is created + + \ No newline at end of file diff --git a/.roo/rules-issue-fixer-orchestrator/8_github_communication_guidelines.xml b/.roo/rules-issue-fixer-orchestrator/8_github_communication_guidelines.xml new file mode 100644 index 000000000000..627908f1f7cd --- /dev/null +++ b/.roo/rules-issue-fixer-orchestrator/8_github_communication_guidelines.xml @@ -0,0 +1,16 @@ + + + - Provide brief status updates when working on complex issues + - Ask specific questions if requirements are unclear + - Share findings when investigation reveals important context + - Keep progress updates factual and concise + - Example: "Found the root cause in the theme detection logic. Working on a fix that preserves backward compatibility." + + + + - Follow conventional commit format: "type: description (#issue-number)" + - Keep first line under 72 characters + - Be specific about what changed + - Example: "fix: resolve button visibility in dark theme (#123)" + + \ No newline at end of file diff --git a/.roo/rules-issue-fixer-orchestrator/9_translation_handling.xml b/.roo/rules-issue-fixer-orchestrator/9_translation_handling.xml new file mode 100644 index 000000000000..15d196263cf6 --- /dev/null +++ b/.roo/rules-issue-fixer-orchestrator/9_translation_handling.xml @@ -0,0 +1,125 @@ + + + The issue-fixer-orchestrator mode must ensure all user-facing content is properly translated before creating a pull request. 
This is achieved by delegating translation tasks to the specialized translate mode. + + + + + Any changes to React/Vue/Angular components + + - webview-ui/src/**/*.tsx + - webview-ui/src/**/*.jsx + - src/**/*.tsx (if contains UI elements) + + + - New text strings in JSX + - Updated button labels, tooltips, or placeholders + - Error messages displayed to users + - Any hardcoded strings that should use i18n + + + + + User-facing documentation changes + + - README.md + - docs/**/*.md + - webview-ui/src/components/chat/Announcement.tsx + - Any markdown files visible to end users + + + + + Direct changes to translation files + + - src/i18n/locales/**/*.json + - webview-ui/src/i18n/locales/**/*.json + + When English (en) locale is updated, all other locales must be synchronized + + + + New or modified error messages + + - API error responses + - Validation messages + - System notifications + - Status messages + + + + + + + Detect Translation Needs + + - Read the modified_files.json from the implementation step + - Check each file against the patterns above + - Determine if any user-facing content was changed + + + + + Prepare Translation Context + + - Gather all context files (issue details, implementation plan, modified files) + - Identify specific strings or content that need translation + - Note any special terminology or context from the issue + + + + + Delegate to Translate Mode + + - Use new_task to create a translation subtask + - Provide clear instructions about what needs translation + - Include paths to all context files + - Specify expected output (translation_summary.md) + + + + + Verify Translation Completion + + - Wait for the translate mode subtask to complete + - Read the translation_summary.md file + - Confirm all necessary translations were handled + - Only proceed to PR creation after confirmation + + + + + + Template for creating translation subtasks + + - Clear identification of the issue being fixed + - List of modified files requiring translation review + - Path to context files for understanding the changes + - Specific instructions for what to translate + - Expected output format and location + + + + + Always check for translations AFTER verification passes + Don't skip translation even for "minor" UI changes + Ensure the translate mode has access to full context + Wait for translation completion before creating PR + Include translation changes in the PR description + + + + + Assuming no translations needed without checking + Always analyze modified files for user-facing content + + + Proceeding to PR creation before translations complete + Wait for translation_summary.md confirmation + + + Not providing enough context to translate mode + Include issue details and implementation plan + + + \ No newline at end of file diff --git a/.roo/rules-issue-fixer/1_Workflow.xml b/.roo/rules-issue-fixer/1_Workflow.xml index 40971a106475..db1e968c6738 100644 --- a/.roo/rules-issue-fixer/1_Workflow.xml +++ b/.roo/rules-issue-fixer/1_Workflow.xml @@ -1,62 +1,41 @@ - Determine Workflow Type and Retrieve Context + Retrieve Issue Context - First, determine what type of work is needed. 
The user will provide either: - - An issue number/URL (e.g., "#123" or GitHub issue URL) - for new implementation - - A PR number/URL (e.g., "#456" or GitHub PR URL) - for addressing review feedback - - A description of changes needed for an existing PR - - For Issue-based workflow: - Extract the issue number and retrieve it: - - - github - get_issue - - { - "owner": "RooCodeInc", - "repo": "Roo-Code", - "issue_number": [extracted number] - } - - - - For PR Review workflow: - Extract the PR number and retrieve it: - - - github - get_pull_request - - { - "owner": "RooCodeInc", - "repo": "Roo-Code", - "pull_number": [extracted number] - } - - - - Then get PR review comments: - - - github - get_pull_request_reviews - - { - "owner": "RooCodeInc", - "repo": "Roo-Code", - "pull_number": [extracted number] - } - - - - Analyze the context to determine: - 1. Type of work (new issue implementation vs PR feedback) - 2. All requirements and acceptance criteria - 3. Specific changes requested (for PR reviews) - 4. Technical details mentioned - 5. Any linked issues or discussions + The user should provide a full GitHub issue URL (e.g., "https://github.com/owner/repo/issues/123") for implementation. + + Parse the URL to extract: + - Owner (organization or username) + - Repository name + - Issue number + + For example, from https://github.com/RooCodeInc/Roo-Code/issues/123: + - Owner: RooCodeInc + - Repo: Roo-Code + - Issue: 123 + + Then retrieve the issue: + + + gh issue view [issue-number] --repo [owner]/[repo] --json number,title,body,state,labels,assignees,milestone,createdAt,updatedAt,closedAt,author + + + If the command fails with an authentication error (e.g., "gh: Not authenticated" or "HTTP 401"), ask the user to authenticate: + + GitHub CLI is not authenticated. Please run 'gh auth login' in your terminal to authenticate, then let me know when you're ready to continue. + + I've authenticated, please continue + I need help with authentication + Let's use a different approach + + + + Analyze the issue to determine: + 1. All requirements and acceptance criteria + 2. Technical details mentioned + 3. Any linked issues or discussions + + Note: For PR review feedback, users should use the dedicated pr-fixer mode instead. @@ -69,23 +48,20 @@ - Community suggestions - Any decisions or changes to requirements - - github - get_issue_comments - - { - "owner": "RooCodeInc", - "repo": "Roo-Code", - "issue_number": [issue number] - } - - + + gh issue view [issue number] --repo [owner]/[repo] --comments + Also check for: 1. Related issues mentioned in the body or comments 2. Linked pull requests 3. Referenced discussions + If related PRs are mentioned, view them: + + gh pr view [pr-number] --repo [owner]/[repo] + + Document all requirements and constraints found. 
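+
+ As an optional, illustrative aid (not a required step), the #-style cross-references mentioned in the discussion can be surfaced quickly for follow-up:
+
+ # Illustrative: list unique issue/PR references from the issue body and comments.
+ gh issue view [issue number] --repo [owner]/[repo] --comments | grep -oE '#[0-9]+' | sort -u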
@@ -109,16 +85,9 @@ - Identify patterns to follow - Find related components and utilities - For PR Reviews: - - Search for files mentioned in review comments - - Find related files that use similar patterns - - Locate test files for modified functionality - - Identify files that import/depend on changed code - Example searches based on issue type: - Bug: Search for error messages, function names, component names - Feature: Search for similar functionality, API endpoints, UI components - - PR Review: Search for patterns mentioned in feedback CRITICAL: Always read multiple related files together to understand: - Current code patterns and conventions @@ -133,10 +102,15 @@ - read_file to examine specific implementations (read multiple files at once) - search_files for specific patterns or error messages - Also use GitHub tools: - - list_commits to see recent changes to affected files - - get_commit to understand specific changes - - list_pull_requests to find related PRs + Also use GitHub CLI to check recent changes: + + gh api repos/[owner]/[repo]/commits?path=[file-path]&per_page=10 --jq '.[].sha + " " + .[].commit.message' + + + Search for related PRs: + + gh pr list --repo [owner]/[repo] --search "[relevant search terms]" --limit 10 + Document: - All files that need modification @@ -252,11 +226,63 @@ - [ ] No linting errors If any criteria fail, return to implementation step. - - - - - Run Tests and Checks + + + + + Check for Translation Requirements + + After implementing changes, analyze if any translations are required: + + Translation is needed if the implementation includes: + 1. New user-facing text strings in UI components + 2. New error messages or user notifications + 3. Updated documentation files that need localization + 4. New command descriptions or tooltips + 5. Changes to announcement files or release notes + 6. New configuration options with user-visible descriptions + + Check for these patterns: + - Hard-coded strings in React components (.tsx/.jsx files) + - New entries needed in i18n JSON files + - Updated markdown documentation files + - New VSCode command contributions + - Changes to user-facing configuration schemas + + If translations are required: + + + translate + Translation needed for issue #[issue-number] implementation. + + The following changes require translation into all supported languages: + + **Files with new/updated user-facing content:** + - [List specific files and what content needs translation] + - [Include context about where the strings appear] + - [Note any special formatting or constraints] + + **Translation scope:** + - [Specify if it's new strings, updated strings, or both] + - [List specific JSON keys that need attention] + - [Note any markdown files that need localization] + + **Context for translators:** + - [Explain the feature/fix being implemented] + - [Provide context about how the text is used] + - [Note any technical terms or constraints] + + Please ensure all translations maintain consistency with existing terminology and follow the project's localization guidelines. + + + Wait for the translation task to complete before proceeding to testing. + + If no translations are required, continue to the next step. 
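+
+ As a rough, illustrative heuristic for the pattern check above (the regex is a loose approximation, not a definitive test), modified component files containing literal JSX text can be flagged for review:
+
+ # Illustrative scan: list modified component files whose JSX contains literal text (candidates for i18n keys).
+ for f in $(git status --porcelain -- '*.tsx' '*.jsx' | awk '{print $NF}'); do
+   grep -lE '>[A-Za-z][^<{]{3,}<' "$f"
+ done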
+ + + + + Run Tests and Checks Run comprehensive tests to ensure quality: @@ -289,7 +315,7 @@ - + Prepare Summary Create a comprehensive summary of the implementation: @@ -341,7 +367,7 @@ - + Prepare for Pull Request If user wants to create a pull request, prepare everything needed: @@ -412,13 +438,13 @@ **Branch:** [branch-name] **Title:** [PR title] - **Target:** RooCodeInc/Roo-Code (main branch) + **Target:** [owner]/[repo] (main branch) Here's the PR description: [Show prepared PR description] - Would you like me to create this pull request to RooCodeInc/Roo-Code? + Would you like me to create this pull request to [owner]/[repo]? Yes, create the pull request Let me review the PR description first @@ -429,49 +455,31 @@ - + Create Pull Request - Once user approves, create the pull request using GitHub MCP: - - - github - create_pull_request - - { - "owner": "RooCodeInc", - "repo": "Roo-Code", - "title": "[Type]: [Brief description] (#[issue-number])", - "head": "[user-fork-owner]:[branch-name]", - "base": "main", - "body": "[Complete PR description from step 9]", - "draft": false, - "maintainer_can_modify": true - } - - - - Note: The "head" parameter format depends on where the branch exists: - - If user has push access: "branch-name" - - If working from a fork: "username:branch-name" + Once user approves, create the pull request using GitHub CLI: + + If the user doesn't have push access to [owner]/[repo], fork the repository: + + gh repo fork [owner]/[repo] --clone + + + Create the pull request: + + gh pr create --repo [owner]/[repo] --base main --title "[Type]: [Brief description] (#[issue-number])" --body "[Complete PR description from step 10]" --maintainer-can-modify + + + The gh CLI will automatically handle the fork workflow if needed. After PR creation: - 1. Capture the PR number and URL from the response + 1. Capture the PR number and URL from the command output 2. Link the PR to the issue by commenting on the issue 3. 
Inform the user of the successful creation - - github - add_issue_comment - - { - "owner": "RooCodeInc", - "repo": "Roo-Code", - "issue_number": [original issue number], - "body": "PR #[new PR number] has been created to address this issue: [PR URL]" - } - - + + gh issue comment [original issue number] --repo [owner]/[repo] --body "PR #[new PR number] has been created to address this issue: [PR URL]" + Final message to user: ``` @@ -492,13 +500,13 @@ - + Monitor PR Checks After the PR is created, monitor the CI/CD checks to ensure they pass: - gh pr checks --watch + gh pr checks [PR number] --repo [owner]/[repo] --watch This command will: diff --git a/.roo/rules-issue-fixer/2_best_practices.xml b/.roo/rules-issue-fixer/2_best_practices.xml index 7d3a87aa9a21..dede40a92f54 100644 --- a/.roo/rules-issue-fixer/2_best_practices.xml +++ b/.roo/rules-issue-fixer/2_best_practices.xml @@ -12,4 +12,7 @@ - Update documentation when needed - Add tests for any new functionality - Check for accessibility issues (for UI changes) + - Delegate translation tasks to translate mode when implementing user-facing changes + - Always check for hard-coded strings and internationalization needs + - Wait for translation completion before proceeding to final testing \ No newline at end of file diff --git a/.roo/rules-issue-fixer/4_github_cli_usage.xml b/.roo/rules-issue-fixer/4_github_cli_usage.xml new file mode 100644 index 000000000000..e12fb06a5b41 --- /dev/null +++ b/.roo/rules-issue-fixer/4_github_cli_usage.xml @@ -0,0 +1,221 @@ + + + This mode uses the GitHub CLI (gh) for all GitHub operations. + The mode assumes the user has gh installed and authenticated. If authentication errors occur, + the mode will prompt the user to authenticate. + + Users must provide full GitHub issue URLs (e.g., https://github.com/owner/repo/issues/123) + so the mode can extract the repository information dynamically. 
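+
+ For orientation only, the JSON requested via --json in the commands below can also be cached to a file and queried with jq (the file path here is illustrative):
+
+ # Illustrative: cache the issue JSON once, then pull out individual fields.
+ gh issue view 123 --repo octocat/hello-world --json number,title,labels > /tmp/issue.json
+ jq -r '.title' /tmp/issue.json
+ jq -r '.labels[].name' /tmp/issue.json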
+ + + + https://github.com/[owner]/[repo]/issues/[number] + + - Owner: The organization or username + - Repo: The repository name + - Number: The issue number + + + + + Assume authenticated, handle errors gracefully + Only check authentication if a gh command fails with auth error + + - "gh: Not authenticated" + - "HTTP 401" + - "HTTP 403: Resource not accessible" + + + + + + Retrieve the issue details at the start + Always use first to get the full issue content + gh issue view [issue-number] --repo [owner]/[repo] --json number,title,body,state,labels,assignees,milestone,createdAt,updatedAt,closedAt,author + + + gh issue view 123 --repo octocat/hello-world --json number,title,body,state,labels,assignees,milestone,createdAt,updatedAt,closedAt,author + + + + + + Get additional context and requirements from issue comments + Always use after viewing issue to see full discussion + gh issue view [issue-number] --repo [owner]/[repo] --comments + + + gh issue view 123 --repo octocat/hello-world --comments + + + + + + + Find recent changes to affected files + Use during codebase exploration + gh api repos/[owner]/[repo]/commits?path=[file-path]&per_page=10 + + + gh api repos/octocat/hello-world/commits?path=src/api/index.ts&per_page=10 --jq '.[].sha + " " + .[].commit.message' + + + + + + Search for code patterns on GitHub + Use to supplement local codebase_search + gh search code "[search-query]" --repo [owner]/[repo] + + + gh search code "function handleError" --repo octocat/hello-world --limit 10 + + + + + + + + Add progress updates or ask questions on issues + Use if clarification needed or to show progress + gh issue comment [issue-number] --repo [owner]/[repo] --body "[comment]" + + + gh issue comment 123 --repo octocat/hello-world --body "Working on this issue. Found the root cause in the theme detection logic." + + + + + + Find related or similar PRs + Use to understand similar changes + gh pr list --repo [owner]/[repo] --search "[search-terms]" + + + gh pr list --repo octocat/hello-world --search "dark theme" --limit 10 + + + + + + View the diff of a pull request + Use to understand changes in a PR + gh pr diff [pr-number] --repo [owner]/[repo] + + + gh pr diff 456 --repo octocat/hello-world + + + + + + + + Create a pull request + Use in step 11 after user approval + + - Target the repository from the provided URL + - Use "main" as the base branch unless specified otherwise + - Include issue number in PR title + - Use --maintainer-can-modify flag + + gh pr create --repo [owner]/[repo] --base main --title "[title]" --body "[body]" --maintainer-can-modify + + + gh pr create --repo octocat/hello-world --base main --title "fix: Resolve dark theme button visibility (#123)" --body "## Description + +Fixes #123 + +[Full PR description]" --maintainer-can-modify + + + + If working from a fork, ensure the fork is set as the remote and push the branch there first. + The gh CLI will automatically handle the fork workflow. 
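+
+ If the automatic fork detection ever needs to be overridden, gh also accepts an explicit head branch; this is shown only as an illustration, since the default detection is normally sufficient:
+
+ # Name the fork branch explicitly when creating the PR against the upstream repository.
+ gh pr create --repo [owner]/[repo] --base main --head [fork-owner]:[branch-name] --title "[title]" --body "[body]" --maintainer-can-modify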
+ + + + + Fork the repository if user doesn't have push access + Use if user needs to work from a fork + gh repo fork [owner]/[repo] --clone + + + gh repo fork octocat/hello-world --clone + + + + + + Monitor CI/CD checks on a pull request + Use after creating PR to ensure checks pass + gh pr checks [pr-number] --repo [owner]/[repo] --watch + + + gh pr checks 789 --repo octocat/hello-world --watch + + + + + + + + Access GitHub API directly for advanced operations + Use when specific gh commands don't provide needed functionality + + + + gh api repos/[owner]/[repo] --jq '.default_branch' + + + + + gh api repos/[owner]/[repo]/contents/README.md --jq '.content' | base64 -d + + + + + gh api repos/[owner]/[repo]/actions/runs --jq '.workflow_runs[0:5] | .[] | .id, .status, .conclusion' + + + + + + Check GitHub Actions workflow status + Use to monitor CI/CD pipeline + gh run list --repo [owner]/[repo] --limit 5 + + + gh run list --repo octocat/hello-world --limit 5 + + + + + + + + gh: Not authenticated. Run 'gh auth login' to authenticate. + + Ask user to authenticate: + + GitHub CLI is not authenticated. Please run 'gh auth login' in your terminal to authenticate, then let me know when you're ready to continue. + + I've authenticated, please continue + I need help with authentication + Let's use a different approach + + + + + + + HTTP 403: Resource not accessible by integration + + Check if working from a fork is needed: + + gh repo fork [owner]/[repo] --clone + + + + + \ No newline at end of file diff --git a/.roo/rules-issue-fixer/4_github_mcp_tool_usage.xml b/.roo/rules-issue-fixer/4_github_mcp_tool_usage.xml deleted file mode 100644 index 49b12f96cacb..000000000000 --- a/.roo/rules-issue-fixer/4_github_mcp_tool_usage.xml +++ /dev/null @@ -1,88 +0,0 @@ - - - - Retrieve the issue details at the start - Always use first to get the full issue content - - - - Get additional context and requirements - Always use after get_issue to see full discussion - - - - Find recent changes to affected files - Use during codebase exploration - - - - Find code patterns on GitHub - Use to supplement local codebase_search - - - - - - Add progress updates or ask questions - Use if clarification needed or to show progress - - - - Find related or similar PRs - Use to understand similar changes - - - - Get details of related PRs - Use when issue references specific PRs - - - - - - Create a pull request to RooCodeInc/Roo-Code - Use in step 10 after user approval - - - Always target RooCodeInc/Roo-Code repository - - Use "main" as the base branch unless specified otherwise - - Include issue number in PR title - - Set maintainer_can_modify to true - - - - github - create_pull_request - - { - "owner": "RooCodeInc", - "repo": "Roo-Code", - "title": "fix: Resolve dark theme button visibility (#123)", - "head": "username:fix/issue-123-dark-theme-button", - "base": "main", - "body": "## Description\n\nFixes #123\n\n[Full PR description]", - "draft": false, - "maintainer_can_modify": true - } - - - - - - - Fork the repository if user doesn't have push access - Use if user needs to work from a fork - - - github - fork_repository - - { - "owner": "RooCodeInc", - "repo": "Roo-Code" - } - - - - - - \ No newline at end of file diff --git a/.roo/rules-issue-fixer/5_pull_request_workflow.xml b/.roo/rules-issue-fixer/5_pull_request_workflow.xml index 79102fcf376a..60a8385e3e5f 100644 --- a/.roo/rules-issue-fixer/5_pull_request_workflow.xml +++ b/.roo/rules-issue-fixer/5_pull_request_workflow.xml @@ -4,6 +4,7 @@ 2. 
Push to appropriate branch (fork or direct) 3. Prepare comprehensive PR description 4. Get user approval before creating PR + 5. Extract owner and repo from the provided GitHub URL @@ -22,9 +23,30 @@ - Screenshots/demos if applicable + + Use GitHub CLI to create the pull request: + + gh pr create --repo [owner]/[repo] --base main --title "[title]" --body "[description]" --maintainer-can-modify + + + If working from a fork, ensure you've forked first: + + gh repo fork [owner]/[repo] --clone + + + The gh CLI automatically handles fork workflows. + + - 1. Comment on original issue with PR link + 1. Comment on original issue with PR link: + + gh issue comment [issue-number] --repo [owner]/[repo] --body "PR #[pr-number] has been created to address this issue: [PR URL]" + 2. Inform user of successful creation 3. Provide next steps and tracking info + 4. Monitor PR checks: + + gh pr checks [pr-number] --repo [owner]/[repo] --watch + \ No newline at end of file diff --git a/.roo/rules-issue-fixer/8_github_communication_guidelines.xml b/.roo/rules-issue-fixer/8_github_communication_guidelines.xml index 91ef0de89e34..627908f1f7cd 100644 --- a/.roo/rules-issue-fixer/8_github_communication_guidelines.xml +++ b/.roo/rules-issue-fixer/8_github_communication_guidelines.xml @@ -1,14 +1,4 @@ - - - Keep comments concise and focused on technical substance - - Avoid overly verbose explanations unless specifically requested - - Sound human and conversational, not robotic - - Address specific feedback points directly - - Use bullet points for multiple changes - - Reference line numbers or specific code when relevant - - Example: "Updated the error handling in `validateInput()` to catch edge cases as requested. Also added the missing null check on line 45." - - - Provide brief status updates when working on complex issues - Ask specific questions if requirements are unclear diff --git a/.roo/rules-issue-fixer/9_pr_review_workflow.xml b/.roo/rules-issue-fixer/9_pr_review_workflow.xml deleted file mode 100644 index 849ee7f1dfc1..000000000000 --- a/.roo/rules-issue-fixer/9_pr_review_workflow.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - When working on PR review feedback: - 1. Read all review comments carefully - 2. Identify specific changes requested - 3. Group related feedback into logical changes - 4. Address each point systematically - 5. Test changes thoroughly - 6. Respond to each review comment when pushing updates - 7. Use "Resolved" or brief explanations for each addressed point - - - - For partial workflows (user-requested changes to existing PRs): - 1. Focus only on the specific changes requested - 2. Don't refactor unrelated code unless explicitly asked - 3. Maintain consistency with existing PR approach - 4. Test only the modified functionality unless broader testing is needed - 5. Update PR description if significant changes are made - - - - When responding to review comments: - - "✅ Fixed - [brief description of change]" - - "✅ Added - [what was added]" - - "✅ Updated - [what was changed]" - - "❓ Question - [if clarification needed]" - - Keep responses short and action-oriented - - \ No newline at end of file diff --git a/.roo/rules-pr-fixer-orchestrator/1_Workflow.xml b/.roo/rules-pr-fixer-orchestrator/1_Workflow.xml new file mode 100644 index 000000000000..a376596c1df0 --- /dev/null +++ b/.roo/rules-pr-fixer-orchestrator/1_Workflow.xml @@ -0,0 +1,771 @@ + + + Initialize PR Context + + The user will provide a GitHub PR URL or number. + + 1. 
**Parse Input**: Extract the `owner`, `repo`, and `pr_number` from the URL or use provided number. + 2. **Create Task Directory**: Create a dedicated directory to store all context for this PR fix task. + + mkdir -p .roo/temp/pr-fixer-orchestrator/[TASK_ID] + + 3. **Retrieve PR Details**: Fetch the PR details, comments, and check status as a comprehensive JSON object. + + gh pr view [pr_number] --repo [owner]/[repo] --json number,title,body,state,labels,author,headRefName,baseRefName,mergeable,mergeStateStatus,isDraft,isCrossRepository,headRepositoryOwner,reviews,statusCheckRollup,comments > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_context.json + + 4. **Get Review Comments**: Fetch detailed review comments separately for better analysis. + + gh pr view [pr_number] --repo [owner]/[repo] --comments > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_comments.txt + + 5. **Check CI Status**: Get current check status and any failing workflows. + + gh pr checks [pr_number] --repo [owner]/[repo] --json name,state,conclusion,detailsUrl > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_checks.json + + 6. **Get Associated Issue**: Check if PR is linked to an issue and fetch issue details if available. + + gh pr view [pr_number] --repo [owner]/[repo] --json closingIssuesReferences > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/linked_issues.json + + If linked issues exist, fetch the first issue's details: + + gh issue view [issue_number] --repo [owner]/[repo] --json number,title,body,state,labels,assignees,milestone,createdAt,updatedAt,closedAt,author,comments > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/issue_context.json + + 7. **Handle Auth Errors**: If any `gh` command fails with authentication error, prompt the user to log in. + 8. **Confirm Context**: Inform the user that the PR context has been gathered. + + + + + Checkout PR Branch and Initial Analysis + + Before delegating analysis, ensure the PR branch is checked out locally. + + 1. **Checkout PR Branch**: Use gh to checkout the PR branch locally. + + gh pr checkout [pr_number] --repo [owner]/[repo] --force + + + 2. **Determine Remote Type**: Check if this is a cross-repository PR (from a fork). + + gh pr view [pr_number] --repo [owner]/[repo] --json isCrossRepository,headRepositoryOwner,headRefName > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_remote_info.json + + + 3. **Setup Fork Remote if Needed**: If it's a cross-repository PR, ensure fork remote is configured. + Read the pr_remote_info.json file. If isCrossRepository is true: + + git remote add fork https://github.com/[headRepositoryOwner]/[repo].git || git remote set-url fork https://github.com/[headRepositoryOwner]/[repo].git + + + 4. **Fetch Latest Main**: Ensure we have the latest main branch for comparison. + + git fetch origin main + + + 5. **Check for Conflicts**: Determine if there are merge conflicts with main. + + git merge-tree $(git merge-base HEAD origin/main) HEAD origin/main > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/merge_conflicts.txt + + + 6. **Get PR Diff**: Fetch the files changed in this PR for context. + + gh pr diff [pr_number] --repo [owner]/[repo] --name-only > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_changed_files.txt + + + 7. **Check Merge Diff Size**: Get the full diff and check line count. 
+ + git diff origin/main...HEAD > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/full_merge_diff.txt + + + wc -l .roo/temp/pr-fixer-orchestrator/[TASK_ID]/full_merge_diff.txt + + + If the diff has over 2000 lines, create a summary instead: + + git diff origin/main...HEAD --stat > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/merge_diff_summary.txt + + + rm .roo/temp/pr-fixer-orchestrator/[TASK_ID]/full_merge_diff.txt + + + + + + Delegate: Comprehensive Requirements and PR Analysis + + Launch a subtask in `architect` mode to perform a detailed analysis of the PR, its underlying requirements, and all issues that need to be addressed. + + The context files in `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/` will be the input for this subtask. + The subtask should write its findings to: `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_analysis_report.md`. + + + architect + + **Task: Analyze Pull Request Requirements and Create Comprehensive Fix Plan** + + You are an expert software architect. Your task is to analyze a pull request, understand its underlying requirements, and create a comprehensive plan to address all issues. + + 1. **Read PR Context**: The PR details are in: + - `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_context.json` - Full PR metadata + - `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_comments.txt` - Review comments + - `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_checks.json` - CI/CD check status + - `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/merge_conflicts.txt` - Conflict analysis + - `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_changed_files.txt` - Files changed in PR + - `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/linked_issues.json` - Associated issues (if any) + - `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/issue_context.json` - Issue details (if linked) + - `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/full_merge_diff.txt` OR `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/merge_diff_summary.txt` - Diff information + + 2. **Understand the PR's Purpose**: + - Extract the feature or bug being addressed from PR title, body, and linked issues + - Identify the acceptance criteria (from PR description or linked issue) + - Understand the intended functionality and expected behavior + - Note any design decisions or architectural choices made + + 3. **Perform Architectural Analysis**: + - **Map Component Interactions**: Trace the complete data flow for the PR's changes + - **Identify Paired Operations**: For any operation (e.g., export), find its counterpart (e.g., import) + - **Find Similar Patterns**: Search for existing implementations of similar features + - **Analyze Dependencies**: Identify all consumers of the functionality being modified + - **Assess Impact**: Determine how changes affect other parts of the system + + 4. **Explore Codebase Systematically**: + - Use `codebase_search` FIRST to understand the feature area + - Search for related functionality that might be affected + - Find all files that consume or depend on the changed functionality + - Identify configuration files, tests, and documentation that need updates + - Study similar features to understand established patterns + + 5. **Analyze Review Feedback**: + - Categorize review comments by type (bug, enhancement, style, etc.) + - Identify which comments are actionable vs informational + - Prioritize changes based on reviewer authority and importance + - Note any conflicting feedback that needs clarification + + 6. 
**Investigate Failing Tests**: + - For each failing check, determine the root cause + - Use `gh run view --log-failed` to get detailed error logs + - Identify if failures are due to code issues, flaky tests, or environment problems + - Determine which files need modification to fix test failures + + 7. **Assess Merge Conflicts**: + - Analyze the merge_conflicts.txt file + - Identify which files have conflicts + - Determine the complexity of conflict resolution + - Plan the rebase/merge strategy + + 8. **Create Comprehensive Fix Plan**: The plan must include: + - **PR Purpose Summary**: Clear description of what the PR is trying to achieve + - **Requirements Analysis**: + - Original requirements from issue or PR description + - Acceptance criteria that must be met + - Any missing functionality that needs to be added + - **Architectural Context**: + - Data flow diagram showing component interactions + - List of paired operations that must be updated together + - Dependencies and consumers of the affected functionality + - **Issue Summary**: Clear categorization of all issues found + - **Priority Order**: Which issues to tackle first and why + - **Review Feedback Analysis**: + - List of all actionable review comments + - Specific code changes required for each + - Any clarifications needed from reviewers + - **Test Failure Resolution**: + - Root cause of each failing test + - Files and changes needed to fix + - Any test updates required + - **Conflict Resolution Strategy**: + - Whether to rebase or merge + - Order of operations for conflict resolution + - Risk assessment of conflicts + - **Implementation Steps**: + - Detailed, ordered steps for fixing all issues + - Specific commands and file modifications + - Validation steps after each change + - **Risk Assessment**: + - Potential side effects of changes + - Areas requiring extra testing + - Backward compatibility concerns + + 9. **Save the Analysis**: Write the complete analysis to `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_analysis_report.md`. + + **Critical Requirements:** + - Always understand the PR's underlying purpose before analyzing issues + - Be thorough in analyzing all aspects of the PR + - Consider the interaction between different fixes + - Provide specific, actionable steps + - Include exact commands where applicable + - **IMPORTANT**: Save your analysis to the specified file in .roo/temp/pr-fixer-orchestrator/[TASK_ID]/ + + **Completion Protocol:** + - This is your only task. Do not deviate from these instructions. + - Once you have successfully written the analysis report, you MUST signal completion by using the `attempt_completion` tool. + - The `result` parameter MUST be: "PR analysis complete and saved to .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_analysis_report.md" + + + + After launching the subtask, wait for it to complete. The orchestrator will then read the analysis report to proceed. + + + + + Review Analysis and Get User Approval + + After the analysis subtask completes, present the findings to the user for approval. + + 1. **Read the Analysis Report**: + + + + .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_analysis_report.md + + + + + 2. **Present for Approval**: Show the analysis to the user and ask how to proceed. + + + I've completed the analysis of PR #[pr_number]. Here's what I found: + + --- + [Insert content of pr_analysis_report.md here] + --- + + How would you like to proceed with fixing these issues? 
+ + + Fix all issues in the recommended priority order + Only fix the review comments, skip failing tests for now + Only fix failing tests and conflicts, skip review comments + Let me choose specific issues to fix + + + + 3. **Handle User Choice**: Based on the user's selection, prepare the implementation scope. + Save the user's choice to `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/implementation_scope.txt` + + + + + Fetch Latest from Main and Check Differences + + Before implementing fixes, ensure we're working with the latest code and understand what has changed. + + 1. **Fetch Latest Changes**: + + git fetch origin main + + + 2. **Analyze Differences**: Create a detailed diff report. + + git diff origin/main...HEAD --name-status > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_file_changes.txt + + + 3. **Check Commit History**: Understand what commits are in this PR. + + git log origin/main..HEAD --oneline > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_commits.txt + + + 4. **Identify New Commits on Main**: See what has been merged to main since the PR was created. + + git log HEAD..origin/main --oneline > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/new_main_commits.txt + + + 5. **Save Merge Strategy**: Based on the analysis, determine if we should rebase or merge. + Create `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/merge_strategy.txt` with either "rebase" or "merge" + + + + + Delegate: Implement Fixes + + Launch a subtask in `code` mode to implement all the fixes based on the analysis and user's choices. + + + code + + **Task: Implement PR Fixes Based on Analysis** + + You are an expert software developer. Your task is to implement fixes for a pull request based on the analysis and plan. + + 1. **Read Context Files**: + - Analysis Report: `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_analysis_report.md` + - Implementation Scope: `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/implementation_scope.txt` + - File Changes: `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_file_changes.txt` + - Merge Strategy: `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/merge_strategy.txt` + + 2. **Handle Merge/Rebase First** (if conflicts exist): + - If merge_strategy.txt says "rebase": + + GIT_EDITOR=true git rebase origin/main + + - If conflicts occur, resolve them by editing the conflicted files + - Remember to escape conflict markers when using apply_diff + - After resolving each file: `git add [file]` + - Continue rebase: `git rebase --continue` + + 3. **Implement Missing Functionality** (if identified in analysis): + - Add any missing features or functionality noted in the requirements analysis + - Follow the architectural patterns identified in the analysis + - Ensure all acceptance criteria are met + - Update related operations to maintain consistency + + 4. **Implement Review Feedback**: + - Address each actionable review comment from the analysis + - Make code changes using appropriate file editing tools + - Ensure changes follow project coding standards + - Add comments where complex logic is introduced + + 5. **Fix Failing Tests**: + - Based on the root cause analysis, fix test failures + - This may involve fixing source code or updating tests + - Run tests locally if possible to verify fixes + - Document any test changes made + + 6. **Track All Changes**: As you make changes, maintain a list of: + - Files modified with brief description of changes + - Review comments addressed + - Tests fixed + - Missing functionality added + - Any additional improvements made + + 7. 
**Create Change Summary**: Write a comprehensive summary to: + `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/changes_implemented.md` + Include: + - List of all files modified + - Review comments addressed (with file:line references) + - Test fixes applied + - Conflict resolutions performed + - Missing functionality implemented + - Any additional improvements + + **Important Reminders:** + - Follow the implementation plan from the analysis + - Respect the user's chosen scope + - Make minimal, targeted changes + - Preserve existing functionality + - When resolving conflicts, understand both sides before choosing + - Ensure all original PR requirements are met + - **IMPORTANT**: Save all output files to .roo/temp/pr-fixer-orchestrator/[TASK_ID]/ + + **Completion Protocol:** + - Once all fixes are implemented and the summary is saved, use `attempt_completion`. + - Result: "PR fixes implemented and summary saved to .roo/temp/pr-fixer-orchestrator/[TASK_ID]/changes_implemented.md" + + + + Wait for the implementation subtask to complete before proceeding. + + + + + Delegate: Test and Validate Changes + + After implementation, delegate testing and validation to ensure all fixes work correctly. + + + test + + **Task: Validate PR Fixes and Run Tests** + + You are a meticulous QA engineer. Your task is to validate that all PR fixes have been properly implemented. + + **Context Files:** + - Original Analysis: `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_analysis_report.md` + - Changes Made: `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/changes_implemented.md` + - Original PR Checks: `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_checks.json` + + **Your Steps:** + 1. **Verify Requirements**: Check that all original PR requirements and acceptance criteria are met. + + 2. **Verify Review Comments**: Check that each review comment marked as addressed in changes_implemented.md has been properly fixed. + + 3. **Run Local Tests**: Execute relevant test suites. + - Identify test files related to changed code + - Run unit tests for modified components + - Run integration tests if applicable + - Document all test results + + 4. **Validate Code Quality**: + - Run linters on changed files + - Check for type errors (if TypeScript) + - Verify no console.logs or debug code remains + - Ensure proper error handling + + 5. **Check for Regressions**: + - Verify existing functionality still works + - Look for potential side effects of changes + - Test edge cases around modified code + + 6. **Create Validation Report**: Write findings to `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/validation_report.md` + Include: + - Test results summary (pass/fail counts) + - Requirements verification checklist + - Review comment verification checklist + - Any issues or concerns found + - Recommendations for additional testing + - Overall assessment: READY or NEEDS_WORK + + **Critical Focus Areas:** + - Ensure all originally failing tests now pass + - Verify no new test failures introduced + - Confirm review feedback properly addressed + - Check that all PR requirements are fulfilled + - Check for unintended consequences + - **IMPORTANT**: Save your report to .roo/temp/pr-fixer-orchestrator/[TASK_ID]/ + + **Completion Protocol:** + - Save validation report and use `attempt_completion` + - Result: "Validation complete. Report saved to .roo/temp/pr-fixer-orchestrator/[TASK_ID]/validation_report.md" + + + + Wait for validation to complete before proceeding. 
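For reference, here is a minimal sketch of the kind of local validation the test subtask might run. The script names (`lint`, `test`) are assumptions; use whatever the project's package.json actually defines.

```bash
# Hypothetical validation pass; script names are placeholders, not confirmed project scripts.
git diff origin/main...HEAD --name-only   # see which files the PR touches
npm run lint                              # lint the workspace
npm test                                  # run the unit test suite
```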
+ + + + + Handle Validation Results and Translation Needs + + Review validation results and check if translation updates are needed. + + 1. **Read Validation Report**: + + + + .roo/temp/pr-fixer-orchestrator/[TASK_ID]/validation_report.md + + + + + 2. **If Validation Failed**: Present issues to user and ask how to proceed. + If the report indicates NEEDS_WORK, use ask_followup_question to get direction. + + 3. **Check for Translation Requirements**: + Read the changes_implemented.md file and check for: + - Changes to i18n JSON files + - Modifications to UI components with user-facing text + - Updates to announcement files or documentation + - New error messages or notifications + + 4. **Delegate Translation if Needed**: + If translation is required: + + translate + + **Task: Update Translations for PR Fixes** + + PR #[pr_number] fixes have been implemented. Please handle translation updates for the following changes: + + **Changed Files:** + [List specific files from changes_implemented.md that need translation] + + **Specific Changes:** + [Detail what text was added/modified] + + Please ensure all supported languages are updated with appropriate translations. + Save a summary of translation changes to `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/translation_summary.md` + + **IMPORTANT**: Save your summary to the specified file in .roo/temp/pr-fixer-orchestrator/[TASK_ID]/ + + + + 5. **Proceed When Ready**: Only continue after validation passes and translations complete (if needed). + + + + + Prepare PR Message and Get User Approval + + Before committing changes, prepare the PR update message and get user approval. + + 1. **Check Files to be Committed**: List all modified files. + + git status --porcelain > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/files_to_commit.txt + + + 2. **Read Implementation Summary**: + + + + .roo/temp/pr-fixer-orchestrator/[TASK_ID]/changes_implemented.md + + + + + 3. **Create PR Update Message**: Based on the changes, create a comprehensive PR update message following the template. + Save to `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_update_message.md`: + + ```markdown + ## PR Update Summary + + This update addresses the review feedback and fixes identified issues. + + ## Changes Made + + [List from changes_implemented.md] + - Fixed [specific issue] in [file] + - Addressed review comment about [topic] + - Updated tests for [functionality] + - Resolved merge conflicts in [files] + + ## Review Comments Addressed + + [For each review comment addressed] + - ✅ Comment: "[reviewer comment]" + - Fix: [what was done] + - Files: [files modified] + + ## Test Fixes + + [If tests were fixed] + - Fixed failing test: [test name] + - Issue: [root cause] + - Solution: [fix applied] + + ## Translations Updated + + [If translations were updated] + - Updated [X] language files for [changes] + - All user-facing strings properly translated + + ## Verification + + - [x] All review comments addressed + - [x] All tests passing locally + - [x] No regressions introduced + - [x] Code follows project standards + - [x] Translations updated (if applicable) + + ## Files Modified + + [List all files that will be committed] + ``` + + 4. **Get User Approval**: Present the changes and ask for confirmation. + + + I've completed all the fixes for PR #[pr_number]. Here's a summary of what will be committed: + + **Files to be committed:** + [Content of files_to_commit.txt] + + **PR Update Message:** + [Content of pr_update_message.md] + + Would you like me to proceed with committing these changes? 
+ + + Looks good, go ahead and commit the changes + I tested the changes and something is wrong - let me describe the issue + I still need to test the changes manually before committing + Let me review specific files before committing + + + + 5. **Handle User Response**: + - If approved: Continue to commit + - If issues found: Document the issue and determine next steps + - If manual testing needed: Wait for user to complete testing + - If review requested: Show requested files and wait for approval + + + + + Commit Changes and Prepare for Push + + Once user approves, commit the changes with appropriate message. + + 1. **Stage Only Necessary Files**: Review files and stage appropriately. + Read files_to_commit.txt and ensure only relevant files are staged. + + git add [specific files from the implementation] + + + Note: Do NOT use `git add -A` to avoid adding unintended files. + + 2. **Create Commit Message**: Based on the changes made, create an appropriate commit message. + + git commit -m "fix: address PR feedback and fix failing tests + +- addressed review comments +- fixed failing tests +- resolved conflicts (if applicable) +- updated translations (if needed) + +See PR for detailed changes" + + + 3. **Verify Remote Configuration**: Check which remote to push to. + + + + .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_remote_info.json + + + + + 4. **Determine Push Target**: + - If isCrossRepository is false: push to origin + - If isCrossRepository is true: push to fork + + + + + Delegate: Final PR Review + + Before pushing changes, have the PR reviewer mode review all changes to ensure quality. + + + pr-reviewer + + **Task: Review PR Fix Implementation** + + You are reviewing the fixes applied to PR #[pr_number]. Your task is to ensure all changes are high quality and properly address the original feedback. + + **Context Files:** + - Original PR Analysis: `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_analysis_report.md` + - Changes Implemented: `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/changes_implemented.md` + - Validation Report: `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/validation_report.md` + - Translation Summary (if exists): `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/translation_summary.md` + - PR Update Message: `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_update_message.md` + + **Review Focus:** + 1. **Completeness**: Verify all identified issues have been addressed + 2. **Requirements**: Confirm all original PR requirements are met + 3. **Code Quality**: Check that fixes follow best practices + 4. **No Regressions**: Ensure no new issues introduced + 5. **Review Feedback**: Confirm all reviewer comments properly addressed + 6. **Test Coverage**: Verify tests cover the changes + 7. **Documentation**: Check if docs/comments are adequate + + **Your Task:** + 1. Review the actual code changes using git diff + 2. Cross-reference with the original review feedback + 3. Verify all PR requirements are fulfilled + 4. Assess the quality of the implementation + 5. Check for any missed requirements + 6. 
Create a final review report + + Save your review to `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/final_review.md` with: + - Overall Assessment: APPROVED or NEEDS_REVISION + - Quality Score: 1-10 + - Detailed feedback on the implementation + - Any remaining concerns + - Recommendations for improvement + + **IMPORTANT**: Save your review to the specified file in .roo/temp/pr-fixer-orchestrator/[TASK_ID]/ + + **Completion Protocol:** + - Save review and use `attempt_completion` + - Result: "Final review complete and saved to .roo/temp/pr-fixer-orchestrator/[TASK_ID]/final_review.md" + + + + Wait for the review to complete. + + + + + Process Final Review and Push Changes + + Based on the final review, either push changes or address remaining issues. + + 1. **Read Final Review**: + + + + .roo/temp/pr-fixer-orchestrator/[TASK_ID]/final_review.md + + + + + 2. **If Review Requests Revisions**: + Present the feedback to the user and ask if they want to address the issues now or push as-is. + + 3. **Push Changes**: If approved or user chooses to push: + Based on pr_remote_info.json, push to the correct remote: + + For same-repository PRs: + + git push --force-with-lease origin [branch_name] + + + For cross-repository PRs: + + git push --force-with-lease fork [branch_name] + + + 4. **Monitor Push Result**: Ensure the push succeeds. + If --force-with-lease fails, fetch and retry with --force. + + + + + Verify PR Status and Monitor Checks + + After pushing, verify the PR is in good state and monitor CI/CD checks. + + 1. **Verify PR is Up to Date**: + + gh pr view [pr_number] --repo [owner]/[repo] --json mergeable,mergeStateStatus + + + 2. **Monitor CI/CD Checks in Real-Time**: + + gh pr checks [pr_number] --repo [owner]/[repo] --watch + + This will continuously monitor until all checks complete. + + 3. **Get Final Status**: Once monitoring completes, get the final state. + + gh pr checks [pr_number] --repo [owner]/[repo] --json name,state,conclusion > .roo/temp/pr-fixer-orchestrator/[TASK_ID]/final_checks.json + + + 4. **Comment on PR**: Add a summary comment about the fixes applied. + Read the PR update message we prepared: + + + + .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_update_message.md + + + + + Then post it as a comment: + + gh pr comment [pr_number] --repo [owner]/[repo] --body-file .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_update_message.md + + + 5. **Save PR Message**: Keep the PR message for reference. + The PR update message has already been saved to `.roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_update_message.md` + + 6. **Final Summary**: Present the final status to the user, confirming: + - All requested changes have been implemented + - The branch is synced with main + - CI/CD checks status + - The PR is ready for maintainer review and merge + - PR update message has been posted and saved + + 7. **Optional Cleanup**: Ask user if they want to clean up temporary files. + + + PR #[pr_number] has been successfully updated! + + - All changes committed and pushed + - CI/CD checks are [status] + - PR comment posted with update summary + - PR message saved to .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_update_message.md + + Would you like me to clean up the temporary files? 
+ + + Yes, clean up temporary files + No, keep the files for reference + + + + If user chooses cleanup: + + rm -rf .roo/temp/pr-fixer-orchestrator/[TASK_ID] + + + + \ No newline at end of file diff --git a/.roo/rules-pr-fixer-orchestrator/2_best_practices.xml b/.roo/rules-pr-fixer-orchestrator/2_best_practices.xml new file mode 100644 index 000000000000..82f1ba66aa75 --- /dev/null +++ b/.roo/rules-pr-fixer-orchestrator/2_best_practices.xml @@ -0,0 +1,186 @@ + + + + Always Delegate Specialized Work + The orchestrator coordinates but doesn't implement. Use specialized modes for analysis, coding, testing, and review. + Each mode has specific expertise and permissions optimized for their tasks. + + + + Maintain Context Between Steps + Use temporary files in .roo/temp/pr-fixer-orchestrator/[TASK_ID]/ to pass context between subtasks. ALL delegated tasks must save outputs to this directory. + Subtasks run in isolation and need explicit context sharing. Files saved elsewhere will be inaccessible to subsequent steps. + + + + Get User Approval Before Committing + ALWAYS present changes and get explicit user approval before committing. Show modified files, summarize changes, and ask for confirmation. + Users must maintain control over what gets committed to their PR. Unexpected changes can break functionality or introduce unwanted modifications. + + + + Understand Requirements First + Always analyze the PR's underlying purpose and requirements before fixing issues. + Fixing review comments without understanding the feature can lead to incomplete or incorrect solutions. + + + + Handle Large Diffs Gracefully + Check diff size before processing. If over 2000 lines, create a summary instead of including the full diff. + Large diffs can overwhelm context windows and make analysis difficult. Summaries maintain clarity. 
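A minimal sketch of that size guard, reusing the same paths and 2000-line threshold described in the workflow (the exact shell form is illustrative):

```bash
# Illustrative guard: fall back to a --stat summary when the full diff is too large.
DIR=".roo/temp/pr-fixer-orchestrator/$TASK_ID"   # TASK_ID supplied by the orchestrator
git diff origin/main...HEAD > "$DIR/full_merge_diff.txt"
if [ "$(wc -l < "$DIR/full_merge_diff.txt")" -gt 2000 ]; then
  git diff origin/main...HEAD --stat > "$DIR/merge_diff_summary.txt"
  rm "$DIR/full_merge_diff.txt"
fi
```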
+ + + + + - Always understand the PR's purpose and requirements first + - Analyze before implementing - understand all issues comprehensively + - Address review feedback with the same priority as the reviewer's authority + - Fix root causes of test failures, not just symptoms + - Ensure all original PR requirements are met, not just review comments + - Resolve conflicts carefully, understanding both sides of changes + - Validate all changes before committing to avoid breaking the PR further + - NEVER use `git add -A` - always stage specific files intentionally + - Get user approval before committing any changes + - Keep commits focused and well-described + - Always check if PR is from a fork to push to correct remote + - Monitor CI/CD checks in real-time after pushing + - Consider translation needs for any user-facing changes + - Document what was changed and why in the PR update message + - Use the EXACT PR template format specified in 6_pr_template_format.xml + + + + + Non-Interactive Rebasing + Always use GIT_EDITOR=true for automated rebase operations + GIT_EDITOR=true git rebase origin/main + + + + Fork-Aware Pushing + Always check isCrossRepository before pushing + + - Check if PR is from fork using gh pr view --json isCrossRepository + - Add fork remote if needed + - Push to correct remote (origin vs fork) + + + + + Force with Lease + Use --force-with-lease for safer force pushing + If it fails, fetch and use --force + + + + Selective File Staging + Always stage files individually, never use git add -A + + - Review all modified files with git status + - Stage only files that were intentionally modified + - Use git add [specific-file] for each file + - Double-check staged files with git diff --cached + + Prevents accidentally committing temporary files, debug logs, or unintended changes + + + + Large Diff Handling + Check diff size before including in context files + + - Save diff to file and check line count with wc -l + - If over 2000 lines, create a summary instead + - Include file counts, insertion/deletion stats + - List most significantly changed files + + + + + + + architect + Comprehensive analysis and planning + Detailed reports and implementation plans + MUST save all outputs to .roo/temp/pr-fixer-orchestrator/[TASK_ID]/ + + + + code + Executing code changes and fixes + Implemented solutions and change summaries + MUST save changes_implemented.md to .roo/temp/pr-fixer-orchestrator/[TASK_ID]/ + + + + test + Testing and validating changes + Test results and validation reports + MUST save validation_report.md to .roo/temp/pr-fixer-orchestrator/[TASK_ID]/ + + + + pr-reviewer + Final quality review before submission + Quality assessment and recommendations + MUST save final_review.md to .roo/temp/pr-fixer-orchestrator/[TASK_ID]/ + + + + translate + Updating translations for UI changes + Synchronized translations across languages + MUST save translation_summary.md to .roo/temp/pr-fixer-orchestrator/[TASK_ID]/ + + + + + + GitHub CLI authentication error + Prompt user to run 'gh auth login' + + + + No linked issue found + Extract requirements from PR description and comments + + + + Force-with-lease push fails + Fetch latest and retry with --force + + + + Diff exceeds 2000 lines + Create summary with stats instead of full diff + + + + Expected context files not found in temp directory + Check if delegated task saved to correct location, re-run if needed + + + + + + Pre-Commit Approval + Always get explicit user approval before committing changes + + - Show list of modified files + 
- Summarize key changes made + - Present clear approval options + - Wait for user confirmation + + + + + Clear Communication + Present information clearly and concisely + + - Use bullet points for lists + - Highlight important warnings + - Provide actionable suggestions + - Avoid technical jargon when possible + + + + \ No newline at end of file diff --git a/.roo/rules-pr-fixer-orchestrator/3_github_cli_usage.xml b/.roo/rules-pr-fixer-orchestrator/3_github_cli_usage.xml new file mode 100644 index 000000000000..94c63b6f3aef --- /dev/null +++ b/.roo/rules-pr-fixer-orchestrator/3_github_cli_usage.xml @@ -0,0 +1,68 @@ + + + This mode uses the GitHub CLI (gh) for all GitHub operations. + The mode assumes the user has gh installed and authenticated. + It can work with PRs from both the main repository and forks. + + + + + Get comprehensive PR details + gh pr view [pr-number] --repo [owner]/[repo] --json [fields] + number,title,body,state,labels,author,headRefName,baseRefName,mergeable,mergeStateStatus,isDraft,isCrossRepository,headRepositoryOwner,reviews,statusCheckRollup,comments + + + + Checkout PR branch locally + gh pr checkout [pr-number] --repo [owner]/[repo] --force + Automatically handles fork setup + + + + Monitor CI/CD status + gh pr checks [pr-number] --repo [owner]/[repo] --watch + Use --json for programmatic access + + + + Get PR changes + gh pr diff [pr-number] --repo [owner]/[repo] --name-only + Use without --name-only for full diff + + + + Add comment to PR + gh pr comment [pr-number] --repo [owner]/[repo] --body "[message]" + + + + + + Get issues linked to PR + gh pr view [pr-number] --repo [owner]/[repo] --json closingIssuesReferences + Returns array of linked issues + + + + Get issue details if linked + gh issue view [issue-number] --repo [owner]/[repo] --json [fields] + number,title,body,state,labels,assignees,milestone,createdAt,updatedAt,closedAt,author,comments + + + + + + Get detailed CI logs + gh run view [run-id] --repo [owner]/[repo] --log-failed + Use to debug failing tests + + + + Direct API access for advanced operations + + - Get PR reviews: gh api repos/[owner]/[repo]/pulls/[pr-number]/reviews + - Get review comments: gh api repos/[owner]/[repo]/pulls/[pr-number]/comments + + + + \ No newline at end of file diff --git a/.roo/rules-pr-fixer-orchestrator/4_requirements_analysis.xml b/.roo/rules-pr-fixer-orchestrator/4_requirements_analysis.xml new file mode 100644 index 000000000000..5d5d5dbeb63c --- /dev/null +++ b/.roo/rules-pr-fixer-orchestrator/4_requirements_analysis.xml @@ -0,0 +1,120 @@ + + + The PR Fixer Orchestrator must understand the underlying requirements + of a PR before fixing issues. This ensures fixes align with the + original intent and all acceptance criteria are met. 
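As a rough sketch, the first source listed below, linked GitHub issues, can usually be pulled with the gh flags documented elsewhere in this rule set. The owner, repo, and PR number are placeholders:

```bash
# Sketch only: find the issues a PR closes, then fetch each one for acceptance criteria.
gh pr view 1234 --repo owner/repo --json closingIssuesReferences \
  --jq '.closingIssuesReferences[].number' |
while read -r issue; do
  gh issue view "$issue" --repo owner/repo --json number,title,body
done
```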
+ + + + + Linked GitHub Issues + Primary source of requirements and acceptance criteria + + - Issue title and body + - Acceptance criteria sections + - Technical specifications + - User stories or use cases + + + + + PR Description + Often contains implementation notes and context + + - Feature description + - Implementation approach + - Testing notes + - Breaking changes + + + + + PR Comments + May contain clarifications and additional requirements + + - Author clarifications + - Reviewer questions and answers + - Scope changes or additions + + + + + Code Analysis + Infer requirements from the implementation + + - API contracts + - Data flow patterns + - Test cases (reveal expected behavior) + - Documentation comments + + + + + + + Extract Explicit Requirements + + - Parse linked issues for acceptance criteria + - Extract requirements from PR description + - Identify success metrics + + + + + Understand Implementation Intent + + - Analyze the code changes to understand approach + - Identify design decisions made + - Note any architectural patterns used + + + + + Map Requirements to Implementation + + - Verify each requirement has corresponding code + - Identify any missing functionality + - Note any extra functionality added + + + + + Identify Gaps + + - List unimplemented requirements + - Note incomplete features + - Identify missing tests + + + + + + + + - Clear description of the bug + - Steps to reproduce + - Expected vs actual behavior + - Affected versions/environments + + + + + + - Feature description + - User stories or use cases + - API design (if applicable) + - UI/UX specifications + - Performance requirements + + + + + + - Motivation for refactoring + - Backward compatibility needs + - Performance improvements expected + - Migration path (if breaking) + + + + \ No newline at end of file diff --git a/.roo/rules-pr-fixer-orchestrator/5_self_contained_workflow.xml b/.roo/rules-pr-fixer-orchestrator/5_self_contained_workflow.xml new file mode 100644 index 000000000000..62be497ea47a --- /dev/null +++ b/.roo/rules-pr-fixer-orchestrator/5_self_contained_workflow.xml @@ -0,0 +1,99 @@ + + + The PR Fixer Orchestrator must be completely self-contained and able + to work on any PR without requiring pre-existing context files from + other workflows like the Issue Fixer. 
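A hedged example of what that independent bootstrap can look like. The file name mirrors the one used elsewhere in this workflow, while the PR number, repository, and JSON field list are placeholders:

```bash
# Illustrative bootstrap: create the task directory and capture PR context from scratch.
DIR=".roo/temp/pr-fixer-orchestrator/$TASK_ID"
mkdir -p "$DIR"
gh pr view 1234 --repo owner/repo \
  --json number,title,body,state,isCrossRepository,headRefName,baseRefName \
  > "$DIR/pr_context.json"
```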
+ + + + + No External Dependencies + Never assume files from other workflows exist + + - Create own temp directory structure + - Gather all needed context independently + - Generate own analysis and plans + + + + + Complete Context Gathering + Collect all information needed for the task + + - Fetch PR details and metadata + - Get linked issues if they exist + - Analyze codebase independently + - Understand requirements from available sources + + + + + Flexible Requirements Analysis + Work with whatever information is available + + - Use linked issues when available + - Fall back to PR description + - Infer from code changes if needed + - Ask user for clarification when necessary + + + + + + Create dedicated task directory + Fetch all PR-related information + Check for linked issues and fetch if present + Analyze PR changes to understand scope + Build complete context from available sources + + + + + PR that references a GitHub issue + + - Fetch issue details for requirements + - Use issue acceptance criteria + - Cross-reference PR implementation with issue requirements + + + + + PR without linked issue + + - Extract requirements from PR description + - Analyze code to understand intent + - Use PR comments for additional context + - Infer acceptance criteria from tests + + + + + PR from a forked repository + + - Handle remote configuration properly + - Ensure push targets correct repository + - Manage permissions appropriately + + + + + + + No clear requirements found + + - Analyze code changes to infer purpose + - Look at test changes for expected behavior + - Ask user for clarification if needed + + + + + PR scope is ambiguous + + - Present findings to user + - Ask for specific guidance on what to fix + - Proceed with user-defined scope + + + + \ No newline at end of file diff --git a/.roo/rules-pr-fixer-orchestrator/6_pr_template_format.xml b/.roo/rules-pr-fixer-orchestrator/6_pr_template_format.xml new file mode 100644 index 000000000000..26129553c327 --- /dev/null +++ b/.roo/rules-pr-fixer-orchestrator/6_pr_template_format.xml @@ -0,0 +1,361 @@ + + + This file defines the EXACT PR message template that must be used when updating + pull requests. The format is specific to the Roo Code project and must be followed + precisely. + + + + + + + The GitHub issue number this PR closes + From linked_issues.json or pr_context.json + + + + Optional Roo Code task links if used + _No Roo Code task context for this PR._ + + + + Summary of changes and implementation details + + This PR addresses the review feedback and fixes identified issues for #[PR_NUMBER]. + + **Key Changes:** + - [List major changes from changes_implemented.md] + - [Implementation details and design choices] + - [Trade-offs or decisions made] + + **Review Comments Addressed:** + [Summary of addressed review comments] + + **Test Failures Fixed:** + [Summary of test fixes if applicable] + + **Conflicts Resolved:** + [Summary of conflict resolutions if applicable] + + + + + How the changes were tested + + **Testing performed:** + 1. Ran all unit tests locally: `[test command used]` + 2. Ran integration tests: `[test command used]` + 3. Manual testing steps: + - [Step 1] + - [Step 2] + - [Step 3] + + **To verify these changes:** + 1. Check out this branch + 2. Run `[specific test commands]` + 3. 
[Additional verification steps] + + **Test Environment:** + - Node.js version: [version] + - OS: [operating system] + - [Other relevant environment details] + + + + + Visual evidence of UI changes + _No UI changes in this PR._ + + + + Documentation impact assessment + + + + + + + + Any additional context for reviewers + + [Any special considerations, known issues, or questions for reviewers] + + **Files Modified:** + ``` + [List of modified files from changes_implemented.md] + ``` + + + + + Contact information + Discord: @[username] + + + + + + The template MUST be followed exactly - do not modify the structure or remove any sections + + + All placeholders must be replaced with actual content - no brackets should remain + + + The Pre-Submission Checklist items should all be marked as checked [x] since we're fixing an existing PR + + + Pull information from: + - changes_implemented.md for the description and file list + - validation_report.md for test results + - pr_context.json for issue numbers and PR details + - translation_summary.md for any translation updates + + + Keep the HTML comments intact - they provide guidance for reviewers + + + + + .roo/temp/pr-fixer-orchestrator/[TASK_ID]/pr_update_message.md + + - Used as the PR comment body when updating the PR + - Saved for reference and audit trail + - Can be edited by user before posting + - Should NOT be deleted even if temp files are cleaned + + + Post to PR using: gh pr comment [pr_number] --repo [owner]/[repo] --body-file [path_to_file] + + + + + + +### Related GitHub Issue + + + +Closes: #456 + +### Roo Code Task Context (Optional) + + + +_No Roo Code task context for this PR._ + +### Description + + + +This PR addresses the review feedback and fixes identified issues for #789. + +**Key Changes:** +- Fixed TypeScript type errors in the API handler by adding proper type annotations +- Improved error handling in the authentication flow to handle edge cases +- Refactored complex functions for better testability and maintainability +- Added missing user role management functionality +- Resolved merge conflicts with the latest main branch + +**Review Comments Addressed:** +- Added timeout handling with exponential backoff for network requests +- Refactored large functions into smaller, testable units +- Added comprehensive TypeScript interfaces for API responses +- Improved error messages for better debugging + +**Test Failures Fixed:** +- Updated email validation tests to match new validation rules +- Fixed mock server responses in integration tests +- Added missing test coverage for new functionality + +### Test Procedure + + + +**Testing performed:** +1. Ran all unit tests locally: `npm test` +2. Ran integration tests: `npm run test:integration` +3. Manual testing steps: + - Created new user with various role types + - Tested authentication flow with invalid credentials + - Verified timeout handling with slow network simulation + +**To verify these changes:** +1. Check out this branch +2. Run `npm install && npm test` +3. Start the dev server with `npm run dev` +4. Test the authentication flow at http://localhost:3000/login + +**Test Environment:** +- Node.js version: 18.17.0 +- OS: Windows 11 +- Browser: Chrome 120 + +### Pre-Submission Checklist + + + +- [x] **Issue Linked**: This PR is linked to an approved GitHub Issue (see "Related GitHub Issue" above). +- [x] **Scope**: My changes are focused on the linked issue (one major feature/fix per PR). +- [x] **Self-Review**: I have performed a thorough self-review of my code. 
+- [x] **Testing**: New and/or updated tests have been added to cover my changes (if applicable). +- [x] **Documentation Impact**: I have considered if my changes require documentation updates (see "Documentation Updates" section below). +- [x] **Contribution Guidelines**: I have read and agree to the [Contributor Guidelines](/CONTRIBUTING.md). + +### Screenshots / Videos + + + +_No UI changes in this PR._ + +### Documentation Updates + + + +- [x] No documentation updates are required. + +### Additional Notes + + + +All review feedback has been addressed. The main architectural change was refactoring the authentication service to use dependency injection, which improves testability. + +**Files Modified:** +``` +src/api/handler.ts - Added type annotations, improved error handling +src/services/auth.service.ts - Refactored for dependency injection +src/services/user.service.ts - Added role management functionality +src/types/api.types.ts - New TypeScript interfaces +src/__tests__/services/auth.service.test.ts - Updated tests +src/__tests__/integration/api.test.ts - Fixed mock responses +``` + +### Get in Touch + + + +Discord: @contributor123 + ]]> + + \ No newline at end of file diff --git a/.roo/rules-pr-fixer/1_workflow.xml b/.roo/rules-pr-fixer/1_workflow.xml index 845a05d4ea22..4b43e9921771 100644 --- a/.roo/rules-pr-fixer/1_workflow.xml +++ b/.roo/rules-pr-fixer/1_workflow.xml @@ -41,17 +41,24 @@ Execute the user's chosen course of action. - Check out the PR branch locally using 'gh pr checkout'. - Apply code changes based on review feedback. - Fix failing tests. - Resolve conflicts by rebasing the PR branch and force-pushing. + Check out the PR branch locally using 'gh pr checkout --force'. + Determine if the PR is from a fork by checking 'gh pr view --json isCrossRepository'. + Apply code changes based on review feedback using file editing tools. + Fix failing tests by modifying test files or source code as needed. + For conflict resolution: Use GIT_EDITOR=true for non-interactive rebases, then resolve conflicts via file editing. + If changes affect user-facing content (i18n files, UI components, announcements), delegate translation updates using the new_task tool with translate mode. + Commit changes using git commands. + Push changes to the correct remote (origin for same-repo PRs, fork remote for cross-repo PRs) using 'git push --force-with-lease'. Verify that the pushed changes resolve the issues. - Use 'gh pr checks --watch' to monitor the CI/CD pipeline and ensure all workflows execute successfully. + Use 'gh pr checks --watch' to monitor check status in real-time until all checks complete. + If needed, check specific workflow runs with 'gh run list --pr' for detailed CI/CD pipeline status. + Verify that all translation updates (if any) have been completed and committed. + Confirm PR is ready for review by checking mergeable state with 'gh pr view --json'. @@ -60,5 +67,6 @@ All actionable review comments have been addressed. All tests are passing. The PR is free of merge conflicts. + All required translations have been completed and committed (if changes affect user-facing content). \ No newline at end of file diff --git a/.roo/rules-pr-fixer/2_best_practices.xml b/.roo/rules-pr-fixer/2_best_practices.xml index 1576daa3c1fd..e9ef4a8b27d8 100644 --- a/.roo/rules-pr-fixer/2_best_practices.xml +++ b/.roo/rules-pr-fixer/2_best_practices.xml @@ -10,6 +10,16 @@ Address issues one at a time (e.g., fix tests first, then address comments). 
This makes the process more manageable and easier to validate. Tackling all issues at once can be complex and error-prone. + + Handle Fork Remotes Correctly + Always check if a PR comes from a fork (cross-repository) before pushing changes. Use 'gh pr view --json isCrossRepository' to determine the correct remote. + Pushing to the wrong remote (e.g., origin instead of fork) will fail for cross-repository PRs. + + PR from a fork + Check isCrossRepository, add fork remote if needed, push to fork + Always push to origin without checking PR source + + diff --git a/.roo/rules-pr-fixer/3_common_patterns.xml b/.roo/rules-pr-fixer/3_common_patterns.xml index 659aa7d07f31..5d2c7f033f7c 100644 --- a/.roo/rules-pr-fixer/3_common_patterns.xml +++ b/.roo/rules-pr-fixer/3_common_patterns.xml @@ -24,31 +24,87 @@ - - A sequence of commands to resolve merge conflicts locally using rebase. + + Commands to detect merge conflicts. + + + + + Rebase operations using GIT_EDITOR to prevent interactive prompts. + + + + Check current conflict status without interactive input. + - Command to check out a pull request branch locally. + Check out a pull request branch locally. + + + + + Determine the correct remote to push to (handles forks). + + + + + Monitor PR checks in real-time as they run. + + + + + Push operations that handle both origin and fork remotes correctly. - - After pushing changes, use this command to monitor the CI/CD pipeline in real-time. + + + Commit operations that work in automated environments. diff --git a/.roo/rules-pr-fixer/4_tool_usage.xml b/.roo/rules-pr-fixer/4_tool_usage.xml index 10361dc3ec92..15833f3dd7d2 100644 --- a/.roo/rules-pr-fixer/4_tool_usage.xml +++ b/.roo/rules-pr-fixer/4_tool_usage.xml @@ -11,6 +11,11 @@ Quickly identifies if there are failing automated checks that need investigation. + new_task (mode: translate) + When changes affect user-facing content, i18n files, or UI components that require translation. + Ensures translation consistency across all supported languages when PR fixes involve user-facing changes. + + gh pr checks --watch After pushing a fix, to confirm that the changes have resolved the CI/CD failures. Provides real-time feedback on whether the fix was successful. @@ -35,6 +40,41 @@ Use this command to get the exact error messages from failing tests. Search the log for keywords like 'error', 'failed', or 'exception' to quickly find the root cause. + Always specify run ID explicitly to avoid interactive selection prompts. + + + + + + Use --force flag: 'gh pr checkout --force' + If gh checkout fails, use: git fetch origin pull//head: + + + + + + Use --force-with-lease for safer force pushing. + Use GIT_EDITOR=true to prevent interactive prompts during rebases. + Always determine the correct remote before pushing (origin vs fork). + + + Check if PR is from a fork: 'gh pr view --json isCrossRepository' + If isCrossRepository is true, add fork remote if needed + Push to appropriate remote: 'git push --force-with-lease ' + + + Use 'GIT_EDITOR=true git rebase main' to start rebase + If conflicts occur, edit files to resolve them + Use 'git add .' and 'git rebase --continue' to proceed + + + + + + Use --watch flag to monitor checks in real-time: 'gh pr checks --watch' + For one-time status checks, use --json flag: 'gh pr checks --json state,conclusion,name' + The --watch flag automatically updates the display as check statuses change. + Use 'gh run list --pr ' to get detailed workflow status if needed. 
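Putting the fork-handling guidance above together, a hedged sketch of the push sequence. The remote name `fork` is an assumption and must be added first if the contributor's fork is not already configured:

```bash
# Illustrative fork-aware push; PR number and repo are placeholders.
CROSS=$(gh pr view 1234 --repo owner/repo --json isCrossRepository --jq '.isCrossRepository')
BRANCH=$(gh pr view 1234 --repo owner/repo --json headRefName --jq '.headRefName')
REMOTE=origin
[ "$CROSS" = "true" ] && REMOTE=fork
git push --force-with-lease "$REMOTE" "$BRANCH"
```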
@@ -45,5 +85,34 @@ Example suggestions: "Address review comments first.", "Tackle the failing tests.", "Resolve merge conflicts." + + + + Use when PR fixes involve changes to user-facing strings, i18n files, or UI components. + Provide specific details about what content needs translation in the message. + Include file paths and descriptions of the changes made. + List all affected languages that need updates. + Wait for translation completion before proceeding to validation phase. + + + Changes to webview-ui/src/i18n/locales/en/*.json files + Changes to src/i18n/locales/en/*.json files + Modifications to UI components with user-facing text + Updates to announcement files or documentation requiring localization + Addition of new error messages or user notifications + + +translate +Translation updates needed for PR #1234 fixes. Please translate the following changes: + +Files modified: +- webview-ui/src/i18n/locales/en/common.json: Added new error message "connection_failed" +- webview-ui/src/components/settings/ApiSettings.tsx: Updated button text from "Save" to "Save Configuration" + +Please ensure all supported languages (ca, de, es, fr, hi, id, it, ja, ko, nl, pl, pt-BR, ru, tr, vi, zh-CN, zh-TW) are updated with appropriate translations for these changes. + + ]]> + \ No newline at end of file diff --git a/.roo/rules-pr-fixer/5_examples.xml b/.roo/rules-pr-fixer/5_examples.xml index e34a79421ffc..0ff98de25087 100644 --- a/.roo/rules-pr-fixer/5_examples.xml +++ b/.roo/rules-pr-fixer/5_examples.xml @@ -62,7 +62,7 @@ Check out the pull request branch. - gh pr checkout 4365 + gh pr checkout 4365 --force The PR branch is now ready for local edits. @@ -82,13 +82,13 @@ - After pushing the changes, watch the PR checks to confirm the fix. + After pushing the changes, monitor PR checks in real-time. - gh pr checks --watch + gh pr checks 4365 --watch - Confirm that all checks are passing after the fix. + Monitor checks continuously until all complete. The --watch flag provides real-time updates as check statuses change. @@ -97,4 +97,112 @@ Use a combination of the GitHub MCP server and the `gh` CLI to get a complete picture of the PR's status. + + + + A pull request has review comments requesting UI text changes that require translation updates across all supported languages. + + + + Fix PR #1234 in RooCodeInc/Roo-Code - the reviewer asked to change button text and there are some failing tests. + + + + + Get PR details and analyze changes. + + + github + get_pull_request + + { + "owner": "RooCodeInc", + "repo": "Roo-Code", + "pullNumber": 1234 + } + + + + Identify the files changed and review feedback requiring UI text modifications. + + + + Check out PR and apply the requested changes. + + +gh pr checkout 1234 --force + + + Make the requested button text changes in the UI components. + + + + Identify translation requirements and delegate to translate mode. + + +translate +Translation updates needed for PR #1234 fixes. 
The following changes were made based on review feedback: + +Files modified: +- webview-ui/src/components/settings/ApiSettings.tsx: Changed button text from "Save" to "Save Configuration" +- webview-ui/src/i18n/locales/en/common.json: Updated key "save_button" to "save_config_button" + +Please update all supported languages (ca, de, es, fr, hi, id, it, ja, ko, nl, pl, pt-BR, ru, tr, vi, zh-CN, zh-TW) with appropriate translations for: +- New key "save_config_button" with translation equivalent to "Save Configuration" +- Any other text changes that affect user-facing content + +Ensure consistency across all language files and maintain the same context and tone as existing translations. + + + Translation subtask created and all language files updated. + + + + Commit all changes including translations with automated git configuration. + + +git add . && git commit -m "fix: update button text and translations as requested in review" + + + All code changes and translation updates are now committed. + + + + Check if PR is from a fork and push to correct remote. + + +gh pr view 1234 --json isCrossRepository,headRepositoryOwner,headRefName + + + Determine if this is a cross-repository PR to know which remote to push to. + + + + Push changes to the appropriate remote. + + +git push --force-with-lease origin + + + Push changes safely to update the pull request. Use 'fork' remote instead if PR is from a fork. + + + + Monitor CI status in real-time. + + +gh pr checks 1234 --watch + + + Watch CI checks continuously until all tests pass. The --watch flag provides automatic updates as check statuses change. + + + + + Always check if PR fixes involve user-facing content that requires translation. + Use new_task with translate mode to ensure consistent translation updates. + Include detailed context about what changed and why in translation requests. + Verify translation completeness before considering the PR fix complete. + + diff --git a/.roo/rules-pr-reviewer/1_orchestrator_workflow.xml b/.roo/rules-pr-reviewer/1_orchestrator_workflow.xml new file mode 100644 index 000000000000..cea0486a26b8 --- /dev/null +++ b/.roo/rules-pr-reviewer/1_orchestrator_workflow.xml @@ -0,0 +1,203 @@ + + + This workflow orchestrates a comprehensive pull request review process by delegating + specialized analysis tasks to appropriate modes while maintaining context through + structured report files. The orchestrator ensures critical review coverage while + avoiding redundant feedback. + + + + + Parse PR Information and Initialize Context + + Extract PR information from user input (URL or PR number). + Create context directory and tracking files. + If called by another mode (Issue Fixer, PR Fixer), set calledByMode field. + + + - Parse PR URL or number from user input + - Create directory: .roo/temp/pr-[PR_NUMBER]/ + - Initialize review-context.json with PR metadata + - Check if called by another mode and record it + + + + + + + Fetch PR Details and Context + + Try using GitHub MCP tools first. If unavailable or failing, fall back to GitHub CLI. + + + Use get_pull_request tool to fetch PR details + + + gh pr view [PR_NUMBER] --repo [owner]/[repo] --json number,title,author,state,body,url,headRefName,baseRefName,files,additions,deletions,changedFiles + + + + + Fetch Linked Issue + + If PR references an issue, fetch its details for context. 
+ + + Use get_issue tool if issue is referenced + + + gh issue view [issue_number] --repo [owner]/[repo] --json number,title,body,author,state + + + + + Fetch Existing Comments and Reviews + + CRITICAL: Get all existing feedback to avoid redundancy. + + + Use get_pull_request_comments and get_pull_request_reviews + + + gh pr review [PR_NUMBER] --repo [owner]/[repo] --json comments,reviews + + .roo/temp/pr-[PR_NUMBER]/existing-feedback.json + + + + Check Out PR Locally + gh pr checkout [PR_NUMBER] --repo [owner]/[repo] + Enable local code analysis and pattern comparison + + + + + + Delegate Pattern Analysis + + Create a subtask to analyze code patterns and organization. + + + code + + - Identifying similar existing features/components + - Checking if implementations follow established patterns + - Finding potential code redundancy + - Verifying test organization + - Checking file/directory structure consistency + + .roo/temp/pr-[PR_NUMBER]/pattern-analysis.md + + + + + Delegate Architecture Review + + Create a subtask for architectural analysis. + + + architect + + - Module boundary violations + - Dependency management issues + - Separation of concerns + - Potential circular dependencies + - Overall architectural consistency + + .roo/temp/pr-[PR_NUMBER]/architecture-review.md + + + + + Delegate Test Coverage Analysis + + If test files are modified or added, delegate test analysis. + + + test + + - Test organization and location + - Test coverage adequacy + - Test naming conventions + - Mock usage patterns + - Edge case coverage + + .roo/temp/pr-[PR_NUMBER]/test-analysis.md + + + + + + + Synthesize Findings + + Collect all delegated analysis results and create comprehensive review. + + + - Read all analysis files from .roo/temp/pr-[PR_NUMBER]/ + - Identify critical issues vs suggestions + - Check against existing comments to avoid redundancy + - Prioritize findings by impact + + + + + Create Final Review Report + + Generate comprehensive review report with all findings. + + .roo/temp/pr-[PR_NUMBER]/final-review.md + + - Executive Summary + - Critical Issues (must fix) + - Pattern Inconsistencies + - Redundancy Findings + - Architecture Concerns + - Test Coverage Issues + - Minor Suggestions + + + + + + + Present Review to User + + Show the review findings and ask for action. + + + + Only present the analysis report, do not comment on PR + + + Ask user if they want to post the review as a comment + + + + + + Post Review Comment (if approved) + + If user approves and not called by another mode, post review. + + + Use add_issue_comment or create PR review + + + gh pr comment [PR_NUMBER] --repo [owner]/[repo] --body-file .roo/temp/pr-[PR_NUMBER]/final-review.md + + + + + + + Always fall back to GitHub CLI commands + + + Continue with available analysis and note limitations + + + Always save intermediate results to temp files + + + \ No newline at end of file diff --git a/.roo/rules-pr-reviewer/1_workflow.xml b/.roo/rules-pr-reviewer/1_workflow.xml deleted file mode 100644 index 31b70d981d61..000000000000 --- a/.roo/rules-pr-reviewer/1_workflow.xml +++ /dev/null @@ -1,229 +0,0 @@ - - - Fetch Pull Request Information - - By default, use the GitHub MCP server to fetch and review pull requests from the - https://github.com/RooCodeInc/Roo-Code repository. 
- - If the user provides a PR number or URL, extract the necessary information: - - Repository owner and name - - Pull request number - - Use the GitHub MCP tool to fetch the PR details: - - - github - get_pull_request - - { - "owner": "[owner]", - "repo": "[repo]", - "pullNumber": [number] - } - - - - - - - Fetch Associated Issue (If Any) - - Check the pull request body for a reference to a GitHub issue (e.g., "Fixes #123", "Closes #456"). - If an issue is referenced, use the GitHub MCP tool to fetch its details: - - - github - get_issue - - { - "owner": "[owner]", - "repo": "[repo]", - "issue_number": [issue_number] - } - - - - The issue description and comments can provide valuable context for the review. - - - - - Fetch Pull Request Diff - - Get the pull request diff to understand the changes: - - - github - get_pull_request_diff - - { - "owner": "[owner]", - "repo": "[repo]", - "pullNumber": [number] - } - - - - - - - Check Out Pull Request Locally - - Use the GitHub CLI (e.g. `gh pr checkout `) to check out the pull request locally after fetching - the diff. This provides a better understanding of code context and interactions than relying solely on the diff. - - - gh pr checkout [PR_NUMBER] - - - This allows you to: - - Navigate the actual code structure - - Understand how changes interact with existing code - - Get better context for your review - - - - - Fetch Existing PR Comments - - Get existing comments to understand the current discussion state: - - - github - get_pull_request_comments - - { - "owner": "[owner]", - "repo": "[repo]", - "pullNumber": [number] - } - - - - Examine existing PR comments to understand the current state of discussion. When reading the comments and reviews, you must verify which are resolved by reading the files they refer to, since they might already be resolved. This prevents you from making redundant suggestions. - - - - - Perform Comprehensive Review - - Review the pull request thoroughly: - - Verify that the changes are directly related to the linked issue and do not include unrelated modifications. - - Focus primarily on the changes made in the PR. - - Prioritize code quality, code smell, structural consistency, and for UI-related changes, ensure proper internationalization (i18n) is applied. - - Watch for signs of technical debt (e.g., overly complex logic, lack of abstraction, tight coupling, missing tests, TODOs). - - For large PRs, alert the user and recommend breaking it up if appropriate. - - NEVER run tests or execute code in PR Reviewer mode. The repository likely has automated testing. Your role is limited to: - - Code review and analysis - - Leaving review comments - - Checking code quality and structure - - Reviewing test coverage and quality (without execution) - - Document your findings: - - Code quality issues - - Structural improvements - - Missing tests or documentation - - Potential bugs or edge cases - - Performance concerns - - Security considerations - - - - - Prepare Review Comments - - Format your review comments following these guidelines: - - Your suggestions should: - - Use a **friendly, curious tone** — prefer asking: "Is this intentional?" or "Could we approach this differently to improve X?" - - Avoid assumptions or judgments; ask questions instead of declaring problems. - - Skip ALL praise and positive comments. Focus exclusively on issues that need attention. - - Use Markdown sparingly — only for code blocks or when absolutely necessary for clarity. Avoid markdown headings (###, ##, etc.) entirely. 
- - Avoid including internal evaluation terminology (e.g., scores or internal tags) in public comments. - - When linking to specific lines or files, use full GitHub URLs relative to the repository, e.g. - `https://github.com/RooCodeInc/Roo-Code/blob/main/src/api/providers/human-relay.ts#L50`. - - Group your comments by: - - Critical issues (must fix) - - Important suggestions (should consider) - - Minor improvements (nice to have) - - - - - Preview Review with User - - Always show the user a preview of your review suggestions and comments before taking any action. - Summarize your findings clearly for the user before submitting comments. - - - I've completed my review of PR #[number]. Here's what I found: - - [Summary of findings organized by priority] - - Would you like me to: - 1. Create a comprehensive review with all comments - 2. Modify any of the suggestions - 3. Skip the review submission - - Create a comprehensive review - Let me modify the suggestions first - Skip submission - just wanted the analysis - - - - - - - Submit Review - - Based on user preference, submit the review as a comprehensive review: - - 1. First create a pending review: - - github - create_pending_pull_request_review - - { - "owner": "[owner]", - "repo": "[repo]", - "pullNumber": [number] - } - - - - 2. Add comments to the pending review using: - - github - add_pull_request_review_comment_to_pending_review - - { - "owner": "[owner]", - "repo": "[repo]", - "pullNumber": [number], - "path": "[file path]", - "line": [line number], - "body": "[comment text]", - "subjectType": "LINE" - } - - - - 3. Submit the review: - - github - submit_pending_pull_request_review - - { - "owner": "[owner]", - "repo": "[repo]", - "pullNumber": [number], - "event": "COMMENT", - "body": "[overall review summary]" - } - - - - - \ No newline at end of file diff --git a/.roo/rules-pr-reviewer/2_best_practices.xml b/.roo/rules-pr-reviewer/2_best_practices.xml deleted file mode 100644 index 69ba8088f010..000000000000 --- a/.roo/rules-pr-reviewer/2_best_practices.xml +++ /dev/null @@ -1,22 +0,0 @@ - - - Always fetch and review the entire PR diff before commenting - - Check for and review any associated issue for context - - Check out the PR locally for better context understanding - - Review existing comments and verify against the current code to avoid redundant feedback on already resolved issues - - Focus on the changes made, not unrelated code - - Ensure all changes are directly related to the linked issue - - Use a friendly, curious tone in all comments - - Ask questions rather than making assumptions - there may be intentions behind the code choices - - Provide actionable feedback with specific suggestions - - Focus exclusively on issues and improvements - skip all praise or positive comments - - Use minimal markdown - avoid headings (###, ##) and excessive formatting - - Only use markdown for code blocks or when absolutely necessary for clarity - - Consider the PR's scope - suggest breaking up large PRs - - Verify proper i18n implementation for UI changes - - Check for test coverage without executing tests - - Look for signs of technical debt and code smells - - Ensure consistency with existing code patterns - - Link to specific lines using full GitHub URLs - - Group feedback by priority (critical, important, minor) - - Always preview comments with the user before submitting - \ No newline at end of file diff --git a/.roo/rules-pr-reviewer/2_critical_review_guidelines.xml b/.roo/rules-pr-reviewer/2_critical_review_guidelines.xml new 
file mode 100644 index 000000000000..ebccff3dbc9c --- /dev/null +++ b/.roo/rules-pr-reviewer/2_critical_review_guidelines.xml @@ -0,0 +1,208 @@ + + + These guidelines ensure PR reviews are appropriately critical while remaining + constructive. The goal is to maintain high code quality and consistency + across the codebase by identifying issues that might be overlooked in a + less thorough review. + + + + + Always support criticism with evidence from the codebase + + Instead of: "This doesn't follow our patterns" + Say: "This implementation differs from the pattern used in src/api/handlers/*.ts + where we consistently use the factory pattern for endpoint creation" + + + + + Reference similar existing implementations + + 1. Find 2-3 examples of similar features + 2. Identify the common patterns they follow + 3. Explain how the PR deviates from these patterns + 4. Suggest alignment with existing approaches + + + + + Challenge architectural choices when appropriate + + - "Why was this implemented as a separate module instead of extending the existing X module?" + - "This introduces a new pattern for Y. Have we considered using the established pattern from Z?" + - "This creates a circular dependency with module A. Could we restructure to maintain cleaner boundaries?" + + + + + + + Do new endpoints follow the same structure as existing ones? + Are error responses consistent with other endpoints? + Is authentication/authorization handled the same way? + Are request validations following established patterns? + + + + Do components follow the same file structure (types, helpers, component)? + Are props interfaces defined consistently? + Is state management approach consistent with similar components? + Are hooks used in the same patterns as elsewhere? + + + + Are test files in the correct directory structure? + Do test descriptions follow the same format? + Are mocking strategies consistent with other tests? + Is test data generation following established patterns? + + + + Could this utility already exist elsewhere? + Should this be added to an existing utility module? + Does the naming convention match other utilities? + Are similar transformations already implemented? 
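As a hedged illustration of the consistency checks above (not part of the rules file itself): before flagging a new utility as a pattern deviation, a reviewer could first confirm whether comparable code already exists in the checked-out PR. The sketch below assumes a local checkout and plain `grep`; inside the mode, `codebase_search` and `search_files` remain the intended tools, and the paths and patterns shown are examples only.

```sh
# Illustrative sketch only: look for existing date-formatting helpers before
# treating a new formatDate() utility as novel. Paths and patterns are examples.
grep -rEn "formatDate|dateFormat|format[A-Za-z]*Date" src/ --include='*.ts' \
  | grep -v "__tests__" | head -20

# Check what similarly-purposed files already import (import analysis).
grep -rn "^import" src/utils/*.ts | sort | uniq -c | sort -rn | head -20
```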
+ + + + + + + Search for similar functionality by behavior + + If PR adds a "formatDate" function, search for: + - "date format" + - "format.*date" + - "dateFormat" + - Existing date manipulation utilities + + + + + Search for similar code patterns + + If PR adds error handling, search for: + - try/catch patterns in similar contexts + - Error boundary implementations + - Existing error utilities + + + + + Check what similar files import + + Look at imports in files with similar purposes + to discover existing utilities that could be reused + + + + + + + Reimplementing existing utilities + + - String manipulation functions + - Array transformations + - Date formatting + - API response transformations + + + + + Creating similar components + + - Modal variations that could use a base modal + - Form inputs that could extend existing inputs + - List components with slight variations + + + + + Repeating business logic + + - Validation rules implemented multiple times + - Permission checks duplicated across files + - Data transformation logic repeated + + + + + + + + + + + + + + + + + + Issues that should block PR approval + + - Security vulnerabilities + - Breaking changes without migration path + - Significant pattern violations that would confuse future developers + - Major redundancy that adds maintenance burden + + + + + Important issues that need addressing + + - Test files in wrong location + - Inconsistent error handling + - Missing critical test cases + - Code organization that violates module boundaries + + + + + Improvements that would benefit the codebase + + - Minor pattern inconsistencies + - Opportunities for code reuse + - Additional test coverage + - Documentation improvements + + + + \ No newline at end of file diff --git a/.roo/rules-pr-reviewer/3_common_mistakes_to_avoid.xml b/.roo/rules-pr-reviewer/3_common_mistakes_to_avoid.xml deleted file mode 100644 index 0868956e8756..000000000000 --- a/.roo/rules-pr-reviewer/3_common_mistakes_to_avoid.xml +++ /dev/null @@ -1,20 +0,0 @@ - - - Running tests or executing code during review - - Making judgmental or harsh comments - - Providing feedback on code outside the PR's scope - - Overlooking unrelated changes not tied to the main issue - - Including ANY praise or positive comments - focus only on issues - - Using markdown headings (###, ##, #) in review comments - - Using excessive markdown formatting when plain text would suffice - - Submitting comments without user preview/approval - - Ignoring existing PR comments or failing to verify if they have already been resolved by checking the code - - Forgetting to check for an associated issue for additional context - - Missing critical security or performance issues - - Not checking for proper i18n in UI changes - - Failing to suggest breaking up large PRs - - Using internal evaluation terminology in public comments - - Not providing actionable suggestions for improvements - - Reviewing only the diff without local context - - Making assumptions instead of asking clarifying questions about potential intentions - - Forgetting to link to specific lines with full GitHub URLs - \ No newline at end of file diff --git a/.roo/rules-pr-reviewer/3_delegation_patterns.xml b/.roo/rules-pr-reviewer/3_delegation_patterns.xml new file mode 100644 index 000000000000..9632c7dcf7c3 --- /dev/null +++ b/.roo/rules-pr-reviewer/3_delegation_patterns.xml @@ -0,0 +1,238 @@ + + + Patterns for effectively delegating analysis tasks to specialized modes + while maintaining context and ensuring comprehensive review 
coverage. + + + + + + When PR contains new features or significant code changes + + code + + Analyze the following changed files for pattern consistency: + [List of changed files] + + Please focus on: + 1. Finding similar existing implementations in the codebase + 2. Identifying established patterns for this type of feature + 3. Checking if the new code follows these patterns + 4. Looking for potential code redundancy + 5. Verifying proper file organization + + Use codebase_search and search_files to find similar code. + Document all findings with specific examples and file references. + + Save your analysis to: .roo/temp/pr-[PR_NUMBER]/pattern-analysis.md + + Format the output as: + ## Pattern Analysis for PR #[PR_NUMBER] + ### Similar Existing Implementations + ### Established Patterns + ### Pattern Deviations + ### Redundancy Findings + ### Organization Issues + + + + + + When PR modifies core modules, adds new modules, or changes dependencies + + architect + + Review the architectural implications of PR #[PR_NUMBER]: + + Changed files: + [List of changed files] + + PR Description: + [PR description] + + Please analyze: + 1. Module boundary adherence + 2. Dependency management (new dependencies, circular dependencies) + 3. Separation of concerns + 4. Impact on system architecture + 5. Consistency with architectural patterns + + Save your findings to: .roo/temp/pr-[PR_NUMBER]/architecture-review.md + + Format as: + ## Architecture Review for PR #[PR_NUMBER] + ### Module Boundaries + ### Dependency Analysis + ### Architectural Concerns + ### Recommendations + + + + + + When PR adds or modifies test files + + test + + Analyze test changes in PR #[PR_NUMBER]: + + Test files changed: + [List of test files] + + Please review: + 1. Test file organization and location + 2. Test naming conventions + 3. Coverage of edge cases + 4. Mock usage patterns + 5. Consistency with existing test patterns + + Compare with similar existing tests in the codebase. + + Save analysis to: .roo/temp/pr-[PR_NUMBER]/test-analysis.md + + Format as: + ## Test Analysis for PR #[PR_NUMBER] + ### Test Organization + ### Coverage Assessment + ### Pattern Consistency + ### Recommendations + + + + + + When PR modifies UI components or adds new ones + + design-engineer + + Review UI changes in PR #[PR_NUMBER]: + + UI files changed: + [List of UI files] + + Please analyze: + 1. Component structure consistency + 2. Styling approach (Tailwind usage) + 3. Accessibility considerations + 4. i18n implementation + 5. Component reusability + + Save findings to: .roo/temp/pr-[PR_NUMBER]/ui-review.md + + + + + + + Always save delegation results to temp files + .roo/temp/pr-[PR_NUMBER]/[analysis-type].md + + + + Request structured markdown output from delegates + + - Easy to parse and combine + - Consistent formatting + - Clear section headers + + + + + Include relevant context in delegation requests + + - PR number and description + - List of changed files + - Specific areas of concern + - Output file location + + + + + + + Delegate tasks one at a time, using results to inform next delegation + + 1. Pattern analysis first + 2. If patterns violated, delegate architecture review + 3. 
If tests affected, delegate test analysis + + + + + Delegate multiple independent analyses simultaneously + + - Pattern analysis (code mode) + - Test analysis (test mode) + - UI review (design-engineer mode) + + + + + Only delegate based on file types changed + + - If *.test.ts changed -> delegate to test mode + - If src/components/* changed -> delegate to design-engineer + - If package.json changed -> delegate to architect + + + + + + + Read all analysis files from temp directory + + - pattern-analysis.md + - architecture-review.md + - test-analysis.md + - ui-review.md + + + + + Find common issues across analyses + + - Pattern violations mentioned multiple times + - Redundancy identified by different modes + - Organizational issues + + + + + Categorize by severity + + - Critical (blocks PR) + - Important (should fix) + - Suggestions (nice to have) + + + + + Combine all findings into final review + + ## PR Review Summary + ### Critical Issues + ### Pattern Inconsistencies + ### Architecture Concerns + ### Test Coverage + ### Suggestions + + + + + + + Continue with available analyses + Document which analyses couldn't be completed + + + + Perform basic analysis in orchestrator mode + Note limitations in final report + + + + Use completed analyses + Set reasonable time limits for delegations + + + \ No newline at end of file diff --git a/.roo/rules-pr-reviewer/4_github_operations.xml b/.roo/rules-pr-reviewer/4_github_operations.xml new file mode 100644 index 000000000000..dd1c97e1bf48 --- /dev/null +++ b/.roo/rules-pr-reviewer/4_github_operations.xml @@ -0,0 +1,226 @@ + + + Guidelines for handling GitHub operations with fallback strategies + when MCP tools are unavailable or failing. + + + + + Always try MCP tools first, fall back to GitHub CLI if they fail + + + - Structured data responses + - Better error handling + - Integrated with the system + + + - More reliable when MCP is down + - Direct GitHub API access + - Can handle complex queries + + + + + + + get_pull_request + +github +get_pull_request + +{ + "owner": "RooCodeInc", + "repo": "Roo-Code", + "pullNumber": 123 +} + + + ]]> + + + gh pr view [PR_NUMBER] --repo [owner]/[repo] --json number,title,author,state,body,url,headRefName,baseRefName,files,additions,deletions,changedFiles + true + + + + + + get_pull_request_diff + +github +get_pull_request_diff + +{ + "owner": "RooCodeInc", + "repo": "Roo-Code", + "pullNumber": 123 +} + + + ]]> + + + gh pr diff [PR_NUMBER] --repo [owner]/[repo] + .roo/temp/pr-[PR_NUMBER]/pr.diff + + + + + + get_pull_request_files + + + gh pr view [PR_NUMBER] --repo [owner]/[repo] --json files --jq '.files[].path' + Lists all files changed in the PR + + + + + + get_pull_request_comments + + + gh pr view [PR_NUMBER] --repo [owner]/[repo] --json comments --jq '.comments' + + + + + + get_pull_request_reviews + + + gh pr view [PR_NUMBER] --repo [owner]/[repo] --json reviews --jq '.reviews' + + + + + + gh pr checkout [PR_NUMBER] --repo [owner]/[repo] + No MCP equivalent - always use CLI + + + + + + add_issue_comment + PRs use same comment system as issues + + + gh pr comment [PR_NUMBER] --repo [owner]/[repo] --body-file [file_path] + gh pr comment [PR_NUMBER] --repo [owner]/[repo] --body "[comment_text]" + + + + + + + 1. create_pending_pull_request_review + 2. add_pull_request_review_comment_to_pending_review (multiple times) + 3. 
submit_pending_pull_request_review + + + + gh pr review [PR_NUMBER] --repo [owner]/[repo] --comment --body-file [review_file] + + + + + + + + Error message contains "MCP server" or "github server not found" + + + Immediately switch to CLI commands for all operations + + + + + + Error contains "rate limit" or status code 403 + + + 1. Wait briefly (30 seconds) + 2. Retry with CLI using --limit flag + 3. Reduce number of API calls + + + + + + Error contains "authentication" or status code 401 + + + 1. Inform user about auth issue + 2. Suggest checking gh auth status + 3. Continue with available data + + + + + + Error contains "not found" or status code 404 + + + 1. Verify PR number and repository + 2. Ask user to confirm details + 3. Check if PR is from a fork + + + + + + + Always save API responses to temp files + Preserve data in case of failures + + + + Use jq or built-in JSON parsing + + gh pr view --json files --jq '.files[].path' + + + + + For PRs with many files, process in batches + More than 50 files + + + + + + gh pr view [number] --json [fields] + + number, title, author, state, body, url, + headRefName, baseRefName, files, additions, + deletions, changedFiles, comments, reviews + + + + + gh pr checkout [number] + gh pr diff [number] + gh pr comment [number] --body "[text]" + gh pr review [number] --comment --body "[text]" + + + + gh issue view [number] --json [fields] + + number, title, body, author, state, + labels, assignees, milestone + + + + + + Always specify --repo to avoid ambiguity + Use --json for structured data + Save command outputs to temp files + Check gh auth status before operations + Handle both personal repos and org repos + + \ No newline at end of file diff --git a/.roo/rules-pr-reviewer/5_context_management.xml b/.roo/rules-pr-reviewer/5_context_management.xml new file mode 100644 index 000000000000..4b55431c7441 --- /dev/null +++ b/.roo/rules-pr-reviewer/5_context_management.xml @@ -0,0 +1,356 @@ + + + Strategies for maintaining review context across delegated tasks and + ensuring no information is lost during the orchestration process. 
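To make the CLI fallback path above concrete, here is a minimal sketch (illustrative only, not part of the rules file) of gathering PR data with `gh` and caching it under the `.roo/temp/pr-[number]/` convention these rules use. The PR number and repository are placeholders, and `gh`/`jq` are assumed to be installed and authenticated.

```sh
# Illustrative fallback sketch: fetch PR data via the GitHub CLI and save it
# under the temp directory the review rules expect. PR number is an example.
PR=123
REPO=RooCodeInc/Roo-Code
DIR=".roo/temp/pr-$PR"
mkdir -p "$DIR"

gh auth status   # confirm authentication before making API calls
gh pr view "$PR" --repo "$REPO" \
  --json number,title,author,state,additions,deletions,changedFiles \
  > "$DIR/pr-metadata.json"
gh pr diff "$PR" --repo "$REPO" > "$DIR/pr.diff"
gh pr view "$PR" --repo "$REPO" --json files --jq '.files[].path' > "$DIR/changed-files.txt"
```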
+ + + + + Central tracking file for the entire review process + .roo/temp/pr-[PR_NUMBER]/review-context.json + + { + "prNumber": "string", + "repository": "string", + "reviewStartTime": "ISO timestamp", + "calledByMode": "string or null", + "prMetadata": { + "title": "string", + "author": "string", + "state": "string", + "baseRefName": "string", + "headRefName": "string", + "additions": "number", + "deletions": "number", + "changedFiles": "number" + }, + "linkedIssue": { + "number": "number", + "title": "string", + "body": "string" + }, + "existingComments": [], + "existingReviews": [], + "filesChanged": [], + "delegatedTasks": [ + { + "mode": "string", + "status": "pending|completed|failed", + "outputFile": "string", + "startTime": "ISO timestamp", + "endTime": "ISO timestamp" + } + ], + "findings": { + "critical": [], + "patterns": [], + "redundancy": [], + "architecture": [], + "tests": [] + }, + "reviewStatus": "initialized|analyzing|synthesizing|completed" + } + + + + + Raw PR data from GitHub + .roo/temp/pr-[PR_NUMBER]/pr-metadata.json + + + + All existing comments and reviews + .roo/temp/pr-[PR_NUMBER]/existing-feedback.json + + + + Output from code mode delegation + .roo/temp/pr-[PR_NUMBER]/pattern-analysis.md + + + + Output from architect mode delegation + .roo/temp/pr-[PR_NUMBER]/architecture-review.md + + + + Output from test mode delegation + .roo/temp/pr-[PR_NUMBER]/test-analysis.md + + + + Synthesized review ready for posting + .roo/temp/pr-[PR_NUMBER]/final-review.md + + + + + + Update review-context.json with PR metadata + +.roo/temp/pr-123/review-context.json + + + + + +.roo/temp/pr-123/review-context.json + +{ + ...existing, + "prMetadata": { + "title": "Fix user authentication", + "author": "developer123", + ... + }, + "filesChanged": ["src/auth.ts", "tests/auth.test.ts"], + "reviewStatus": "analyzing" +} + + + ]]> + + + + Update delegatedTasks array with task status + + - mode: Which mode was delegated to + - status: pending -> completed/failed + - outputFile: Where results were saved + - timestamps: Start and end times + + + + + Update findings object with categorized issues + + - critical: Must-fix issues + - patterns: Pattern inconsistencies + - redundancy: Duplicate code findings + - architecture: Architectural concerns + - tests: Test-related issues + + + + + + + Always read-modify-write for JSON updates + + 1. Read current context file + 2. Parse JSON + 3. Update specific fields + 4. Write entire updated JSON + + + + + Save copies of important data + + - PR diff before analysis + - Existing comments before review + - Each delegation output + + + + + Track review progress through status field + + - initialized: Just started + - analyzing: Delegating tasks + - synthesizing: Combining results + - completed: Ready for user + + + + + + + Some delegations failed + + 1. Mark failed tasks in context + 2. Continue with available data + 3. Note limitations in final review + + + + + JSON file becomes invalid + + 1. Try to recover from backups + 2. Reconstruct from individual files + 3. Start fresh if necessary + + + + + Review process interrupted + + 1. Check reviewStatus field + 2. Resume from last completed step + 3. 
Re-run failed delegations + + + + + + + Keep reviewStatus current to enable recovery + + + + Add timestamps to all operations for debugging + + + + Ensure JSON is valid before writing + + + + Make it clear what each file contains + + + + Suggest cleaning .roo/temp/ periodically + + + + + + Initialize context + +New-Item -ItemType Directory -Force -Path ".roo/temp/pr-123" + + + +.roo/temp/pr-123/review-context.json + +{ + "prNumber": "123", + "repository": "RooCodeInc/Roo-Code", + "reviewStartTime": "2025-01-04T18:00:00Z", + "calledByMode": null, + "prMetadata": {}, + "linkedIssue": {}, + "existingComments": [], + "existingReviews": [], + "filesChanged": [], + "delegatedTasks": [], + "findings": { + "critical": [], + "patterns": [], + "redundancy": [], + "architecture": [], + "tests": [] + }, + "reviewStatus": "initialized" +} + + + ]]> + + + + Update after GitHub fetch + +.roo/temp/pr-123/review-context.json + + + + + +.roo/temp/pr-123/review-context.json + +{ + ...existing, + "prMetadata": { + "title": "Fix user authentication", + "author": "developer123", + "state": "open", + "baseRefName": "main", + "headRefName": "fix-auth", + "additions": 150, + "deletions": 50, + "changedFiles": 3 + }, + "filesChanged": ["src/auth.ts", "tests/auth.test.ts", "docs/auth.md"], + "reviewStatus": "analyzing" +} + + + ]]> + + + + Track delegation + + +.roo/temp/pr-123/review-context.json + + + + +.roo/temp/pr-123/review-context.json + +{ + ...existing, + "delegatedTasks": [ + ...existing, + { + "mode": "code", + "status": "pending", + "outputFile": "pattern-analysis.md", + "startTime": "2025-01-04T18:05:00Z", + "endTime": null + } + ] +} + + + + + + ]]> + + + + Synthesize results + + +.roo/temp/pr-123/pattern-analysis.md + + + +.roo/temp/pr-123/architecture-review.md + + + +.roo/temp/pr-123/test-analysis.md + + + + +.roo/temp/pr-123/review-context.json + +{ + ...existing, + "findings": { + "critical": ["Missing error handling in auth.ts"], + "patterns": ["Inconsistent naming convention"], + "redundancy": ["Duplicate validation logic"], + "architecture": [], + "tests": ["Missing test for edge case"] + }, + "reviewStatus": "completed" +} + + + ]]> + + + \ No newline at end of file diff --git a/.roo/rules/rules.md b/.roo/rules/rules.md index d3795393f335..aad45c9bc64a 100644 --- a/.roo/rules/rules.md +++ b/.roo/rules/rules.md @@ -6,6 +6,12 @@ - Ensure all tests pass before submitting changes - The vitest framework is used for testing; the `describe`, `test`, `it`, etc functions are defined by default in `tsconfig.json` and therefore don't need to be imported - Tests must be run from the same directory as the `package.json` file that specifies `vitest` in `devDependencies` + - Run tests with: `npx vitest ` + - Do NOT run tests from project root - this causes "vitest: command not found" error + - Tests must be run from inside the correct workspace: + - Backend tests: `cd src && npx vitest path/to/test-file` (don't include `src/` in path) + - UI tests: `cd webview-ui && npx vitest src/path/to/test-file` + - Example: For `src/tests/user.test.ts`, run `cd src && npx vitest tests/user.test.ts` NOT `npx vitest src/tests/user.test.ts` 2. Lint Rules: @@ -15,7 +21,3 @@ - Use Tailwind CSS classes instead of inline style objects for new markup - VSCode CSS variables must be added to webview-ui/src/index.css before using them in Tailwind classes - Example: `
` instead of style objects - -# Adding a New Setting - -To add a new setting that persists its state, follow the steps in docs/settings.md diff --git a/.roomodes b/.roomodes index 236026c73877..4ae740a6b48e 100644 --- a/.roomodes +++ b/.roomodes @@ -1,7 +1,7 @@ customModes: - slug: mode-writer name: ✍️ Mode Writer - roleDefinition: >- + roleDefinition: |- You are Roo, a mode creation specialist focused on designing and implementing custom modes for the Roo-Code project. Your expertise includes: - Understanding the mode system architecture and configuration - Creating well-structured mode definitions with clear roles and responsibilities @@ -9,7 +9,7 @@ customModes: - Ensuring modes have appropriate tool group permissions - Crafting clear whenToUse descriptions for the Orchestrator - Following XML structuring best practices for clarity and parseability - + You help users create new modes by: - Gathering requirements about the mode's purpose and workflow - Defining appropriate roleDefinition and whenToUse descriptions @@ -17,8 +17,7 @@ customModes: - Creating detailed XML instruction files in the .roo folder - Ensuring instructions are well-organized with proper XML tags - Following established patterns from existing modes - whenToUse: >- - Use this mode when you need to create a new custom mode. + whenToUse: Use this mode when you need to create a new custom mode. groups: - read - - edit @@ -29,30 +28,11 @@ customModes: source: project - slug: test name: 🧪 Test - roleDefinition: >- - You are Roo, a Vitest testing specialist with deep expertise in: - - Writing and maintaining Vitest test suites - - Test-driven development (TDD) practices - - Mocking and stubbing with Vitest - - Integration testing strategies - - TypeScript testing patterns - - Code coverage analysis - - Test performance optimization - - Your focus is on maintaining high test quality and coverage across the codebase, working primarily with: - - Test files in __tests__ directories - - Mock implementations in __mocks__ - - Test utilities and helpers - - Vitest configuration and setup - - You ensure tests are: - - Well-structured and maintainable - - Following Vitest best practices - - Properly typed with TypeScript - - Providing meaningful coverage - - Using appropriate mocking strategies - whenToUse: >- - Use this mode when you need to write, modify, or maintain tests for the codebase. + roleDefinition: |- + You are Roo, a Vitest testing specialist with deep expertise in: - Writing and maintaining Vitest test suites - Test-driven development (TDD) practices - Mocking and stubbing with Vitest - Integration testing strategies - TypeScript testing patterns - Code coverage analysis - Test performance optimization + Your focus is on maintaining high test quality and coverage across the codebase, working primarily with: - Test files in __tests__ directories - Mock implementations in __mocks__ - Test utilities and helpers - Vitest configuration and setup + You ensure tests are: - Well-structured and maintainable - Following Vitest best practices - Properly typed with TypeScript - Providing meaningful coverage - Using appropriate mocking strategies + whenToUse: Use this mode when you need to write, modify, or maintain tests for the codebase. 
groups: - read - browser @@ -74,12 +54,7 @@ customModes: - Tests must be run from the same directory as the `package.json` file that specifies `vitest` in `devDependencies` - slug: design-engineer name: 🎨 Design Engineer - roleDefinition: >- - You are Roo, an expert Design Engineer focused on VSCode Extension development. Your expertise includes: - - Implementing UI designs with high fidelity using React, Shadcn, Tailwind and TypeScript. - - Ensuring interfaces are responsive and adapt to different screen sizes. - - Collaborating with team members to translate broad directives into robust and detailed designs capturing edge cases. - - Maintaining uniformity and consistency across the user interface. + roleDefinition: "You are Roo, an expert Design Engineer focused on VSCode Extension development. Your expertise includes: - Implementing UI designs with high fidelity using React, Shadcn, Tailwind and TypeScript. - Ensuring interfaces are responsive and adapt to different screen sizes. - Collaborating with team members to translate broad directives into robust and detailed designs capturing edge cases. - Maintaining uniformity and consistency across the user interface." groups: - read - - edit @@ -93,32 +68,12 @@ customModes: - slug: release-engineer name: 🚀 Release Engineer roleDefinition: You are Roo, a release engineer specialized in automating the release process for software projects. You have expertise in version control, changelogs, release notes, creating changesets, and coordinating with translation teams to ensure a smooth release process. - customInstructions: >- - When preparing a release: - 1. Identify the SHA corresponding to the most recent release using GitHub CLI: `gh release view --json tagName,targetCommitish,publishedAt ` - 2. Analyze changes since the last release using: `gh pr list --state merged --json number,title,author,url,mergedAt --limit 1000 -q '[.[] | select(.mergedAt > "TIMESTAMP") | {number, title, author: .author.login, url, mergedAt}] | sort_by(.number)'` - 3. Summarize the changes and ask the user whether this should be a major, minor, or patch release - 4. Create a changeset in .changeset/v[version].md instead of directly modifying package.json. The format is: - - ``` - --- - "roo-cline": patch|minor|major - --- - - [list of changes] - ``` - - - Always include contributor attribution using format: (thanks @username!) - - Provide brief descriptions of each item to explain the change - - Order the list from most important to least important - - Example: "- Add support for Gemini 2.5 Pro caching (thanks @contributor!)" - - CRITICAL: Include EVERY SINGLE PR in the changeset - don't assume you know which ones are important. Count the total PRs to verify completeness and cross-reference the list to ensure nothing is missed. - - 5. If a major or minor release, update the English version relevant announcement files and documentation (webview-ui/src/components/chat/Announcement.tsx, README.md, and the `latestAnnouncementId` in src/core/webview/ClineProvider.ts) - 6. Ask the user to confirm the English version - 7. Use the new_task tool to create a subtask in `translate` mode with detailed instructions of which content needs to be translated into all supported languages - 8. Commit and push the changeset file to the repository - 9. The GitHub Actions workflow will automatically: + customInstructions: |- + When preparing a release: 1. 
Identify the SHA corresponding to the most recent release using GitHub CLI: `gh release view --json tagName,targetCommitish,publishedAt ` 2. Analyze changes since the last release using: `gh pr list --state merged --json number,title,author,url,mergedAt --limit 1000 -q '[.[] | select(.mergedAt > "TIMESTAMP") | {number, title, author: .author.login, url, mergedAt}] | sort_by(.number)'` 3. Summarize the changes and ask the user whether this should be a major, minor, or patch release 4. Create a changeset in .changeset/v[version].md instead of directly modifying package.json. The format is: + ``` --- "roo-cline": patch|minor|major --- + [list of changes] ``` + - Always include contributor attribution using format: (thanks @username!) - Provide brief descriptions of each item to explain the change - Order the list from most important to least important - Example: "- Add support for Gemini 2.5 Pro caching (thanks @contributor!)" - CRITICAL: Include EVERY SINGLE PR in the changeset - don't assume you know which ones are important. Count the total PRs to verify completeness and cross-reference the list to ensure nothing is missed. + 5. If a major or minor release, update the English version relevant announcement files and documentation (webview-ui/src/components/chat/Announcement.tsx, README.md, and the `latestAnnouncementId` in src/core/webview/ClineProvider.ts) 6. Ask the user to confirm the English version 7. Use the new_task tool to create a subtask in `translate` mode with detailed instructions of which content needs to be translated into all supported languages 8. Commit and push the changeset file to the repository 9. The GitHub Actions workflow will automatically: - Create a version bump PR when changesets are merged to main - Update the CHANGELOG.md with proper formatting - Publish the release when the version bump PR is merged @@ -140,7 +95,7 @@ customModes: source: project - slug: issue-fixer name: 🔧 Issue Fixer - roleDefinition: >- + roleDefinition: |- You are a GitHub issue resolution specialist focused on fixing bugs and implementing feature requests from GitHub issues. Your expertise includes: - Analyzing GitHub issues to understand requirements and acceptance criteria - Exploring codebases to identify all affected files and dependencies @@ -148,30 +103,20 @@ customModes: - Building new features based on detailed proposals - Ensuring all acceptance criteria are met before completion - Creating pull requests with proper documentation - - Handling PR review feedback and implementing requested changes - - Making concise, human-sounding GitHub comments that focus on technical substance + - Using GitHub CLI for all GitHub operations - You work with issues from the RooCodeInc/Roo-Code repository, transforming them into working code that addresses all requirements while maintaining code quality and consistency. You also handle partial workflows for existing PRs when changes are requested by maintainers or users through the review process. - whenToUse: Use this mode when you have a GitHub issue (bug report or feature request) that needs to be fixed or implemented, OR when you need to address feedback on an existing pull request. Provide the issue number, PR number, or URL, and this mode will guide you through understanding the requirements, implementing the solution, and preparing for submission or updates. + You work with issues from any GitHub repository, transforming them into working code that addresses all requirements while maintaining code quality and consistency. 
You use the GitHub CLI (gh) for all GitHub operations instead of MCP tools. + whenToUse: Use this mode when you have a GitHub issue (bug report or feature request) that needs to be fixed or implemented. Provide the issue URL, and this mode will guide you through understanding the requirements, implementing the solution, and preparing for submission. groups: - read - edit - command - - mcp source: project - slug: issue-writer name: 📝 Issue Writer - roleDefinition: >- - You are Roo, a GitHub issue creation specialist focused on crafting well-structured, detailed issues based on the project's issue templates. Your expertise includes: - - Understanding and analyzing user requirements for bug reports and feature requests - - Exploring codebases thoroughly to gather relevant technical context - - Creating comprehensive GitHub issues following XML-based templates - - Ensuring issues contain all necessary information for developers - - Using GitHub MCP tools to create issues programmatically - - You work with two primary issue types: - - Bug Reports: Documenting reproducible bugs with clear steps and expected outcomes - - Feature Proposals: Creating detailed, actionable feature requests with clear problem statements, solutions, and acceptance criteria + roleDefinition: |- + You are Roo, a GitHub issue creation specialist focused on crafting well-structured, detailed issues based on the project's issue templates. Your expertise includes: - Understanding and analyzing user requirements for bug reports and feature requests - Exploring codebases thoroughly to gather relevant technical context - Creating comprehensive GitHub issues following XML-based templates - Ensuring issues contain all necessary information for developers - Using GitHub MCP tools to create issues programmatically + You work with two primary issue types: - Bug Reports: Documenting reproducible bugs with clear steps and expected outcomes - Feature Proposals: Creating detailed, actionable feature requests with clear problem statements, solutions, and acceptance criteria whenToUse: Use this mode when you need to create a GitHub issue for bug reports or feature requests. This mode will guide you through gathering all necessary information, exploring the codebase for context, and creating a well-structured issue in the RooCodeInc/Roo-Code repository. 
groups: - read @@ -180,27 +125,10 @@ customModes: source: project - slug: integration-tester name: 🧪 Integration Tester - roleDefinition: >- - You are Roo, an integration testing specialist focused on VSCode E2E tests with expertise in: - - Writing and maintaining integration tests using Mocha and VSCode Test framework - - Testing Roo Code API interactions and event-driven workflows - - Creating complex multi-step task scenarios and mode switching sequences - - Validating message formats, API responses, and event emission patterns - - Test data generation and fixture management - - Coverage analysis and test scenario identification - - Your focus is on ensuring comprehensive integration test coverage for the Roo Code extension, working primarily with: - - E2E test files in apps/vscode-e2e/src/suite/ - - Test utilities and helpers - - API type definitions in packages/types/ - - Extension API testing patterns - - You ensure integration tests are: - - Comprehensive and cover critical user workflows - - Following established Mocha TDD patterns - - Using async/await with proper timeout handling - - Validating both success and failure scenarios - - Properly typed with TypeScript + roleDefinition: |- + You are Roo, an integration testing specialist focused on VSCode E2E tests with expertise in: - Writing and maintaining integration tests using Mocha and VSCode Test framework - Testing Roo Code API interactions and event-driven workflows - Creating complex multi-step task scenarios and mode switching sequences - Validating message formats, API responses, and event emission patterns - Test data generation and fixture management - Coverage analysis and test scenario identification + Your focus is on ensuring comprehensive integration test coverage for the Roo Code extension, working primarily with: - E2E test files in apps/vscode-e2e/src/suite/ - Test utilities and helpers - API type definitions in packages/types/ - Extension API testing patterns + You ensure integration tests are: - Comprehensive and cover critical user workflows - Following established Mocha TDD patterns - Using async/await with proper timeout handling - Validating both success and failure scenarios - Properly typed with TypeScript groups: - read - command @@ -210,32 +138,36 @@ customModes: source: project - slug: pr-reviewer name: 🔍 PR Reviewer - roleDefinition: >- - You are Roo, a pull request reviewer specializing in code quality, structure, and translation consistency. Your expertise includes: - - Analyzing pull request diffs and understanding code changes in context - - Evaluating code quality, identifying code smells and technical debt - - Ensuring structural consistency across the codebase - - Verifying proper internationalization (i18n) for UI changes - - Providing constructive feedback with a friendly, curious tone - - Reviewing test coverage and quality without executing tests - - Identifying opportunities for code improvements and refactoring + roleDefinition: |- + You are Roo, a critical pull request review orchestrator specializing in code quality, architectural consistency, and codebase organization. 
Your expertise includes: + - Orchestrating comprehensive PR reviews by delegating specialized analysis tasks + - Analyzing pull request diffs with a critical eye for code organization and patterns + - Evaluating whether changes follow established codebase patterns and conventions + - Identifying redundant or duplicate code that already exists elsewhere + - Ensuring tests are properly organized with other similar tests + - Verifying that new features follow patterns established by similar existing features + - Detecting code smells, technical debt, and architectural inconsistencies + - Delegating deep codebase analysis to specialized modes when needed + - Maintaining context through structured report files in .roo/temp/pr-[number]/ + - Ensuring proper internationalization (i18n) for UI changes + - Providing direct, constructive feedback that improves code quality + - Being appropriately critical to maintain high code standards + - Using GitHub CLI when MCP tools are unavailable - You work primarily with the RooCodeInc/Roo-Code repository, using GitHub MCP tools to fetch and review pull requests. You check out PRs locally for better context understanding and focus on providing actionable, constructive feedback that helps improve code quality. - whenToUse: Use this mode to review pull requests on the Roo-Code GitHub repository or any other repository if specified by the user. + You work primarily with the RooCodeInc/Roo-Code repository, creating context reports to track findings and delegating complex pattern analysis to specialized modes while maintaining overall review coordination. When called by other modes (Issue Fixer, PR Fixer), you focus only on analysis without commenting on the PR. + whenToUse: Use this mode to critically review pull requests, focusing on code organization, pattern consistency, and identifying redundancy or architectural issues. This mode orchestrates complex analysis tasks while maintaining review context. groups: - read - - edit - - fileRegex: \.md$ - description: Markdown files only + - fileRegex: (\.md$|\.roo/temp/pr-.*\.(json|md|txt)$) + description: Markdown files and PR review context files - mcp - command source: project - slug: docs-extractor name: 📚 Docs Extractor - roleDefinition: >- - You are Roo, a comprehensive documentation extraction specialist focused on analyzing and documenting all technical and non-technical information about features and components within codebases. - whenToUse: >- - Use this mode when you need to extract comprehensive documentation about any feature, component, or aspect of a codebase. + roleDefinition: You are Roo, a comprehensive documentation extraction specialist focused on analyzing and documenting all technical and non-technical information about features and components within codebases. + whenToUse: Use this mode when you need to extract comprehensive documentation about any feature, component, or aspect of a codebase. groups: - read - - edit @@ -245,18 +177,34 @@ customModes: - mcp - slug: pr-fixer name: 🛠️ PR Fixer - roleDefinition: "You are Roo, a pull request resolution specialist. Your focus - is on addressing feedback and resolving issues within existing pull - requests. Your expertise includes: - Analyzing PR review comments to - understand required changes. - Checking CI/CD workflow statuses to - identify failing tests. - Fetching and analyzing test logs to diagnose - failures. - Identifying and resolving merge conflicts. - Guiding the user - through the resolution process." 
- whenToUse: Use this mode to fix pull requests. It can analyze PR feedback from - GitHub, check for failing tests, and help resolve merge conflicts before - applying the necessary code changes. + roleDefinition: "You are Roo, a pull request resolution specialist. Your focus is on addressing feedback and resolving issues within existing pull requests. Your expertise includes: - Analyzing PR review comments to understand required changes. - Checking CI/CD workflow statuses to identify failing tests. - Fetching and analyzing test logs to diagnose failures. - Identifying and resolving merge conflicts. - Guiding the user through the resolution process." + whenToUse: Use this mode to fix pull requests. It can analyze PR feedback from GitHub, check for failing tests, and help resolve merge conflicts before applying the necessary code changes. groups: - read - edit - command - mcp + - slug: issue-fixer-orchestrator + name: 🔧 Issue Fixer Orchestrator + roleDefinition: |- + You are an orchestrator for fixing GitHub issues. Your primary role is to coordinate a series of specialized subtasks to resolve an issue from start to finish. + **Your Orchestration Responsibilities:** - Delegate analysis, implementation, and testing to specialized subtasks using the `new_task` tool. - Manage the workflow and pass context between steps using temporary files. - Present plans, results, and pull requests to the user for approval at key milestones. + **Your Core Expertise Includes:** - Analyzing GitHub issues to understand requirements and acceptance criteria. - Exploring codebases to identify all affected files and dependencies. - Guiding the implementation of high-quality fixes and features. - Ensuring comprehensive test coverage. - Overseeing the creation of well-documented pull requests. - Using the GitHub CLI (gh) for all final GitHub operations like creating a pull request. + whenToUse: Use this mode to orchestrate the process of fixing a GitHub issue. Provide a GitHub issue URL, and this mode will coordinate a series of subtasks to analyze the issue, explore the code, create a plan, implement the solution, and prepare a pull request. + groups: + - read + - edit + - command + source: project + - slug: pr-fixer-orchestrator + name: 🛠️ PR Fixer Orchestrator + roleDefinition: |- + You are an orchestrator for fixing pull requests. Your primary role is to coordinate a series of specialized subtasks to resolve PR issues from start to finish, whether or not the PR has existing context from issue fixing. + **Your Orchestration Responsibilities:** - Delegate analysis, implementation, testing, and review to specialized subtasks using the `new_task` tool. - Manage the workflow and pass context between steps using temporary files. - Present findings, plans, and results to the user for approval at key milestones. - Ensure the PR branch is properly synced with main and ready for merge. + **Your Core Expertise Includes:** - Analyzing PR feedback, failing tests, and merge conflicts. - Understanding the underlying issue or feature being implemented. - Exploring codebases to identify all affected files and dependencies. - Understanding CI/CD pipeline failures and test results. - Coordinating code fixes based on review comments. - Managing git operations including rebases and conflict resolution. - Ensuring proper testing and validation of changes. - Overseeing PR review before final submission. - Using GitHub CLI (gh) for all GitHub operations. + whenToUse: Use this mode to orchestrate the process of fixing a pull request. 
Provide a GitHub PR URL or number, and this mode will coordinate a series of subtasks to analyze the PR issues, understand the underlying requirements, implement fixes, resolve conflicts, test changes, and ensure the PR is ready for merge. This mode works independently and does not require any pre-existing context files. + groups: + - read + - edit + - command + source: project diff --git a/CHANGELOG.md b/CHANGELOG.md index f5fe9aef4c1c..9a629487e0a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,30 @@ # Roo Code Changelog +## [3.22.6] - 2025-07-02 + +- Add timer-based auto approve for follow up questions (thanks @liwilliam2021!) +- Add import/export modes functionality +- Add persistent version indicator on chat screen +- Add automatic configuration import on extension startup (thanks @takakoutso!) +- Add user-configurable search score threshold slider for semantic search (thanks @hannesrudolph!) +- Add default headers and testing for litellm fetcher (thanks @andrewshu2000!) +- Fix consistent cancellation error messages for thinking vs streaming phases +- Fix AWS Bedrock cross-region inference profile mapping (thanks @KevinZhao!) +- Fix URL loading timeout issues in @ mentions (thanks @MuriloFP!) +- Fix API retry exponential backoff capped at 10 minutes (thanks @MuriloFP!) +- Fix Qdrant URL field auto-filling with default value (thanks @SannidhyaSah!) +- Fix profile context condensation threshold (thanks @PaperBoardOfficial!) +- Fix apply_diff tool documentation for multi-file capabilities +- Fix cache files excluded from rules compilation (thanks @MuriloFP!) +- Add streamlined extension installation and documentation (thanks @devxpain!) +- Prevent Architect mode from providing time estimates +- Remove context size from environment details +- Change default mode to architect for new installations +- Suppress Mermaid error rendering +- Improve Mermaid buttons with light background in light mode (thanks @chrarnoldus!) +- Add .vscode/ to write-protected files/directories +- Update AWS Bedrock cross-region inference profile mapping (thanks @KevinZhao!) + ## [3.22.5] - 2025-06-28 - Remove Gemini CLI provider while we work with Google on a better integration diff --git a/README.md b/README.md index 48055236e62a..4d6e5cd9db9f 100644 --- a/README.md +++ b/README.md @@ -49,13 +49,13 @@ Check out the [CHANGELOG](CHANGELOG.md) for detailed updates and fixes. --- -## 🎉 Roo Code 3.21 Released +## 🎉 Roo Code 3.22 Released -Roo Code 3.21 brings major new features and improvements based on your feedback! +Roo Code 3.22 brings powerful new features and significant improvements to enhance your development workflow! -- **Roo Marketplace Launch** - The marketplace is now live! The marketplace is now live! Discover and install modes and MCPs easier than ever before. -- **Gemini 2.5 Models** - Added support for new Gemini 2.5 Pro, Flash, and Flash Lite models. -- **Excel File Support & More** - Added Excel (.xlsx) file support and numerous bug fixes and improvements! +- **1-Click Task Sharing** - Share your tasks instantly with colleagues and the community with a single click. +- **Global .roo Directory Support** - Load rules and configurations from a global .roo directory for consistent settings across projects. +- **Improved Architect to Code Transitions** - Seamless handoffs from planning in Architect mode to implementation in Code mode. --- @@ -138,22 +138,54 @@ pnpm install 3. 
**Run the extension**: -Press `F5` (or **Run** → **Start Debugging**) in VSCode to open a new window with Roo Code running. +There are several ways to run the Roo Code extension: -Changes to the webview will appear immediately. Changes to the core extension will require a restart of the extension host. +### Development Mode (F5) -Alternatively you can build a .vsix and install it directly in VSCode: +For active development, use VSCode's built-in debugging: -```sh -pnpm vsix -``` +Press `F5` (or go to **Run** → **Start Debugging**) in VSCode. This will open a new VSCode window with the Roo Code extension running. + +- Changes to the webview will appear immediately. +- Changes to the core extension will also hot reload automatically. + +### Automated VSIX Installation -A `.vsix` file will appear in the `bin/` directory which can be installed with: +To build and install the extension as a VSIX package directly into VSCode: ```sh -code --install-extension bin/roo-cline-.vsix +pnpm install:vsix [-y] [--editor=] ``` +This command will: + +- Ask which editor command to use (code/cursor/code-insiders) - defaults to 'code' +- Uninstall any existing version of the extension. +- Build the latest VSIX package. +- Install the newly built VSIX. +- Prompt you to restart VS Code for changes to take effect. + +Options: + +- `-y`: Skip all confirmation prompts and use defaults +- `--editor=`: Specify the editor command (e.g., `--editor=cursor` or `--editor=code-insiders`) + +### Manual VSIX Installation + +If you prefer to install the VSIX package manually: + +1. First, build the VSIX package: + ```sh + pnpm vsix + ``` +2. A `.vsix` file will be generated in the `bin/` directory (e.g., `bin/roo-cline-.vsix`). +3. Install it manually using the VSCode CLI: + ```sh + code --install-extension bin/roo-cline-.vsix + ``` + +--- + We use [changesets](https://github.com/changesets/changesets) for versioning and publishing. Check our `CHANGELOG.md` for release notes. --- @@ -176,41 +208,42 @@ Thanks to all our contributors who have helped make Roo Code better! -| mrubens
mrubens
| saoudrizwan
saoudrizwan
| cte
cte
| samhvw8
samhvw8
| daniel-lxs
daniel-lxs
| hannesrudolph
hannesrudolph
| -| :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| KJ7LNW
KJ7LNW
| a8trejo
a8trejo
| ColemanRoo
ColemanRoo
| canrobins13
canrobins13
| stea9499
stea9499
| joemanley201
joemanley201
| -| System233
System233
| jr
jr
| nissa-seru
nissa-seru
| jquanton
jquanton
| NyxJae
NyxJae
| MuriloFP
MuriloFP
| -| elianiva
elianiva
| d-oit
d-oit
| punkpeye
punkpeye
| wkordalski
wkordalski
| xyOz-dev
xyOz-dev
| feifei325
feifei325
| -| qdaxb
qdaxb
| zhangtony239
zhangtony239
| cannuri
cannuri
| monotykamary
monotykamary
| sachasayan
sachasayan
| Smartsheet-JB-Brown
Smartsheet-JB-Brown
| -| dtrugman
dtrugman
| lloydchang
lloydchang
| pugazhendhi-m
pugazhendhi-m
| shariqriazz
shariqriazz
| vigneshsubbiah16
vigneshsubbiah16
| chrarnoldus
chrarnoldus
| -| Szpadel
Szpadel
| lupuletic
lupuletic
| kiwina
kiwina
| Premshay
Premshay
| psv2522
psv2522
| olweraltuve
olweraltuve
| -| diarmidmackenzie
diarmidmackenzie
| PeterDaveHello
PeterDaveHello
| aheizi
aheizi
| hassoncs
hassoncs
| ChuKhaLi
ChuKhaLi
| nbihan-mediware
nbihan-mediware
| -| RaySinner
RaySinner
| afshawnlotfi
afshawnlotfi
| StevenTCramer
StevenTCramer
| SannidhyaSah
SannidhyaSah
| pdecat
pdecat
| noritaka1166
noritaka1166
| -| kyle-apex
kyle-apex
| emshvac
emshvac
| Lunchb0ne
Lunchb0ne
| SmartManoj
SmartManoj
| vagadiya
vagadiya
| slytechnical
slytechnical
| -| dleffel
dleffel
| arthurauffray
arthurauffray
| upamune
upamune
| NamesMT
NamesMT
| taylorwilsdon
taylorwilsdon
| sammcj
sammcj
| -| Ruakij
Ruakij
| p12tic
p12tic
| gtaylor
gtaylor
| aitoroses
aitoroses
| axkirillov
axkirillov
| ross
ross
| -| mr-ryan-james
mr-ryan-james
| heyseth
heyseth
| taisukeoe
taisukeoe
| avtc
avtc
| dlab-anton
dlab-anton
| eonghk
eonghk
| -| kcwhite
kcwhite
| ronyblum
ronyblum
| teddyOOXX
teddyOOXX
| vincentsong
vincentsong
| yongjer
yongjer
| zeozeozeo
zeozeozeo
| -| ashktn
ashktn
| franekp
franekp
| yt3trees
yt3trees
| benzntech
benzntech
| anton-otee
anton-otee
| bramburn
bramburn
| -| olearycrew
olearycrew
| brunobergher
brunobergher
| catrielmuller
catrielmuller
| snoyiatk
snoyiatk
| GitlyHallows
GitlyHallows
| jcbdev
jcbdev
| -| Chenjiayuan195
Chenjiayuan195
| julionav
julionav
| KanTakahiro
KanTakahiro
| SplittyDev
SplittyDev
| mdp
mdp
| napter
napter
| -| philfung
philfung
| dairui1
dairui1
| dqroid
dqroid
| forestyoo
forestyoo
| GOODBOY008
GOODBOY008
| hatsu38
hatsu38
| -| hongzio
hongzio
| im47cn
im47cn
| shoopapa
shoopapa
| jwcraig
jwcraig
| kinandan
kinandan
| nevermorec
nevermorec
| -| bannzai
bannzai
| axmo
axmo
| asychin
asychin
| amittell
amittell
| Yoshino-Yukitaro
Yoshino-Yukitaro
| Yikai-Liao
Yikai-Liao
| -| zxdvd
zxdvd
| vladstudio
vladstudio
| tmsjngx0
tmsjngx0
| tgfjt
tgfjt
| maekawataiki
maekawataiki
| PretzelVector
PretzelVector
| -| zetaloop
zetaloop
| cdlliuy
cdlliuy
| user202729
user202729
| student20880
student20880
| shohei-ihaya
shohei-ihaya
| shivamd1810
shivamd1810
| -| shaybc
shaybc
| seedlord
seedlord
| samir-nimbly
samir-nimbly
| robertheadley
robertheadley
| refactorthis
refactorthis
| qingyuan1109
qingyuan1109
| -| pokutuna
pokutuna
| philipnext
philipnext
| village-way
village-way
| oprstchn
oprstchn
| nobu007
nobu007
| mosleyit
mosleyit
| -| moqimoqidea
moqimoqidea
| mlopezr
mlopezr
| mecab
mecab
| olup
olup
| lightrabbit
lightrabbit
| kohii
kohii
| -| linegel
linegel
| edwin-truthsearch-io
edwin-truthsearch-io
| EamonNerbonne
EamonNerbonne
| dbasclpy
dbasclpy
| dflatline
dflatline
| Deon588
Deon588
| -| dleen
dleen
| devxpain
devxpain
| CW-B-W
CW-B-W
| chadgauth
chadgauth
| thecolorblue
thecolorblue
| bogdan0083
bogdan0083
| -| benashby
benashby
| Atlogit
Atlogit
| atlasgong
atlasgong
| andreastempsch
andreastempsch
| alasano
alasano
| QuinsZouls
QuinsZouls
| -| HadesArchitect
HadesArchitect
| alarno
alarno
| nexon33
nexon33
| adilhafeez
adilhafeez
| adamwlarson
adamwlarson
| adamhill
adamhill
| -| AMHesch
AMHesch
| AlexandruSmirnov
AlexandruSmirnov
| samsilveira
samsilveira
| 01Rian
01Rian
| RSO
RSO
| SECKainersdorfer
SECKainersdorfer
| -| R-omk
R-omk
| Sarke
Sarke
| OlegOAndreev
OlegOAndreev
| kvokka
kvokka
| ecmasx
ecmasx
| mollux
mollux
| -| marvijo-code
marvijo-code
| markijbema
markijbema
| mamertofabian
mamertofabian
| monkeyDluffy6017
monkeyDluffy6017
| libertyteeth
libertyteeth
| shtse8
shtse8
| -| Rexarrior
Rexarrior
| ksze
ksze
| Jdo300
Jdo300
| hesara
hesara
| DeXtroTip
DeXtroTip
| pfitz
pfitz
| -| ExactDoug
ExactDoug
| celestial-vault
celestial-vault
| | | | | +| mrubens
mrubens
| saoudrizwan
saoudrizwan
| cte
cte
| samhvw8
samhvw8
| daniel-lxs
daniel-lxs
| hannesrudolph
hannesrudolph
| +| :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| KJ7LNW
KJ7LNW
| a8trejo
a8trejo
| ColemanRoo
ColemanRoo
| canrobins13
canrobins13
| stea9499
stea9499
| joemanley201
joemanley201
| +| System233
System233
| jr
jr
| MuriloFP
MuriloFP
| nissa-seru
nissa-seru
| jquanton
jquanton
| NyxJae
NyxJae
| +| elianiva
elianiva
| d-oit
d-oit
| punkpeye
punkpeye
| wkordalski
wkordalski
| xyOz-dev
xyOz-dev
| qdaxb
qdaxb
| +| feifei325
feifei325
| zhangtony239
zhangtony239
| Smartsheet-JB-Brown
Smartsheet-JB-Brown
| monotykamary
monotykamary
| sachasayan
sachasayan
| cannuri
cannuri
| +| vigneshsubbiah16
vigneshsubbiah16
| shariqriazz
shariqriazz
| pugazhendhi-m
pugazhendhi-m
| lloydchang
lloydchang
| dtrugman
dtrugman
| chrarnoldus
chrarnoldus
| +| Szpadel
Szpadel
| diarmidmackenzie
diarmidmackenzie
| olweraltuve
olweraltuve
| psv2522
psv2522
| Premshay
Premshay
| kiwina
kiwina
| +| lupuletic
lupuletic
| aheizi
aheizi
| SannidhyaSah
SannidhyaSah
| PeterDaveHello
PeterDaveHello
| hassoncs
hassoncs
| ChuKhaLi
ChuKhaLi
| +| nbihan-mediware
nbihan-mediware
| RaySinner
RaySinner
| afshawnlotfi
afshawnlotfi
| dleffel
dleffel
| StevenTCramer
StevenTCramer
| pdecat
pdecat
| +| noritaka1166
noritaka1166
| kyle-apex
kyle-apex
| emshvac
emshvac
| Lunchb0ne
Lunchb0ne
| SmartManoj
SmartManoj
| vagadiya
vagadiya
| +| slytechnical
slytechnical
| arthurauffray
arthurauffray
| upamune
upamune
| NamesMT
NamesMT
| taylorwilsdon
taylorwilsdon
| sammcj
sammcj
| +| Ruakij
Ruakij
| p12tic
p12tic
| gtaylor
gtaylor
| aitoroses
aitoroses
| axkirillov
axkirillov
| ross
ross
| +| mr-ryan-james
mr-ryan-james
| heyseth
heyseth
| taisukeoe
taisukeoe
| liwilliam2021
liwilliam2021
| avtc
avtc
| dlab-anton
dlab-anton
| +| eonghk
eonghk
| kcwhite
kcwhite
| ronyblum
ronyblum
| teddyOOXX
teddyOOXX
| vincentsong
vincentsong
| yongjer
yongjer
| +| zeozeozeo
zeozeozeo
| ashktn
ashktn
| franekp
franekp
| yt3trees
yt3trees
| benzntech
benzntech
| anton-otee
anton-otee
| +| bramburn
bramburn
| olearycrew
olearycrew
| brunobergher
brunobergher
| catrielmuller
catrielmuller
| devxpain
devxpain
| snoyiatk
snoyiatk
| +| GitlyHallows
GitlyHallows
| jcbdev
jcbdev
| Chenjiayuan195
Chenjiayuan195
| julionav
julionav
| KanTakahiro
KanTakahiro
| SplittyDev
SplittyDev
| +| mdp
mdp
| napter
napter
| philfung
philfung
| dairui1
dairui1
| dqroid
dqroid
| forestyoo
forestyoo
| +| GOODBOY008
GOODBOY008
| hatsu38
hatsu38
| hongzio
hongzio
| im47cn
im47cn
| shoopapa
shoopapa
| jwcraig
jwcraig
| +| kinandan
kinandan
| nevermorec
nevermorec
| bannzai
bannzai
| axmo
axmo
| asychin
asychin
| amittell
amittell
| +| Yoshino-Yukitaro
Yoshino-Yukitaro
| Yikai-Liao
Yikai-Liao
| zxdvd
zxdvd
| vladstudio
vladstudio
| tmsjngx0
tmsjngx0
| tgfjt
tgfjt
| +| maekawataiki
maekawataiki
| AlexandruSmirnov
AlexandruSmirnov
| PretzelVector
PretzelVector
| zetaloop
zetaloop
| cdlliuy
cdlliuy
| user202729
user202729
| +| takakoutso
takakoutso
| student20880
student20880
| shohei-ihaya
shohei-ihaya
| shivamd1810
shivamd1810
| shaybc
shaybc
| seedlord
seedlord
| +| samir-nimbly
samir-nimbly
| robertheadley
robertheadley
| refactorthis
refactorthis
| qingyuan1109
qingyuan1109
| pokutuna
pokutuna
| philipnext
philipnext
| +| village-way
village-way
| oprstchn
oprstchn
| nobu007
nobu007
| mosleyit
mosleyit
| moqimoqidea
moqimoqidea
| mlopezr
mlopezr
| +| mecab
mecab
| olup
olup
| lightrabbit
lightrabbit
| kohii
kohii
| celestial-vault
celestial-vault
| linegel
linegel
| +| edwin-truthsearch-io
edwin-truthsearch-io
| EamonNerbonne
EamonNerbonne
| dbasclpy
dbasclpy
| dflatline
dflatline
| Deon588
Deon588
| dleen
dleen
| +| CW-B-W
CW-B-W
| chadgauth
chadgauth
| thecolorblue
thecolorblue
| bogdan0083
bogdan0083
| benashby
benashby
| Atlogit
Atlogit
| +| atlasgong
atlasgong
| andrewshu2000
andrewshu2000
| andreastempsch
andreastempsch
| alasano
alasano
| QuinsZouls
QuinsZouls
| HadesArchitect
HadesArchitect
| +| alarno
alarno
| nexon33
nexon33
| adilhafeez
adilhafeez
| adamwlarson
adamwlarson
| adamhill
adamhill
| AMHesch
AMHesch
| +| samsilveira
samsilveira
| 01Rian
01Rian
| RSO
RSO
| SECKainersdorfer
SECKainersdorfer
| R-omk
R-omk
| Sarke
Sarke
| +| PaperBoardOfficial
PaperBoardOfficial
| OlegOAndreev
OlegOAndreev
| kvokka
kvokka
| ecmasx
ecmasx
| mollux
mollux
| marvijo-code
marvijo-code
| +| markijbema
markijbema
| mamertofabian
mamertofabian
| monkeyDluffy6017
monkeyDluffy6017
| libertyteeth
libertyteeth
| shtse8
shtse8
| Rexarrior
Rexarrior
| +| KevinZhao
KevinZhao
| ksze
ksze
| Fovty
Fovty
| Jdo300
Jdo300
| hesara
hesara
| DeXtroTip
DeXtroTip
| +| pfitz
pfitz
| ExactDoug
ExactDoug
| | | | | diff --git a/apps/web-roo-code/src/app/enterprise/page.tsx b/apps/web-roo-code/src/app/enterprise/page.tsx index c89d2abd1191..d2c38fba0577 100644 --- a/apps/web-roo-code/src/app/enterprise/page.tsx +++ b/apps/web-roo-code/src/app/enterprise/page.tsx @@ -1,9 +1,10 @@ -import { Code, CheckCircle, Shield, Users, Zap, Workflow } from "lucide-react" +import { Code, CheckCircle, Shield, Users, Zap, Workflow, Lock } from "lucide-react" import { Button } from "@/components/ui" import { AnimatedText } from "@/components/animated-text" import { AnimatedBackground } from "@/components/homepage" import { ContactForm } from "@/components/enterprise/contact-form" +import { EXTERNAL_LINKS } from "@/lib/constants" export default async function Enterprise() { return ( @@ -385,6 +386,63 @@ export default async function Enterprise() {
+ {/* Security Hook Section */} +
+
+
+
+
+
+ +
+

Enterprise-Grade Security

+

+ Built with security-first principles to meet stringent enterprise requirements while + maintaining developer productivity. +

+
    +
  • + + SOC 2 Type I Certified with Type II in observation +
  • +
  • + + End-to-end encryption for all data transmission +
  • +
  • + + Security-first architecture with explicit permissions +
  • +
  • + + Complete audit trails and compliance reporting +
  • +
  • + + Open-source transparency for security verification +
  • +
+
+
+
+ +

Security-First Design

+

    + Every feature is built with enterprise security requirements in mind. +
    

+
+ +
+
+
+
+
+ {/* CTA Section */}
diff --git a/apps/web-roo-code/src/components/chromes/footer.tsx b/apps/web-roo-code/src/components/chromes/footer.tsx index 0d322f31e1fa..57d4c8ae8b46 100644 --- a/apps/web-roo-code/src/components/chromes/footer.tsx +++ b/apps/web-roo-code/src/components/chromes/footer.tsx @@ -118,6 +118,15 @@ export function Footer() { Enterprise +
  • + + Security + +
  • Enterprise + + Security + setIsMenuOpen(false)}> Enterprise + setIsMenuOpen(false)}> + Security + mrubens
    mrubens
    |saoudrizwan
    saoudrizwan
    |cte
    cte
    |samhvw8
    samhvw8
    |daniel-lxs
    daniel-lxs
    |hannesrudolph
    hannesrudolph
    | |:---:|:---:|:---:|:---:|:---:|:---:| |KJ7LNW
    KJ7LNW
    |a8trejo
    a8trejo
    |ColemanRoo
    ColemanRoo
    |canrobins13
    canrobins13
    |stea9499
    stea9499
    |joemanley201
    joemanley201
    | -|System233
    System233
    |jr
    jr
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    |MuriloFP
    MuriloFP
    | -|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |feifei325
    feifei325
    | -|qdaxb
    qdaxb
    |zhangtony239
    zhangtony239
    |cannuri
    cannuri
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    | -|dtrugman
    dtrugman
    |lloydchang
    lloydchang
    |pugazhendhi-m
    pugazhendhi-m
    |shariqriazz
    shariqriazz
    |vigneshsubbiah16
    vigneshsubbiah16
    |chrarnoldus
    chrarnoldus
    | -|Szpadel
    Szpadel
    |lupuletic
    lupuletic
    |kiwina
    kiwina
    |Premshay
    Premshay
    |psv2522
    psv2522
    |olweraltuve
    olweraltuve
    | -|diarmidmackenzie
    diarmidmackenzie
    |PeterDaveHello
    PeterDaveHello
    |aheizi
    aheizi
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    |nbihan-mediware
    nbihan-mediware
    | -|RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |StevenTCramer
    StevenTCramer
    |SannidhyaSah
    SannidhyaSah
    |pdecat
    pdecat
    |noritaka1166
    noritaka1166
    | -|kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    |slytechnical
    slytechnical
    | -|dleffel
    dleffel
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | +|System233
    System233
    |jr
    jr
    |MuriloFP
    MuriloFP
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    | +|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |qdaxb
    qdaxb
    | +|feifei325
    feifei325
    |zhangtony239
    zhangtony239
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |cannuri
    cannuri
    | +|vigneshsubbiah16
    vigneshsubbiah16
    |shariqriazz
    shariqriazz
    |pugazhendhi-m
    pugazhendhi-m
    |lloydchang
    lloydchang
    |dtrugman
    dtrugman
    |chrarnoldus
    chrarnoldus
    | +|Szpadel
    Szpadel
    |diarmidmackenzie
    diarmidmackenzie
    |olweraltuve
    olweraltuve
    |psv2522
    psv2522
    |Premshay
    Premshay
    |kiwina
    kiwina
    | +|lupuletic
    lupuletic
    |aheizi
    aheizi
    |SannidhyaSah
    SannidhyaSah
    |PeterDaveHello
    PeterDaveHello
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    | +|nbihan-mediware
    nbihan-mediware
    |RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |dleffel
    dleffel
    |StevenTCramer
    StevenTCramer
    |pdecat
    pdecat
    | +|noritaka1166
    noritaka1166
    |kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    | +|slytechnical
    slytechnical
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | |Ruakij
    Ruakij
    |p12tic
    p12tic
    |gtaylor
    gtaylor
    |aitoroses
    aitoroses
    |axkirillov
    axkirillov
    |ross
    ross
    | -|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    |eonghk
    eonghk
    | -|kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    |zeozeozeo
    zeozeozeo
    | -|ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    |bramburn
    bramburn
    | -|olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |snoyiatk
    snoyiatk
    |GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    | -|Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    |mdp
    mdp
    |napter
    napter
    | -|philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    |GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    | -|hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    |kinandan
    kinandan
    |nevermorec
    nevermorec
    | -|bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    |Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    | -|zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    |maekawataiki
    maekawataiki
    |PretzelVector
    PretzelVector
    | -|zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    | -|shaybc
    shaybc
    |seedlord
    seedlord
    |samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    | -|pokutuna
    pokutuna
    |philipnext
    philipnext
    |village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    | -|moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    |mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    | -|linegel
    linegel
    |edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    | -|dleen
    dleen
    |devxpain
    devxpain
    |CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    | -|benashby
    benashby
    |Atlogit
    Atlogit
    |atlasgong
    atlasgong
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    | -|HadesArchitect
    HadesArchitect
    |alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    | -|AMHesch
    AMHesch
    |AlexandruSmirnov
    AlexandruSmirnov
    |samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    | -|R-omk
    R-omk
    |Sarke
    Sarke
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    | -|marvijo-code
    marvijo-code
    |markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    | -|Rexarrior
    Rexarrior
    |ksze
    ksze
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    |pfitz
    pfitz
    | -|ExactDoug
    ExactDoug
    |celestial-vault
    celestial-vault
    | | | | | +|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |liwilliam2021
    liwilliam2021
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    | +|eonghk
    eonghk
    |kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    | +|zeozeozeo
    zeozeozeo
    |ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    | +|bramburn
    bramburn
    |olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |devxpain
    devxpain
    |snoyiatk
    snoyiatk
    | +|GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    |Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    | +|mdp
    mdp
    |napter
    napter
    |philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    | +|GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    |hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    | +|kinandan
    kinandan
    |nevermorec
    nevermorec
    |bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    | +|Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    |zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    | +|maekawataiki
    maekawataiki
    |AlexandruSmirnov
    AlexandruSmirnov
    |PretzelVector
    PretzelVector
    |zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    | +|takakoutso
    takakoutso
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    |shaybc
    shaybc
    |seedlord
    seedlord
    | +|samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    |pokutuna
    pokutuna
    |philipnext
    philipnext
    | +|village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    |moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    | +|mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    |celestial-vault
    celestial-vault
    |linegel
    linegel
    | +|edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    |dleen
    dleen
    | +|CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    |benashby
    benashby
    |Atlogit
    Atlogit
    | +|atlasgong
    atlasgong
    |andrewshu2000
    andrewshu2000
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    |HadesArchitect
    HadesArchitect
    | +|alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    |AMHesch
    AMHesch
    | +|samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    |R-omk
    R-omk
    |Sarke
    Sarke
    | +|PaperBoardOfficial
    PaperBoardOfficial
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    |marvijo-code
    marvijo-code
    | +|markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    |Rexarrior
    Rexarrior
    | +|KevinZhao
    KevinZhao
    |ksze
    ksze
    |Fovty
    Fovty
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    | +|pfitz
    pfitz
    |ExactDoug
    ExactDoug
    | | | | | ## Llicència diff --git a/locales/de/README.md b/locales/de/README.md index 05fb7d3bd30c..2dbaaa436352 100644 --- a/locales/de/README.md +++ b/locales/de/README.md @@ -50,13 +50,13 @@ Sehen Sie sich das [CHANGELOG](../../CHANGELOG.md) für detaillierte Updates und --- -## 🎉 Roo Code 3.21 veröffentlicht +## 🎉 Roo Code 3.22 veröffentlicht -Roo Code 3.21 bringt wichtige neue Funktionen und Verbesserungen basierend auf eurem Feedback! +Roo Code 3.22 bringt mächtige neue Funktionen und bedeutende Verbesserungen, um deinen Entwicklungsworkflow zu verbessern! -- **Roo Marketplace Launch** - Der Marketplace ist jetzt live! Der Marketplace ist jetzt live! Entdecke und installiere Modi und MCPs einfacher als je zuvor. -- **Gemini 2.5 Modelle** - Unterstützung für neue Gemini 2.5 Pro, Flash und Flash Lite Modelle hinzugefügt. -- **Excel-Datei-Unterstützung & Mehr** - Excel (.xlsx) Datei-Unterstützung hinzugefügt und zahlreiche Fehlerbehebungen und Verbesserungen! +- **1-Klick-Aufgaben-Teilen** - Teile deine Aufgaben sofort mit Kollegen und der Community mit einem einzigen Klick. +- **Globale .roo-Verzeichnis-Unterstützung** - Lade Regeln und Konfigurationen aus einem globalen .roo-Verzeichnis für konsistente Einstellungen über Projekte hinweg. +- **Verbesserte Übergänge von Architekt zu Code** - Nahtlose Übergaben von der Planung im Architekten-Modus zur Implementierung im Code-Modus. --- @@ -184,38 +184,39 @@ Danke an alle unsere Mitwirkenden, die geholfen haben, Roo Code zu verbessern! |mrubens
    mrubens
    |saoudrizwan
    saoudrizwan
    |cte
    cte
    |samhvw8
    samhvw8
    |daniel-lxs
    daniel-lxs
    |hannesrudolph
    hannesrudolph
    | |:---:|:---:|:---:|:---:|:---:|:---:| |KJ7LNW
    KJ7LNW
    |a8trejo
    a8trejo
    |ColemanRoo
    ColemanRoo
    |canrobins13
    canrobins13
    |stea9499
    stea9499
    |joemanley201
    joemanley201
    | -|System233
    System233
    |jr
    jr
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    |MuriloFP
    MuriloFP
    | -|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |feifei325
    feifei325
    | -|qdaxb
    qdaxb
    |zhangtony239
    zhangtony239
    |cannuri
    cannuri
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    | -|dtrugman
    dtrugman
    |lloydchang
    lloydchang
    |pugazhendhi-m
    pugazhendhi-m
    |shariqriazz
    shariqriazz
    |vigneshsubbiah16
    vigneshsubbiah16
    |chrarnoldus
    chrarnoldus
    | -|Szpadel
    Szpadel
    |lupuletic
    lupuletic
    |kiwina
    kiwina
    |Premshay
    Premshay
    |psv2522
    psv2522
    |olweraltuve
    olweraltuve
    | -|diarmidmackenzie
    diarmidmackenzie
    |PeterDaveHello
    PeterDaveHello
    |aheizi
    aheizi
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    |nbihan-mediware
    nbihan-mediware
    | -|RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |StevenTCramer
    StevenTCramer
    |SannidhyaSah
    SannidhyaSah
    |pdecat
    pdecat
    |noritaka1166
    noritaka1166
    | -|kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    |slytechnical
    slytechnical
    | -|dleffel
    dleffel
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | +|System233
    System233
    |jr
    jr
    |MuriloFP
    MuriloFP
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    | +|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |qdaxb
    qdaxb
    | +|feifei325
    feifei325
    |zhangtony239
    zhangtony239
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |cannuri
    cannuri
    | +|vigneshsubbiah16
    vigneshsubbiah16
    |shariqriazz
    shariqriazz
    |pugazhendhi-m
    pugazhendhi-m
    |lloydchang
    lloydchang
    |dtrugman
    dtrugman
    |chrarnoldus
    chrarnoldus
    | +|Szpadel
    Szpadel
    |diarmidmackenzie
    diarmidmackenzie
    |olweraltuve
    olweraltuve
    |psv2522
    psv2522
    |Premshay
    Premshay
    |kiwina
    kiwina
    | +|lupuletic
    lupuletic
    |aheizi
    aheizi
    |SannidhyaSah
    SannidhyaSah
    |PeterDaveHello
    PeterDaveHello
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    | +|nbihan-mediware
    nbihan-mediware
    |RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |dleffel
    dleffel
    |StevenTCramer
    StevenTCramer
    |pdecat
    pdecat
    | +|noritaka1166
    noritaka1166
    |kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    | +|slytechnical
    slytechnical
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | |Ruakij
    Ruakij
    |p12tic
    p12tic
    |gtaylor
    gtaylor
    |aitoroses
    aitoroses
    |axkirillov
    axkirillov
    |ross
    ross
    | -|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    |eonghk
    eonghk
    | -|kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    |zeozeozeo
    zeozeozeo
    | -|ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    |bramburn
    bramburn
    | -|olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |snoyiatk
    snoyiatk
    |GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    | -|Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    |mdp
    mdp
    |napter
    napter
    | -|philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    |GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    | -|hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    |kinandan
    kinandan
    |nevermorec
    nevermorec
    | -|bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    |Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    | -|zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    |maekawataiki
    maekawataiki
    |PretzelVector
    PretzelVector
    | -|zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    | -|shaybc
    shaybc
    |seedlord
    seedlord
    |samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    | -|pokutuna
    pokutuna
    |philipnext
    philipnext
    |village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    | -|moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    |mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    | -|linegel
    linegel
    |edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    | -|dleen
    dleen
    |devxpain
    devxpain
    |CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    | -|benashby
    benashby
    |Atlogit
    Atlogit
    |atlasgong
    atlasgong
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    | -|HadesArchitect
    HadesArchitect
    |alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    | -|AMHesch
    AMHesch
    |AlexandruSmirnov
    AlexandruSmirnov
    |samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    | -|R-omk
    R-omk
    |Sarke
    Sarke
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    | -|marvijo-code
    marvijo-code
    |markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    | -|Rexarrior
    Rexarrior
    |ksze
    ksze
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    |pfitz
    pfitz
    | -|ExactDoug
    ExactDoug
    |celestial-vault
    celestial-vault
    | | | | | +|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |liwilliam2021
    liwilliam2021
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    | +|eonghk
    eonghk
    |kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    | +|zeozeozeo
    zeozeozeo
    |ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    | +|bramburn
    bramburn
    |olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |devxpain
    devxpain
    |snoyiatk
    snoyiatk
    | +|GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    |Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    | +|mdp
    mdp
    |napter
    napter
    |philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    | +|GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    |hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    | +|kinandan
    kinandan
    |nevermorec
    nevermorec
    |bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    | +|Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    |zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    | +|maekawataiki
    maekawataiki
    |AlexandruSmirnov
    AlexandruSmirnov
    |PretzelVector
    PretzelVector
    |zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    | +|takakoutso
    takakoutso
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    |shaybc
    shaybc
    |seedlord
    seedlord
    | +|samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    |pokutuna
    pokutuna
    |philipnext
    philipnext
    | +|village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    |moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    | +|mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    |celestial-vault
    celestial-vault
    |linegel
    linegel
    | +|edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    |dleen
    dleen
    | +|CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    |benashby
    benashby
    |Atlogit
    Atlogit
    | +|atlasgong
    atlasgong
    |andrewshu2000
    andrewshu2000
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    |HadesArchitect
    HadesArchitect
    | +|alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    |AMHesch
    AMHesch
    | +|samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    |R-omk
    R-omk
    |Sarke
    Sarke
    | +|PaperBoardOfficial
    PaperBoardOfficial
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    |marvijo-code
    marvijo-code
    | +|markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    |Rexarrior
    Rexarrior
    | +|KevinZhao
    KevinZhao
    |ksze
    ksze
    |Fovty
    Fovty
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    | +|pfitz
    pfitz
    |ExactDoug
    ExactDoug
    | | | | | ## Lizenz diff --git a/locales/es/README.md b/locales/es/README.md index cd652e745ab8..45942a0ee5b6 100644 --- a/locales/es/README.md +++ b/locales/es/README.md @@ -50,13 +50,13 @@ Consulta el [CHANGELOG](../../CHANGELOG.md) para ver actualizaciones detalladas --- -## 🎉 Roo Code 3.21 Lanzado +## 🎉 Roo Code 3.22 Lanzado -¡Roo Code 3.21 trae importantes nuevas funciones y mejoras basadas en vuestros comentarios! +¡Roo Code 3.22 trae nuevas funcionalidades poderosas y mejoras significativas para mejorar tu flujo de trabajo de desarrollo! -- **Lanzamiento del Marketplace de Roo** - ¡El marketplace ya está en funcionamiento! ¡El marketplace ya está en funcionamiento! Descubre e instala modos y MCPs más fácilmente que nunca. -- **Modelos Gemini 2.5** - Se ha añadido soporte para los nuevos modelos Gemini 2.5 Pro, Flash y Flash Lite. -- **Soporte de Archivos Excel y Más** - ¡Se ha añadido soporte para archivos Excel (.xlsx) y numerosas correcciones de errores y mejoras! +- **Compartir tareas con 1 clic** - Comparte tus tareas instantáneamente con colegas y la comunidad con un solo clic. +- **Soporte de directorio .roo global** - Carga reglas y configuraciones desde un directorio .roo global para configuraciones consistentes entre proyectos. +- **Transiciones mejoradas de Arquitecto a Código** - Transferencias fluidas desde la planificación en modo Arquitecto hasta la implementación en modo Código. --- @@ -184,38 +184,39 @@ Usamos [changesets](https://github.com/changesets/changesets) para versionar y p |mrubens
    mrubens
    |saoudrizwan
    saoudrizwan
    |cte
    cte
    |samhvw8
    samhvw8
    |daniel-lxs
    daniel-lxs
    |hannesrudolph
    hannesrudolph
    | |:---:|:---:|:---:|:---:|:---:|:---:| |KJ7LNW
    KJ7LNW
    |a8trejo
    a8trejo
    |ColemanRoo
    ColemanRoo
    |canrobins13
    canrobins13
    |stea9499
    stea9499
    |joemanley201
    joemanley201
    | -|System233
    System233
    |jr
    jr
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    |MuriloFP
    MuriloFP
    | -|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |feifei325
    feifei325
    | -|qdaxb
    qdaxb
    |zhangtony239
    zhangtony239
    |cannuri
    cannuri
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    | -|dtrugman
    dtrugman
    |lloydchang
    lloydchang
    |pugazhendhi-m
    pugazhendhi-m
    |shariqriazz
    shariqriazz
    |vigneshsubbiah16
    vigneshsubbiah16
    |chrarnoldus
    chrarnoldus
    | -|Szpadel
    Szpadel
    |lupuletic
    lupuletic
    |kiwina
    kiwina
    |Premshay
    Premshay
    |psv2522
    psv2522
    |olweraltuve
    olweraltuve
    | -|diarmidmackenzie
    diarmidmackenzie
    |PeterDaveHello
    PeterDaveHello
    |aheizi
    aheizi
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    |nbihan-mediware
    nbihan-mediware
    | -|RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |StevenTCramer
    StevenTCramer
    |SannidhyaSah
    SannidhyaSah
    |pdecat
    pdecat
    |noritaka1166
    noritaka1166
    | -|kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    |slytechnical
    slytechnical
    | -|dleffel
    dleffel
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | +|System233
    System233
    |jr
    jr
    |MuriloFP
    MuriloFP
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    | +|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |qdaxb
    qdaxb
    | +|feifei325
    feifei325
    |zhangtony239
    zhangtony239
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |cannuri
    cannuri
    | +|vigneshsubbiah16
    vigneshsubbiah16
    |shariqriazz
    shariqriazz
    |pugazhendhi-m
    pugazhendhi-m
    |lloydchang
    lloydchang
    |dtrugman
    dtrugman
    |chrarnoldus
    chrarnoldus
    | +|Szpadel
    Szpadel
    |diarmidmackenzie
    diarmidmackenzie
    |olweraltuve
    olweraltuve
    |psv2522
    psv2522
    |Premshay
    Premshay
    |kiwina
    kiwina
    | +|lupuletic
    lupuletic
    |aheizi
    aheizi
    |SannidhyaSah
    SannidhyaSah
    |PeterDaveHello
    PeterDaveHello
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    | +|nbihan-mediware
    nbihan-mediware
    |RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |dleffel
    dleffel
    |StevenTCramer
    StevenTCramer
    |pdecat
    pdecat
    | +|noritaka1166
    noritaka1166
    |kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    | +|slytechnical
    slytechnical
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | |Ruakij
    Ruakij
    |p12tic
    p12tic
    |gtaylor
    gtaylor
    |aitoroses
    aitoroses
    |axkirillov
    axkirillov
    |ross
    ross
    | -|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    |eonghk
    eonghk
    | -|kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    |zeozeozeo
    zeozeozeo
    | -|ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    |bramburn
    bramburn
    | -|olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |snoyiatk
    snoyiatk
    |GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    | -|Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    |mdp
    mdp
    |napter
    napter
    | -|philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    |GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    | -|hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    |kinandan
    kinandan
    |nevermorec
    nevermorec
    | -|bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    |Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    | -|zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    |maekawataiki
    maekawataiki
    |PretzelVector
    PretzelVector
    | -|zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    | -|shaybc
    shaybc
    |seedlord
    seedlord
    |samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    | -|pokutuna
    pokutuna
    |philipnext
    philipnext
    |village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    | -|moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    |mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    | -|linegel
    linegel
    |edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    | -|dleen
    dleen
    |devxpain
    devxpain
    |CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    | -|benashby
    benashby
    |Atlogit
    Atlogit
    |atlasgong
    atlasgong
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    | -|HadesArchitect
    HadesArchitect
    |alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    | -|AMHesch
    AMHesch
    |AlexandruSmirnov
    AlexandruSmirnov
    |samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    | -|R-omk
    R-omk
    |Sarke
    Sarke
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    | -|marvijo-code
    marvijo-code
    |markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    | -|Rexarrior
    Rexarrior
    |ksze
    ksze
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    |pfitz
    pfitz
    | -|ExactDoug
    ExactDoug
    |celestial-vault
    celestial-vault
    | | | | | +|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |liwilliam2021
    liwilliam2021
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    | +|eonghk
    eonghk
    |kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    | +|zeozeozeo
    zeozeozeo
    |ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    | +|bramburn
    bramburn
    |olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |devxpain
    devxpain
    |snoyiatk
    snoyiatk
    | +|GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    |Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    | +|mdp
    mdp
    |napter
    napter
    |philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    | +|GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    |hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    | +|kinandan
    kinandan
    |nevermorec
    nevermorec
    |bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    | +|Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    |zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    | +|maekawataiki
    maekawataiki
    |AlexandruSmirnov
    AlexandruSmirnov
    |PretzelVector
    PretzelVector
    |zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    | +|takakoutso
    takakoutso
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    |shaybc
    shaybc
    |seedlord
    seedlord
    | +|samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    |pokutuna
    pokutuna
    |philipnext
    philipnext
    | +|village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    |moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    | +|mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    |celestial-vault
    celestial-vault
    |linegel
    linegel
    | +|edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    |dleen
    dleen
    | +|CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    |benashby
    benashby
    |Atlogit
    Atlogit
    | +|atlasgong
    atlasgong
    |andrewshu2000
    andrewshu2000
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    |HadesArchitect
    HadesArchitect
    | +|alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    |AMHesch
    AMHesch
    | +|samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    |R-omk
    R-omk
    |Sarke
    Sarke
    | +|PaperBoardOfficial
    PaperBoardOfficial
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    |marvijo-code
    marvijo-code
    | +|markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    |Rexarrior
    Rexarrior
    | +|KevinZhao
    KevinZhao
    |ksze
    ksze
    |Fovty
    Fovty
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    | +|pfitz
    pfitz
    |ExactDoug
    ExactDoug
    | | | | | ## Licencia diff --git a/locales/fr/README.md b/locales/fr/README.md index 9c093562e2ae..cb7c02166be3 100644 --- a/locales/fr/README.md +++ b/locales/fr/README.md @@ -50,13 +50,13 @@ Consultez le [CHANGELOG](../../CHANGELOG.md) pour des mises à jour détaillées --- -## 🎉 Roo Code 3.21 est sorti +## 🎉 Roo Code 3.22 est sorti -Roo Code 3.21 apporte de nouvelles fonctionnalités majeures et des améliorations basées sur vos retours ! +Roo Code 3.22 apporte de puissantes nouvelles fonctionnalités et des améliorations significatives pour améliorer ton flux de travail de développement ! -- **Le marketplace est maintenant en ligne ! Le marketplace est maintenant en ligne !** Découvrez et installez des modes et des MCPs plus facilement que jamais. -- **Ajout du support pour les nouveaux modèles Gemini 2.5 Pro, Flash et Flash Lite.** -- **Support des Fichiers Excel et Plus !** - Support MCP amélioré, plus de contrôles Mermaid, support de réflexion dans Amazon Bedrock, et bien plus ! +- **Partage de tâches en 1 clic** - Partage tes tâches instantanément avec tes collègues et la communauté en un seul clic. +- **Support du répertoire .roo global** - Charge les règles et configurations depuis un répertoire .roo global pour des paramètres cohérents entre les projets. +- **Transitions améliorées d'Architecte vers Code** - Transferts fluides de la planification en mode Architecte vers l'implémentation en mode Code. --- @@ -184,38 +184,39 @@ Merci à tous nos contributeurs qui ont aidé à améliorer Roo Code ! |mrubens
    mrubens
    |saoudrizwan
    saoudrizwan
    |cte
    cte
    |samhvw8
    samhvw8
    |daniel-lxs
    daniel-lxs
    |hannesrudolph
    hannesrudolph
    | |:---:|:---:|:---:|:---:|:---:|:---:| |KJ7LNW
    KJ7LNW
    |a8trejo
    a8trejo
    |ColemanRoo
    ColemanRoo
    |canrobins13
    canrobins13
    |stea9499
    stea9499
    |joemanley201
    joemanley201
    | -|System233
    System233
    |jr
    jr
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    |MuriloFP
    MuriloFP
    | -|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |feifei325
    feifei325
    | -|qdaxb
    qdaxb
    |zhangtony239
    zhangtony239
    |cannuri
    cannuri
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    | -|dtrugman
    dtrugman
    |lloydchang
    lloydchang
    |pugazhendhi-m
    pugazhendhi-m
    |shariqriazz
    shariqriazz
    |vigneshsubbiah16
    vigneshsubbiah16
    |chrarnoldus
    chrarnoldus
    | -|Szpadel
    Szpadel
    |lupuletic
    lupuletic
    |kiwina
    kiwina
    |Premshay
    Premshay
    |psv2522
    psv2522
    |olweraltuve
    olweraltuve
    | -|diarmidmackenzie
    diarmidmackenzie
    |PeterDaveHello
    PeterDaveHello
    |aheizi
    aheizi
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    |nbihan-mediware
    nbihan-mediware
    | -|RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |StevenTCramer
    StevenTCramer
    |SannidhyaSah
    SannidhyaSah
    |pdecat
    pdecat
    |noritaka1166
    noritaka1166
    | -|kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    |slytechnical
    slytechnical
    | -|dleffel
    dleffel
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | +|System233
    System233
    |jr
    jr
    |MuriloFP
    MuriloFP
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    | +|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |qdaxb
    qdaxb
    | +|feifei325
    feifei325
    |zhangtony239
    zhangtony239
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |cannuri
    cannuri
    | +|vigneshsubbiah16
    vigneshsubbiah16
    |shariqriazz
    shariqriazz
    |pugazhendhi-m
    pugazhendhi-m
    |lloydchang
    lloydchang
    |dtrugman
    dtrugman
    |chrarnoldus
    chrarnoldus
    | +|Szpadel
    Szpadel
    |diarmidmackenzie
    diarmidmackenzie
    |olweraltuve
    olweraltuve
    |psv2522
    psv2522
    |Premshay
    Premshay
    |kiwina
    kiwina
    | +|lupuletic
    lupuletic
    |aheizi
    aheizi
    |SannidhyaSah
    SannidhyaSah
    |PeterDaveHello
    PeterDaveHello
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    | +|nbihan-mediware
    nbihan-mediware
    |RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |dleffel
    dleffel
    |StevenTCramer
    StevenTCramer
    |pdecat
    pdecat
    | +|noritaka1166
    noritaka1166
    |kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    | +|slytechnical
    slytechnical
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | |Ruakij
    Ruakij
    |p12tic
    p12tic
    |gtaylor
    gtaylor
    |aitoroses
    aitoroses
    |axkirillov
    axkirillov
    |ross
    ross
    | -|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    |eonghk
    eonghk
    | -|kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    |zeozeozeo
    zeozeozeo
    | -|ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    |bramburn
    bramburn
    | -|olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |snoyiatk
    snoyiatk
    |GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    | -|Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    |mdp
    mdp
    |napter
    napter
    | -|philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    |GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    | -|hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    |kinandan
    kinandan
    |nevermorec
    nevermorec
    | -|bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    |Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    | -|zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    |maekawataiki
    maekawataiki
    |PretzelVector
    PretzelVector
    | -|zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    | -|shaybc
    shaybc
    |seedlord
    seedlord
    |samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    | -|pokutuna
    pokutuna
    |philipnext
    philipnext
    |village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    | -|moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    |mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    | -|linegel
    linegel
    |edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    | -|dleen
    dleen
    |devxpain
    devxpain
    |CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    | -|benashby
    benashby
    |Atlogit
    Atlogit
    |atlasgong
    atlasgong
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    | -|HadesArchitect
    HadesArchitect
    |alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    | -|AMHesch
    AMHesch
    |AlexandruSmirnov
    AlexandruSmirnov
    |samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    | -|R-omk
    R-omk
    |Sarke
    Sarke
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    | -|marvijo-code
    marvijo-code
    |markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    | -|Rexarrior
    Rexarrior
    |ksze
    ksze
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    |pfitz
    pfitz
    | -|ExactDoug
    ExactDoug
    |celestial-vault
    celestial-vault
    | | | | | +|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |liwilliam2021
    liwilliam2021
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    | +|eonghk
    eonghk
    |kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    | +|zeozeozeo
    zeozeozeo
    |ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    | +|bramburn
    bramburn
    |olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |devxpain
    devxpain
    |snoyiatk
    snoyiatk
    | +|GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    |Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    | +|mdp
    mdp
    |napter
    napter
    |philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    | +|GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    |hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    | +|kinandan
    kinandan
    |nevermorec
    nevermorec
    |bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    | +|Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    |zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    | +|maekawataiki
    maekawataiki
    |AlexandruSmirnov
    AlexandruSmirnov
    |PretzelVector
    PretzelVector
    |zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    | +|takakoutso
    takakoutso
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    |shaybc
    shaybc
    |seedlord
    seedlord
    | +|samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    |pokutuna
    pokutuna
    |philipnext
    philipnext
    | +|village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    |moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    | +|mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    |celestial-vault
    celestial-vault
    |linegel
    linegel
    | +|edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    |dleen
    dleen
    | +|CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    |benashby
    benashby
    |Atlogit
    Atlogit
    | +|atlasgong
    atlasgong
    |andrewshu2000
    andrewshu2000
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    |HadesArchitect
    HadesArchitect
    | +|alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    |AMHesch
    AMHesch
    | +|samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    |R-omk
    R-omk
    |Sarke
    Sarke
    | +|PaperBoardOfficial
    PaperBoardOfficial
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    |marvijo-code
    marvijo-code
    | +|markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    |Rexarrior
    Rexarrior
    | +|KevinZhao
    KevinZhao
    |ksze
    ksze
    |Fovty
    Fovty
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    | +|pfitz
    pfitz
    |ExactDoug
    ExactDoug
    | | | | | ## Licence diff --git a/locales/hi/README.md b/locales/hi/README.md index d05baccf4837..38cfae33a483 100644 --- a/locales/hi/README.md +++ b/locales/hi/README.md @@ -184,38 +184,39 @@ Roo Code को बेहतर बनाने में मदद करने |mrubens
|mrubens|saoudrizwan|cte|samhvw8|daniel-lxs|hannesrudolph|
|:---:|:---:|:---:|:---:|:---:|:---:|
|KJ7LNW|a8trejo|ColemanRoo|canrobins13|stea9499|joemanley201|
-|System233|jr|nissa-seru|jquanton|NyxJae|MuriloFP|
-|elianiva|d-oit|punkpeye|wkordalski|xyOz-dev|feifei325|
-|qdaxb|zhangtony239|cannuri|monotykamary|sachasayan|Smartsheet-JB-Brown|
-|dtrugman|lloydchang|pugazhendhi-m|shariqriazz|vigneshsubbiah16|chrarnoldus|
-|Szpadel|lupuletic|kiwina|Premshay|psv2522|olweraltuve|
-|diarmidmackenzie|PeterDaveHello|aheizi|hassoncs|ChuKhaLi|nbihan-mediware|
-|RaySinner|afshawnlotfi|StevenTCramer|SannidhyaSah|pdecat|noritaka1166|
-|kyle-apex|emshvac|Lunchb0ne|SmartManoj|vagadiya|slytechnical|
-|dleffel|arthurauffray|upamune|NamesMT|taylorwilsdon|sammcj|
+|System233|jr|MuriloFP|nissa-seru|jquanton|NyxJae|
+|elianiva|d-oit|punkpeye|wkordalski|xyOz-dev|qdaxb|
+|feifei325|zhangtony239|Smartsheet-JB-Brown|monotykamary|sachasayan|cannuri|
+|vigneshsubbiah16|shariqriazz|pugazhendhi-m|lloydchang|dtrugman|chrarnoldus|
+|Szpadel|diarmidmackenzie|olweraltuve|psv2522|Premshay|kiwina|
+|lupuletic|aheizi|SannidhyaSah|PeterDaveHello|hassoncs|ChuKhaLi|
+|nbihan-mediware|RaySinner|afshawnlotfi|dleffel|StevenTCramer|pdecat|
+|noritaka1166|kyle-apex|emshvac|Lunchb0ne|SmartManoj|vagadiya|
+|slytechnical|arthurauffray|upamune|NamesMT|taylorwilsdon|sammcj|
|Ruakij|p12tic|gtaylor|aitoroses|axkirillov|ross|
-|mr-ryan-james|heyseth|taisukeoe|avtc|dlab-anton|eonghk|
-|kcwhite|ronyblum|teddyOOXX|vincentsong|yongjer|zeozeozeo|
-|ashktn|franekp|yt3trees|benzntech|anton-otee|bramburn|
-|olearycrew|brunobergher|catrielmuller|snoyiatk|GitlyHallows|jcbdev|
-|Chenjiayuan195|julionav|KanTakahiro|SplittyDev|mdp|napter|
-|philfung|dairui1|dqroid|forestyoo|GOODBOY008|hatsu38|
-|hongzio|im47cn|shoopapa|jwcraig|kinandan|nevermorec|
-|bannzai|axmo|asychin|amittell|Yoshino-Yukitaro|Yikai-Liao|
-|zxdvd|vladstudio|tmsjngx0|tgfjt|maekawataiki|PretzelVector|
-|zetaloop|cdlliuy|user202729|student20880|shohei-ihaya|shivamd1810|
-|shaybc|seedlord|samir-nimbly|robertheadley|refactorthis|qingyuan1109|
-|pokutuna|philipnext|village-way|oprstchn|nobu007|mosleyit|
-|moqimoqidea|mlopezr|mecab|olup|lightrabbit|kohii|
-|linegel|edwin-truthsearch-io|EamonNerbonne|dbasclpy|dflatline|Deon588|
-|dleen|devxpain|CW-B-W|chadgauth|thecolorblue|bogdan0083|
-|benashby|Atlogit|atlasgong|andreastempsch|alasano|QuinsZouls|
-|HadesArchitect|alarno|nexon33|adilhafeez|adamwlarson|adamhill|
-|AMHesch|AlexandruSmirnov|samsilveira|01Rian|RSO|SECKainersdorfer|
-|R-omk|Sarke|OlegOAndreev|kvokka|ecmasx|mollux|
-|marvijo-code|markijbema|mamertofabian|monkeyDluffy6017|libertyteeth|shtse8|
-|Rexarrior|ksze|Jdo300|hesara|DeXtroTip|pfitz|
-|ExactDoug|celestial-vault| | | | |
+|mr-ryan-james|heyseth|taisukeoe|liwilliam2021|avtc|dlab-anton|
+|eonghk|kcwhite|ronyblum|teddyOOXX|vincentsong|yongjer|
+|zeozeozeo|ashktn|franekp|yt3trees|benzntech|anton-otee|
+|bramburn|olearycrew|brunobergher|catrielmuller|devxpain|snoyiatk|
+|GitlyHallows|jcbdev|Chenjiayuan195|julionav|KanTakahiro|SplittyDev|
+|mdp|napter|philfung|dairui1|dqroid|forestyoo|
+|GOODBOY008|hatsu38|hongzio|im47cn|shoopapa|jwcraig|
+|kinandan|nevermorec|bannzai|axmo|asychin|amittell|
+|Yoshino-Yukitaro|Yikai-Liao|zxdvd|vladstudio|tmsjngx0|tgfjt|
+|maekawataiki|AlexandruSmirnov|PretzelVector|zetaloop|cdlliuy|user202729|
+|takakoutso|student20880|shohei-ihaya|shivamd1810|shaybc|seedlord|
+|samir-nimbly|robertheadley|refactorthis|qingyuan1109|pokutuna|philipnext|
+|village-way|oprstchn|nobu007|mosleyit|moqimoqidea|mlopezr|
+|mecab|olup|lightrabbit|kohii|celestial-vault|linegel|
+|edwin-truthsearch-io|EamonNerbonne|dbasclpy|dflatline|Deon588|dleen|
+|CW-B-W|chadgauth|thecolorblue|bogdan0083|benashby|Atlogit|
+|atlasgong|andrewshu2000|andreastempsch|alasano|QuinsZouls|HadesArchitect|
+|alarno|nexon33|adilhafeez|adamwlarson|adamhill|AMHesch|
+|samsilveira|01Rian|RSO|SECKainersdorfer|R-omk|Sarke|
+|PaperBoardOfficial|OlegOAndreev|kvokka|ecmasx|mollux|marvijo-code|
+|markijbema|mamertofabian|monkeyDluffy6017|libertyteeth|shtse8|Rexarrior|
+|KevinZhao|ksze|Fovty|Jdo300|hesara|DeXtroTip|
+|pfitz|ExactDoug| | | | |
## लाइसेंस

diff --git a/locales/id/README.md b/locales/id/README.md
index 0903a209a572..2dfd3000fd89 100644
--- a/locales/id/README.md
+++ b/locales/id/README.md
@@ -178,38 +178,39 @@ Terima kasih kepada semua kontributor kami yang telah membantu membuat Roo Code
|mrubens|saoudrizwan|cte|samhvw8|daniel-lxs|hannesrudolph|
|:---:|:---:|:---:|:---:|:---:|:---:|
|KJ7LNW|a8trejo|ColemanRoo|canrobins13|stea9499|joemanley201|
[contributors avatar table: the same removed and added rows as in the locales/hi/README.md hunk above]
## License

diff --git a/locales/it/README.md b/locales/it/README.md
index 5de5eb99374e..34f6d3d029d4 100644
--- a/locales/it/README.md
+++ b/locales/it/README.md
@@ -184,38 +184,39 @@ Grazie a tutti i nostri contributori che hanno aiutato a migliorare Roo Code!
|mrubens|saoudrizwan|cte|samhvw8|daniel-lxs|hannesrudolph|
|:---:|:---:|:---:|:---:|:---:|:---:|
|KJ7LNW|a8trejo|ColemanRoo|canrobins13|stea9499|joemanley201|
[contributors avatar table: the same removed and added rows as in the locales/hi/README.md hunk above]
## Licenza

diff --git a/locales/ja/README.md b/locales/ja/README.md
index 1d31dc9da710..07b12f17921f 100644
--- a/locales/ja/README.md
+++ b/locales/ja/README.md
@@ -50,13 +50,13 @@

---

-## 🎉 Roo Code 3.21 リリース
+## 🎉 Roo Code 3.22 リリース

-Roo Code 3.21は、皆様のフィードバックに基づく新しい主要機能と改善をもたらします！
+Roo Code 3.22は、開発ワークフローを向上させる強力な新機能と重要な改善をもたらします！

-- **マーケットプレイスが稼働開始！マーケットプレイスが稼働開始！** これまで以上に簡単にモードとMCPを発見してインストールできます。
-- **新しいGemini 2.5 Pro、Flash、Flash Liteモデルのサポートを追加。**
-- **Excel ファイルサポートなど** - MCP サポートの向上、Mermaid制御の追加、Amazon Bedrock thinking サポート、その他多数！
+- **1クリックタスク共有** - 同僚やコミュニティとタスクを1クリックで瞬時に共有できます。
+- **グローバル.rooディレクトリサポート** - プロジェクト間で一貫した設定のためにグローバル.rooディレクトリからルールと設定を読み込みます。
+- **改善されたアーキテクトからコードへの移行** - アーキテクトモードでの計画からコードモードでの実装へのシームレスな引き継ぎ。

---

@@ -184,38 +184,39 @@ Roo Codeの改善に貢献してくれたすべての貢献者に感謝します
|mrubens|saoudrizwan|cte|samhvw8|daniel-lxs|hannesrudolph|
|:---:|:---:|:---:|:---:|:---:|:---:|
|KJ7LNW|a8trejo|ColemanRoo|canrobins13|stea9499|joemanley201|
[contributors avatar table: the same removed and added rows as in the locales/hi/README.md hunk above]
## ライセンス

diff --git a/locales/ko/README.md b/locales/ko/README.md
index d03fd157b397..8d9e0381c147 100644
--- a/locales/ko/README.md
+++ b/locales/ko/README.md
@@ -50,13 +50,13 @@

---

-## 🎉 Roo Code 3.21 출시
+## 🎉 Roo Code 3.22 출시

-Roo Code 3.21이 여러분의 피드백을 바탕으로 한 새로운 주요 기능과 개선사항을 제공합니다!
+Roo Code 3.22가 개발 워크플로우를 향상시키는 강력한 새 기능과 중요한 개선사항을 제공합니다!

-- **마켓플레이스가 이제 라이브입니다! 마켓플레이스가 이제 라이브입니다!** 그 어느 때보다 쉽게 모드와 MCP를 발견하고 설치하세요.
-- **새로운 Gemini 2.5 Pro, Flash, Flash Lite 모델 지원을 추가했습니다.**
-- **Excel 파일 지원 등** - 향상된 MCP 지원, 더 많은 Mermaid 제어, Amazon Bedrock thinking 지원 등!
+- **1클릭 작업 공유** - 동료와 커뮤니티에 한 번의 클릭으로 즉시 작업을 공유하세요.
+- **글로벌 .roo 디렉토리 지원** - 프로젝트 간 일관된 설정을 위해 글로벌 .roo 디렉토리에서 규칙과 구성을 로드합니다.
+- **개선된 아키텍트에서 코드로의 전환** - 아키텍트 모드에서의 계획부터 코드 모드에서의 구현까지 원활한 인수인계.

---

@@ -184,38 +184,39 @@ Roo Code를 더 좋게 만드는 데 도움을 준 모든 기여자에게 감사
|mrubens|saoudrizwan|cte|samhvw8|daniel-lxs|hannesrudolph|
|:---:|:---:|:---:|:---:|:---:|:---:|
|KJ7LNW|a8trejo|ColemanRoo|canrobins13|stea9499|joemanley201|
[contributors avatar table: the same removed and added rows as in the locales/hi/README.md hunk above]
    | | | | | ## 라이선스 diff --git a/locales/nl/README.md b/locales/nl/README.md index 218c12b725fd..b083ddf1b189 100644 --- a/locales/nl/README.md +++ b/locales/nl/README.md @@ -50,13 +50,13 @@ Bekijk de [CHANGELOG](../../CHANGELOG.md) voor gedetailleerde updates en fixes. --- -## 🎉 Roo Code 3.21 Uitgebracht +## 🎉 Roo Code 3.22 Uitgebracht -Roo Code 3.21 brengt krachtige nieuwe functies en verbeteringen op basis van jullie feedback! +Roo Code 3.22 brengt krachtige nieuwe functies en significante verbeteringen om je ontwikkelingsworkflow te verbeteren! -- **De marketplace is nu live! De marketplace is nu live!** Ontdek en installeer modi en MCP's eenvoudiger dan ooit tevoren. -- **Ondersteuning toegevoegd voor nieuwe Gemini 2.5 Pro, Flash en Flash Lite modellen.** -- **Excel Bestandsondersteuning & Meer** - Verbeterde Mermaid-controls voor betere diagramvisualisatie en nieuwe Amazon Bedrock thinking-ondersteuning voor meer geavanceerde AI-interacties! +- **1-Klik Taak Delen** - Deel je taken direct met collega's en de gemeenschap met een enkele klik. +- **Globale .roo Directory Ondersteuning** - Laad regels en configuraties vanuit een globale .roo directory voor consistente instellingen tussen projecten. +- **Verbeterde Architect naar Code Overgangen** - Naadloze overdrachten van planning in Architect-modus naar implementatie in Code-modus. --- @@ -184,38 +184,39 @@ Dank aan alle bijdragers die Roo Code beter hebben gemaakt! |mrubens
|mrubens|saoudrizwan|cte|samhvw8|daniel-lxs|hannesrudolph|
|:---:|:---:|:---:|:---:|:---:|:---:|
|KJ7LNW|a8trejo|ColemanRoo|canrobins13|stea9499|joemanley201|
-|System233|jr|nissa-seru|jquanton|NyxJae|MuriloFP|
-|elianiva|d-oit|punkpeye|wkordalski|xyOz-dev|feifei325|
-|qdaxb|zhangtony239|cannuri|monotykamary|sachasayan|Smartsheet-JB-Brown|
-|dtrugman|lloydchang|pugazhendhi-m|shariqriazz|vigneshsubbiah16|chrarnoldus|
-|Szpadel|lupuletic|kiwina|Premshay|psv2522|olweraltuve|
-|diarmidmackenzie|PeterDaveHello|aheizi|hassoncs|ChuKhaLi|nbihan-mediware|
-|RaySinner|afshawnlotfi|StevenTCramer|SannidhyaSah|pdecat|noritaka1166|
-|kyle-apex|emshvac|Lunchb0ne|SmartManoj|vagadiya|slytechnical|
-|dleffel|arthurauffray|upamune|NamesMT|taylorwilsdon|sammcj|
+|System233|jr|MuriloFP|nissa-seru|jquanton|NyxJae|
+|elianiva|d-oit|punkpeye|wkordalski|xyOz-dev|qdaxb|
+|feifei325|zhangtony239|Smartsheet-JB-Brown|monotykamary|sachasayan|cannuri|
+|vigneshsubbiah16|shariqriazz|pugazhendhi-m|lloydchang|dtrugman|chrarnoldus|
+|Szpadel|diarmidmackenzie|olweraltuve|psv2522|Premshay|kiwina|
+|lupuletic|aheizi|SannidhyaSah|PeterDaveHello|hassoncs|ChuKhaLi|
+|nbihan-mediware|RaySinner|afshawnlotfi|dleffel|StevenTCramer|pdecat|
+|noritaka1166|kyle-apex|emshvac|Lunchb0ne|SmartManoj|vagadiya|
+|slytechnical|arthurauffray|upamune|NamesMT|taylorwilsdon|sammcj|
|Ruakij|p12tic|gtaylor|aitoroses|axkirillov|ross|
-|mr-ryan-james|heyseth|taisukeoe|avtc|dlab-anton|eonghk|
-|kcwhite|ronyblum|teddyOOXX|vincentsong|yongjer|zeozeozeo|
-|ashktn|franekp|yt3trees|benzntech|anton-otee|bramburn|
-|olearycrew|brunobergher|catrielmuller|snoyiatk|GitlyHallows|jcbdev|
-|Chenjiayuan195|julionav|KanTakahiro|SplittyDev|mdp|napter|
-|philfung|dairui1|dqroid|forestyoo|GOODBOY008|hatsu38|
-|hongzio|im47cn|shoopapa|jwcraig|kinandan|nevermorec|
-|bannzai|axmo|asychin|amittell|Yoshino-Yukitaro|Yikai-Liao|
-|zxdvd|vladstudio|tmsjngx0|tgfjt|maekawataiki|PretzelVector|
-|zetaloop|cdlliuy|user202729|student20880|shohei-ihaya|shivamd1810|
-|shaybc|seedlord|samir-nimbly|robertheadley|refactorthis|qingyuan1109|
-|pokutuna|philipnext|village-way|oprstchn|nobu007|mosleyit|
-|moqimoqidea|mlopezr|mecab|olup|lightrabbit|kohii|
-|linegel|edwin-truthsearch-io|EamonNerbonne|dbasclpy|dflatline|Deon588|
-|dleen|devxpain|CW-B-W|chadgauth|thecolorblue|bogdan0083|
-|benashby|Atlogit|atlasgong|andreastempsch|alasano|QuinsZouls|
-|HadesArchitect|alarno|nexon33|adilhafeez|adamwlarson|adamhill|
-|AMHesch|AlexandruSmirnov|samsilveira|01Rian|RSO|SECKainersdorfer|
-|R-omk|Sarke|OlegOAndreev|kvokka|ecmasx|mollux|
-|marvijo-code|markijbema|mamertofabian|monkeyDluffy6017|libertyteeth|shtse8|
-|Rexarrior|ksze|Jdo300|hesara|DeXtroTip|pfitz|
-|ExactDoug|celestial-vault| | | | |
+|mr-ryan-james|heyseth|taisukeoe|liwilliam2021|avtc|dlab-anton|
+|eonghk|kcwhite|ronyblum|teddyOOXX|vincentsong|yongjer|
+|zeozeozeo|ashktn|franekp|yt3trees|benzntech|anton-otee|
+|bramburn|olearycrew|brunobergher|catrielmuller|devxpain|snoyiatk|
+|GitlyHallows|jcbdev|Chenjiayuan195|julionav|KanTakahiro|SplittyDev|
+|mdp|napter|philfung|dairui1|dqroid|forestyoo|
+|GOODBOY008|hatsu38|hongzio|im47cn|shoopapa|jwcraig|
+|kinandan|nevermorec|bannzai|axmo|asychin|amittell|
+|Yoshino-Yukitaro|Yikai-Liao|zxdvd|vladstudio|tmsjngx0|tgfjt|
+|maekawataiki|AlexandruSmirnov|PretzelVector|zetaloop|cdlliuy|user202729|
+|takakoutso|student20880|shohei-ihaya|shivamd1810|shaybc|seedlord|
+|samir-nimbly|robertheadley|refactorthis|qingyuan1109|pokutuna|philipnext|
+|village-way|oprstchn|nobu007|mosleyit|moqimoqidea|mlopezr|
+|mecab|olup|lightrabbit|kohii|celestial-vault|linegel|
+|edwin-truthsearch-io|EamonNerbonne|dbasclpy|dflatline|Deon588|dleen|
+|CW-B-W|chadgauth|thecolorblue|bogdan0083|benashby|Atlogit|
+|atlasgong|andrewshu2000|andreastempsch|alasano|QuinsZouls|HadesArchitect|
+|alarno|nexon33|adilhafeez|adamwlarson|adamhill|AMHesch|
+|samsilveira|01Rian|RSO|SECKainersdorfer|R-omk|Sarke|
+|PaperBoardOfficial|OlegOAndreev|kvokka|ecmasx|mollux|marvijo-code|
+|markijbema|mamertofabian|monkeyDluffy6017|libertyteeth|shtse8|Rexarrior|
+|KevinZhao|ksze|Fovty|Jdo300|hesara|DeXtroTip|
+|pfitz|ExactDoug
    | | | | | ## Licentie diff --git a/locales/pl/README.md b/locales/pl/README.md index 023f27cb15e4..86be4bc8461b 100644 --- a/locales/pl/README.md +++ b/locales/pl/README.md @@ -50,13 +50,13 @@ Sprawdź [CHANGELOG](../../CHANGELOG.md), aby uzyskać szczegółowe informacje --- -## 🎉 Roo Code 3.21 został wydany +## 🎉 Roo Code 3.22 został wydany -Roo Code 3.21 wprowadza potężne nowe funkcje i usprawnienia na podstawie opinii użytkowników! +Roo Code 3.22 wprowadza potężne nowe funkcje i znaczące usprawnienia, aby ulepszyć Twój przepływ pracy deweloperskiej! -- **Marketplace jest teraz na żywo! Marketplace jest teraz na żywo!** Odkrywaj i instaluj tryby oraz MCP łatwiej niż kiedykolwiek wcześniej. -- **Dodano wsparcie dla nowych modeli Gemini 2.5 Pro, Flash i Flash Lite.** -- **Wsparcie plików Excel i więcej** - Ulepszone kontrolki Mermaid dla lepszej wizualizacji diagramów oraz nowe wsparcie Amazon Bedrock thinking dla bardziej zaawansowanych interakcji z AI! +- **Udostępnianie zadań jednym kliknięciem** - Udostępniaj swoje zadania natychmiast współpracownikom i społeczności jednym kliknięciem. +- **Wsparcie globalnego katalogu .roo** - Ładuj zasady i konfiguracje z globalnego katalogu .roo dla spójnych ustawień między projektami. +- **Ulepszone przejścia z Architekta do Kodu** - Płynne przekazania od planowania w trybie Architekta do implementacji w trybie Kodu. --- @@ -184,38 +184,39 @@ Dziękujemy wszystkim naszym współtwórcom, którzy pomogli ulepszyć Roo Code |mrubens
[Contributor avatar table for this locale: identical row changes to those shown in the locales/nl/README.md table above.]
    | | | | | ## Licencja diff --git a/locales/pt-BR/README.md b/locales/pt-BR/README.md index 74d4d36a4344..ffcdd994d321 100644 --- a/locales/pt-BR/README.md +++ b/locales/pt-BR/README.md @@ -50,13 +50,13 @@ Confira o [CHANGELOG](../../CHANGELOG.md) para atualizações e correções deta --- -## 🎉 Roo Code 3.21 foi lançado +## 🎉 Roo Code 3.22 foi lançado -O Roo Code 3.21 introduz novos recursos poderosos e melhorias baseadas no feedback dos usuários! +O Roo Code 3.22 traz novos recursos poderosos e melhorias significativas para aprimorar seu fluxo de trabalho de desenvolvimento! -- **O marketplace está agora disponível! O marketplace está agora disponível!** Descubra e instale modos e MCPs mais facilmente do que nunca. -- **Adicionado suporte para os novos modelos Gemini 2.5 Pro, Flash e Flash Lite.** -- **Suporte a Arquivos Excel e Mais** - Controles Mermaid aprimorados para melhor visualização de diagramas e novo suporte Amazon Bedrock thinking para interações de IA mais avançadas! +- **Compartilhamento de Tarefas com 1 Clique** - Compartilhe suas tarefas instantaneamente com colegas e a comunidade com um único clique. +- **Suporte a Diretório .roo Global** - Carregue regras e configurações de um diretório .roo global para configurações consistentes entre projetos. +- **Transições Aprimoradas de Arquiteto para Código** - Transferências perfeitas do planejamento no modo Arquiteto para implementação no modo Código. --- @@ -184,38 +184,39 @@ Obrigado a todos os nossos contribuidores que ajudaram a tornar o Roo Code melho |mrubens
[Contributor avatar table for this locale: identical row changes to those shown in the locales/nl/README.md table above.]
    | | | | | ## Licença diff --git a/locales/ru/README.md b/locales/ru/README.md index 3e9133cb9752..35250c85ec56 100644 --- a/locales/ru/README.md +++ b/locales/ru/README.md @@ -50,13 +50,13 @@ --- -## 🎉 Выпущен Roo Code 3.21 +## 🎉 Выпущен Roo Code 3.22 -Roo Code 3.21 представляет экспериментальный маркетплейс и улучшения файловых операций! +Roo Code 3.22 представляет мощные новые функции и значительные улучшения для повышения эффективности вашего рабочего процесса разработки! -- **Маркетплейс теперь доступен! Маркетплейс теперь доступен!** Открывайте и устанавливайте режимы и MCP проще, чем когда-либо. -- **Добавлена поддержка новых моделей Gemini 2.5 Pro, Flash и Flash Lite.** -- **Поддержка файлов Excel и многое другое!** - Новые элементы управления Mermaid и поддержка мышления Amazon Bedrock для расширенных возможностей MCP. +- **Обмен задачами в 1 клик** - Мгновенно делитесь своими задачами с коллегами и сообществом одним кликом. +- **Поддержка глобального каталога .roo** - Загружайте правила и конфигурации из глобального каталога .roo для согласованных настроек между проектами. +- **Улучшенные переходы от Архитектора к Коду** - Плавные передачи от планирования в режиме Архитектора к реализации в режиме Кода. --- @@ -184,38 +184,39 @@ code --install-extension bin/roo-cline-.vsix |mrubens
[Contributor avatar table for this locale: identical row changes to those shown in the locales/nl/README.md table above.]
    | | | | | ## Лицензия diff --git a/locales/tr/README.md b/locales/tr/README.md index cc26916c3229..ae52f4aeecc2 100644 --- a/locales/tr/README.md +++ b/locales/tr/README.md @@ -50,13 +50,13 @@ Detaylı güncellemeler ve düzeltmeler için [CHANGELOG](../../CHANGELOG.md) do --- -## 🎉 Roo Code 3.21 Yayınlandı +## 🎉 Roo Code 3.22 Yayınlandı -Roo Code 3.21 deneysel pazar yeri ve gelişmiş dosya işlemleri sunuyor! +Roo Code 3.22 geliştirme iş akışınızı geliştirmek için güçlü yeni özellikler ve önemli iyileştirmeler getiriyor! -- **Pazar yeri artık canlı! Pazar yeri artık canlı!** Modları ve MCP'leri her zamankinden daha kolay keşfedin ve kurun. -- **Yeni Gemini 2.5 Pro, Flash ve Flash Lite modelleri için destek eklendi.** -- **Excel Dosya Desteği ve Daha Fazlası!** - Gelişmiş MCP yetenekleri için yeni Mermaid kontrolleri ve Amazon Bedrock düşünce desteği. +- **1-Tık Görev Paylaşımı** - Görevlerinizi tek tıkla meslektaşlarınız ve toplulukla anında paylaşın. +- **Global .roo Dizin Desteği** - Projeler arası tutarlı ayarlar için global .roo dizininden kuralları ve konfigürasyonları yükleyin. +- **Gelişmiş Mimar'dan Kod'a Geçişler** - Mimar modunda planlamadan Kod modunda uygulamaya sorunsuz aktarımlar. --- @@ -184,38 +184,39 @@ Roo Code'u daha iyi hale getirmeye yardımcı olan tüm katkıda bulunanlara te |mrubens
[Contributor avatar table for this locale: identical row changes to those shown in the locales/nl/README.md table above.]
    |GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    | -|Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    |mdp
    mdp
    |napter
    napter
    | -|philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    |GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    | -|hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    |kinandan
    kinandan
    |nevermorec
    nevermorec
    | -|bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    |Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    | -|zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    |maekawataiki
    maekawataiki
    |PretzelVector
    PretzelVector
    | -|zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    | -|shaybc
    shaybc
    |seedlord
    seedlord
    |samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    | -|pokutuna
    pokutuna
    |philipnext
    philipnext
    |village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    | -|moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    |mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    | -|linegel
    linegel
    |edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    | -|dleen
    dleen
    |devxpain
    devxpain
    |CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    | -|benashby
    benashby
    |Atlogit
    Atlogit
    |atlasgong
    atlasgong
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    | -|HadesArchitect
    HadesArchitect
    |alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    | -|AMHesch
    AMHesch
    |AlexandruSmirnov
    AlexandruSmirnov
    |samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    | -|R-omk
    R-omk
    |Sarke
    Sarke
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    | -|marvijo-code
    marvijo-code
    |markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    | -|Rexarrior
    Rexarrior
    |ksze
    ksze
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    |pfitz
    pfitz
    | -|ExactDoug
    ExactDoug
    |celestial-vault
    celestial-vault
    | | | | | +|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |liwilliam2021
    liwilliam2021
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    | +|eonghk
    eonghk
    |kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    | +|zeozeozeo
    zeozeozeo
    |ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    | +|bramburn
    bramburn
    |olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |devxpain
    devxpain
    |snoyiatk
    snoyiatk
    | +|GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    |Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    | +|mdp
    mdp
    |napter
    napter
    |philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    | +|GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    |hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    | +|kinandan
    kinandan
    |nevermorec
    nevermorec
    |bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    | +|Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    |zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    | +|maekawataiki
    maekawataiki
    |AlexandruSmirnov
    AlexandruSmirnov
    |PretzelVector
    PretzelVector
    |zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    | +|takakoutso
    takakoutso
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    |shaybc
    shaybc
    |seedlord
    seedlord
    | +|samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    |pokutuna
    pokutuna
    |philipnext
    philipnext
    | +|village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    |moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    | +|mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    |celestial-vault
    celestial-vault
    |linegel
    linegel
    | +|edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    |dleen
    dleen
    | +|CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    |benashby
    benashby
    |Atlogit
    Atlogit
    | +|atlasgong
    atlasgong
    |andrewshu2000
    andrewshu2000
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    |HadesArchitect
    HadesArchitect
    | +|alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    |AMHesch
    AMHesch
    | +|samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    |R-omk
    R-omk
    |Sarke
    Sarke
    | +|PaperBoardOfficial
    PaperBoardOfficial
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    |marvijo-code
    marvijo-code
    | +|markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    |Rexarrior
    Rexarrior
    | +|KevinZhao
    KevinZhao
    |ksze
    ksze
    |Fovty
    Fovty
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    | +|pfitz
    pfitz
    |ExactDoug
    ExactDoug
    | | | | | ## Lisans diff --git a/locales/vi/README.md b/locales/vi/README.md index 3f21cb272910..9bc0a1a0a30f 100644 --- a/locales/vi/README.md +++ b/locales/vi/README.md @@ -50,13 +50,13 @@ Kiểm tra [CHANGELOG](../../CHANGELOG.md) để biết thông tin chi tiết v --- -## 🎉 Đã Phát Hành Roo Code 3.21 +## 🎉 Đã Phát Hành Roo Code 3.22 -Roo Code 3.21 giới thiệu marketplace thử nghiệm và cải tiến các thao tác tập tin! +Roo Code 3.22 mang đến những tính năng mới mạnh mẽ và cải tiến đáng kể để nâng cao quy trình phát triển của bạn! -- **Marketplace hiện đã hoạt động! Marketplace hiện đã hoạt động!** Khám phá và cài đặt các chế độ và MCP dễ dàng hơn bao giờ hết. -- **Đã thêm hỗ trợ cho các mô hình Gemini 2.5 Pro, Flash và Flash Lite mới.** -- **Hỗ trợ tập tin Excel và nhiều hơn nữa!** - Các điều khiển Mermaid mới và hỗ trợ suy nghĩ Amazon Bedrock cho khả năng MCP nâng cao. +- **Chia Sẻ Tác Vụ 1-Cú Nhấp** - Chia sẻ tác vụ của bạn ngay lập tức với đồng nghiệp và cộng đồng chỉ bằng một cú nhấp. +- **Hỗ Trợ Thư Mục .roo Toàn Cục** - Tải quy tắc và cấu hình từ thư mục .roo toàn cục để có cài đặt nhất quán giữa các dự án. +- **Cải Thiện Chuyển Đổi từ Architect sang Code** - Chuyển giao liền mạch từ lập kế hoạch trong chế độ Architect sang triển khai trong chế độ Code. --- @@ -184,38 +184,39 @@ Cảm ơn tất cả những người đóng góp đã giúp cải thiện Roo C |mrubens
    mrubens
    |saoudrizwan
    saoudrizwan
    |cte
    cte
    |samhvw8
    samhvw8
    |daniel-lxs
    daniel-lxs
    |hannesrudolph
    hannesrudolph
    | |:---:|:---:|:---:|:---:|:---:|:---:| |KJ7LNW
    KJ7LNW
    |a8trejo
    a8trejo
    |ColemanRoo
    ColemanRoo
    |canrobins13
    canrobins13
    |stea9499
    stea9499
    |joemanley201
    joemanley201
    | -|System233
    System233
    |jr
    jr
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    |MuriloFP
    MuriloFP
    | -|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |feifei325
    feifei325
    | -|qdaxb
    qdaxb
    |zhangtony239
    zhangtony239
    |cannuri
    cannuri
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    | -|dtrugman
    dtrugman
    |lloydchang
    lloydchang
    |pugazhendhi-m
    pugazhendhi-m
    |shariqriazz
    shariqriazz
    |vigneshsubbiah16
    vigneshsubbiah16
    |chrarnoldus
    chrarnoldus
    | -|Szpadel
    Szpadel
    |lupuletic
    lupuletic
    |kiwina
    kiwina
    |Premshay
    Premshay
    |psv2522
    psv2522
    |olweraltuve
    olweraltuve
    | -|diarmidmackenzie
    diarmidmackenzie
    |PeterDaveHello
    PeterDaveHello
    |aheizi
    aheizi
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    |nbihan-mediware
    nbihan-mediware
    | -|RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |StevenTCramer
    StevenTCramer
    |SannidhyaSah
    SannidhyaSah
    |pdecat
    pdecat
    |noritaka1166
    noritaka1166
    | -|kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    |slytechnical
    slytechnical
    | -|dleffel
    dleffel
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | +|System233
    System233
    |jr
    jr
    |MuriloFP
    MuriloFP
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    | +|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |qdaxb
    qdaxb
    | +|feifei325
    feifei325
    |zhangtony239
    zhangtony239
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |cannuri
    cannuri
    | +|vigneshsubbiah16
    vigneshsubbiah16
    |shariqriazz
    shariqriazz
    |pugazhendhi-m
    pugazhendhi-m
    |lloydchang
    lloydchang
    |dtrugman
    dtrugman
    |chrarnoldus
    chrarnoldus
    | +|Szpadel
    Szpadel
    |diarmidmackenzie
    diarmidmackenzie
    |olweraltuve
    olweraltuve
    |psv2522
    psv2522
    |Premshay
    Premshay
    |kiwina
    kiwina
    | +|lupuletic
    lupuletic
    |aheizi
    aheizi
    |SannidhyaSah
    SannidhyaSah
    |PeterDaveHello
    PeterDaveHello
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    | +|nbihan-mediware
    nbihan-mediware
    |RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |dleffel
    dleffel
    |StevenTCramer
    StevenTCramer
    |pdecat
    pdecat
    | +|noritaka1166
    noritaka1166
    |kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    | +|slytechnical
    slytechnical
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | |Ruakij
    Ruakij
    |p12tic
    p12tic
    |gtaylor
    gtaylor
    |aitoroses
    aitoroses
    |axkirillov
    axkirillov
    |ross
    ross
    | -|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    |eonghk
    eonghk
    | -|kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    |zeozeozeo
    zeozeozeo
    | -|ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    |bramburn
    bramburn
    | -|olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |snoyiatk
    snoyiatk
    |GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    | -|Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    |mdp
    mdp
    |napter
    napter
    | -|philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    |GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    | -|hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    |kinandan
    kinandan
    |nevermorec
    nevermorec
    | -|bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    |Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    | -|zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    |maekawataiki
    maekawataiki
    |PretzelVector
    PretzelVector
    | -|zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    | -|shaybc
    shaybc
    |seedlord
    seedlord
    |samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    | -|pokutuna
    pokutuna
    |philipnext
    philipnext
    |village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    | -|moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    |mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    | -|linegel
    linegel
    |edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    | -|dleen
    dleen
    |devxpain
    devxpain
    |CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    | -|benashby
    benashby
    |Atlogit
    Atlogit
    |atlasgong
    atlasgong
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    | -|HadesArchitect
    HadesArchitect
    |alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    | -|AMHesch
    AMHesch
    |AlexandruSmirnov
    AlexandruSmirnov
    |samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    | -|R-omk
    R-omk
    |Sarke
    Sarke
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    | -|marvijo-code
    marvijo-code
    |markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    | -|Rexarrior
    Rexarrior
    |ksze
    ksze
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    |pfitz
    pfitz
    | -|ExactDoug
    ExactDoug
    |celestial-vault
    celestial-vault
    | | | | | +|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |liwilliam2021
    liwilliam2021
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    | +|eonghk
    eonghk
    |kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    | +|zeozeozeo
    zeozeozeo
    |ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    | +|bramburn
    bramburn
    |olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |devxpain
    devxpain
    |snoyiatk
    snoyiatk
    | +|GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    |Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    | +|mdp
    mdp
    |napter
    napter
    |philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    | +|GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    |hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    | +|kinandan
    kinandan
    |nevermorec
    nevermorec
    |bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    | +|Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    |zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    | +|maekawataiki
    maekawataiki
    |AlexandruSmirnov
    AlexandruSmirnov
    |PretzelVector
    PretzelVector
    |zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    | +|takakoutso
    takakoutso
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    |shaybc
    shaybc
    |seedlord
    seedlord
    | +|samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    |pokutuna
    pokutuna
    |philipnext
    philipnext
    | +|village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    |moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    | +|mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    |celestial-vault
    celestial-vault
    |linegel
    linegel
    | +|edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    |dleen
    dleen
    | +|CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    |benashby
    benashby
    |Atlogit
    Atlogit
    | +|atlasgong
    atlasgong
    |andrewshu2000
    andrewshu2000
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    |HadesArchitect
    HadesArchitect
    | +|alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    |AMHesch
    AMHesch
    | +|samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    |R-omk
    R-omk
    |Sarke
    Sarke
    | +|PaperBoardOfficial
    PaperBoardOfficial
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    |marvijo-code
    marvijo-code
    | +|markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    |Rexarrior
    Rexarrior
    | +|KevinZhao
    KevinZhao
    |ksze
    ksze
    |Fovty
    Fovty
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    | +|pfitz
    pfitz
    |ExactDoug
    ExactDoug
    | | | | | ## Giấy Phép diff --git a/locales/zh-CN/README.md b/locales/zh-CN/README.md index b6eb4e8ab543..c8e06ba65bb5 100644 --- a/locales/zh-CN/README.md +++ b/locales/zh-CN/README.md @@ -50,13 +50,13 @@ --- -## 🎉 Roo Code 3.21 已发布 +## 🎉 Roo Code 3.22 已发布 -Roo Code 3.21 根据您的反馈带来重要的新功能和改进! +Roo Code 3.22 带来强大的新功能和重大改进,提升您的开发工作流程! -- **市场现已上线!市场现已上线!** 从新市场发现和安装模式和 MCP 比以往更容易(在实验性设置中启用)。 -- **新增 Gemini 2.5 Pro、Flash 和 Flash Lite 模型支持。** 多个并发文件写入现在在实验性设置中可用,多个并发读取已从实验性功能毕业,现在位于上下文设置中。 -- **Excel 文件支持及更多功能!** - 增强的 MCP 支持、更多 Mermaid 控件、Amazon Bedrock 中的思考支持,以及更多功能! +- **一键任务分享** - 一键即可与同事和社区分享您的任务。 +- **全局 .roo 目录支持** - 从全局 .roo 目录加载规则和配置,确保项目间设置一致。 +- **改进的架构师到代码模式转换** - 从架构师模式的规划到代码模式的实现,实现无缝交接。 --- @@ -184,38 +184,39 @@ code --install-extension bin/roo-cline-.vsix |mrubens
    mrubens
    |saoudrizwan
    saoudrizwan
    |cte
    cte
    |samhvw8
    samhvw8
    |daniel-lxs
    daniel-lxs
    |hannesrudolph
    hannesrudolph
    | |:---:|:---:|:---:|:---:|:---:|:---:| |KJ7LNW
    KJ7LNW
    |a8trejo
    a8trejo
    |ColemanRoo
    ColemanRoo
    |canrobins13
    canrobins13
    |stea9499
    stea9499
    |joemanley201
    joemanley201
    | -|System233
    System233
    |jr
    jr
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    |MuriloFP
    MuriloFP
    | -|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |feifei325
    feifei325
    | -|qdaxb
    qdaxb
    |zhangtony239
    zhangtony239
    |cannuri
    cannuri
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    | -|dtrugman
    dtrugman
    |lloydchang
    lloydchang
    |pugazhendhi-m
    pugazhendhi-m
    |shariqriazz
    shariqriazz
    |vigneshsubbiah16
    vigneshsubbiah16
    |chrarnoldus
    chrarnoldus
    | -|Szpadel
    Szpadel
    |lupuletic
    lupuletic
    |kiwina
    kiwina
    |Premshay
    Premshay
    |psv2522
    psv2522
    |olweraltuve
    olweraltuve
    | -|diarmidmackenzie
    diarmidmackenzie
    |PeterDaveHello
    PeterDaveHello
    |aheizi
    aheizi
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    |nbihan-mediware
    nbihan-mediware
    | -|RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |StevenTCramer
    StevenTCramer
    |SannidhyaSah
    SannidhyaSah
    |pdecat
    pdecat
    |noritaka1166
    noritaka1166
    | -|kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    |slytechnical
    slytechnical
    | -|dleffel
    dleffel
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | +|System233
    System233
    |jr
    jr
    |MuriloFP
    MuriloFP
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    | +|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |qdaxb
    qdaxb
    | +|feifei325
    feifei325
    |zhangtony239
    zhangtony239
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |cannuri
    cannuri
    | +|vigneshsubbiah16
    vigneshsubbiah16
    |shariqriazz
    shariqriazz
    |pugazhendhi-m
    pugazhendhi-m
    |lloydchang
    lloydchang
    |dtrugman
    dtrugman
    |chrarnoldus
    chrarnoldus
    | +|Szpadel
    Szpadel
    |diarmidmackenzie
    diarmidmackenzie
    |olweraltuve
    olweraltuve
    |psv2522
    psv2522
    |Premshay
    Premshay
    |kiwina
    kiwina
    | +|lupuletic
    lupuletic
    |aheizi
    aheizi
    |SannidhyaSah
    SannidhyaSah
    |PeterDaveHello
    PeterDaveHello
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    | +|nbihan-mediware
    nbihan-mediware
    |RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |dleffel
    dleffel
    |StevenTCramer
    StevenTCramer
    |pdecat
    pdecat
    | +|noritaka1166
    noritaka1166
    |kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    | +|slytechnical
    slytechnical
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | |Ruakij
    Ruakij
    |p12tic
    p12tic
    |gtaylor
    gtaylor
    |aitoroses
    aitoroses
    |axkirillov
    axkirillov
    |ross
    ross
    | -|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    |eonghk
    eonghk
    | -|kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    |zeozeozeo
    zeozeozeo
    | -|ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    |bramburn
    bramburn
    | -|olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |snoyiatk
    snoyiatk
    |GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    | -|Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    |mdp
    mdp
    |napter
    napter
    | -|philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    |GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    | -|hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    |kinandan
    kinandan
    |nevermorec
    nevermorec
    | -|bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    |Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    | -|zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    |maekawataiki
    maekawataiki
    |PretzelVector
    PretzelVector
    | -|zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    | -|shaybc
    shaybc
    |seedlord
    seedlord
    |samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    | -|pokutuna
    pokutuna
    |philipnext
    philipnext
    |village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    | -|moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    |mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    | -|linegel
    linegel
    |edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    | -|dleen
    dleen
    |devxpain
    devxpain
    |CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    | -|benashby
    benashby
    |Atlogit
    Atlogit
    |atlasgong
    atlasgong
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    | -|HadesArchitect
    HadesArchitect
    |alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    | -|AMHesch
    AMHesch
    |AlexandruSmirnov
    AlexandruSmirnov
    |samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    | -|R-omk
    R-omk
    |Sarke
    Sarke
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    | -|marvijo-code
    marvijo-code
    |markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    | -|Rexarrior
    Rexarrior
    |ksze
    ksze
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    |pfitz
    pfitz
    | -|ExactDoug
    ExactDoug
    |celestial-vault
    celestial-vault
    | | | | | +|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |liwilliam2021
    liwilliam2021
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    | +|eonghk
    eonghk
    |kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    | +|zeozeozeo
    zeozeozeo
    |ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    | +|bramburn
    bramburn
    |olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |devxpain
    devxpain
    |snoyiatk
    snoyiatk
    | +|GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    |Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    | +|mdp
    mdp
    |napter
    napter
    |philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    | +|GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    |hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    | +|kinandan
    kinandan
    |nevermorec
    nevermorec
    |bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    | +|Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    |zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    | +|maekawataiki
    maekawataiki
    |AlexandruSmirnov
    AlexandruSmirnov
    |PretzelVector
    PretzelVector
    |zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    | +|takakoutso
    takakoutso
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    |shaybc
    shaybc
    |seedlord
    seedlord
    | +|samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    |pokutuna
    pokutuna
    |philipnext
    philipnext
    | +|village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    |moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    | +|mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    |celestial-vault
    celestial-vault
    |linegel
    linegel
    | +|edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    |dleen
    dleen
    | +|CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    |benashby
    benashby
    |Atlogit
    Atlogit
    | +|atlasgong
    atlasgong
    |andrewshu2000
    andrewshu2000
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    |HadesArchitect
    HadesArchitect
    | +|alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    |AMHesch
    AMHesch
    | +|samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    |R-omk
    R-omk
    |Sarke
    Sarke
    | +|PaperBoardOfficial
    PaperBoardOfficial
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    |marvijo-code
    marvijo-code
    | +|markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    |Rexarrior
    Rexarrior
    | +|KevinZhao
    KevinZhao
    |ksze
    ksze
    |Fovty
    Fovty
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    | +|pfitz
    pfitz
    |ExactDoug
    ExactDoug
    | | | | | ## 许可证 diff --git a/locales/zh-TW/README.md b/locales/zh-TW/README.md index cc4c2f67c70b..770d0303a517 100644 --- a/locales/zh-TW/README.md +++ b/locales/zh-TW/README.md @@ -51,13 +51,13 @@ --- -## 🎉 Roo Code 3.21 已發布 +## 🎉 Roo Code 3.22 已發布 -Roo Code 3.21 推出實驗性市集和檔案操作改進! +Roo Code 3.22 帶來強大的新功能和重大改進,以提升您的開發工作流程! -- **市集現已上線!市集現已上線!** 從新市集探索並安裝模式和 MCP 比以往更容易(在實驗性設定中啟用)。 -- **新增 Gemini 2.5 Pro、Flash 和 Flash Lite 模型支援。** 多重同時檔案寫入現在可在實驗性設定中使用,多重同時讀取已移至上下文設定。 -- **Excel 檔案支援及更多功能!** - 新的 Mermaid 控制項和 Amazon Bedrock 思考支援,提供增強的 MCP 功能。 +- **一鍵任務分享** - 只需一鍵即可立即與同事和社群分享您的任務。 +- **全域 .roo 目錄支援** - 從全域 .roo 目錄載入規則和設定,確保專案間設定的一致性。 +- **改進的架構師到程式碼轉換** - 從架構師模式的規劃到程式碼模式的實作,實現無縫交接。 --- @@ -185,38 +185,39 @@ code --install-extension bin/roo-cline-.vsix |mrubens
    mrubens
    |saoudrizwan
    saoudrizwan
    |cte
    cte
    |samhvw8
    samhvw8
    |daniel-lxs
    daniel-lxs
    |hannesrudolph
    hannesrudolph
    | |:---:|:---:|:---:|:---:|:---:|:---:| |KJ7LNW
    KJ7LNW
    |a8trejo
    a8trejo
    |ColemanRoo
    ColemanRoo
    |canrobins13
    canrobins13
    |stea9499
    stea9499
    |joemanley201
    joemanley201
    | -|System233
    System233
    |jr
    jr
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    |MuriloFP
    MuriloFP
    | -|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |feifei325
    feifei325
    | -|qdaxb
    qdaxb
    |zhangtony239
    zhangtony239
    |cannuri
    cannuri
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    | -|dtrugman
    dtrugman
    |lloydchang
    lloydchang
    |pugazhendhi-m
    pugazhendhi-m
    |shariqriazz
    shariqriazz
    |vigneshsubbiah16
    vigneshsubbiah16
    |chrarnoldus
    chrarnoldus
    | -|Szpadel
    Szpadel
    |lupuletic
    lupuletic
    |kiwina
    kiwina
    |Premshay
    Premshay
    |psv2522
    psv2522
    |olweraltuve
    olweraltuve
    | -|diarmidmackenzie
    diarmidmackenzie
    |PeterDaveHello
    PeterDaveHello
    |aheizi
    aheizi
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    |nbihan-mediware
    nbihan-mediware
    | -|RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |StevenTCramer
    StevenTCramer
    |SannidhyaSah
    SannidhyaSah
    |pdecat
    pdecat
    |noritaka1166
    noritaka1166
    | -|kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    |slytechnical
    slytechnical
    | -|dleffel
    dleffel
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | +|System233
    System233
    |jr
    jr
    |MuriloFP
    MuriloFP
    |nissa-seru
    nissa-seru
    |jquanton
    jquanton
    |NyxJae
    NyxJae
    | +|elianiva
    elianiva
    |d-oit
    d-oit
    |punkpeye
    punkpeye
    |wkordalski
    wkordalski
    |xyOz-dev
    xyOz-dev
    |qdaxb
    qdaxb
    | +|feifei325
    feifei325
    |zhangtony239
    zhangtony239
    |Smartsheet-JB-Brown
    Smartsheet-JB-Brown
    |monotykamary
    monotykamary
    |sachasayan
    sachasayan
    |cannuri
    cannuri
    | +|vigneshsubbiah16
    vigneshsubbiah16
    |shariqriazz
    shariqriazz
    |pugazhendhi-m
    pugazhendhi-m
    |lloydchang
    lloydchang
    |dtrugman
    dtrugman
    |chrarnoldus
    chrarnoldus
    | +|Szpadel
    Szpadel
    |diarmidmackenzie
    diarmidmackenzie
    |olweraltuve
    olweraltuve
    |psv2522
    psv2522
    |Premshay
    Premshay
    |kiwina
    kiwina
    | +|lupuletic
    lupuletic
    |aheizi
    aheizi
    |SannidhyaSah
    SannidhyaSah
    |PeterDaveHello
    PeterDaveHello
    |hassoncs
    hassoncs
    |ChuKhaLi
    ChuKhaLi
    | +|nbihan-mediware
    nbihan-mediware
    |RaySinner
    RaySinner
    |afshawnlotfi
    afshawnlotfi
    |dleffel
    dleffel
    |StevenTCramer
    StevenTCramer
    |pdecat
    pdecat
    | +|noritaka1166
    noritaka1166
    |kyle-apex
    kyle-apex
    |emshvac
    emshvac
    |Lunchb0ne
    Lunchb0ne
    |SmartManoj
    SmartManoj
    |vagadiya
    vagadiya
    | +|slytechnical
    slytechnical
    |arthurauffray
    arthurauffray
    |upamune
    upamune
    |NamesMT
    NamesMT
    |taylorwilsdon
    taylorwilsdon
    |sammcj
    sammcj
    | |Ruakij
    Ruakij
    |p12tic
    p12tic
    |gtaylor
    gtaylor
    |aitoroses
    aitoroses
    |axkirillov
    axkirillov
    |ross
    ross
    | -|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    |eonghk
    eonghk
    | -|kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    |zeozeozeo
    zeozeozeo
    | -|ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    |bramburn
    bramburn
    | -|olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |snoyiatk
    snoyiatk
    |GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    | -|Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    |mdp
    mdp
    |napter
    napter
    | -|philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    |GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    | -|hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    |kinandan
    kinandan
    |nevermorec
    nevermorec
    | -|bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    |Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    | -|zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    |maekawataiki
    maekawataiki
    |PretzelVector
    PretzelVector
    | -|zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    | -|shaybc
    shaybc
    |seedlord
    seedlord
    |samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    | -|pokutuna
    pokutuna
    |philipnext
    philipnext
    |village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    | -|moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    |mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    | -|linegel
    linegel
    |edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    | -|dleen
    dleen
    |devxpain
    devxpain
    |CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    | -|benashby
    benashby
    |Atlogit
    Atlogit
    |atlasgong
    atlasgong
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    | -|HadesArchitect
    HadesArchitect
    |alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    | -|AMHesch
    AMHesch
    |AlexandruSmirnov
    AlexandruSmirnov
    |samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    | -|R-omk
    R-omk
    |Sarke
    Sarke
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    | -|marvijo-code
    marvijo-code
    |markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    | -|Rexarrior
    Rexarrior
    |ksze
    ksze
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    |pfitz
    pfitz
    | -|ExactDoug
    ExactDoug
    |celestial-vault
    celestial-vault
    | | | | | +|mr-ryan-james
    mr-ryan-james
    |heyseth
    heyseth
    |taisukeoe
    taisukeoe
    |liwilliam2021
    liwilliam2021
    |avtc
    avtc
    |dlab-anton
    dlab-anton
    | +|eonghk
    eonghk
    |kcwhite
    kcwhite
    |ronyblum
    ronyblum
    |teddyOOXX
    teddyOOXX
    |vincentsong
    vincentsong
    |yongjer
    yongjer
    | +|zeozeozeo
    zeozeozeo
    |ashktn
    ashktn
    |franekp
    franekp
    |yt3trees
    yt3trees
    |benzntech
    benzntech
    |anton-otee
    anton-otee
    | +|bramburn
    bramburn
    |olearycrew
    olearycrew
    |brunobergher
    brunobergher
    |catrielmuller
    catrielmuller
    |devxpain
    devxpain
    |snoyiatk
    snoyiatk
    | +|GitlyHallows
    GitlyHallows
    |jcbdev
    jcbdev
    |Chenjiayuan195
    Chenjiayuan195
    |julionav
    julionav
    |KanTakahiro
    KanTakahiro
    |SplittyDev
    SplittyDev
    | +|mdp
    mdp
    |napter
    napter
    |philfung
    philfung
    |dairui1
    dairui1
    |dqroid
    dqroid
    |forestyoo
    forestyoo
    | +|GOODBOY008
    GOODBOY008
    |hatsu38
    hatsu38
    |hongzio
    hongzio
    |im47cn
    im47cn
    |shoopapa
    shoopapa
    |jwcraig
    jwcraig
    | +|kinandan
    kinandan
    |nevermorec
    nevermorec
    |bannzai
    bannzai
    |axmo
    axmo
    |asychin
    asychin
    |amittell
    amittell
    | +|Yoshino-Yukitaro
    Yoshino-Yukitaro
    |Yikai-Liao
    Yikai-Liao
    |zxdvd
    zxdvd
    |vladstudio
    vladstudio
    |tmsjngx0
    tmsjngx0
    |tgfjt
    tgfjt
    | +|maekawataiki
    maekawataiki
    |AlexandruSmirnov
    AlexandruSmirnov
    |PretzelVector
    PretzelVector
    |zetaloop
    zetaloop
    |cdlliuy
    cdlliuy
    |user202729
    user202729
    | +|takakoutso
    takakoutso
    |student20880
    student20880
    |shohei-ihaya
    shohei-ihaya
    |shivamd1810
    shivamd1810
    |shaybc
    shaybc
    |seedlord
    seedlord
    | +|samir-nimbly
    samir-nimbly
    |robertheadley
    robertheadley
    |refactorthis
    refactorthis
    |qingyuan1109
    qingyuan1109
    |pokutuna
    pokutuna
    |philipnext
    philipnext
    | +|village-way
    village-way
    |oprstchn
    oprstchn
    |nobu007
    nobu007
    |mosleyit
    mosleyit
    |moqimoqidea
    moqimoqidea
    |mlopezr
    mlopezr
    | +|mecab
    mecab
    |olup
    olup
    |lightrabbit
    lightrabbit
    |kohii
    kohii
    |celestial-vault
    celestial-vault
    |linegel
    linegel
    | +|edwin-truthsearch-io
    edwin-truthsearch-io
    |EamonNerbonne
    EamonNerbonne
    |dbasclpy
    dbasclpy
    |dflatline
    dflatline
    |Deon588
    Deon588
    |dleen
    dleen
    | +|CW-B-W
    CW-B-W
    |chadgauth
    chadgauth
    |thecolorblue
    thecolorblue
    |bogdan0083
    bogdan0083
    |benashby
    benashby
    |Atlogit
    Atlogit
    | +|atlasgong
    atlasgong
    |andrewshu2000
    andrewshu2000
    |andreastempsch
    andreastempsch
    |alasano
    alasano
    |QuinsZouls
    QuinsZouls
    |HadesArchitect
    HadesArchitect
    | +|alarno
    alarno
    |nexon33
    nexon33
    |adilhafeez
    adilhafeez
    |adamwlarson
    adamwlarson
    |adamhill
    adamhill
    |AMHesch
    AMHesch
    | +|samsilveira
    samsilveira
    |01Rian
    01Rian
    |RSO
    RSO
    |SECKainersdorfer
    SECKainersdorfer
    |R-omk
    R-omk
    |Sarke
    Sarke
    | +|PaperBoardOfficial
    PaperBoardOfficial
    |OlegOAndreev
    OlegOAndreev
    |kvokka
    kvokka
    |ecmasx
    ecmasx
    |mollux
    mollux
    |marvijo-code
    marvijo-code
    | +|markijbema
    markijbema
    |mamertofabian
    mamertofabian
    |monkeyDluffy6017
    monkeyDluffy6017
    |libertyteeth
    libertyteeth
    |shtse8
    shtse8
    |Rexarrior
    Rexarrior
    | +|KevinZhao
    KevinZhao
    |ksze
    ksze
    |Fovty
    Fovty
    |Jdo300
    Jdo300
    |hesara
    hesara
    |DeXtroTip
    DeXtroTip
    | +|pfitz
    pfitz
    |ExactDoug
    ExactDoug
    | | | | | ## 授權 diff --git a/package.json b/package.json index 074189502952..61f1f6cdaf91 100644 --- a/package.json +++ b/package.json @@ -19,6 +19,7 @@ "vsix": "turbo vsix --log-order grouped --output-logs new-only", "vsix:nightly": "turbo vsix:nightly --log-order grouped --output-logs new-only", "clean": "turbo clean --log-order grouped --output-logs new-only && rimraf dist out bin .vite-port .turbo", + "install:vsix": "pnpm install --frozen-lockfile && pnpm clean && pnpm vsix && node scripts/install-vsix.js", "changeset:version": "cp CHANGELOG.md src/CHANGELOG.md && changeset version && cp -vf src/CHANGELOG.md .", "knip": "knip --include files", "update-contributors": "node scripts/update-contributors.js", diff --git a/packages/cloud/src/CloudService.ts b/packages/cloud/src/CloudService.ts index 12aafee9dcc2..32ea443cd6f8 100644 --- a/packages/cloud/src/CloudService.ts +++ b/packages/cloud/src/CloudService.ts @@ -10,8 +10,11 @@ import type { import { TelemetryService } from "@roo-code/telemetry" import { CloudServiceCallbacks } from "./types" -import { AuthService } from "./AuthService" -import { SettingsService } from "./SettingsService" +import type { AuthService } from "./auth" +import { WebAuthService, StaticTokenAuthService } from "./auth" +import type { SettingsService } from "./SettingsService" +import { CloudSettingsService } from "./CloudSettingsService" +import { StaticSettingsService } from "./StaticSettingsService" import { TelemetryClient } from "./TelemetryClient" import { ShareService, TaskNotFoundError } from "./ShareService" @@ -43,7 +46,13 @@ export class CloudService { } try { - this.authService = new AuthService(this.context, this.log) + const cloudToken = process.env.ROO_CODE_CLOUD_TOKEN + if (cloudToken && cloudToken.length > 0) { + this.authService = new StaticTokenAuthService(this.context, cloudToken, this.log) + } else { + this.authService = new WebAuthService(this.context, this.log) + } + await this.authService.initialize() this.authService.on("attempting-session", this.authListener) @@ -52,13 +61,20 @@ export class CloudService { this.authService.on("logged-out", this.authListener) this.authService.on("user-info", this.authListener) - this.settingsService = new SettingsService( - this.context, - this.authService, - () => this.callbacks.stateChanged?.(), - this.log, - ) - this.settingsService.initialize() + // Check for static settings environment variable + const staticOrgSettings = process.env.ROO_CODE_CLOUD_ORG_SETTINGS + if (staticOrgSettings && staticOrgSettings.length > 0) { + this.settingsService = new StaticSettingsService(staticOrgSettings, this.log) + } else { + const cloudSettingsService = new CloudSettingsService( + this.context, + this.authService, + () => this.callbacks.stateChanged?.(), + this.log, + ) + cloudSettingsService.initialize() + this.settingsService = cloudSettingsService + } this.telemetryClient = new TelemetryClient(this.authService, this.settingsService) diff --git a/packages/cloud/src/CloudSettingsService.ts b/packages/cloud/src/CloudSettingsService.ts new file mode 100644 index 000000000000..6692d8141d8f --- /dev/null +++ b/packages/cloud/src/CloudSettingsService.ts @@ -0,0 +1,136 @@ +import * as vscode from "vscode" + +import { + ORGANIZATION_ALLOW_ALL, + OrganizationAllowList, + OrganizationSettings, + organizationSettingsSchema, +} from "@roo-code/types" + +import { getRooCodeApiUrl } from "./Config" +import type { AuthService } from "./auth" +import { RefreshTimer } from "./RefreshTimer" +import type { SettingsService } 
from "./SettingsService" + +const ORGANIZATION_SETTINGS_CACHE_KEY = "organization-settings" + +export class CloudSettingsService implements SettingsService { + private context: vscode.ExtensionContext + private authService: AuthService + private settings: OrganizationSettings | undefined = undefined + private timer: RefreshTimer + private log: (...args: unknown[]) => void + + constructor( + context: vscode.ExtensionContext, + authService: AuthService, + callback: () => void, + log?: (...args: unknown[]) => void, + ) { + this.context = context + this.authService = authService + this.log = log || console.log + + this.timer = new RefreshTimer({ + callback: async () => { + return await this.fetchSettings(callback) + }, + successInterval: 30000, + initialBackoffMs: 1000, + maxBackoffMs: 30000, + }) + } + + public initialize(): void { + this.loadCachedSettings() + + // Clear cached settings if we have missed a log out. + if (this.authService.getState() == "logged-out" && this.settings) { + this.removeSettings() + } + + this.authService.on("active-session", () => { + this.timer.start() + }) + + this.authService.on("logged-out", () => { + this.timer.stop() + this.removeSettings() + }) + + if (this.authService.hasActiveSession()) { + this.timer.start() + } + } + + private async fetchSettings(callback: () => void): Promise { + const token = this.authService.getSessionToken() + + if (!token) { + return false + } + + try { + const response = await fetch(`${getRooCodeApiUrl()}/api/organization-settings`, { + headers: { + Authorization: `Bearer ${token}`, + }, + }) + + if (!response.ok) { + this.log( + "[cloud-settings] Failed to fetch organization settings:", + response.status, + response.statusText, + ) + return false + } + + const data = await response.json() + const result = organizationSettingsSchema.safeParse(data) + + if (!result.success) { + this.log("[cloud-settings] Invalid organization settings format:", result.error) + return false + } + + const newSettings = result.data + + if (!this.settings || this.settings.version !== newSettings.version) { + this.settings = newSettings + await this.cacheSettings() + callback() + } + + return true + } catch (error) { + this.log("[cloud-settings] Error fetching organization settings:", error) + return false + } + } + + private async cacheSettings(): Promise { + await this.context.globalState.update(ORGANIZATION_SETTINGS_CACHE_KEY, this.settings) + } + + private loadCachedSettings(): void { + this.settings = this.context.globalState.get(ORGANIZATION_SETTINGS_CACHE_KEY) + } + + public getAllowList(): OrganizationAllowList { + return this.settings?.allowList || ORGANIZATION_ALLOW_ALL + } + + public getSettings(): OrganizationSettings | undefined { + return this.settings + } + + private async removeSettings(): Promise { + this.settings = undefined + await this.cacheSettings() + } + + public dispose(): void { + this.timer.stop() + } +} diff --git a/packages/cloud/src/SettingsService.ts b/packages/cloud/src/SettingsService.ts index 68c6f2fe486a..c1027dc25cb4 100644 --- a/packages/cloud/src/SettingsService.ts +++ b/packages/cloud/src/SettingsService.ts @@ -1,135 +1,23 @@ -import * as vscode from "vscode" - -import { - ORGANIZATION_ALLOW_ALL, - OrganizationAllowList, - OrganizationSettings, - organizationSettingsSchema, -} from "@roo-code/types" - -import { getRooCodeApiUrl } from "./Config" -import { AuthService } from "./AuthService" -import { RefreshTimer } from "./RefreshTimer" - -const ORGANIZATION_SETTINGS_CACHE_KEY = "organization-settings" - -export class 
SettingsService { - private context: vscode.ExtensionContext - private authService: AuthService - private settings: OrganizationSettings | undefined = undefined - private timer: RefreshTimer - private log: (...args: unknown[]) => void - - constructor( - context: vscode.ExtensionContext, - authService: AuthService, - callback: () => void, - log?: (...args: unknown[]) => void, - ) { - this.context = context - this.authService = authService - this.log = log || console.log - - this.timer = new RefreshTimer({ - callback: async () => { - return await this.fetchSettings(callback) - }, - successInterval: 30000, - initialBackoffMs: 1000, - maxBackoffMs: 30000, - }) - } - - public initialize(): void { - this.loadCachedSettings() - - // Clear cached settings if we have missed a log out. - if (this.authService.getState() == "logged-out" && this.settings) { - this.removeSettings() - } - - this.authService.on("active-session", () => { - this.timer.start() - }) - - this.authService.on("logged-out", () => { - this.timer.stop() - this.removeSettings() - }) - - if (this.authService.hasActiveSession()) { - this.timer.start() - } - } - - private async fetchSettings(callback: () => void): Promise { - const token = this.authService.getSessionToken() - - if (!token) { - return false - } - - try { - const response = await fetch(`${getRooCodeApiUrl()}/api/organization-settings`, { - headers: { - Authorization: `Bearer ${token}`, - }, - }) - - if (!response.ok) { - this.log( - "[cloud-settings] Failed to fetch organization settings:", - response.status, - response.statusText, - ) - return false - } - - const data = await response.json() - const result = organizationSettingsSchema.safeParse(data) - - if (!result.success) { - this.log("[cloud-settings] Invalid organization settings format:", result.error) - return false - } - - const newSettings = result.data - - if (!this.settings || this.settings.version !== newSettings.version) { - this.settings = newSettings - await this.cacheSettings() - callback() - } - - return true - } catch (error) { - this.log("[cloud-settings] Error fetching organization settings:", error) - return false - } - } - - private async cacheSettings(): Promise { - await this.context.globalState.update(ORGANIZATION_SETTINGS_CACHE_KEY, this.settings) - } - - private loadCachedSettings(): void { - this.settings = this.context.globalState.get(ORGANIZATION_SETTINGS_CACHE_KEY) - } - - public getAllowList(): OrganizationAllowList { - return this.settings?.allowList || ORGANIZATION_ALLOW_ALL - } - - public getSettings(): OrganizationSettings | undefined { - return this.settings - } - - public async removeSettings(): Promise { - this.settings = undefined - await this.cacheSettings() - } - - public dispose(): void { - this.timer.stop() - } +import type { OrganizationAllowList, OrganizationSettings } from "@roo-code/types" + +/** + * Interface for settings services that provide organization settings + */ +export interface SettingsService { + /** + * Get the organization allow list + * @returns The organization allow list or default if none available + */ + getAllowList(): OrganizationAllowList + + /** + * Get the current organization settings + * @returns The organization settings or undefined if none available + */ + getSettings(): OrganizationSettings | undefined + + /** + * Dispose of the settings service and clean up resources + */ + dispose(): void } diff --git a/packages/cloud/src/ShareService.ts b/packages/cloud/src/ShareService.ts index 07176d3e9ded..5dcc7cae3f87 100644 --- 
a/packages/cloud/src/ShareService.ts +++ b/packages/cloud/src/ShareService.ts @@ -2,7 +2,7 @@ import * as vscode from "vscode" import { shareResponseSchema } from "@roo-code/types" import { getRooCodeApiUrl } from "./Config" -import type { AuthService } from "./AuthService" +import type { AuthService } from "./auth" import type { SettingsService } from "./SettingsService" import { getUserAgent } from "./utils" diff --git a/packages/cloud/src/StaticSettingsService.ts b/packages/cloud/src/StaticSettingsService.ts new file mode 100644 index 000000000000..3aac37bda5eb --- /dev/null +++ b/packages/cloud/src/StaticSettingsService.ts @@ -0,0 +1,41 @@ +import { + ORGANIZATION_ALLOW_ALL, + OrganizationAllowList, + OrganizationSettings, + organizationSettingsSchema, +} from "@roo-code/types" + +import type { SettingsService } from "./SettingsService" + +export class StaticSettingsService implements SettingsService { + private settings: OrganizationSettings + private log: (...args: unknown[]) => void + + constructor(envValue: string, log?: (...args: unknown[]) => void) { + this.log = log || console.log + this.settings = this.parseEnvironmentSettings(envValue) + } + + private parseEnvironmentSettings(envValue: string): OrganizationSettings { + try { + const decodedValue = Buffer.from(envValue, "base64").toString("utf-8") + const parsedJson = JSON.parse(decodedValue) + return organizationSettingsSchema.parse(parsedJson) + } catch (error) { + this.log(`[StaticSettingsService] failed to parse static settings: ${error.message}`, error) + throw new Error("Failed to parse static settings", { cause: error }) + } + } + + public getAllowList(): OrganizationAllowList { + return this.settings?.allowList || ORGANIZATION_ALLOW_ALL + } + + public getSettings(): OrganizationSettings | undefined { + return this.settings + } + + public dispose(): void { + // No resources to clean up for static settings + } +} diff --git a/packages/cloud/src/TelemetryClient.ts b/packages/cloud/src/TelemetryClient.ts index ea48fcf26915..e33843a30c6d 100644 --- a/packages/cloud/src/TelemetryClient.ts +++ b/packages/cloud/src/TelemetryClient.ts @@ -7,8 +7,8 @@ import { import { BaseTelemetryClient } from "@roo-code/telemetry" import { getRooCodeApiUrl } from "./Config" -import { AuthService } from "./AuthService" -import { SettingsService } from "./SettingsService" +import type { AuthService } from "./auth" +import type { SettingsService } from "./SettingsService" export class TelemetryClient extends BaseTelemetryClient { constructor( diff --git a/packages/cloud/src/__tests__/CloudService.integration.test.ts b/packages/cloud/src/__tests__/CloudService.integration.test.ts new file mode 100644 index 000000000000..f3cef2771884 --- /dev/null +++ b/packages/cloud/src/__tests__/CloudService.integration.test.ts @@ -0,0 +1,146 @@ +// npx vitest run src/__tests__/CloudService.integration.test.ts + +import * as vscode from "vscode" +import { CloudService } from "../CloudService" +import { StaticSettingsService } from "../StaticSettingsService" +import { CloudSettingsService } from "../CloudSettingsService" + +vi.mock("vscode", () => ({ + ExtensionContext: vi.fn(), + window: { + showInformationMessage: vi.fn(), + showErrorMessage: vi.fn(), + }, + env: { + openExternal: vi.fn(), + }, + Uri: { + parse: vi.fn(), + }, +})) + +describe("CloudService Integration - Settings Service Selection", () => { + let mockContext: vscode.ExtensionContext + + beforeEach(() => { + CloudService.resetInstance() + + mockContext = { + subscriptions: [], + workspaceState: { 
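+				// Memento-style stub for workspaceState: vi.fn() placeholders stand in for get/update/keys; these integration tests only exercise service selection, not persisted workspace state.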
+ get: vi.fn(), + update: vi.fn(), + keys: vi.fn().mockReturnValue([]), + }, + secrets: { + get: vi.fn(), + store: vi.fn(), + delete: vi.fn(), + onDidChange: vi.fn().mockReturnValue({ dispose: vi.fn() }), + }, + globalState: { + get: vi.fn(), + update: vi.fn(), + setKeysForSync: vi.fn(), + keys: vi.fn().mockReturnValue([]), + }, + extensionUri: { scheme: "file", path: "/mock/path" }, + extensionPath: "/mock/path", + extensionMode: 1, + asAbsolutePath: vi.fn((relativePath: string) => `/mock/path/${relativePath}`), + storageUri: { scheme: "file", path: "/mock/storage" }, + extension: { + packageJSON: { + version: "1.0.0", + }, + }, + } as unknown as vscode.ExtensionContext + }) + + afterEach(() => { + CloudService.resetInstance() + delete process.env.ROO_CODE_CLOUD_ORG_SETTINGS + delete process.env.ROO_CODE_CLOUD_TOKEN + }) + + it("should use CloudSettingsService when no environment variable is set", async () => { + // Ensure no environment variables are set + delete process.env.ROO_CODE_CLOUD_ORG_SETTINGS + delete process.env.ROO_CODE_CLOUD_TOKEN + + const cloudService = await CloudService.createInstance(mockContext) + + // Access the private settingsService to check its type + const settingsService = (cloudService as unknown as { settingsService: unknown }).settingsService + expect(settingsService).toBeInstanceOf(CloudSettingsService) + }) + + it("should use StaticSettingsService when ROO_CODE_CLOUD_ORG_SETTINGS is set", async () => { + const validSettings = { + version: 1, + cloudSettings: { + recordTaskMessages: true, + enableTaskSharing: true, + taskShareExpirationDays: 30, + }, + defaultSettings: { + enableCheckpoints: true, + }, + allowList: { + allowAll: true, + providers: {}, + }, + } + + // Set the environment variable + process.env.ROO_CODE_CLOUD_ORG_SETTINGS = Buffer.from(JSON.stringify(validSettings)).toString("base64") + + const cloudService = await CloudService.createInstance(mockContext) + + // Access the private settingsService to check its type + const settingsService = (cloudService as unknown as { settingsService: unknown }).settingsService + expect(settingsService).toBeInstanceOf(StaticSettingsService) + + // Verify the settings are correctly loaded + expect(cloudService.getAllowList()).toEqual(validSettings.allowList) + }) + + it("should throw error when ROO_CODE_CLOUD_ORG_SETTINGS contains invalid data", async () => { + // Set invalid environment variable + process.env.ROO_CODE_CLOUD_ORG_SETTINGS = "invalid-base64-data" + + await expect(CloudService.createInstance(mockContext)).rejects.toThrow("Failed to initialize CloudService") + }) + + it("should prioritize static token auth when both environment variables are set", async () => { + const validSettings = { + version: 1, + cloudSettings: { + recordTaskMessages: true, + enableTaskSharing: true, + taskShareExpirationDays: 30, + }, + defaultSettings: { + enableCheckpoints: true, + }, + allowList: { + allowAll: true, + providers: {}, + }, + } + + // Set both environment variables + process.env.ROO_CODE_CLOUD_TOKEN = "test-token" + process.env.ROO_CODE_CLOUD_ORG_SETTINGS = Buffer.from(JSON.stringify(validSettings)).toString("base64") + + const cloudService = await CloudService.createInstance(mockContext) + + // Should use StaticSettingsService for settings + const settingsService = (cloudService as unknown as { settingsService: unknown }).settingsService + expect(settingsService).toBeInstanceOf(StaticSettingsService) + + // Should use StaticTokenAuthService for auth (from the existing logic) + 
expect(cloudService.isAuthenticated()).toBe(true) + expect(cloudService.hasActiveSession()).toBe(true) + }) +}) diff --git a/packages/cloud/src/__tests__/CloudService.test.ts b/packages/cloud/src/__tests__/CloudService.test.ts index 6ed8c9741c52..1384b6de6b61 100644 --- a/packages/cloud/src/__tests__/CloudService.test.ts +++ b/packages/cloud/src/__tests__/CloudService.test.ts @@ -4,8 +4,8 @@ import * as vscode from "vscode" import type { ClineMessage } from "@roo-code/types" import { CloudService } from "../CloudService" -import { AuthService } from "../AuthService" -import { SettingsService } from "../SettingsService" +import { WebAuthService } from "../auth/WebAuthService" +import { CloudSettingsService } from "../CloudSettingsService" import { ShareService, TaskNotFoundError } from "../ShareService" import { TelemetryClient } from "../TelemetryClient" import { TelemetryService } from "@roo-code/telemetry" @@ -27,9 +27,9 @@ vi.mock("vscode", () => ({ vi.mock("@roo-code/telemetry") -vi.mock("../AuthService") +vi.mock("../auth/WebAuthService") -vi.mock("../SettingsService") +vi.mock("../CloudSettingsService") vi.mock("../ShareService") @@ -149,8 +149,8 @@ describe("CloudService", () => { }, } - vi.mocked(AuthService).mockImplementation(() => mockAuthService as unknown as AuthService) - vi.mocked(SettingsService).mockImplementation(() => mockSettingsService as unknown as SettingsService) + vi.mocked(WebAuthService).mockImplementation(() => mockAuthService as unknown as WebAuthService) + vi.mocked(CloudSettingsService).mockImplementation(() => mockSettingsService as unknown as CloudSettingsService) vi.mocked(ShareService).mockImplementation(() => mockShareService as unknown as ShareService) vi.mocked(TelemetryClient).mockImplementation(() => mockTelemetryClient as unknown as TelemetryClient) @@ -175,8 +175,8 @@ describe("CloudService", () => { const cloudService = await CloudService.createInstance(mockContext, callbacks) expect(cloudService).toBeInstanceOf(CloudService) - expect(AuthService).toHaveBeenCalledWith(mockContext, expect.any(Function)) - expect(SettingsService).toHaveBeenCalledWith( + expect(WebAuthService).toHaveBeenCalledWith(mockContext, expect.any(Function)) + expect(CloudSettingsService).toHaveBeenCalledWith( mockContext, mockAuthService, expect.any(Function), diff --git a/packages/cloud/src/__tests__/ShareService.test.ts b/packages/cloud/src/__tests__/ShareService.test.ts index dd2e5f1ae515..dd5b66960336 100644 --- a/packages/cloud/src/__tests__/ShareService.test.ts +++ b/packages/cloud/src/__tests__/ShareService.test.ts @@ -4,7 +4,7 @@ import type { MockedFunction } from "vitest" import * as vscode from "vscode" import { ShareService, TaskNotFoundError } from "../ShareService" -import type { AuthService } from "../AuthService" +import type { AuthService } from "../auth" import type { SettingsService } from "../SettingsService" // Mock fetch diff --git a/packages/cloud/src/__tests__/StaticSettingsService.test.ts b/packages/cloud/src/__tests__/StaticSettingsService.test.ts new file mode 100644 index 000000000000..26c0ada9cd47 --- /dev/null +++ b/packages/cloud/src/__tests__/StaticSettingsService.test.ts @@ -0,0 +1,102 @@ +// npx vitest run src/__tests__/StaticSettingsService.test.ts + +import { StaticSettingsService } from "../StaticSettingsService" + +describe("StaticSettingsService", () => { + const validSettings = { + version: 1, + cloudSettings: { + recordTaskMessages: true, + enableTaskSharing: true, + taskShareExpirationDays: 30, + }, + defaultSettings: { + 
enableCheckpoints: true, + maxOpenTabsContext: 10, + }, + allowList: { + allowAll: false, + providers: { + anthropic: { + allowAll: true, + }, + }, + }, + } + + const validBase64 = Buffer.from(JSON.stringify(validSettings)).toString("base64") + + describe("constructor", () => { + it("should parse valid base64 encoded JSON settings", () => { + const service = new StaticSettingsService(validBase64) + expect(service.getSettings()).toEqual(validSettings) + }) + + it("should throw error for invalid base64", () => { + expect(() => new StaticSettingsService("invalid-base64!@#")).toThrow("Failed to parse static settings") + }) + + it("should throw error for invalid JSON", () => { + const invalidJson = Buffer.from("{ invalid json }").toString("base64") + expect(() => new StaticSettingsService(invalidJson)).toThrow("Failed to parse static settings") + }) + + it("should throw error for invalid schema", () => { + const invalidSettings = { invalid: "schema" } + const invalidBase64 = Buffer.from(JSON.stringify(invalidSettings)).toString("base64") + expect(() => new StaticSettingsService(invalidBase64)).toThrow("Failed to parse static settings") + }) + }) + + describe("getAllowList", () => { + it("should return the allow list from settings", () => { + const service = new StaticSettingsService(validBase64) + expect(service.getAllowList()).toEqual(validSettings.allowList) + }) + }) + + describe("getSettings", () => { + it("should return the parsed settings", () => { + const service = new StaticSettingsService(validBase64) + expect(service.getSettings()).toEqual(validSettings) + }) + }) + + describe("dispose", () => { + it("should be a no-op for static settings", () => { + const service = new StaticSettingsService(validBase64) + expect(() => service.dispose()).not.toThrow() + }) + }) + + describe("logging", () => { + it("should use provided logger for errors", () => { + const mockLog = vi.fn() + expect(() => new StaticSettingsService("invalid-base64!@#", mockLog)).toThrow() + + expect(mockLog).toHaveBeenCalledWith( + expect.stringContaining("[StaticSettingsService] failed to parse static settings:"), + expect.any(Error), + ) + }) + + it("should use console.log as default logger for errors", () => { + const consoleSpy = vi.spyOn(console, "log").mockImplementation(() => {}) + expect(() => new StaticSettingsService("invalid-base64!@#")).toThrow() + + expect(consoleSpy).toHaveBeenCalledWith( + expect.stringContaining("[StaticSettingsService] failed to parse static settings:"), + expect.any(Error), + ) + + consoleSpy.mockRestore() + }) + + it("should not log anything for successful parsing", () => { + const mockLog = vi.fn() + new StaticSettingsService(validBase64, mockLog) + + expect(mockLog).not.toHaveBeenCalled() + }) + }) +}) diff --git a/packages/cloud/src/__tests__/auth/StaticTokenAuthService.spec.ts b/packages/cloud/src/__tests__/auth/StaticTokenAuthService.spec.ts new file mode 100644 index 000000000000..cbf3a7b998f9 --- /dev/null +++ b/packages/cloud/src/__tests__/auth/StaticTokenAuthService.spec.ts @@ -0,0 +1,174 @@ +import { describe, it, expect, beforeEach, vi } from "vitest" +import * as vscode from "vscode" + +import { StaticTokenAuthService } from "../../auth/StaticTokenAuthService" + +// Mock vscode +vi.mock("vscode", () => ({ + window: { + showInformationMessage: vi.fn(), + }, + env: { + openExternal: vi.fn(), + uriScheme: "vscode", + }, + Uri: { + parse: vi.fn(), + }, +})) + +describe("StaticTokenAuthService", () => { + let authService: StaticTokenAuthService + let mockContext: 
vscode.ExtensionContext + let mockLog: (...args: unknown[]) => void + const testToken = "test-static-token" + + beforeEach(() => { + mockLog = vi.fn() + + // Create a minimal mock that satisfies the constructor requirements + const mockContextPartial = { + extension: { + packageJSON: { + publisher: "TestPublisher", + name: "test-extension", + }, + }, + globalState: { + get: vi.fn(), + update: vi.fn(), + }, + secrets: { + get: vi.fn(), + store: vi.fn(), + delete: vi.fn(), + onDidChange: vi.fn(), + }, + subscriptions: [], + } + + // Use type assertion for test mocking + mockContext = mockContextPartial as unknown as vscode.ExtensionContext + + authService = new StaticTokenAuthService(mockContext, testToken, mockLog) + }) + + afterEach(() => { + vi.clearAllMocks() + }) + + describe("constructor", () => { + it("should create instance and log static token mode", () => { + expect(authService).toBeInstanceOf(StaticTokenAuthService) + expect(mockLog).toHaveBeenCalledWith("[auth] Using static token authentication mode") + }) + + it("should use console.log as default logger", () => { + const serviceWithoutLog = new StaticTokenAuthService( + mockContext as unknown as vscode.ExtensionContext, + testToken, + ) + // Can't directly test console.log usage, but constructor should not throw + expect(serviceWithoutLog).toBeInstanceOf(StaticTokenAuthService) + }) + }) + + describe("initialize", () => { + it("should start in active-session state", async () => { + await authService.initialize() + expect(authService.getState()).toBe("active-session") + }) + + it("should emit active-session event on initialize", async () => { + const spy = vi.fn() + authService.on("active-session", spy) + + await authService.initialize() + + expect(spy).toHaveBeenCalledWith({ previousState: "initializing" }) + }) + + it("should log successful initialization", async () => { + await authService.initialize() + expect(mockLog).toHaveBeenCalledWith("[auth] Static token auth service initialized in active-session state") + }) + }) + + describe("getSessionToken", () => { + it("should return the provided token", () => { + expect(authService.getSessionToken()).toBe(testToken) + }) + + it("should return different token when constructed with different token", () => { + const differentToken = "different-token" + const differentService = new StaticTokenAuthService(mockContext, differentToken, mockLog) + expect(differentService.getSessionToken()).toBe(differentToken) + }) + }) + + describe("getUserInfo", () => { + it("should return empty object", () => { + expect(authService.getUserInfo()).toEqual({}) + }) + }) + + describe("getStoredOrganizationId", () => { + it("should return null", () => { + expect(authService.getStoredOrganizationId()).toBeNull() + }) + }) + + describe("authentication state methods", () => { + it("should always return true for isAuthenticated", () => { + expect(authService.isAuthenticated()).toBe(true) + }) + + it("should always return true for hasActiveSession", () => { + expect(authService.hasActiveSession()).toBe(true) + }) + + it("should always return true for hasOrIsAcquiringActiveSession", () => { + expect(authService.hasOrIsAcquiringActiveSession()).toBe(true) + }) + + it("should return active-session for getState", () => { + expect(authService.getState()).toBe("active-session") + }) + }) + + describe("disabled authentication methods", () => { + const expectedErrorMessage = "Authentication methods are disabled in StaticTokenAuthService" + + it("should throw error for login", async () => { + await 
expect(authService.login()).rejects.toThrow(expectedErrorMessage) + }) + + it("should throw error for logout", async () => { + await expect(authService.logout()).rejects.toThrow(expectedErrorMessage) + }) + + it("should throw error for handleCallback", async () => { + await expect(authService.handleCallback("code", "state")).rejects.toThrow(expectedErrorMessage) + }) + + it("should throw error for handleCallback with organization", async () => { + await expect(authService.handleCallback("code", "state", "org_123")).rejects.toThrow(expectedErrorMessage) + }) + }) + + describe("event emission", () => { + it("should be able to register and emit events", async () => { + const activeSessionSpy = vi.fn() + const userInfoSpy = vi.fn() + + authService.on("active-session", activeSessionSpy) + authService.on("user-info", userInfoSpy) + + await authService.initialize() + + expect(activeSessionSpy).toHaveBeenCalledWith({ previousState: "initializing" }) + // user-info event is not emitted in static token mode + expect(userInfoSpy).not.toHaveBeenCalled() + }) + }) +}) diff --git a/packages/cloud/src/__tests__/AuthService.spec.ts b/packages/cloud/src/__tests__/auth/WebAuthService.spec.ts similarity index 95% rename from packages/cloud/src/__tests__/AuthService.spec.ts rename to packages/cloud/src/__tests__/auth/WebAuthService.spec.ts index 944bcd2b2439..0e6681c20ba8 100644 --- a/packages/cloud/src/__tests__/AuthService.spec.ts +++ b/packages/cloud/src/__tests__/auth/WebAuthService.spec.ts @@ -4,15 +4,15 @@ import { vi, Mock, beforeEach, afterEach, describe, it, expect } from "vitest" import crypto from "crypto" import * as vscode from "vscode" -import { AuthService } from "../AuthService" -import { RefreshTimer } from "../RefreshTimer" -import * as Config from "../Config" -import * as utils from "../utils" +import { WebAuthService } from "../../auth/WebAuthService" +import { RefreshTimer } from "../../RefreshTimer" +import * as Config from "../../Config" +import * as utils from "../../utils" // Mock external dependencies -vi.mock("../RefreshTimer") -vi.mock("../Config") -vi.mock("../utils") +vi.mock("../../RefreshTimer") +vi.mock("../../Config") +vi.mock("../../utils") vi.mock("crypto") // Mock fetch globally @@ -34,8 +34,8 @@ vi.mock("vscode", () => ({ }, })) -describe("AuthService", () => { - let authService: AuthService +describe("WebAuthService", () => { + let authService: WebAuthService let mockTimer: { start: Mock stop: Mock @@ -97,7 +97,8 @@ describe("AuthService", () => { stop: vi.fn(), reset: vi.fn(), } - vi.mocked(RefreshTimer).mockImplementation(() => mockTimer as unknown as RefreshTimer) + const MockedRefreshTimer = vi.mocked(RefreshTimer) + MockedRefreshTimer.mockImplementation(() => mockTimer as unknown as RefreshTimer) // Setup config mocks - use production URL by default to maintain existing test behavior vi.mocked(Config.getClerkBaseUrl).mockReturnValue("https://clerk.roocode.com") @@ -112,7 +113,7 @@ describe("AuthService", () => { // Setup log mock mockLog = vi.fn() - authService = new AuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) + authService = new WebAuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) }) afterEach(() => { @@ -138,9 +139,9 @@ describe("AuthService", () => { }) it("should use console.log as default logger", () => { - const serviceWithoutLog = new AuthService(mockContext as unknown as vscode.ExtensionContext) + const serviceWithoutLog = new WebAuthService(mockContext as unknown as vscode.ExtensionContext) // Can't directly 
test console.log usage, but constructor should not throw - expect(serviceWithoutLog).toBeInstanceOf(AuthService) + expect(serviceWithoutLog).toBeInstanceOf(WebAuthService) }) }) @@ -434,7 +435,7 @@ describe("AuthService", () => { const credentials = { clientToken: "test-token", sessionId: "test-session" } mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) - const authenticatedService = new AuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) + const authenticatedService = new WebAuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) await authenticatedService.initialize() expect(authenticatedService.isAuthenticated()).toBe(true) @@ -460,7 +461,7 @@ describe("AuthService", () => { const credentials = { clientToken: "test-token", sessionId: "test-session" } mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) - const attemptingService = new AuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) + const attemptingService = new WebAuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) await attemptingService.initialize() expect(attemptingService.hasOrIsAcquiringActiveSession()).toBe(true) @@ -960,7 +961,7 @@ describe("AuthService", () => { // Mock getClerkBaseUrl to return production URL vi.mocked(Config.getClerkBaseUrl).mockReturnValue("https://clerk.roocode.com") - const service = new AuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) + const service = new WebAuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) const credentials = { clientToken: "test-token", sessionId: "test-session" } await service.initialize() @@ -977,7 +978,7 @@ describe("AuthService", () => { // Mock getClerkBaseUrl to return custom URL vi.mocked(Config.getClerkBaseUrl).mockReturnValue(customUrl) - const service = new AuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) + const service = new WebAuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) const credentials = { clientToken: "test-token", sessionId: "test-session" } await service.initialize() @@ -993,7 +994,7 @@ describe("AuthService", () => { const customUrl = "https://custom.clerk.com" vi.mocked(Config.getClerkBaseUrl).mockReturnValue(customUrl) - const service = new AuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) + const service = new WebAuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) const credentials = { clientToken: "test-token", sessionId: "test-session" } mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) @@ -1008,7 +1009,7 @@ describe("AuthService", () => { const customUrl = "https://custom.clerk.com" vi.mocked(Config.getClerkBaseUrl).mockReturnValue(customUrl) - const service = new AuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) + const service = new WebAuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) await service.initialize() await service["clearCredentials"]() @@ -1027,7 +1028,7 @@ describe("AuthService", () => { return { dispose: vi.fn() } }) - const service = new AuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) + const service = new WebAuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) await service.initialize() // Simulate credentials change event with scoped key @@ -1054,7 +1055,7 @@ describe("AuthService", () => { return { dispose: vi.fn() } }) - const service = new AuthService(mockContext as unknown as 
vscode.ExtensionContext, mockLog) + const service = new WebAuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) await service.initialize() const inactiveSessionSpy = vi.fn() @@ -1078,7 +1079,7 @@ describe("AuthService", () => { return { dispose: vi.fn() } }) - const service = new AuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) + const service = new WebAuthService(mockContext as unknown as vscode.ExtensionContext, mockLog) await service.initialize() const inactiveSessionSpy = vi.fn() diff --git a/packages/cloud/src/auth/AuthService.ts b/packages/cloud/src/auth/AuthService.ts new file mode 100644 index 000000000000..11ed5161edad --- /dev/null +++ b/packages/cloud/src/auth/AuthService.ts @@ -0,0 +1,33 @@ +import EventEmitter from "events" +import type { CloudUserInfo } from "@roo-code/types" + +export interface AuthServiceEvents { + "attempting-session": [data: { previousState: AuthState }] + "inactive-session": [data: { previousState: AuthState }] + "active-session": [data: { previousState: AuthState }] + "logged-out": [data: { previousState: AuthState }] + "user-info": [data: { userInfo: CloudUserInfo }] +} + +export type AuthState = "initializing" | "logged-out" | "active-session" | "attempting-session" | "inactive-session" + +export interface AuthService extends EventEmitter { + // Lifecycle + initialize(): Promise + + // Authentication methods + login(): Promise + logout(): Promise + handleCallback(code: string | null, state: string | null, organizationId?: string | null): Promise + + // State methods + getState(): AuthState + isAuthenticated(): boolean + hasActiveSession(): boolean + hasOrIsAcquiringActiveSession(): boolean + + // Token and user info + getSessionToken(): string | undefined + getUserInfo(): CloudUserInfo | null + getStoredOrganizationId(): string | null +} diff --git a/packages/cloud/src/auth/StaticTokenAuthService.ts b/packages/cloud/src/auth/StaticTokenAuthService.ts new file mode 100644 index 000000000000..11fc18d3fb2c --- /dev/null +++ b/packages/cloud/src/auth/StaticTokenAuthService.ts @@ -0,0 +1,68 @@ +import EventEmitter from "events" +import * as vscode from "vscode" +import type { CloudUserInfo } from "@roo-code/types" +import type { AuthService, AuthServiceEvents, AuthState } from "./AuthService" + +export class StaticTokenAuthService extends EventEmitter implements AuthService { + private state: AuthState = "active-session" + private token: string + private log: (...args: unknown[]) => void + + constructor(context: vscode.ExtensionContext, token: string, log?: (...args: unknown[]) => void) { + super() + this.token = token + this.log = log || console.log + this.log("[auth] Using static token authentication mode") + } + + public async initialize(): Promise { + const previousState: AuthState = "initializing" + this.state = "active-session" + this.emit("active-session", { previousState }) + this.log("[auth] Static token auth service initialized in active-session state") + } + + public async login(): Promise { + throw new Error("Authentication methods are disabled in StaticTokenAuthService") + } + + public async logout(): Promise { + throw new Error("Authentication methods are disabled in StaticTokenAuthService") + } + + public async handleCallback( + _code: string | null, + _state: string | null, + _organizationId?: string | null, + ): Promise { + throw new Error("Authentication methods are disabled in StaticTokenAuthService") + } + + public getState(): AuthState { + return this.state + } + + public getSessionToken(): 
string | undefined { + return this.token + } + + public isAuthenticated(): boolean { + return true + } + + public hasActiveSession(): boolean { + return true + } + + public hasOrIsAcquiringActiveSession(): boolean { + return true + } + + public getUserInfo(): CloudUserInfo | null { + return {} + } + + public getStoredOrganizationId(): string | null { + return null + } +} diff --git a/packages/cloud/src/AuthService.ts b/packages/cloud/src/auth/WebAuthService.ts similarity index 96% rename from packages/cloud/src/AuthService.ts rename to packages/cloud/src/auth/WebAuthService.ts index cd8e1362c165..d14cbe67d84c 100644 --- a/packages/cloud/src/AuthService.ts +++ b/packages/cloud/src/auth/WebAuthService.ts @@ -6,17 +6,10 @@ import { z } from "zod" import type { CloudUserInfo, CloudOrganizationMembership } from "@roo-code/types" -import { getClerkBaseUrl, getRooCodeApiUrl, PRODUCTION_CLERK_BASE_URL } from "./Config" -import { RefreshTimer } from "./RefreshTimer" -import { getUserAgent } from "./utils" - -export interface AuthServiceEvents { - "attempting-session": [data: { previousState: AuthState }] - "inactive-session": [data: { previousState: AuthState }] - "active-session": [data: { previousState: AuthState }] - "logged-out": [data: { previousState: AuthState }] - "user-info": [data: { userInfo: CloudUserInfo }] -} +import { getClerkBaseUrl, getRooCodeApiUrl, PRODUCTION_CLERK_BASE_URL } from "../Config" +import { RefreshTimer } from "../RefreshTimer" +import { getUserAgent } from "../utils" +import type { AuthService, AuthServiceEvents, AuthState } from "./AuthService" const authCredentialsSchema = z.object({ clientToken: z.string().min(1, "Client token cannot be empty"), @@ -28,8 +21,6 @@ type AuthCredentials = z.infer const AUTH_STATE_KEY = "clerk-auth-state" -type AuthState = "initializing" | "logged-out" | "active-session" | "attempting-session" | "inactive-session" - const clerkSignInResponseSchema = z.object({ response: z.object({ created_session_id: z.string(), @@ -85,7 +76,7 @@ class InvalidClientTokenError extends Error { } } -export class AuthService extends EventEmitter { +export class WebAuthService extends EventEmitter implements AuthService { private context: vscode.ExtensionContext private timer: RefreshTimer private state: AuthState = "initializing" diff --git a/packages/cloud/src/auth/index.ts b/packages/cloud/src/auth/index.ts new file mode 100644 index 000000000000..b04a805295a5 --- /dev/null +++ b/packages/cloud/src/auth/index.ts @@ -0,0 +1,3 @@ +export type { AuthService, AuthServiceEvents, AuthState } from "./AuthService" +export { WebAuthService } from "./WebAuthService" +export { StaticTokenAuthService } from "./StaticTokenAuthService" diff --git a/packages/types/src/codebase-index.ts b/packages/types/src/codebase-index.ts index e86c17627ff5..0ad19d8676a2 100644 --- a/packages/types/src/codebase-index.ts +++ b/packages/types/src/codebase-index.ts @@ -1,5 +1,19 @@ import { z } from "zod" +/** + * Codebase Index Constants + */ +export const CODEBASE_INDEX_DEFAULTS = { + MIN_SEARCH_RESULTS: 10, + MAX_SEARCH_RESULTS: 200, + DEFAULT_SEARCH_RESULTS: 50, + SEARCH_RESULTS_STEP: 10, + MIN_SEARCH_SCORE: 0, + MAX_SEARCH_SCORE: 1, + DEFAULT_SEARCH_MIN_SCORE: 0.4, + SEARCH_SCORE_STEP: 0.05, +} as const + /** * CodebaseIndexConfig */ @@ -7,9 +21,19 @@ import { z } from "zod" export const codebaseIndexConfigSchema = z.object({ codebaseIndexEnabled: z.boolean().optional(), codebaseIndexQdrantUrl: z.string().optional(), - codebaseIndexEmbedderProvider: z.enum(["openai", "ollama", 
"openai-compatible"]).optional(), + codebaseIndexEmbedderProvider: z.enum(["openai", "ollama", "openai-compatible", "gemini"]).optional(), codebaseIndexEmbedderBaseUrl: z.string().optional(), codebaseIndexEmbedderModelId: z.string().optional(), + codebaseIndexEmbedderModelDimension: z.number().optional(), + codebaseIndexSearchMinScore: z.number().min(0).max(1).optional(), + codebaseIndexSearchMaxResults: z + .number() + .min(CODEBASE_INDEX_DEFAULTS.MIN_SEARCH_RESULTS) + .max(CODEBASE_INDEX_DEFAULTS.MAX_SEARCH_RESULTS) + .optional(), + // OpenAI Compatible specific fields + codebaseIndexOpenAiCompatibleBaseUrl: z.string().optional(), + codebaseIndexOpenAiCompatibleModelDimension: z.number().optional(), }) export type CodebaseIndexConfig = z.infer @@ -22,6 +46,7 @@ export const codebaseIndexModelsSchema = z.object({ openai: z.record(z.string(), z.object({ dimension: z.number() })).optional(), ollama: z.record(z.string(), z.object({ dimension: z.number() })).optional(), "openai-compatible": z.record(z.string(), z.object({ dimension: z.number() })).optional(), + gemini: z.record(z.string(), z.object({ dimension: z.number() })).optional(), }) export type CodebaseIndexModels = z.infer @@ -36,6 +61,7 @@ export const codebaseIndexProviderSchema = z.object({ codebaseIndexOpenAiCompatibleBaseUrl: z.string().optional(), codebaseIndexOpenAiCompatibleApiKey: z.string().optional(), codebaseIndexOpenAiCompatibleModelDimension: z.number().optional(), + codebaseIndexGeminiApiKey: z.string().optional(), }) export type CodebaseIndexProvider = z.infer diff --git a/packages/types/src/followup.ts b/packages/types/src/followup.ts new file mode 100644 index 000000000000..1a5424cd11e0 --- /dev/null +++ b/packages/types/src/followup.ts @@ -0,0 +1,41 @@ +import { z } from "zod" + +/** + * Interface for follow-up data structure used in follow-up questions + * This represents the data structure for follow-up questions that the LLM can ask + * to gather more information needed to complete a task. 
+ */ +export interface FollowUpData { + /** The question being asked by the LLM */ + question?: string + /** Array of suggested answers that the user can select */ + suggest?: Array +} + +/** + * Interface for a suggestion item with optional mode switching + */ +export interface SuggestionItem { + /** The text of the suggestion */ + answer: string + /** Optional mode to switch to when selecting this suggestion */ + mode?: string +} + +/** + * Zod schema for SuggestionItem + */ +export const suggestionItemSchema = z.object({ + answer: z.string(), + mode: z.string().optional(), +}) + +/** + * Zod schema for FollowUpData + */ +export const followUpDataSchema = z.object({ + question: z.string().optional(), + suggest: z.array(suggestionItemSchema).optional(), +}) + +export type FollowUpDataType = z.infer diff --git a/packages/types/src/global-settings.ts b/packages/types/src/global-settings.ts index e713cafa4c7f..79a09ff01756 100644 --- a/packages/types/src/global-settings.ts +++ b/packages/types/src/global-settings.ts @@ -45,6 +45,9 @@ export const globalSettingsSchema = z.object({ alwaysAllowModeSwitch: z.boolean().optional(), alwaysAllowSubtasks: z.boolean().optional(), alwaysAllowExecute: z.boolean().optional(), + alwaysAllowFollowupQuestions: z.boolean().optional(), + followupAutoApproveTimeoutMs: z.number().optional(), + alwaysAllowUpdateTodoList: z.boolean().optional(), allowedCommands: z.array(z.string()).optional(), allowedMaxRequests: z.number().nullish(), autoCondenseContext: z.boolean().optional(), @@ -105,6 +108,8 @@ export const globalSettingsSchema = z.object({ historyPreviewCollapsed: z.boolean().optional(), profileThresholds: z.record(z.string(), z.number()).optional(), hasOpenedModeSelector: z.boolean().optional(), + lastModeExportPath: z.string().optional(), + lastModeImportPath: z.string().optional(), }) export type GlobalSettings = z.infer @@ -143,6 +148,7 @@ export const SECRET_STATE_KEYS = [ "codeIndexOpenAiKey", "codeIndexQdrantApiKey", "codebaseIndexOpenAiCompatibleApiKey", + "codebaseIndexGeminiApiKey", ] as const satisfies readonly (keyof ProviderSettings)[] export type SecretState = Pick @@ -189,6 +195,9 @@ export const EVALS_SETTINGS: RooCodeSettings = { alwaysAllowModeSwitch: true, alwaysAllowSubtasks: true, alwaysAllowExecute: true, + alwaysAllowFollowupQuestions: true, + alwaysAllowUpdateTodoList: true, + followupAutoApproveTimeoutMs: 0, allowedCommands: ["*"], browserToolEnabled: false, diff --git a/packages/types/src/index.ts b/packages/types/src/index.ts index 345bc3e311a4..44937da235bf 100644 --- a/packages/types/src/index.ts +++ b/packages/types/src/index.ts @@ -4,6 +4,7 @@ export * from "./api.js" export * from "./codebase-index.js" export * from "./cloud.js" export * from "./experiment.js" +export * from "./followup.js" export * from "./global-settings.js" export * from "./history.js" export * from "./ipc.js" @@ -19,3 +20,4 @@ export * from "./terminal.js" export * from "./tool.js" export * from "./type-fu.js" export * from "./vscode.js" +export * from "./todo.js" diff --git a/packages/types/src/message.ts b/packages/types/src/message.ts index 914f02ecd669..49d5203c6647 100644 --- a/packages/types/src/message.ts +++ b/packages/types/src/message.ts @@ -106,6 +106,7 @@ export const clineSays = [ "condense_context", "condense_context_error", "codebase_search_result", + "user_edit_todos", ] as const export const clineSaySchema = z.enum(clineSays) diff --git a/packages/types/src/todo.ts b/packages/types/src/todo.ts new file mode 100644 index 
000000000000..4e874e175051 --- /dev/null +++ b/packages/types/src/todo.ts @@ -0,0 +1,19 @@ +import { z } from "zod" + +/** + * TodoStatus + */ +export const todoStatusSchema = z.enum(["pending", "in_progress", "completed"] as const) + +export type TodoStatus = z.infer + +/** + * TodoItem + */ +export const todoItemSchema = z.object({ + id: z.string(), + content: z.string(), + status: todoStatusSchema, +}) + +export type TodoItem = z.infer diff --git a/packages/types/src/tool.ts b/packages/types/src/tool.ts index 9e807d639d29..7a3fd2119996 100644 --- a/packages/types/src/tool.ts +++ b/packages/types/src/tool.ts @@ -33,6 +33,7 @@ export const toolNames = [ "new_task", "fetch_instructions", "codebase_search", + "update_todo_list", ] as const export const toolNamesSchema = z.enum(toolNames) diff --git a/scripts/install-vsix.js b/scripts/install-vsix.js new file mode 100644 index 000000000000..0ed9b6d37625 --- /dev/null +++ b/scripts/install-vsix.js @@ -0,0 +1,91 @@ +const { execSync } = require("child_process") +const fs = require("fs") +const readline = require("readline") + +// detect "yes" flags +const autoYes = process.argv.includes("-y") + +// detect editor command from args or default to "code" +const editorArg = process.argv.find((arg) => arg.startsWith("--editor=")) +const defaultEditor = editorArg ? editorArg.split("=")[1] : "code" + +const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout, +}) + +const askQuestion = (question) => { + return new Promise((resolve) => { + rl.question(question, (answer) => { + resolve(answer) + }) + }) +} + +async function main() { + try { + const packageJson = JSON.parse(fs.readFileSync("./src/package.json", "utf-8")) + const name = packageJson.name + const version = packageJson.version + const vsixFileName = `./bin/${name}-${version}.vsix` + const publisher = packageJson.publisher + const extensionId = `${publisher}.${name}` + + console.log("\n🚀 Roo Code VSIX Installer") + console.log("========================") + console.log("\nThis script will:") + console.log("1. Uninstall any existing version of the Roo Code extension") + console.log("2. Install the newly built VSIX package") + console.log(`\nExtension: ${extensionId}`) + console.log(`VSIX file: ${vsixFileName}`) + + // Ask for editor command if not provided + let editorCommand = defaultEditor + if (!editorArg && !autoYes) { + const editorAnswer = await askQuestion( + "\nWhich editor command to use? (code/cursor/code-insiders) [default: code]: ", + ) + if (editorAnswer.trim()) { + editorCommand = editorAnswer.trim() + } + } + + // skip prompt if auto-yes + const answer = autoYes ? "y" : await askQuestion("\nDo you wish to continue? 
(y/n): ") + + if (answer.toLowerCase() !== "y") { + console.log("Installation cancelled.") + rl.close() + process.exit(0) + } + + console.log(`\nProceeding with installation using '${editorCommand}' command...`) + + try { + execSync(`${editorCommand} --uninstall-extension ${extensionId}`, { stdio: "inherit" }) + } catch (e) { + console.log("Extension not installed, skipping uninstall step") + } + + if (!fs.existsSync(vsixFileName)) { + console.error(`\n❌ VSIX file not found: ${vsixFileName}`) + console.error("Make sure the build completed successfully") + rl.close() + process.exit(1) + } + + execSync(`${editorCommand} --install-extension ${vsixFileName}`, { stdio: "inherit" }) + + console.log(`\n✅ Successfully installed extension from ${vsixFileName}`) + console.log("\n⚠️ IMPORTANT: You need to restart VS Code for the changes to take effect.") + console.log(" Please close and reopen VS Code to use the updated extension.\n") + + rl.close() + } catch (error) { + console.error("\n❌ Failed to install extension:", error.message) + rl.close() + process.exit(1) + } +} + +main() diff --git a/src/api/providers/fetchers/openrouter.ts b/src/api/providers/fetchers/openrouter.ts index a98484ba0e4e..027f8c54fbfd 100644 --- a/src/api/providers/fetchers/openrouter.ts +++ b/src/api/providers/fetchers/openrouter.ts @@ -190,10 +190,8 @@ export const parseOpenRouterModel = ({ const supportsPromptCache = typeof cacheWritesPrice !== "undefined" && typeof cacheReadsPrice !== "undefined" - const useMaxTokens = OPEN_ROUTER_REASONING_BUDGET_MODELS.has(id) || id.startsWith("anthropic/") - const modelInfo: ModelInfo = { - maxTokens: useMaxTokens ? maxTokens || 0 : 0, + maxTokens: maxTokens || Math.ceil(model.context_length * 0.2), contextWindow: model.context_length, supportsImages: modality?.includes("image") ?? false, supportsPromptCache, diff --git a/src/core/assistant-message/presentAssistantMessage.ts b/src/core/assistant-message/presentAssistantMessage.ts index 21c973ab5076..ee3fa148b415 100644 --- a/src/core/assistant-message/presentAssistantMessage.ts +++ b/src/core/assistant-message/presentAssistantMessage.ts @@ -26,6 +26,7 @@ import { attemptCompletionTool } from "../tools/attemptCompletionTool" import { newTaskTool } from "../tools/newTaskTool" import { checkpointSave } from "../checkpoints" +import { updateTodoListTool } from "../tools/updateTodoListTool" import { formatResponse } from "../prompts/responses" import { validateToolUse } from "../tools/validateToolUse" @@ -205,6 +206,8 @@ export async function presentAssistantMessage(cline: Task) { return `[${block.name} to '${block.params.mode_slug}'${block.params.reason ? ` because: ${block.params.reason}` : ""}]` case "codebase_search": // Add case for the new tool return `[${block.name} for '${block.params.query}']` + case "update_todo_list": + return `[${block.name}]` case "new_task": { const mode = block.params.mode ?? defaultModeSlug const message = block.params.message ?? 
"(no message)" @@ -410,6 +413,9 @@ export async function presentAssistantMessage(cline: Task) { case "write_to_file": await writeToFileTool(cline, block, askApproval, handleError, pushToolResult, removeClosingTag) break + case "update_todo_list": + await updateTodoListTool(cline, block, askApproval, handleError, pushToolResult, removeClosingTag) + break case "apply_diff": { // Get the provider and state to check experiment settings const provider = cline.providerRef.deref() diff --git a/src/core/config/ContextProxy.ts b/src/core/config/ContextProxy.ts index c4324fbb1368..5535cd2ff4c9 100644 --- a/src/core/config/ContextProxy.ts +++ b/src/core/config/ContextProxy.ts @@ -147,6 +147,23 @@ export class ContextProxy { : this.originalContext.secrets.store(key, value) } + /** + * Refresh secrets from storage and update cache + * This is useful when you need to ensure the cache has the latest values + */ + async refreshSecrets(): Promise { + const promises = SECRET_STATE_KEYS.map(async (key) => { + try { + this.secretCache[key] = await this.originalContext.secrets.get(key) + } catch (error) { + logger.error( + `Error refreshing secret ${key}: ${error instanceof Error ? error.message : String(error)}`, + ) + } + }) + await Promise.all(promises) + } + private getAllSecretState(): SecretState { return Object.fromEntries(SECRET_STATE_KEYS.map((key) => [key, this.getSecret(key)])) } diff --git a/src/core/config/CustomModesManager.ts b/src/core/config/CustomModesManager.ts index b96293ee49e3..9f29185eba6f 100644 --- a/src/core/config/CustomModesManager.ts +++ b/src/core/config/CustomModesManager.ts @@ -5,10 +5,11 @@ import * as fs from "fs/promises" import * as yaml from "yaml" import stripBom from "strip-bom" -import { type ModeConfig, customModesSettingsSchema } from "@roo-code/types" +import { type ModeConfig, type PromptComponent, customModesSettingsSchema, modeConfigSchema } from "@roo-code/types" import { fileExistsAtPath } from "../../utils/fs" import { getWorkspacePath } from "../../utils/path" +import { getGlobalRooDirectory } from "../../services/roo-config" import { logger } from "../../utils/logging" import { GlobalFileNames } from "../../shared/globalFileNames" import { ensureSettingsDirectoryExists } from "../../utils/globalContext" @@ -16,6 +17,31 @@ import { t } from "../../i18n" const ROOMODES_FILENAME = ".roomodes" +// Type definitions for import/export functionality +interface RuleFile { + relativePath: string + content: string +} + +interface ExportedModeConfig extends ModeConfig { + rulesFiles?: RuleFile[] +} + +interface ImportData { + customModes: ExportedModeConfig[] +} + +interface ExportResult { + success: boolean + yaml?: string + error?: string +} + +interface ImportResult { + success: boolean + error?: string +} + export class CustomModesManager { private static readonly cacheTTL = 10_000 @@ -501,6 +527,383 @@ export class CustomModesManager { } } + /** + * Checks if a mode has associated rules files in the .roo/rules-{slug}/ directory + * @param slug - The mode identifier to check + * @returns True if the mode has rules files with content, false otherwise + */ + public async checkRulesDirectoryHasContent(slug: string): Promise { + try { + // Get workspace path + const workspacePath = getWorkspacePath() + if (!workspacePath) { + return false + } + + // Check if .roomodes file exists and contains this mode + // This ensures we can only consolidate rules for modes that have been customized + const roomodesPath = path.join(workspacePath, ROOMODES_FILENAME) + try { + const 
roomodesExists = await fileExistsAtPath(roomodesPath) + if (roomodesExists) { + const roomodesContent = await fs.readFile(roomodesPath, "utf-8") + const roomodesData = yaml.parse(roomodesContent) + const roomodesModes = roomodesData?.customModes || [] + + // Check if this specific mode exists in .roomodes + const modeInRoomodes = roomodesModes.find((m: any) => m.slug === slug) + if (!modeInRoomodes) { + return false // Mode not customized in .roomodes, cannot consolidate + } + } else { + // If no .roomodes file exists, check if it's in global custom modes + const allModes = await this.getCustomModes() + const mode = allModes.find((m) => m.slug === slug) + + if (!mode) { + return false // Not a custom mode, cannot consolidate + } + } + } catch (error) { + // If we can't read .roomodes, fall back to checking custom modes + const allModes = await this.getCustomModes() + const mode = allModes.find((m) => m.slug === slug) + + if (!mode) { + return false // Not a custom mode, cannot consolidate + } + } + + // Check for .roo/rules-{slug}/ directory + const modeRulesDir = path.join(workspacePath, ".roo", `rules-${slug}`) + + try { + const stats = await fs.stat(modeRulesDir) + if (!stats.isDirectory()) { + return false + } + } catch (error) { + return false + } + + // Check if directory has any content files + try { + const entries = await fs.readdir(modeRulesDir, { withFileTypes: true }) + + for (const entry of entries) { + if (entry.isFile()) { + // Use path.join with modeRulesDir and entry.name for compatibility + const filePath = path.join(modeRulesDir, entry.name) + const content = await fs.readFile(filePath, "utf-8") + if (content.trim()) { + return true // Found at least one file with content + } + } + } + + return false // No files with content found + } catch (error) { + return false + } + } catch (error) { + logger.error("Failed to check rules directory for mode", { + slug, + error: error instanceof Error ? 
error.message : String(error), + }) + return false + } + } + + /** + * Exports a mode configuration with its associated rules files into a shareable YAML format + * @param slug - The mode identifier to export + * @param customPrompts - Optional custom prompts to merge into the export + * @returns Success status with YAML content or error message + */ + public async exportModeWithRules(slug: string, customPrompts?: PromptComponent): Promise { + try { + // Import modes from shared to check built-in modes + const { modes: builtInModes } = await import("../../shared/modes") + + // Get all current modes + const allModes = await this.getCustomModes() + let mode = allModes.find((m) => m.slug === slug) + + // If mode not found in custom modes, check if it's a built-in mode that has been customized + if (!mode) { + const workspacePath = getWorkspacePath() + if (!workspacePath) { + return { success: false, error: "No workspace found" } + } + + const roomodesPath = path.join(workspacePath, ROOMODES_FILENAME) + try { + const roomodesExists = await fileExistsAtPath(roomodesPath) + if (roomodesExists) { + const roomodesContent = await fs.readFile(roomodesPath, "utf-8") + const roomodesData = yaml.parse(roomodesContent) + const roomodesModes = roomodesData?.customModes || [] + + // Find the mode in .roomodes + mode = roomodesModes.find((m: any) => m.slug === slug) + } + } catch (error) { + // Continue to check built-in modes + } + + // If still not found, check if it's a built-in mode + if (!mode) { + const builtInMode = builtInModes.find((m) => m.slug === slug) + if (builtInMode) { + // Use the built-in mode as the base + mode = { ...builtInMode } + } else { + return { success: false, error: "Mode not found" } + } + } + } + + // Get workspace path + const workspacePath = getWorkspacePath() + if (!workspacePath) { + return { success: false, error: "No workspace found" } + } + + // Check for .roo/rules-{slug}/ directory + const modeRulesDir = path.join(workspacePath, ".roo", `rules-${slug}`) + + let rulesFiles: RuleFile[] = [] + try { + const stats = await fs.stat(modeRulesDir) + if (stats.isDirectory()) { + // Extract content specific to this mode by looking for the mode-specific rules + const entries = await fs.readdir(modeRulesDir, { withFileTypes: true }) + + for (const entry of entries) { + if (entry.isFile()) { + // Use path.join with modeRulesDir and entry.name for compatibility + const filePath = path.join(modeRulesDir, entry.name) + const content = await fs.readFile(filePath, "utf-8") + if (content.trim()) { + // Calculate relative path from .roo directory + const relativePath = path.relative(path.join(workspacePath, ".roo"), filePath) + rulesFiles.push({ relativePath, content: content.trim() }) + } + } + } + } + } catch (error) { + // Directory doesn't exist, which is fine - mode might not have rules + } + + // Create an export mode with rules files preserved + const exportMode: ExportedModeConfig = { + ...mode, + // Remove source property for export + source: "project" as const, + } + + // Merge custom prompts if provided + if (customPrompts) { + if (customPrompts.roleDefinition) exportMode.roleDefinition = customPrompts.roleDefinition + if (customPrompts.description) exportMode.description = customPrompts.description + if (customPrompts.whenToUse) exportMode.whenToUse = customPrompts.whenToUse + if (customPrompts.customInstructions) exportMode.customInstructions = customPrompts.customInstructions + } + + // Add rules files if any exist + if (rulesFiles.length > 0) { + exportMode.rulesFiles = 
rulesFiles + } + + // Generate YAML + const exportData = { + customModes: [exportMode], + } + + const yamlContent = yaml.stringify(exportData) + + return { success: true, yaml: yamlContent } + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error) + logger.error("Failed to export mode with rules", { slug, error: errorMessage }) + return { success: false, error: errorMessage } + } + } + + /** + * Imports modes from YAML content, including their associated rules files + * @param yamlContent - The YAML content containing mode configurations + * @param source - Target level for import: "global" (all projects) or "project" (current workspace only) + * @returns Success status with optional error message + */ + public async importModeWithRules( + yamlContent: string, + source: "global" | "project" = "project", + ): Promise { + try { + // Parse the YAML content with proper type validation + let importData: ImportData + try { + const parsed = yaml.parse(yamlContent) + + // Validate the structure + if (!parsed?.customModes || !Array.isArray(parsed.customModes) || parsed.customModes.length === 0) { + return { success: false, error: "Invalid import format: Expected 'customModes' array in YAML" } + } + + importData = parsed as ImportData + } catch (parseError) { + return { + success: false, + error: `Invalid YAML format: ${parseError instanceof Error ? parseError.message : "Failed to parse YAML"}`, + } + } + + // Check workspace availability early if importing at project level + if (source === "project") { + const workspacePath = getWorkspacePath() + if (!workspacePath) { + return { success: false, error: "No workspace found" } + } + } + + // Process each mode in the import + for (const importMode of importData.customModes) { + const { rulesFiles, ...modeConfig } = importMode + + // Validate the mode configuration + const validationResult = modeConfigSchema.safeParse(modeConfig) + if (!validationResult.success) { + logger.error(`Invalid mode configuration for ${modeConfig.slug}`, { + errors: validationResult.error.errors, + }) + return { + success: false, + error: `Invalid mode configuration for ${modeConfig.slug}: ${validationResult.error.errors.map((e) => e.message).join(", ")}`, + } + } + + // Check for existing mode conflicts + const existingModes = await this.getCustomModes() + const existingMode = existingModes.find((m) => m.slug === importMode.slug) + if (existingMode) { + logger.info(`Overwriting existing mode: ${importMode.slug}`) + } + + // Import the mode configuration with the specified source + await this.updateCustomMode(importMode.slug, { + ...modeConfig, + source: source, // Use the provided source parameter + }) + + // Handle project-level imports + if (source === "project") { + const workspacePath = getWorkspacePath() + + // Always remove the existing rules folder for this mode if it exists + // This ensures that if the imported mode has no rules, the folder is cleaned up + const rulesFolderPath = path.join(workspacePath, ".roo", `rules-${importMode.slug}`) + try { + await fs.rm(rulesFolderPath, { recursive: true, force: true }) + logger.info(`Removed existing rules folder for mode ${importMode.slug}`) + } catch (error) { + // It's okay if the folder doesn't exist + logger.debug(`No existing rules folder to remove for mode ${importMode.slug}`) + } + + // Only create new rules files if they exist in the import + if (rulesFiles && Array.isArray(rulesFiles) && rulesFiles.length > 0) { + // Import the new rules files with path validation + for 
(const ruleFile of rulesFiles) { + if (ruleFile.relativePath && ruleFile.content) { + // Validate the relative path to prevent path traversal attacks + const normalizedRelativePath = path.normalize(ruleFile.relativePath) + + // Ensure the path doesn't contain traversal sequences + if (normalizedRelativePath.includes("..") || path.isAbsolute(normalizedRelativePath)) { + logger.error(`Invalid file path detected: ${ruleFile.relativePath}`) + continue // Skip this file but continue with others + } + + const targetPath = path.join(workspacePath, ".roo", normalizedRelativePath) + const normalizedTargetPath = path.normalize(targetPath) + const expectedBasePath = path.normalize(path.join(workspacePath, ".roo")) + + // Ensure the resolved path stays within the .roo directory + if (!normalizedTargetPath.startsWith(expectedBasePath)) { + logger.error(`Path traversal attempt detected: ${ruleFile.relativePath}`) + continue // Skip this file but continue with others + } + + // Ensure directory exists + const targetDir = path.dirname(targetPath) + await fs.mkdir(targetDir, { recursive: true }) + + // Write the file + await fs.writeFile(targetPath, ruleFile.content, "utf-8") + } + } + } + } else if (source === "global" && rulesFiles && Array.isArray(rulesFiles)) { + // For global imports, preserve the rules files structure in the global .roo directory + const globalRooDir = getGlobalRooDirectory() + + // Always remove the existing rules folder for this mode if it exists + // This ensures that if the imported mode has no rules, the folder is cleaned up + const rulesFolderPath = path.join(globalRooDir, `rules-${importMode.slug}`) + try { + await fs.rm(rulesFolderPath, { recursive: true, force: true }) + logger.info(`Removed existing global rules folder for mode ${importMode.slug}`) + } catch (error) { + // It's okay if the folder doesn't exist + logger.debug(`No existing global rules folder to remove for mode ${importMode.slug}`) + } + + // Import the new rules files with path validation + for (const ruleFile of rulesFiles) { + if (ruleFile.relativePath && ruleFile.content) { + // Validate the relative path to prevent path traversal attacks + const normalizedRelativePath = path.normalize(ruleFile.relativePath) + + // Ensure the path doesn't contain traversal sequences + if (normalizedRelativePath.includes("..") || path.isAbsolute(normalizedRelativePath)) { + logger.error(`Invalid file path detected: ${ruleFile.relativePath}`) + continue // Skip this file but continue with others + } + + const targetPath = path.join(globalRooDir, normalizedRelativePath) + const normalizedTargetPath = path.normalize(targetPath) + const expectedBasePath = path.normalize(globalRooDir) + + // Ensure the resolved path stays within the global .roo directory + if (!normalizedTargetPath.startsWith(expectedBasePath)) { + logger.error(`Path traversal attempt detected: ${ruleFile.relativePath}`) + continue // Skip this file but continue with others + } + + // Ensure directory exists + const targetDir = path.dirname(targetPath) + await fs.mkdir(targetDir, { recursive: true }) + + // Write the file + await fs.writeFile(targetPath, ruleFile.content, "utf-8") + } + } + } + } + + // Refresh the modes after import + await this.refreshMergedState() + + return { success: true } + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : String(error) + logger.error("Failed to import mode with rules", { error: errorMessage }) + return { success: false, error: errorMessage } + } + } + private clearCache(): void { this.cachedModes = null this.cachedAt = 0 diff --git a/src/core/config/__tests__/CustomModesManager.spec.ts b/src/core/config/__tests__/CustomModesManager.spec.ts index 2af801b6460f..c325a27f7516 100644 --- a/src/core/config/__tests__/CustomModesManager.spec.ts +++ b/src/core/config/__tests__/CustomModesManager.spec.ts @@ -27,7 +27,14 @@ vi.mock("vscode", () => ({ }, })) -vi.mock("fs/promises") +vi.mock("fs/promises", () => ({ + mkdir: vi.fn(), + readFile: vi.fn(), + writeFile: vi.fn(), + stat: vi.fn(), + readdir: vi.fn(), + rm: vi.fn(), +})) vi.mock("../../../utils/fs") vi.mock("../../../utils/path") @@ -41,7 +48,8 @@ describe("CustomModesManager", () => { // Use path.sep to ensure correct path separators for the current platform const mockStoragePath = `${path.sep}mock${path.sep}settings` const mockSettingsPath = path.join(mockStoragePath, "settings", GlobalFileNames.customModes) - const mockRoomodes = `${path.sep}mock${path.sep}workspace${path.sep}.roomodes` + const mockWorkspacePath = path.resolve("/mock/workspace") + const mockRoomodes = path.join(mockWorkspacePath, ".roomodes") beforeEach(() => { mockOnUpdate = vi.fn() @@ -57,14 +65,19 @@ describe("CustomModesManager", () => { }, } as unknown as vscode.ExtensionContext - mockWorkspaceFolders = [{ uri: { fsPath: "/mock/workspace" } }] + // mockWorkspacePath is now defined at the top level + mockWorkspaceFolders = [{ uri: { fsPath: mockWorkspacePath } }] ;(vscode.workspace as any).workspaceFolders = mockWorkspaceFolders ;(vscode.workspace.onDidSaveTextDocument as Mock).mockReturnValue({ dispose: vi.fn() }) - ;(getWorkspacePath as Mock).mockReturnValue("/mock/workspace") + ;(getWorkspacePath as Mock).mockReturnValue(mockWorkspacePath) ;(fileExistsAtPath as Mock).mockImplementation(async (path: string) => { return path === mockSettingsPath || path === mockRoomodes }) ;(fs.mkdir as Mock).mockResolvedValue(undefined) + ;(fs.writeFile as Mock).mockResolvedValue(undefined) + ;(fs.stat as Mock).mockResolvedValue({ isDirectory: () => true }) + ;(fs.readdir as Mock).mockResolvedValue([]) + ;(fs.rm as Mock).mockResolvedValue(undefined) ;(fs.readFile as Mock).mockImplementation(async (path: string) => { if (path === mockSettingsPath) { return yaml.stringify({ customModes: [] }) @@ -786,5 +799,777 @@ describe("CustomModesManager", () => { ], }) }) + + describe("importModeWithRules", () => { + it("should return error when YAML content is invalid", async () => { + const invalidYaml = "invalid yaml content" + + const result = await manager.importModeWithRules(invalidYaml) + + expect(result.success).toBe(false) + expect(result.error).toContain("Invalid import format") + }) + + it("should return error when no custom modes found in YAML", async () => { + const emptyYaml = yaml.stringify({ customModes: [] }) + + const result = await manager.importModeWithRules(emptyYaml) + + expect(result.success).toBe(false) + expect(result.error).toBe("Invalid import format: Expected 'customModes' array in YAML") + }) + + it("should return error when no workspace is available", async () => { + ;(getWorkspacePath as Mock).mockReturnValue(null) + const validYaml = yaml.stringify({ + customModes: [ + { + slug: "test-mode", + name: "Test Mode", + roleDefinition: "Test Role", + groups: ["read"], + }, + ], + }) + + const result = await manager.importModeWithRules(validYaml) + + 
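The rule-file import above relies on a two-step path check: reject obviously bad relative paths, then verify the resolved target is still under the `.roo` base directory. A minimal standalone sketch of that containment check (the `isInsideRooDir` helper name is illustrative, not part of this PR):

```typescript
import * as path from "path"

// Reject absolute paths and ".." segments up front, then confirm the resolved
// target still lives under the base directory — the same two checks the
// import loop applies before writing each rule file.
function isInsideRooDir(baseDir: string, relativePath: string): boolean {
	const normalizedRelative = path.normalize(relativePath)
	if (path.isAbsolute(normalizedRelative) || normalizedRelative.includes("..")) {
		return false
	}
	const target = path.normalize(path.join(baseDir, normalizedRelative))
	return target.startsWith(path.normalize(baseDir))
}

// isInsideRooDir("/workspace/.roo", "rules-my-mode/rule1.md")  -> true  (written)
// isInsideRooDir("/workspace/.roo", "../../etc/passwd")        -> false (skipped)
// isInsideRooDir("/workspace/.roo", "/absolute/path/file.txt") -> false (skipped)
```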
expect(result.success).toBe(false) + expect(result.error).toBe("No workspace found") + }) + + it("should successfully import mode without rules files", async () => { + const importYaml = yaml.stringify({ + customModes: [ + { + slug: "imported-mode", + name: "Imported Mode", + roleDefinition: "Imported Role", + groups: ["read", "edit"], + }, + ], + }) + + let roomodesContent: any = null + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockSettingsPath) { + return yaml.stringify({ customModes: [] }) + } + if (path === mockRoomodes && roomodesContent) { + return yaml.stringify(roomodesContent) + } + throw new Error("File not found") + }) + ;(fs.writeFile as Mock).mockImplementation(async (path: string, content: string) => { + if (path === mockRoomodes) { + roomodesContent = yaml.parse(content) + } + return Promise.resolve() + }) + + const result = await manager.importModeWithRules(importYaml) + + expect(result.success).toBe(true) + expect(fs.writeFile).toHaveBeenCalledWith( + expect.stringContaining(".roomodes"), + expect.stringContaining("imported-mode"), + "utf-8", + ) + }) + + it("should successfully import mode with rules files", async () => { + const importYaml = yaml.stringify({ + customModes: [ + { + slug: "imported-mode", + name: "Imported Mode", + roleDefinition: "Imported Role", + groups: ["read"], + rulesFiles: [ + { + relativePath: "rules-imported-mode/rule1.md", + content: "Rule 1 content", + }, + { + relativePath: "rules-imported-mode/subfolder/rule2.md", + content: "Rule 2 content", + }, + ], + }, + ], + }) + + let roomodesContent: any = null + let writtenFiles: Record = {} + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockSettingsPath) { + return yaml.stringify({ customModes: [] }) + } + if (path === mockRoomodes && roomodesContent) { + return yaml.stringify(roomodesContent) + } + throw new Error("File not found") + }) + ;(fs.writeFile as Mock).mockImplementation(async (path: string, content: string) => { + if (path === mockRoomodes) { + roomodesContent = yaml.parse(content) + } else { + writtenFiles[path] = content + } + return Promise.resolve() + }) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + const result = await manager.importModeWithRules(importYaml) + + expect(result.success).toBe(true) + + // Verify mode was imported + expect(fs.writeFile).toHaveBeenCalledWith( + expect.stringContaining(".roomodes"), + expect.stringContaining("imported-mode"), + "utf-8", + ) + + // Verify rules files were created + expect(fs.mkdir).toHaveBeenCalledWith(expect.stringContaining("rules-imported-mode"), { + recursive: true, + }) + expect(fs.mkdir).toHaveBeenCalledWith( + expect.stringContaining(path.join("rules-imported-mode", "subfolder")), + { recursive: true }, + ) + + // Verify file contents + const rule1Path = Object.keys(writtenFiles).find((p) => p.includes("rule1.md")) + const rule2Path = Object.keys(writtenFiles).find((p) => p.includes("rule2.md")) + expect(writtenFiles[rule1Path!]).toBe("Rule 1 content") + expect(writtenFiles[rule2Path!]).toBe("Rule 2 content") + }) + + it("should import multiple modes at once", async () => { + const importYaml = yaml.stringify({ + customModes: [ + { + slug: "mode1", + name: "Mode 1", + roleDefinition: "Role 1", + groups: ["read"], + }, + { + slug: "mode2", + name: "Mode 2", + roleDefinition: "Role 2", + groups: ["edit"], + rulesFiles: [ + { + relativePath: "rules-mode2/rule.md", + content: "Mode 2 rules", + }, + ], + }, + ], + }) + + let roomodesContent: any = 
null + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockSettingsPath) { + return yaml.stringify({ customModes: [] }) + } + if (path === mockRoomodes && roomodesContent) { + return yaml.stringify(roomodesContent) + } + throw new Error("File not found") + }) + ;(fs.writeFile as Mock).mockImplementation(async (path: string, content: string) => { + if (path === mockRoomodes) { + roomodesContent = yaml.parse(content) + } + return Promise.resolve() + }) + + const result = await manager.importModeWithRules(importYaml) + + expect(result.success).toBe(true) + expect(roomodesContent.customModes).toHaveLength(2) + expect(roomodesContent.customModes[0].slug).toBe("mode1") + expect(roomodesContent.customModes[1].slug).toBe("mode2") + }) + + it("should handle import errors gracefully", async () => { + const importYaml = yaml.stringify({ + customModes: [ + { + slug: "test-mode", + name: "Test Mode", + roleDefinition: "Test Role", + groups: ["read"], + rulesFiles: [ + { + relativePath: "rules-test-mode/rule.md", + content: "Rule content", + }, + ], + }, + ], + }) + + // Mock fs.readFile to work normally + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockSettingsPath) { + return yaml.stringify({ customModes: [] }) + } + if (path === mockRoomodes) { + throw new Error("File not found") + } + throw new Error("File not found") + }) + + // Mock fs.mkdir to fail when creating rules directory + ;(fs.mkdir as Mock).mockRejectedValue(new Error("Permission denied")) + + // Mock fs.writeFile to work normally for .roomodes but we won't get there + ;(fs.writeFile as Mock).mockResolvedValue(undefined) + + const result = await manager.importModeWithRules(importYaml) + + expect(result.success).toBe(false) + expect(result.error).toContain("Permission denied") + }) + + it("should prevent path traversal attacks in import", async () => { + const maliciousYaml = yaml.stringify({ + customModes: [ + { + slug: "test-mode", + name: "Test Mode", + roleDefinition: "Test Role", + groups: ["read"], + rulesFiles: [ + { + relativePath: "../../../etc/passwd", + content: "malicious content", + }, + { + relativePath: "rules-test-mode/../../../sensitive.txt", + content: "malicious content", + }, + { + relativePath: "/absolute/path/file.txt", + content: "malicious content", + }, + ], + }, + ], + }) + + let writtenFiles: string[] = [] + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockSettingsPath) { + return yaml.stringify({ customModes: [] }) + } + throw new Error("File not found") + }) + ;(fs.writeFile as Mock).mockImplementation(async (path: string) => { + writtenFiles.push(path) + return Promise.resolve() + }) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + const result = await manager.importModeWithRules(maliciousYaml) + + expect(result.success).toBe(true) + + // Verify that no files were written outside the .roo directory + const mockWorkspacePath = path.resolve("/mock/workspace") + const writtenRuleFiles = writtenFiles.filter((p) => !p.includes(".roomodes")) + writtenRuleFiles.forEach((filePath) => { + const normalizedPath = path.normalize(filePath) + const expectedBasePath = path.normalize(path.join(mockWorkspacePath, ".roo")) + expect(normalizedPath.startsWith(expectedBasePath)).toBe(true) + }) + + // Verify that malicious paths were not written + expect(writtenFiles.some((p) => p.includes("etc/passwd"))).toBe(false) + expect(writtenFiles.some((p) => p.includes("sensitive.txt"))).toBe(false) + 
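Most of these specs share the same harness: `fs/promises` is mocked with an explicit factory and writes are captured in memory so assertions can inspect file contents without touching the disk. A condensed sketch of that pattern (paths and contents are illustrative):

```typescript
import { vi, type Mock } from "vitest"
import * as fs from "fs/promises"

// Explicit factory so only the named functions exist as mocks.
vi.mock("fs/promises", () => ({
	mkdir: vi.fn(),
	readFile: vi.fn(),
	writeFile: vi.fn(),
	stat: vi.fn(),
	readdir: vi.fn(),
	rm: vi.fn(),
}))

// Capture every write in memory, keyed by path.
const writtenFiles: Record<string, string> = {}
;(fs.writeFile as Mock).mockImplementation(async (filePath: string, content: string) => {
	writtenFiles[filePath] = content
})
;(fs.readFile as Mock).mockImplementation(async (filePath: string) => {
	if (filePath in writtenFiles) return writtenFiles[filePath]
	throw new Error("File not found")
})
```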
expect(writtenFiles.some((p) => path.isAbsolute(p) && !p.startsWith(mockWorkspacePath))).toBe(false) + }) + + it("should handle malformed YAML gracefully", async () => { + const malformedYaml = ` + customModes: + - slug: test-mode + name: Test Mode + roleDefinition: Test Role + groups: [read + invalid yaml here + ` + + const result = await manager.importModeWithRules(malformedYaml) + + expect(result.success).toBe(false) + expect(result.error).toContain("Invalid YAML format") + }) + + it("should validate mode configuration during import", async () => { + const invalidModeYaml = yaml.stringify({ + customModes: [ + { + slug: "test-mode", + name: "", // Invalid: empty name + roleDefinition: "", // Invalid: empty role definition + groups: ["invalid-group"], // Invalid group + }, + ], + }) + + const result = await manager.importModeWithRules(invalidModeYaml) + + expect(result.success).toBe(false) + expect(result.error).toContain("Invalid mode configuration") + }) + + it("should remove existing rules folder when importing mode without rules", async () => { + const importYaml = yaml.stringify({ + customModes: [ + { + slug: "test-mode", + name: "Test Mode", + roleDefinition: "Test Role", + groups: ["read"], + // No rulesFiles property - this mode has no rules + }, + ], + }) + + let roomodesContent: any = null + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockSettingsPath) { + return yaml.stringify({ customModes: [] }) + } + if (path === mockRoomodes && roomodesContent) { + return yaml.stringify(roomodesContent) + } + throw new Error("File not found") + }) + ;(fs.writeFile as Mock).mockImplementation(async (path: string, content: string) => { + if (path === mockRoomodes) { + roomodesContent = yaml.parse(content) + } + return Promise.resolve() + }) + ;(fs.rm as Mock).mockResolvedValue(undefined) + + const result = await manager.importModeWithRules(importYaml) + + expect(result.success).toBe(true) + + // Verify that fs.rm was called to remove the existing rules folder + expect(fs.rm).toHaveBeenCalledWith(expect.stringContaining(path.join(".roo", "rules-test-mode")), { + recursive: true, + force: true, + }) + + // Verify mode was imported + expect(fs.writeFile).toHaveBeenCalledWith( + expect.stringContaining(".roomodes"), + expect.stringContaining("test-mode"), + "utf-8", + ) + }) + + it("should remove existing rules folder and create new ones when importing mode with rules", async () => { + const importYaml = yaml.stringify({ + customModes: [ + { + slug: "test-mode", + name: "Test Mode", + roleDefinition: "Test Role", + groups: ["read"], + rulesFiles: [ + { + relativePath: "rules-test-mode/new-rule.md", + content: "New rule content", + }, + ], + }, + ], + }) + + let roomodesContent: any = null + let writtenFiles: Record = {} + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockSettingsPath) { + return yaml.stringify({ customModes: [] }) + } + if (path === mockRoomodes && roomodesContent) { + return yaml.stringify(roomodesContent) + } + throw new Error("File not found") + }) + ;(fs.writeFile as Mock).mockImplementation(async (path: string, content: string) => { + if (path === mockRoomodes) { + roomodesContent = yaml.parse(content) + } else { + writtenFiles[path] = content + } + return Promise.resolve() + }) + ;(fs.rm as Mock).mockResolvedValue(undefined) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + const result = await manager.importModeWithRules(importYaml) + + expect(result.success).toBe(true) + + // Verify that 
fs.rm was called to remove the existing rules folder + expect(fs.rm).toHaveBeenCalledWith(expect.stringContaining(path.join(".roo", "rules-test-mode")), { + recursive: true, + force: true, + }) + + // Verify new rules files were created + expect(fs.mkdir).toHaveBeenCalledWith(expect.stringContaining("rules-test-mode"), { recursive: true }) + + // Verify file contents + const newRulePath = Object.keys(writtenFiles).find((p) => p.includes("new-rule.md")) + expect(writtenFiles[newRulePath!]).toBe("New rule content") + }) + }) + }) + + describe("checkRulesDirectoryHasContent", () => { + it("should return false when no workspace is available", async () => { + ;(getWorkspacePath as Mock).mockReturnValue(null) + + const result = await manager.checkRulesDirectoryHasContent("test-mode") + + expect(result).toBe(false) + }) + + it("should return false when mode is not in .roomodes file", async () => { + const roomodesContent = { customModes: [{ slug: "other-mode", name: "Other Mode" }] } + ;(fileExistsAtPath as Mock).mockImplementation(async (path: string) => { + return path === mockRoomodes + }) + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockRoomodes) { + return yaml.stringify(roomodesContent) + } + throw new Error("File not found") + }) + + const result = await manager.checkRulesDirectoryHasContent("test-mode") + + expect(result).toBe(false) + }) + + it("should return false when .roomodes doesn't exist and mode is not a custom mode", async () => { + ;(fileExistsAtPath as Mock).mockImplementation(async (path: string) => { + return path === mockSettingsPath + }) + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockSettingsPath) { + return yaml.stringify({ customModes: [] }) + } + throw new Error("File not found") + }) + + const result = await manager.checkRulesDirectoryHasContent("test-mode") + + expect(result).toBe(false) + }) + + it("should return false when rules directory doesn't exist", async () => { + const roomodesContent = { customModes: [{ slug: "test-mode", name: "Test Mode" }] } + ;(fileExistsAtPath as Mock).mockImplementation(async (path: string) => { + return path === mockRoomodes + }) + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockRoomodes) { + return yaml.stringify(roomodesContent) + } + throw new Error("File not found") + }) + ;(fs.stat as Mock).mockRejectedValue(new Error("Directory not found")) + + const result = await manager.checkRulesDirectoryHasContent("test-mode") + + expect(result).toBe(false) + }) + + it("should return false when rules directory is empty", async () => { + const roomodesContent = { customModes: [{ slug: "test-mode", name: "Test Mode" }] } + ;(fileExistsAtPath as Mock).mockImplementation(async (path: string) => { + return path === mockRoomodes + }) + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockRoomodes) { + return yaml.stringify(roomodesContent) + } + throw new Error("File not found") + }) + ;(fs.stat as Mock).mockResolvedValue({ isDirectory: () => true }) + ;(fs.readdir as Mock).mockResolvedValue([]) + + const result = await manager.checkRulesDirectoryHasContent("test-mode") + + expect(result).toBe(false) + }) + + it("should return true when rules directory has content files", async () => { + const roomodesContent = { customModes: [{ slug: "test-mode", name: "Test Mode" }] } + ;(fileExistsAtPath as Mock).mockImplementation(async (path: string) => { + return path === mockRoomodes + }) + ;(fs.readFile 
as Mock).mockImplementation(async (path: string) => { + if (path === mockRoomodes) { + return yaml.stringify(roomodesContent) + } + if (path.includes("rules-test-mode")) { + return "Some rule content" + } + throw new Error("File not found") + }) + ;(fs.stat as Mock).mockResolvedValue({ isDirectory: () => true }) + ;(fs.readdir as Mock).mockResolvedValue([ + { name: "rule1.md", isFile: () => true, parentPath: "/mock/workspace/.roo/rules-test-mode" }, + ]) + + const result = await manager.checkRulesDirectoryHasContent("test-mode") + + expect(result).toBe(true) + }) + + it("should work with global custom modes when .roomodes doesn't exist", async () => { + const settingsContent = { + customModes: [{ slug: "test-mode", name: "Test Mode", groups: ["read"], roleDefinition: "Test Role" }], + } + + // Create a fresh manager instance to avoid cache issues + const freshManager = new CustomModesManager(mockContext, mockOnUpdate) + + ;(fileExistsAtPath as Mock).mockImplementation(async (path: string) => { + return path === mockSettingsPath // .roomodes doesn't exist + }) + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockSettingsPath) { + return yaml.stringify(settingsContent) + } + if (path.includes("rules-test-mode")) { + return "Some rule content" + } + throw new Error("File not found") + }) + ;(fs.stat as Mock).mockResolvedValue({ isDirectory: () => true }) + ;(fs.readdir as Mock).mockResolvedValue([ + { name: "rule1.md", isFile: () => true, parentPath: "/mock/workspace/.roo/rules-test-mode" }, + ]) + + const result = await freshManager.checkRulesDirectoryHasContent("test-mode") + + expect(result).toBe(true) + }) + }) + + describe("exportModeWithRules", () => { + it("should return error when no workspace is available", async () => { + // Create a fresh manager instance to avoid cache issues + const freshManager = new CustomModesManager(mockContext, mockOnUpdate) + + // Mock no workspace folders + ;(vscode.workspace as any).workspaceFolders = [] + ;(getWorkspacePath as Mock).mockReturnValue(null) + ;(fileExistsAtPath as Mock).mockResolvedValue(false) + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockSettingsPath) { + return yaml.stringify({ customModes: [] }) + } + throw new Error("File not found") + }) + + const result = await freshManager.exportModeWithRules("test-mode") + + expect(result.success).toBe(false) + expect(result.error).toBe("No workspace found") + }) + + it("should return error when mode is not found", async () => { + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockSettingsPath) { + return yaml.stringify({ customModes: [] }) + } + throw new Error("File not found") + }) + ;(fileExistsAtPath as Mock).mockImplementation(async (path: string) => { + return path === mockSettingsPath + }) + + const result = await manager.exportModeWithRules("test-mode") + + expect(result.success).toBe(false) + expect(result.error).toBe("Mode not found") + }) + + it("should successfully export mode without rules when rules directory doesn't exist", async () => { + const roomodesContent = { + customModes: [{ slug: "test-mode", name: "Test Mode", roleDefinition: "Test Role", groups: ["read"] }], + } + ;(fileExistsAtPath as Mock).mockImplementation(async (path: string) => { + return path === mockRoomodes + }) + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockRoomodes) { + return yaml.stringify(roomodesContent) + } + throw new Error("File not found") + }) + 
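Only the behaviour of `checkRulesDirectoryHasContent` is pinned down by these tests, not its implementation. Under the assumption that it stats the mode's `rules-<slug>` folder and looks for at least one file, a sketch of what the tests describe:

```typescript
import * as path from "path"
import * as fs from "fs/promises"

// Illustrative sketch only — the real logic lives in CustomModesManager.
// Returns true when `.roo/rules-<slug>` exists, is a directory, and contains a file.
async function rulesDirectoryHasContent(workspacePath: string, slug: string): Promise<boolean> {
	const rulesDir = path.join(workspacePath, ".roo", `rules-${slug}`)
	try {
		const stats = await fs.stat(rulesDir)
		if (!stats.isDirectory()) {
			return false
		}
		const entries = await fs.readdir(rulesDir, { withFileTypes: true })
		return entries.some((entry) => entry.isFile())
	} catch {
		return false // missing or unreadable directory counts as "no content"
	}
}
```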
;(fs.stat as Mock).mockRejectedValue(new Error("Directory not found")) + + const result = await manager.exportModeWithRules("test-mode") + + expect(result.success).toBe(true) + expect(result.yaml).toContain("test-mode") + expect(result.yaml).toContain("Test Mode") + }) + + it("should successfully export mode without rules when no rule files are found", async () => { + const roomodesContent = { + customModes: [{ slug: "test-mode", name: "Test Mode", roleDefinition: "Test Role", groups: ["read"] }], + } + ;(fileExistsAtPath as Mock).mockImplementation(async (path: string) => { + return path === mockRoomodes + }) + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockRoomodes) { + return yaml.stringify(roomodesContent) + } + throw new Error("File not found") + }) + ;(fs.stat as Mock).mockResolvedValue({ isDirectory: () => true }) + ;(fs.readdir as Mock).mockResolvedValue([]) + + const result = await manager.exportModeWithRules("test-mode") + + expect(result.success).toBe(true) + expect(result.yaml).toContain("test-mode") + }) + + it("should successfully export mode with rules for a custom mode in .roomodes", async () => { + const roomodesContent = { + customModes: [ + { + slug: "test-mode", + name: "Test Mode", + roleDefinition: "Test Role", + groups: ["read"], + customInstructions: "Existing instructions", + }, + ], + } + + ;(fileExistsAtPath as Mock).mockImplementation(async (path: string) => { + return path === mockRoomodes + }) + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockRoomodes) { + return yaml.stringify(roomodesContent) + } + if (path.includes("rules-test-mode")) { + return "New rule content from files" + } + throw new Error("File not found") + }) + ;(fs.stat as Mock).mockResolvedValue({ isDirectory: () => true }) + ;(fs.readdir as Mock).mockResolvedValue([ + { name: "rule1.md", isFile: () => true, parentPath: "/mock/workspace/.roo/rules-test-mode" }, + ]) + + const result = await manager.exportModeWithRules("test-mode") + + expect(result.success).toBe(true) + expect(result.yaml).toContain("test-mode") + expect(result.yaml).toContain("Existing instructions") + expect(result.yaml).toContain("New rule content from files") + // Should NOT delete the rules directory + expect(fs.rm).not.toHaveBeenCalled() + }) + + it("should successfully export mode with rules for a built-in mode customized in .roomodes", async () => { + const roomodesContent = { + customModes: [ + { + slug: "code", + name: "Custom Code Mode", + roleDefinition: "Custom Role", + groups: ["read"], + }, + ], + } + + ;(fileExistsAtPath as Mock).mockImplementation(async (path: string) => { + return path === mockRoomodes + }) + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockRoomodes) { + return yaml.stringify(roomodesContent) + } + if (path.includes("rules-code")) { + return "Custom rules for code mode" + } + if (path === mockSettingsPath) { + return yaml.stringify({ customModes: [] }) + } + throw new Error("File not found") + }) + ;(fs.stat as Mock).mockResolvedValue({ isDirectory: () => true }) + ;(fs.readdir as Mock).mockResolvedValue([ + { name: "rule1.md", isFile: () => true, parentPath: "/mock/workspace/.roo/rules-code" }, + ]) + + const result = await manager.exportModeWithRules("code") + + expect(result.success).toBe(true) + expect(result.yaml).toContain("Custom Code Mode") + expect(result.yaml).toContain("Custom rules for code mode") + // Should NOT delete the rules directory + 
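These export tests expect the generated YAML to carry both the mode definition and the contents of its rule files. A rough sketch of that assembly step, with the shape inferred from the assertions (helper name is illustrative):

```typescript
import * as yaml from "yaml"

interface RuleFile {
	relativePath: string
	content: string
}

// Bundle a mode and any collected rule files into the exported document.
// The real exportModeWithRules in this PR attaches the collected files to the
// mode (as `rulesFiles`) before stringifying `{ customModes: [exportMode] }`.
function buildExportYaml(mode: Record<string, unknown>, rulesFiles: RuleFile[]): string {
	const exportMode = rulesFiles.length > 0 ? { ...mode, rulesFiles } : { ...mode }
	return yaml.stringify({ customModes: [exportMode] })
}
```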
expect(fs.rm).not.toHaveBeenCalled() + }) + + it("should handle file read errors gracefully", async () => { + const roomodesContent = { + customModes: [ + { + slug: "test-mode", + name: "Test Mode", + roleDefinition: "Test Role", + groups: ["read"], + }, + ], + } + + ;(fileExistsAtPath as Mock).mockImplementation(async (path: string) => { + return path === mockRoomodes + }) + ;(fs.readFile as Mock).mockImplementation(async (path: string) => { + if (path === mockRoomodes) { + return yaml.stringify(roomodesContent) + } + if (path.includes("rules-test-mode")) { + throw new Error("Permission denied") + } + throw new Error("File not found") + }) + ;(fs.stat as Mock).mockResolvedValue({ isDirectory: () => true }) + ;(fs.readdir as Mock).mockResolvedValue([ + { name: "rule1.md", isFile: () => true, parentPath: "/mock/workspace/.roo/rules-test-mode" }, + ]) + + const result = await manager.exportModeWithRules("test-mode") + + // Should still succeed even if file read fails + expect(result.success).toBe(true) + expect(result.yaml).toContain("test-mode") + }) }) }) diff --git a/src/core/config/__tests__/importExport.spec.ts b/src/core/config/__tests__/importExport.spec.ts index 1f6bd5f28e77..052bfc77f892 100644 --- a/src/core/config/__tests__/importExport.spec.ts +++ b/src/core/config/__tests__/importExport.spec.ts @@ -595,5 +595,1024 @@ describe("importExport", () => { expect(vscode.Uri.file).toHaveBeenCalledWith(path.join("/mock/home", "Documents", "roo-code-settings.json")) }) + + describe("codebase indexing export", () => { + it("should export correct base URL for OpenAI Compatible provider", async () => { + ;(vscode.window.showSaveDialog as Mock).mockResolvedValue({ + fsPath: "/mock/path/roo-code-settings.json", + }) + + const mockProviderProfiles = { + currentApiConfigName: "openai-compatible-provider", + apiConfigs: { + "openai-compatible-provider": { + apiProvider: "openai" as ProviderName, + id: "openai-compatible-id", + // Remove OpenAI Compatible settings from provider profile + }, + "ollama-provider": { + apiProvider: "ollama" as ProviderName, + id: "ollama-id", + codebaseIndexOllamaBaseUrl: "http://localhost:11434", + }, + }, + modeApiConfigs: {}, + } + + const mockGlobalSettings = { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexEmbedderBaseUrl: "http://localhost:11434", // Wrong URL from Ollama + // OpenAI Compatible settings are now stored directly in codebaseIndexConfig + codebaseIndexOpenAiCompatibleBaseUrl: "https://custom-openai-api.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 1536, + }, + } + + mockProviderSettingsManager.export.mockResolvedValue(mockProviderProfiles) + mockContextProxy.export.mockResolvedValue(mockGlobalSettings) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + await exportSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + }) + + expect(safeWriteJson).toHaveBeenCalledWith("/mock/path/roo-code-settings.json", { + providerProfiles: mockProviderProfiles, + globalSettings: mockGlobalSettings, + }) + }) + + it("should export model dimension for OpenAI Compatible provider", async () => { + ;(vscode.window.showSaveDialog as Mock).mockResolvedValue({ + fsPath: "/mock/path/roo-code-settings.json", + }) + + const mockProviderProfiles = { + currentApiConfigName: "test-provider", + apiConfigs: { + "test-provider": { + apiProvider: "openai" 
as ProviderName, + id: "test-id", + // Remove OpenAI Compatible settings from provider profile + }, + }, + modeApiConfigs: {}, + } + + const mockGlobalSettings = { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "custom-embedding-model", + codebaseIndexEmbedderBaseUrl: "", + // OpenAI Compatible settings are now stored directly in codebaseIndexConfig + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 768, + }, + } + + mockProviderSettingsManager.export.mockResolvedValue(mockProviderProfiles) + mockContextProxy.export.mockResolvedValue(mockGlobalSettings) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + await exportSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + }) + + const exportedData = (safeWriteJson as Mock).mock.calls[0][1] + // Settings are now exported as-is from codebaseIndexConfig + expect( + exportedData.globalSettings.codebaseIndexConfig.codebaseIndexOpenAiCompatibleModelDimension, + ).toBe(768) + expect(exportedData.globalSettings.codebaseIndexConfig.codebaseIndexOpenAiCompatibleBaseUrl).toBe( + "https://api.example.com/v1", + ) + }) + + it("should not mix settings between different providers", async () => { + ;(vscode.window.showSaveDialog as Mock).mockResolvedValue({ + fsPath: "/mock/path/roo-code-settings.json", + }) + + const mockProviderProfiles = { + currentApiConfigName: "openai-compatible-provider", + apiConfigs: { + "openai-compatible-provider": { + apiProvider: "openai" as ProviderName, + id: "openai-compatible-id", + // Remove OpenAI Compatible settings from provider profile + }, + "ollama-provider": { + apiProvider: "ollama" as ProviderName, + id: "ollama-id", + codebaseIndexOllamaBaseUrl: "http://localhost:11434", + }, + "anthropic-provider": { + apiProvider: "anthropic" as ProviderName, + id: "anthropic-id", + }, + }, + modeApiConfigs: {}, + } + + const mockGlobalSettings = { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexEmbedderBaseUrl: "http://localhost:11434", // Wrong URL from Ollama + // OpenAI Compatible settings are now stored directly in codebaseIndexConfig + codebaseIndexOpenAiCompatibleBaseUrl: "https://openai-compatible.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 1536, + }, + } + + mockProviderSettingsManager.export.mockResolvedValue(mockProviderProfiles) + mockContextProxy.export.mockResolvedValue(mockGlobalSettings) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + await exportSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + }) + + const exportedData = (safeWriteJson as Mock).mock.calls[0][1] + // Settings are now exported as-is from codebaseIndexConfig + expect(exportedData.globalSettings.codebaseIndexConfig.codebaseIndexOpenAiCompatibleBaseUrl).toBe( + "https://openai-compatible.example.com/v1", + ) + expect( + exportedData.globalSettings.codebaseIndexConfig.codebaseIndexOpenAiCompatibleModelDimension, + ).toBe(1536) + // The generic embedder base URL is still there + expect(exportedData.globalSettings.codebaseIndexConfig.codebaseIndexEmbedderBaseUrl).toBe( + "http://localhost:11434", + ) + }) + + it("should handle missing provider-specific settings gracefully", async () => 
{ + ;(vscode.window.showSaveDialog as Mock).mockResolvedValue({ + fsPath: "/mock/path/roo-code-settings.json", + }) + + const mockProviderProfiles = { + currentApiConfigName: "incomplete-provider", + apiConfigs: { + "incomplete-provider": { + apiProvider: "openai" as ProviderName, + id: "incomplete-id", + // Missing codebaseIndexOpenAiCompatibleBaseUrl and dimension + }, + }, + modeApiConfigs: {}, + } + + const mockGlobalSettings = { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexEmbedderBaseUrl: "https://fallback.example.com/v1", + }, + } + + // Mock getGlobalState to return undefined (no settings) + mockContextProxy.getGlobalState = vi.fn().mockReturnValue(undefined) + + mockProviderSettingsManager.export.mockResolvedValue(mockProviderProfiles) + mockContextProxy.export.mockResolvedValue(mockGlobalSettings) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + await exportSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + }) + + // Should not throw an error and should preserve original settings + expect(safeWriteJson).toHaveBeenCalledWith("/mock/path/roo-code-settings.json", { + providerProfiles: mockProviderProfiles, + globalSettings: mockGlobalSettings, // Should remain unchanged + }) + }) + + it("should maintain backward compatibility with existing exports", async () => { + ;(vscode.window.showSaveDialog as Mock).mockResolvedValue({ + fsPath: "/mock/path/roo-code-settings.json", + }) + + const mockProviderProfiles = { + currentApiConfigName: "openai-provider", + apiConfigs: { + "openai-provider": { + apiProvider: "openai" as ProviderName, + id: "openai-id", + // Regular OpenAI provider without OpenAI Compatible settings + }, + }, + modeApiConfigs: {}, + } + + const mockGlobalSettings = { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai" as const, // Not openai-compatible + codebaseIndexEmbedderModelId: "text-embedding-ada-002", + codebaseIndexEmbedderBaseUrl: "https://api.openai.com/v1", + }, + } + + mockProviderSettingsManager.export.mockResolvedValue(mockProviderProfiles) + mockContextProxy.export.mockResolvedValue(mockGlobalSettings) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + await exportSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + }) + + // Should not modify settings for non-openai-compatible providers + expect(safeWriteJson).toHaveBeenCalledWith("/mock/path/roo-code-settings.json", { + providerProfiles: mockProviderProfiles, + globalSettings: mockGlobalSettings, // Should remain unchanged + }) + }) + + it("should handle missing current provider gracefully", async () => { + ;(vscode.window.showSaveDialog as Mock).mockResolvedValue({ + fsPath: "/mock/path/roo-code-settings.json", + }) + + const mockProviderProfiles = { + currentApiConfigName: "nonexistent-provider", + apiConfigs: { + "other-provider": { + apiProvider: "openai" as ProviderName, + id: "other-id", + }, + }, + modeApiConfigs: {}, + } + + const mockGlobalSettings = { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexEmbedderBaseUrl: "https://fallback.example.com/v1", + }, + } + + // Mock getGlobalState to return undefined (no 
settings) + mockContextProxy.getGlobalState = vi.fn().mockReturnValue(undefined) + + mockProviderSettingsManager.export.mockResolvedValue(mockProviderProfiles) + mockContextProxy.export.mockResolvedValue(mockGlobalSettings) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + await exportSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + }) + + // Should not throw an error and should preserve original settings + expect(safeWriteJson).toHaveBeenCalledWith("/mock/path/roo-code-settings.json", { + providerProfiles: mockProviderProfiles, + globalSettings: mockGlobalSettings, // Should remain unchanged + }) + }) + }) + + describe("import with OpenAI Compatible codebase indexing settings", () => { + it("should properly import OpenAI Compatible settings in codebaseIndexConfig", async () => { + ;(vscode.window.showOpenDialog as Mock).mockResolvedValue([{ fsPath: "/mock/path/settings.json" }]) + + const mockFileContent = JSON.stringify({ + providerProfiles: { + currentApiConfigName: "openai-compatible-provider", + apiConfigs: { + "openai-compatible-provider": { + apiProvider: "openai" as ProviderName, + id: "openai-compatible-id", + // Provider-specific settings remain in provider profile + codebaseIndexOpenAiCompatibleBaseUrl: "https://old-url.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 512, + }, + }, + modeApiConfigs: {}, + }, + globalSettings: { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexEmbedderBaseUrl: "https://imported-url.example.com/v1", + codebaseIndexEmbedderModelDimension: 1536, + // OpenAI Compatible settings are now stored directly here + codebaseIndexOpenAiCompatibleBaseUrl: "https://imported-url.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 1536, + }, + }, + }) + + ;(fs.readFile as Mock).mockResolvedValue(mockFileContent) + + const previousProviderProfiles = { + currentApiConfigName: "default", + apiConfigs: { default: { apiProvider: "anthropic" as ProviderName, id: "default-id" } }, + } + + mockProviderSettingsManager.export.mockResolvedValue(previousProviderProfiles) + mockProviderSettingsManager.listConfig.mockResolvedValue([ + { + name: "openai-compatible-provider", + id: "openai-compatible-id", + apiProvider: "openai" as ProviderName, + }, + { name: "default", id: "default-id", apiProvider: "anthropic" as ProviderName }, + ]) + + const result = await importSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + customModesManager: mockCustomModesManager, + }) + + expect(result.success).toBe(true) + + // Verify that the global settings were imported correctly + expect(mockContextProxy.setValues).toHaveBeenCalledWith( + expect.objectContaining({ + codebaseIndexConfig: expect.objectContaining({ + codebaseIndexOpenAiCompatibleBaseUrl: "https://imported-url.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 1536, + }), + }), + ) + + // Provider profiles are imported as-is + const importedProviderProfiles = mockProviderSettingsManager.import.mock.calls[0][0] + const importedProvider = importedProviderProfiles.apiConfigs["openai-compatible-provider"] + + // Provider still has its own settings (not modified by import) + expect(importedProvider.codebaseIndexOpenAiCompatibleBaseUrl).toBe("https://old-url.example.com/v1") + 
expect(importedProvider.codebaseIndexOpenAiCompatibleModelDimension).toBe(512) + }) + + it("should handle missing OpenAI Compatible settings gracefully during import", async () => { + ;(vscode.window.showOpenDialog as Mock).mockResolvedValue([{ fsPath: "/mock/path/settings.json" }]) + + const mockFileContent = JSON.stringify({ + providerProfiles: { + currentApiConfigName: "openai-compatible-provider", + apiConfigs: { + "openai-compatible-provider": { + apiProvider: "openai" as ProviderName, + id: "openai-compatible-id", + }, + }, + modeApiConfigs: {}, + }, + globalSettings: { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "text-embedding-3-small", + // Missing base URL and model dimension + }, + }, + }) + + ;(fs.readFile as Mock).mockResolvedValue(mockFileContent) + + const previousProviderProfiles = { + currentApiConfigName: "default", + apiConfigs: { default: { apiProvider: "anthropic" as ProviderName, id: "default-id" } }, + } + + mockProviderSettingsManager.export.mockResolvedValue(previousProviderProfiles) + mockProviderSettingsManager.listConfig.mockResolvedValue([ + { + name: "openai-compatible-provider", + id: "openai-compatible-id", + apiProvider: "openai" as ProviderName, + }, + ]) + + const result = await importSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + customModesManager: mockCustomModesManager, + }) + + expect(result.success).toBe(true) + // Should not throw an error when settings are missing + }) + + it("should not modify provider settings for non-openai-compatible providers during import", async () => { + ;(vscode.window.showOpenDialog as Mock).mockResolvedValue([{ fsPath: "/mock/path/settings.json" }]) + + const mockFileContent = JSON.stringify({ + providerProfiles: { + currentApiConfigName: "anthropic-provider", + apiConfigs: { + "anthropic-provider": { + apiProvider: "anthropic" as ProviderName, + id: "anthropic-id", + }, + }, + modeApiConfigs: {}, + }, + globalSettings: { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai" as const, // Not openai-compatible + codebaseIndexEmbedderModelId: "text-embedding-ada-002", + codebaseIndexEmbedderBaseUrl: "https://api.openai.com/v1", + codebaseIndexEmbedderModelDimension: 1536, + }, + }, + }) + + ;(fs.readFile as Mock).mockResolvedValue(mockFileContent) + + const previousProviderProfiles = { + currentApiConfigName: "default", + apiConfigs: { default: { apiProvider: "anthropic" as ProviderName, id: "default-id" } }, + } + + mockProviderSettingsManager.export.mockResolvedValue(previousProviderProfiles) + mockProviderSettingsManager.listConfig.mockResolvedValue([ + { name: "anthropic-provider", id: "anthropic-id", apiProvider: "anthropic" as ProviderName }, + ]) + + const result = await importSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + customModesManager: mockCustomModesManager, + }) + + expect(result.success).toBe(true) + + // Verify that the provider settings were not modified with OpenAI Compatible fields + const importedProviderProfiles = mockProviderSettingsManager.import.mock.calls[0][0] + const importedProvider = importedProviderProfiles.apiConfigs["anthropic-provider"] + + expect(importedProvider.codebaseIndexOpenAiCompatibleBaseUrl).toBeUndefined() + 
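The change in `importExport.ts` further down in this diff removes the per-provider workaround entirely: because the OpenAI Compatible base URL and model dimension now live inside `codebaseIndexConfig`, they are written out with `globalSettings` and applied back via `contextProxy.setValues` untouched. A simplified sketch of that round trip (plain `fs` and `JSON` stand in for `safeWriteJson` and the real managers):

```typescript
import * as fs from "fs/promises"

type SettingsBundle = {
	providerProfiles: Record<string, unknown>
	globalSettings: {
		codebaseIndexConfig?: Record<string, unknown>
		[key: string]: unknown
	}
}

// Export writes both halves verbatim; import reads and applies them verbatim.
// codebaseIndexOpenAiCompatibleBaseUrl / ...ModelDimension ride along inside
// globalSettings.codebaseIndexConfig, so no provider-specific copying is needed.
async function roundTrip(bundle: SettingsBundle, filePath: string): Promise<SettingsBundle> {
	await fs.writeFile(filePath, JSON.stringify(bundle, null, 2), "utf-8")
	return JSON.parse(await fs.readFile(filePath, "utf-8")) as SettingsBundle
}
```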
expect(importedProvider.codebaseIndexOpenAiCompatibleModelDimension).toBeUndefined() + }) + }) + + it("should preserve model dimension exactly in export/import roundtrip", async () => { + // This test specifically isolates the model dimension export/import roundtrip + // to catch the exact issue the user is experiencing + + const testModelDimension = 768 + + // Step 1: Set up a provider without OpenAI Compatible settings in profile + const mockProviderProfiles = { + currentApiConfigName: "test-openai-compatible", + apiConfigs: { + "test-openai-compatible": { + apiProvider: "openai" as ProviderName, + id: "test-id", + // Remove OpenAI Compatible settings from provider profile + }, + }, + modeApiConfigs: {}, + } + + const mockGlobalSettings = { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "custom-embedding-model", + codebaseIndexEmbedderBaseUrl: "https://api.example.com/v1", + codebaseIndexEmbedderModelDimension: testModelDimension, + // OpenAI Compatible settings are now stored directly in codebaseIndexConfig + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: testModelDimension, + }, + } + + // Step 2: Mock export operation + ;(vscode.window.showSaveDialog as Mock).mockResolvedValue({ + fsPath: "/mock/path/test-settings.json", + }) + + mockProviderSettingsManager.export.mockResolvedValue(mockProviderProfiles) + mockContextProxy.export.mockResolvedValue(mockGlobalSettings) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + // Step 3: Export settings + await exportSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + }) + + // Step 4: Verify the exported data includes the model dimension + expect(safeWriteJson).toHaveBeenCalledWith("/mock/path/test-settings.json", { + providerProfiles: mockProviderProfiles, + globalSettings: mockGlobalSettings, + }) + + // Step 5: Get the exported data for import test + const exportedData = (safeWriteJson as Mock).mock.calls[0][1] + const exportedFileContent = JSON.stringify(exportedData) + + // Step 6: Mock import operation + ;(vscode.window.showOpenDialog as Mock).mockResolvedValue([{ fsPath: "/mock/path/test-settings.json" }]) + ;(fs.readFile as Mock).mockResolvedValue(exportedFileContent) + + // Reset mocks for import + vi.clearAllMocks() + mockProviderSettingsManager.export.mockResolvedValue({ + currentApiConfigName: "default", + apiConfigs: { default: { apiProvider: "anthropic" as ProviderName, id: "default-id" } }, + }) + mockProviderSettingsManager.listConfig.mockResolvedValue([ + { name: "test-openai-compatible", id: "test-id", apiProvider: "openai" as ProviderName }, + ]) + + // Step 7: Import the settings back + const importResult = await importSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + customModesManager: mockCustomModesManager, + }) + + // Step 8: Verify import was successful + expect(importResult.success).toBe(true) + + // Step 9: Verify that the model dimension was preserved exactly in global settings + const importedGlobalSettings = mockContextProxy.setValues.mock.calls[0][0] + expect(importedGlobalSettings.codebaseIndexConfig?.codebaseIndexOpenAiCompatibleModelDimension).toBe( + testModelDimension, + ) + expect(importedGlobalSettings.codebaseIndexConfig?.codebaseIndexOpenAiCompatibleBaseUrl).toBe( + "https://api.example.com/v1", + ) + + // Step 10: 
Verify that the embedder settings were imported correctly + expect(importedGlobalSettings.codebaseIndexConfig?.codebaseIndexEmbedderModelDimension).toBe( + testModelDimension, + ) + }) + + it("should handle edge case model dimension values (0, null) correctly", async () => { + // Test with model dimension = 0 (which is falsy but valid) + const testModelDimension = 0 + + const mockProviderProfiles = { + currentApiConfigName: "test-openai-compatible", + apiConfigs: { + "test-openai-compatible": { + apiProvider: "openai" as ProviderName, + id: "test-id", + // Remove OpenAI Compatible settings from provider profile + }, + }, + modeApiConfigs: {}, + } + + const mockGlobalSettings = { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "custom-embedding-model", + codebaseIndexEmbedderBaseUrl: "https://api.example.com/v1", + // OpenAI Compatible settings are now stored directly in codebaseIndexConfig + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: testModelDimension, // 0 is a valid value + }, + } + + // Mock export operation + ;(vscode.window.showSaveDialog as Mock).mockResolvedValue({ + fsPath: "/mock/path/test-settings.json", + }) + + mockProviderSettingsManager.export.mockResolvedValue(mockProviderProfiles) + mockContextProxy.export.mockResolvedValue(mockGlobalSettings) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + // Export settings + await exportSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + }) + + // Verify the exported data includes the model dimension even when it's 0 + const exportedData = (safeWriteJson as Mock).mock.calls[0][1] + expect(exportedData.globalSettings.codebaseIndexConfig.codebaseIndexOpenAiCompatibleModelDimension).toBe(0) + + // Test import roundtrip + const exportedFileContent = JSON.stringify(exportedData) + ;(vscode.window.showOpenDialog as Mock).mockResolvedValue([{ fsPath: "/mock/path/test-settings.json" }]) + ;(fs.readFile as Mock).mockResolvedValue(exportedFileContent) + + // Reset mocks for import + vi.clearAllMocks() + mockProviderSettingsManager.export.mockResolvedValue({ + currentApiConfigName: "default", + apiConfigs: { default: { apiProvider: "anthropic" as ProviderName, id: "default-id" } }, + }) + mockProviderSettingsManager.listConfig.mockResolvedValue([ + { name: "test-openai-compatible", id: "test-id", apiProvider: "openai" as ProviderName }, + ]) + + // Import the settings back + const importResult = await importSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + customModesManager: mockCustomModesManager, + }) + + expect(importResult.success).toBe(true) + + // Verify that model dimension 0 was preserved in global settings + const setValuesCall = mockContextProxy.setValues.mock.calls[0][0] + expect(setValuesCall.codebaseIndexConfig?.codebaseIndexOpenAiCompatibleModelDimension).toBe(0) + }) + + it("should handle missing model dimension gracefully", async () => { + // Test when model dimension is undefined in global state + const mockProviderProfiles = { + currentApiConfigName: "test-openai-compatible", + apiConfigs: { + "test-openai-compatible": { + apiProvider: "openai" as ProviderName, + id: "test-id", + // Remove OpenAI Compatible settings from provider profile + }, + }, + modeApiConfigs: {}, + } + + const mockGlobalSettings = { + mode: "code", + 
codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "custom-embedding-model", + codebaseIndexEmbedderBaseUrl: "https://api.example.com/v1", + }, + } + + // Mock getGlobalState to return undefined for model dimension + mockContextProxy.getGlobalState = vi.fn().mockImplementation((key: string) => { + if (key === "codebaseIndexOpenAiCompatibleBaseUrl") { + return "https://api.example.com/v1" + } + if (key === "codebaseIndexOpenAiCompatibleModelDimension") { + return undefined + } + return undefined + }) + + // Mock export operation + ;(vscode.window.showSaveDialog as Mock).mockResolvedValue({ + fsPath: "/mock/path/test-settings.json", + }) + + mockProviderSettingsManager.export.mockResolvedValue(mockProviderProfiles) + mockContextProxy.export.mockResolvedValue(mockGlobalSettings) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + // Export settings + await exportSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + }) + + // Verify the exported data does NOT include model dimension when it's undefined + const exportedData = (safeWriteJson as Mock).mock.calls[0][1] + expect(exportedData.globalSettings.codebaseIndexConfig.codebaseIndexEmbedderModelDimension).toBeUndefined() + }) + + it("should handle provider mismatch during import - BUG REPRODUCTION", async () => { + // This test reproduces the bug where model dimension is lost when importing + // settings where the current provider is different from the exported provider + + // Step 1: Create exported settings from "provider-a" with model dimension + const exportedSettings = { + providerProfiles: { + currentApiConfigName: "provider-a", + apiConfigs: { + "provider-a": { + apiProvider: "openai" as ProviderName, + id: "provider-a-id", + codebaseIndexOpenAiCompatibleBaseUrl: "https://api-a.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 1536, + }, + "provider-b": { + apiProvider: "anthropic" as ProviderName, + id: "provider-b-id", + }, + }, + modeApiConfigs: {}, + }, + globalSettings: { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexEmbedderBaseUrl: "https://api-a.example.com/v1", + codebaseIndexEmbedderModelDimension: 1536, + }, + }, + } + + // Step 2: Set up import environment where current provider is "provider-b" (different!) + const currentProviderProfiles = { + currentApiConfigName: "provider-b", // Different from exported settings! 
+ apiConfigs: { + "provider-b": { + apiProvider: "anthropic" as ProviderName, + id: "provider-b-id", + }, + }, + } + + // Step 3: Mock import operation + ;(vscode.window.showOpenDialog as Mock).mockResolvedValue([{ fsPath: "/mock/path/settings.json" }]) + ;(fs.readFile as Mock).mockResolvedValue(JSON.stringify(exportedSettings)) + + mockProviderSettingsManager.export.mockResolvedValue(currentProviderProfiles) + mockProviderSettingsManager.listConfig.mockResolvedValue([ + { name: "provider-a", id: "provider-a-id", apiProvider: "openai" as ProviderName }, + { name: "provider-b", id: "provider-b-id", apiProvider: "anthropic" as ProviderName }, + ]) + + // Step 4: Import the settings + const importResult = await importSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + customModesManager: mockCustomModesManager, + }) + + expect(importResult.success).toBe(true) + + // Step 5: Check what was imported + const importedProviderProfiles = mockProviderSettingsManager.import.mock.calls[0][0] + + // The bug: provider-a should have its model dimension preserved, but it might be lost + // because the import logic only updates the CURRENT provider (provider-b) + const providerA = importedProviderProfiles.apiConfigs["provider-a"] + const providerB = importedProviderProfiles.apiConfigs["provider-b"] + + // This should pass but might fail due to the bug + expect(providerA.codebaseIndexOpenAiCompatibleModelDimension).toBe(1536) + expect(providerA.codebaseIndexOpenAiCompatibleBaseUrl).toBe("https://api-a.example.com/v1") + + // Provider B should not have OpenAI Compatible settings + expect(providerB.codebaseIndexOpenAiCompatibleModelDimension).toBeUndefined() + expect(providerB.codebaseIndexOpenAiCompatibleBaseUrl).toBeUndefined() + }) + + it("should NOT copy OpenAI Compatible settings to provider profiles - FIXED BEHAVIOR", async () => { + // This test verifies the FIXED behavior: OpenAI Compatible settings stay in global settings only + + const exportedSettings = { + providerProfiles: { + currentApiConfigName: "openai-compatible-provider", + apiConfigs: { + "openai-compatible-provider": { + apiProvider: "openai" as ProviderName, + id: "openai-compatible-id", + // NO OpenAI Compatible settings here in the fixed version + }, + "anthropic-provider": { + apiProvider: "anthropic" as ProviderName, + id: "anthropic-id", + }, + }, + modeApiConfigs: {}, + }, + globalSettings: { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexEmbedderBaseUrl: "https://new-url.example.com/v1", + codebaseIndexEmbedderModelDimension: 1536, + // OpenAI Compatible settings are stored here + codebaseIndexOpenAiCompatibleBaseUrl: "https://new-url.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 1536, + }, + }, + } + + const currentProviderProfiles = { + currentApiConfigName: "anthropic-provider", + apiConfigs: { + "anthropic-provider": { + apiProvider: "anthropic" as ProviderName, + id: "anthropic-id", + }, + }, + } + + ;(vscode.window.showOpenDialog as Mock).mockResolvedValue([{ fsPath: "/mock/path/settings.json" }]) + ;(fs.readFile as Mock).mockResolvedValue(JSON.stringify(exportedSettings)) + + mockProviderSettingsManager.export.mockResolvedValue(currentProviderProfiles) + mockProviderSettingsManager.listConfig.mockResolvedValue([ + { + name: "openai-compatible-provider", + id: "openai-compatible-id", + apiProvider: 
"openai" as ProviderName, + }, + { name: "anthropic-provider", id: "anthropic-id", apiProvider: "anthropic" as ProviderName }, + ]) + + const importResult = await importSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + customModesManager: mockCustomModesManager, + }) + + expect(importResult.success).toBe(true) + + // Verify OpenAI Compatible settings are imported to global settings + const importedGlobalSettings = mockContextProxy.setValues.mock.calls[0][0] + expect(importedGlobalSettings.codebaseIndexConfig?.codebaseIndexOpenAiCompatibleBaseUrl).toBe( + "https://new-url.example.com/v1", + ) + expect(importedGlobalSettings.codebaseIndexConfig?.codebaseIndexOpenAiCompatibleModelDimension).toBe(1536) + + // Verify provider profiles do NOT have OpenAI Compatible settings + const importedProviderProfiles = mockProviderSettingsManager.import.mock.calls[0][0] + const openaiCompatibleProvider = importedProviderProfiles.apiConfigs["openai-compatible-provider"] + const anthropicProvider = importedProviderProfiles.apiConfigs["anthropic-provider"] + + // Neither provider should have OpenAI Compatible settings + expect(openaiCompatibleProvider.codebaseIndexOpenAiCompatibleBaseUrl).toBeUndefined() + expect(openaiCompatibleProvider.codebaseIndexOpenAiCompatibleModelDimension).toBeUndefined() + expect(anthropicProvider.codebaseIndexOpenAiCompatibleBaseUrl).toBeUndefined() + expect(anthropicProvider.codebaseIndexOpenAiCompatibleModelDimension).toBeUndefined() + }) + + it("should keep OpenAI Compatible settings in global state only - FIXED BEHAVIOR", async () => { + // This test verifies that OpenAI Compatible settings remain in global state + // and are NOT copied to provider profiles + + const exportedSettings = { + providerProfiles: { + currentApiConfigName: "anthropic-provider", + apiConfigs: { + "anthropic-provider": { + apiProvider: "anthropic" as ProviderName, + id: "anthropic-id", + }, + "openai-compatible-provider": { + apiProvider: "openai" as ProviderName, + id: "openai-compatible-id", + // NO OpenAI Compatible settings in provider profiles + }, + }, + modeApiConfigs: {}, + }, + globalSettings: { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexEmbedderBaseUrl: "https://updated.example.com/v1", + codebaseIndexEmbedderModelDimension: 1536, + // OpenAI Compatible settings are stored here + codebaseIndexOpenAiCompatibleBaseUrl: "https://updated.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 1536, + }, + }, + } + + const currentProviderProfiles = { + currentApiConfigName: "default", + apiConfigs: { + default: { + apiProvider: "openai" as ProviderName, + id: "default-id", + }, + }, + } + + ;(vscode.window.showOpenDialog as Mock).mockResolvedValue([{ fsPath: "/mock/path/settings.json" }]) + ;(fs.readFile as Mock).mockResolvedValue(JSON.stringify(exportedSettings)) + + mockProviderSettingsManager.export.mockResolvedValue(currentProviderProfiles) + mockProviderSettingsManager.listConfig.mockResolvedValue([ + { name: "anthropic-provider", id: "anthropic-id", apiProvider: "anthropic" as ProviderName }, + { + name: "openai-compatible-provider", + id: "openai-compatible-id", + apiProvider: "openai" as ProviderName, + }, + { name: "default", id: "default-id", apiProvider: "openai" as ProviderName }, + ]) + + const importResult = await importSettings({ + providerSettingsManager: 
mockProviderSettingsManager, + contextProxy: mockContextProxy, + customModesManager: mockCustomModesManager, + }) + + expect(importResult.success).toBe(true) + + // Verify OpenAI Compatible settings are imported to global settings + const importedGlobalSettings = mockContextProxy.setValues.mock.calls[0][0] + expect(importedGlobalSettings.codebaseIndexConfig?.codebaseIndexOpenAiCompatibleBaseUrl).toBe( + "https://updated.example.com/v1", + ) + expect(importedGlobalSettings.codebaseIndexConfig?.codebaseIndexOpenAiCompatibleModelDimension).toBe(1536) + + // Verify NO provider profiles have OpenAI Compatible settings + const importedProviderProfiles = mockProviderSettingsManager.import.mock.calls[0][0] + const anthropicProvider = importedProviderProfiles.apiConfigs["anthropic-provider"] + const openaiCompatibleProvider = importedProviderProfiles.apiConfigs["openai-compatible-provider"] + + // Neither provider should have OpenAI Compatible settings + expect(anthropicProvider.codebaseIndexOpenAiCompatibleBaseUrl).toBeUndefined() + expect(anthropicProvider.codebaseIndexOpenAiCompatibleModelDimension).toBeUndefined() + expect(openaiCompatibleProvider.codebaseIndexOpenAiCompatibleBaseUrl).toBeUndefined() + expect(openaiCompatibleProvider.codebaseIndexOpenAiCompatibleModelDimension).toBeUndefined() + }) + + it("should export OpenAI Compatible settings from global state when provider is openai-compatible", async () => { + // This test reproduces the bug where codebaseIndexEmbedderModelDimension is missing from exported JSON + // when the OpenAI Compatible settings are stored in global state via contextProxy + + ;(vscode.window.showSaveDialog as Mock).mockResolvedValue({ + fsPath: "/mock/path/roo-code-settings.json", + }) + + // Set up provider profiles - note that the OpenAI Compatible provider does NOT have + // the codebaseIndexOpenAiCompatibleBaseUrl and codebaseIndexOpenAiCompatibleModelDimension + // fields in the provider profile itself + const mockProviderProfiles = { + currentApiConfigName: "openrouter-provider", // Current provider is OpenRouter + apiConfigs: { + "openrouter-provider": { + apiProvider: "openrouter" as ProviderName, + id: "openrouter-id", + // OpenRouter doesn't have OpenAI Compatible fields + }, + }, + modeApiConfigs: {}, + } + + // The global settings now include OpenAI Compatible settings directly in codebaseIndexConfig + const mockGlobalSettings = { + mode: "code", + codebaseIndexConfig: { + codebaseIndexEnabled: true, + codebaseIndexEmbedderProvider: "openai-compatible" as const, + codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexEmbedderBaseUrl: "https://custom-api.example.com/v1", + codebaseIndexEmbedderModelDimension: 1536, + // OpenAI Compatible settings are now included directly + codebaseIndexOpenAiCompatibleBaseUrl: "https://custom-api.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 1536, + }, + } + + mockProviderSettingsManager.export.mockResolvedValue(mockProviderProfiles) + mockContextProxy.export.mockResolvedValue(mockGlobalSettings) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + await exportSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + }) + + // Verify that the exported JSON contains the OpenAI Compatible settings + const exportedData = (safeWriteJson as Mock).mock.calls[0][1] + + // With the fix, these values are now properly exported + expect(exportedData.globalSettings.codebaseIndexConfig.codebaseIndexOpenAiCompatibleModelDimension).toBe( + 1536, + ) + 
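// A minimal sketch of the payload these assertions assume: exportSettings writes
// { providerProfiles, globalSettings } through safeWriteJson, so with the fix the mocked
// codebaseIndexConfig values should surface nested as, e.g.:
//   { globalSettings: { codebaseIndexConfig: { codebaseIndexOpenAiCompatibleBaseUrl: "https://custom-api.example.com/v1", codebaseIndexOpenAiCompatibleModelDimension: 1536 } } }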
expect(exportedData.globalSettings.codebaseIndexConfig.codebaseIndexOpenAiCompatibleBaseUrl).toBe( + "https://custom-api.example.com/v1", + ) + }) }) }) diff --git a/src/core/config/importExport.ts b/src/core/config/importExport.ts index 6ffbbf71366a..c3d6f9c2159f 100644 --- a/src/core/config/importExport.ts +++ b/src/core/config/importExport.ts @@ -68,6 +68,9 @@ export async function importSettingsFromPath( (globalSettings.customModes ?? []).map((mode) => customModesManager.updateCustomMode(mode.slug, mode)), ) + // OpenAI Compatible settings are now correctly stored in codebaseIndexConfig + // They will be imported automatically with the config - no special handling needed + await providerSettingsManager.import(providerProfiles) await contextProxy.setValues(globalSettings) @@ -161,10 +164,16 @@ export const exportSettings = async ({ providerSettingsManager, contextProxy }: return } + // OpenAI Compatible settings are now correctly stored in codebaseIndexConfig + // No workaround needed - they will be exported automatically with the config + const dirname = path.dirname(uri.fsPath) await fs.mkdir(dirname, { recursive: true }) await safeWriteJson(uri.fsPath, { providerProfiles, globalSettings }) - } catch (e) {} + } catch (e) { + console.error("Failed to export settings:", e) + // Don't re-throw - the UI will handle showing error messages + } } /** diff --git a/src/core/environment/__tests__/getEnvironmentDetails.spec.ts b/src/core/environment/__tests__/getEnvironmentDetails.spec.ts index 02423f8ebdab..a1b8691e7037 100644 --- a/src/core/environment/__tests__/getEnvironmentDetails.spec.ts +++ b/src/core/environment/__tests__/getEnvironmentDetails.spec.ts @@ -146,7 +146,6 @@ describe("getEnvironmentDetails", () => { expect(result).toContain("# VSCode Visible Files") expect(result).toContain("# VSCode Open Tabs") expect(result).toContain("# Current Time") - expect(result).toContain("# Current Context Size (Tokens)") expect(result).toContain("# Current Cost") expect(result).toContain("# Current Mode") expect(result).toContain("test-model") @@ -191,6 +190,19 @@ describe("getEnvironmentDetails", () => { expect(listFiles).not.toHaveBeenCalled() }) + it("should skip file listing when maxWorkspaceFiles is 0", async () => { + mockProvider.getState.mockResolvedValue({ + ...mockState, + maxWorkspaceFiles: 0, + }) + + const result = await getEnvironmentDetails(mockCline as Task, true) + + expect(listFiles).not.toHaveBeenCalled() + expect(result).toContain("Workspace files context disabled") + expect(formatResponse.formatFilesList).not.toHaveBeenCalled() + }) + it("should include recently modified files if any", async () => { ;(mockCline.fileContextTracker!.getAndClearRecentlyModifiedFiles as Mock).mockReturnValue([ "modified1.ts", @@ -301,25 +313,6 @@ describe("getEnvironmentDetails", () => { expect(mockInactiveTerminal.getCurrentWorkingDirectory).toHaveBeenCalled() }) - it("should include warning when file writing is not allowed", async () => { - ;(isToolAllowedForMode as Mock).mockReturnValue(false) - ;(getModeBySlug as Mock).mockImplementation((slug: string) => { - if (slug === "code") { - return { name: "💻 Code" } - } - - if (slug === defaultModeSlug) { - return { name: "Default Mode" } - } - - return null - }) - - const result = await getEnvironmentDetails(mockCline as Task) - - expect(result).toContain("NOTE: You are currently in '💻 Code' mode, which does not allow write operations") - }) - it("should include experiment-specific details when Power Steering is enabled", async () => { 
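// Presumed setup: the state mock below enables the POWER_STEERING experiment and
// experiments.isEnabled is stubbed to return true, so the environment details are expected to
// include the experiment-specific mode information this test asserts on.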
mockState.experiments = { [EXPERIMENT_IDS.POWER_STEERING]: true } ;(experiments.isEnabled as Mock).mockReturnValue(true) diff --git a/src/core/environment/getEnvironmentDetails.ts b/src/core/environment/getEnvironmentDetails.ts index 944eb9419088..8d4f157f4d6f 100644 --- a/src/core/environment/getEnvironmentDetails.ts +++ b/src/core/environment/getEnvironmentDetails.ts @@ -18,6 +18,7 @@ import { arePathsEqual } from "../../utils/path" import { formatResponse } from "../prompts/responses" import { Task } from "../task/Task" +import { formatReminderSection } from "./reminder" export async function getEnvironmentDetails(cline: Task, includeFileDetails: boolean = false) { let details = "" @@ -197,13 +198,8 @@ export async function getEnvironmentDetails(cline: Task, includeFileDetails: boo // Add context tokens information. const { contextTokens, totalCost } = getApiMetrics(cline.clineMessages) - const { id: modelId, info: modelInfo } = cline.api.getModel() - const contextWindow = modelInfo.contextWindow + const { id: modelId } = cline.api.getModel() - const contextPercentage = - contextTokens && contextWindow ? Math.round((contextTokens / contextWindow) * 100) : undefined - - details += `\n\n# Current Context Size (Tokens)\n${contextTokens ? `${contextTokens.toLocaleString()} (${contextPercentage}%)` : "(Not available)"}` details += `\n\n# Current Cost\n${totalCost !== null ? `$${totalCost.toFixed(2)}` : "(Not available)"}` // Add current mode and any mode-specific warnings. @@ -237,16 +233,6 @@ export async function getEnvironmentDetails(cline: Task, includeFileDetails: boo } } - // Add warning if not in code mode. - if ( - !isToolAllowedForMode("write_to_file", currentMode, customModes ?? [], { apply_diff: cline.diffEnabled }) && - !isToolAllowedForMode("apply_diff", currentMode, customModes ?? [], { apply_diff: cline.diffEnabled }) - ) { - const currentModeName = getModeBySlug(currentMode, customModes)?.name ?? currentMode - const defaultModeName = getModeBySlug(defaultModeSlug, customModes)?.name ?? defaultModeSlug - details += `\n\nNOTE: You are currently in '${currentModeName}' mode, which does not allow write operations. To write files, the user will need to switch to a mode that supports file writing, such as '${defaultModeName}' mode.` - } - if (includeFileDetails) { details += `\n\n# Current Workspace Directory (${cline.cwd.toPosix()}) Files\n` const isDesktop = arePathsEqual(cline.cwd, path.join(os.homedir(), "Desktop")) @@ -257,20 +243,27 @@ export async function getEnvironmentDetails(cline: Task, includeFileDetails: boo details += "(Desktop files not shown automatically. Use list_files to explore if needed.)" } else { const maxFiles = maxWorkspaceFiles ?? 200 - const [files, didHitLimit] = await listFiles(cline.cwd, true, maxFiles) - const { showRooIgnoredFiles = true } = state ?? {} - - const result = formatResponse.formatFilesList( - cline.cwd, - files, - didHitLimit, - cline.rooIgnoreController, - showRooIgnoredFiles, - ) - - details += result + + // Early return for limit of 0 + if (maxFiles === 0) { + details += "(Workspace files context disabled. Use list_files to explore if needed.)" + } else { + const [files, didHitLimit] = await listFiles(cline.cwd, true, maxFiles) + const { showRooIgnoredFiles = true } = state ?? 
{} + + const result = formatResponse.formatFilesList( + cline.cwd, + files, + didHitLimit, + cline.rooIgnoreController, + showRooIgnoredFiles, + ) + + details += result + } } } - return `\n${details.trim()}\n` + const reminderSection = formatReminderSection(cline.todoList) + return `\n${details.trim()}\n${reminderSection}\n` } diff --git a/src/core/environment/reminder.ts b/src/core/environment/reminder.ts new file mode 100644 index 000000000000..eb1b39dfb5a8 --- /dev/null +++ b/src/core/environment/reminder.ts @@ -0,0 +1,38 @@ +import { TodoItem, TodoStatus } from "@roo-code/types" + +/** + * Format the reminders section as a markdown block in English, with basic instructions. + */ +export function formatReminderSection(todoList?: TodoItem[]): string { + if (!todoList || todoList.length === 0) { + return "" + } + const statusMap: Record<TodoStatus, string> = { + pending: "Pending", + in_progress: "In Progress", + completed: "Completed", + } + const lines: string[] = [ + "====", + "", + "REMINDERS", + "", + "Below is your current list of reminders for this task. Keep them updated as you progress.", + "", + ] + + lines.push("| # | Content | Status |") + lines.push("|---|---------|--------|") + todoList.forEach((item, idx) => { + const escapedContent = item.content.replace(/\\/g, "\\\\").replace(/\|/g, "\\|") + lines.push(`| ${idx + 1} | ${escapedContent} | ${statusMap[item.status] || item.status} |`) + }) + lines.push("") + + lines.push( + "", + "IMPORTANT: When task status changes, remember to call the `update_todo_list` tool to update your progress.", + "", + ) + return lines.join("\n") +} diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/architect-mode-prompt.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/architect-mode-prompt.snap index 98ef98fcad8d..31b49ec68280 100644 --- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/architect-mode-prompt.snap +++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/architect-mode-prompt.snap @@ -368,6 +368,76 @@ Example: +## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. +- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. 
+- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). +- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. + +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. +- You need to update the status of several todos at once. +- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. + + # Tool Use Guidelines 1. In tags, assess what information you already have and what information you need to proceed with the task. @@ -483,9 +553,9 @@ Mode-specific Instructions: 4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it. -5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file. +5. Use the switch_mode tool to request that the user switch to another mode to implement the solution. -6. Use the switch_mode tool to request that the user switch to another mode to implement the solution. +**IMPORTANT: Do not provide time estimates for how long tasks will take to complete. Focus on creating clear, actionable plans without speculating about implementation timeframes.** Rules: # Rules from .clinerules-architect: diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/ask-mode-prompt.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/ask-mode-prompt.snap index e66530943278..33b6addf28d3 100644 --- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/ask-mode-prompt.snap +++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/ask-mode-prompt.snap @@ -265,6 +265,76 @@ Example: +## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. 
+- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. +- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). +- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. + +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. +- You need to update the status of several todos at once. +- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. + + # Tool Use Guidelines 1. In tags, assess what information you already have and what information you need to proceed with the task. diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-disabled.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-disabled.snap index 7bed96ad2111..b2b0abfad4d9 100644 --- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-disabled.snap +++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-disabled.snap @@ -417,6 +417,76 @@ Example: +## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. 
+- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. +- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). +- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. + +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. +- You need to update the status of several todos at once. +- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. + + # Tool Use Guidelines 1. In tags, assess what information you already have and what information you need to proceed with the task. @@ -545,9 +615,9 @@ Mode-specific Instructions: 4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it. -5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file. +5. Use the switch_mode tool to request that the user switch to another mode to implement the solution. -6. Use the switch_mode tool to request that the user switch to another mode to implement the solution. +**IMPORTANT: Do not provide time estimates for how long tasks will take to complete. 
Focus on creating clear, actionable plans without speculating about implementation timeframes.** Rules: # Rules from .clinerules-architect: diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-enabled.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-enabled.snap index b3032babade1..0eecf0482ef0 100644 --- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-enabled.snap +++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-enabled.snap @@ -417,6 +417,76 @@ Example: +## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. +- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. +- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). +- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. + +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. +- You need to update the status of several todos at once. +- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. + + # Tool Use Guidelines 1. 
In tags, assess what information you already have and what information you need to proceed with the task. @@ -551,9 +621,9 @@ Mode-specific Instructions: 4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it. -5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file. +5. Use the switch_mode tool to request that the user switch to another mode to implement the solution. -6. Use the switch_mode tool to request that the user switch to another mode to implement the solution. +**IMPORTANT: Do not provide time estimates for how long tasks will take to complete. Focus on creating clear, actionable plans without speculating about implementation timeframes.** Rules: # Rules from .clinerules-architect: diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/partial-reads-enabled.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/partial-reads-enabled.snap index 66eeae3e7e63..731f4b29a7f1 100644 --- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/partial-reads-enabled.snap +++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/partial-reads-enabled.snap @@ -373,6 +373,76 @@ Example: +## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. +- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. +- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). +- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. + +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. 
+- You need to update the status of several todos at once. +- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. + + # Tool Use Guidelines 1. In tags, assess what information you already have and what information you need to proceed with the task. @@ -488,9 +558,9 @@ Mode-specific Instructions: 4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it. -5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file. +5. Use the switch_mode tool to request that the user switch to another mode to implement the solution. -6. Use the switch_mode tool to request that the user switch to another mode to implement the solution. +**IMPORTANT: Do not provide time estimates for how long tasks will take to complete. Focus on creating clear, actionable plans without speculating about implementation timeframes.** Rules: # Rules from .clinerules-architect: diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/consistent-system-prompt.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/consistent-system-prompt.snap index 98ef98fcad8d..31b49ec68280 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/consistent-system-prompt.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/consistent-system-prompt.snap @@ -368,6 +368,76 @@ Example: +## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. +- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. +- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). 
+- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. + +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. +- You need to update the status of several todos at once. +- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. + + # Tool Use Guidelines 1. In tags, assess what information you already have and what information you need to proceed with the task. @@ -483,9 +553,9 @@ Mode-specific Instructions: 4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it. -5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file. +5. Use the switch_mode tool to request that the user switch to another mode to implement the solution. -6. Use the switch_mode tool to request that the user switch to another mode to implement the solution. +**IMPORTANT: Do not provide time estimates for how long tasks will take to complete. Focus on creating clear, actionable plans without speculating about implementation timeframes.** Rules: # Rules from .clinerules-architect: diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-computer-use-support.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-computer-use-support.snap index a525fba65603..fda93c1c77cb 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-computer-use-support.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-computer-use-support.snap @@ -421,6 +421,76 @@ Example: +## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. 
+- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. +- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). +- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. + +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. +- You need to update the status of several todos at once. +- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. + + # Tool Use Guidelines 1. In tags, assess what information you already have and what information you need to proceed with the task. @@ -539,9 +609,9 @@ Mode-specific Instructions: 4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it. -5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file. +5. Use the switch_mode tool to request that the user switch to another mode to implement the solution. -6. Use the switch_mode tool to request that the user switch to another mode to implement the solution. +**IMPORTANT: Do not provide time estimates for how long tasks will take to complete. 
Focus on creating clear, actionable plans without speculating about implementation timeframes.** Rules: # Rules from .clinerules-architect: diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-false.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-false.snap index 98ef98fcad8d..31b49ec68280 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-false.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-false.snap @@ -368,6 +368,76 @@ Example: +## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. +- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. +- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). +- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. + +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. +- You need to update the status of several todos at once. +- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. + + # Tool Use Guidelines 1. 
In tags, assess what information you already have and what information you need to proceed with the task. @@ -483,9 +553,9 @@ Mode-specific Instructions: 4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it. -5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file. +5. Use the switch_mode tool to request that the user switch to another mode to implement the solution. -6. Use the switch_mode tool to request that the user switch to another mode to implement the solution. +**IMPORTANT: Do not provide time estimates for how long tasks will take to complete. Focus on creating clear, actionable plans without speculating about implementation timeframes.** Rules: # Rules from .clinerules-architect: diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-true.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-true.snap index e6b1dc7a852c..8db4d0b37707 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-true.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-true.snap @@ -456,6 +456,76 @@ Example: +## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. +- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. +- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). +- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. + +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. +- You need to update the status of several todos at once. 
+- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. + + # Tool Use Guidelines 1. In tags, assess what information you already have and what information you need to proceed with the task. @@ -571,9 +641,9 @@ Mode-specific Instructions: 4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it. -5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file. +5. Use the switch_mode tool to request that the user switch to another mode to implement the solution. -6. Use the switch_mode tool to request that the user switch to another mode to implement the solution. +**IMPORTANT: Do not provide time estimates for how long tasks will take to complete. Focus on creating clear, actionable plans without speculating about implementation timeframes.** Rules: # Rules from .clinerules-architect: diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-undefined.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-undefined.snap index 98ef98fcad8d..31b49ec68280 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-undefined.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-undefined.snap @@ -368,6 +368,76 @@ Example: +## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. +- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. +- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). 
+- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. + +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. +- You need to update the status of several todos at once. +- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. + + # Tool Use Guidelines 1. In tags, assess what information you already have and what information you need to proceed with the task. @@ -483,9 +553,9 @@ Mode-specific Instructions: 4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it. -5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file. +5. Use the switch_mode tool to request that the user switch to another mode to implement the solution. -6. Use the switch_mode tool to request that the user switch to another mode to implement the solution. +**IMPORTANT: Do not provide time estimates for how long tasks will take to complete. Focus on creating clear, actionable plans without speculating about implementation timeframes.** Rules: # Rules from .clinerules-architect: diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap index 25cf8be0893d..44d5b58327fd 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap @@ -421,6 +421,76 @@ Example: +## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. 
+- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. +- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). +- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. + +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. +- You need to update the status of several todos at once. +- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. + + # Tool Use Guidelines 1. In tags, assess what information you already have and what information you need to proceed with the task. @@ -539,9 +609,9 @@ Mode-specific Instructions: 4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it. -5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file. +5. Use the switch_mode tool to request that the user switch to another mode to implement the solution. -6. Use the switch_mode tool to request that the user switch to another mode to implement the solution. +**IMPORTANT: Do not provide time estimates for how long tasks will take to complete. 
Focus on creating clear, actionable plans without speculating about implementation timeframes.** Rules: # Rules from .clinerules-architect: diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-mcp-hub-provided.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-mcp-hub-provided.snap index b3032babade1..0eecf0482ef0 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-mcp-hub-provided.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-mcp-hub-provided.snap @@ -417,6 +417,76 @@ Example: +## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. +- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. +- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). +- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. + +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. +- You need to update the status of several todos at once. +- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. + + # Tool Use Guidelines 1. 
In tags, assess what information you already have and what information you need to proceed with the task. @@ -551,9 +621,9 @@ Mode-specific Instructions: 4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it. -5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file. +5. Use the switch_mode tool to request that the user switch to another mode to implement the solution. -6. Use the switch_mode tool to request that the user switch to another mode to implement the solution. +**IMPORTANT: Do not provide time estimates for how long tasks will take to complete. Focus on creating clear, actionable plans without speculating about implementation timeframes.** Rules: # Rules from .clinerules-architect: diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-undefined-mcp-hub.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-undefined-mcp-hub.snap index 98ef98fcad8d..31b49ec68280 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-undefined-mcp-hub.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-undefined-mcp-hub.snap @@ -368,6 +368,76 @@ Example: +## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. +- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. +- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). +- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. + +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. +- You need to update the status of several todos at once. 
+- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. + + # Tool Use Guidelines 1. In tags, assess what information you already have and what information you need to proceed with the task. @@ -483,9 +553,9 @@ Mode-specific Instructions: 4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it. -5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file. +5. Use the switch_mode tool to request that the user switch to another mode to implement the solution. -6. Use the switch_mode tool to request that the user switch to another mode to implement the solution. +**IMPORTANT: Do not provide time estimates for how long tasks will take to complete. Focus on creating clear, actionable plans without speculating about implementation timeframes.** Rules: # Rules from .clinerules-architect: diff --git a/src/core/prompts/instructions/create-mode.ts b/src/core/prompts/instructions/create-mode.ts index 47e998ff4c0c..ea99a6690118 100644 --- a/src/core/prompts/instructions/create-mode.ts +++ b/src/core/prompts/instructions/create-mode.ts @@ -26,6 +26,7 @@ If asked to create a project mode, create it in .roomodes in the workspace root. * groups: Array of allowed tool groups (can be empty). Each group can be specified either as a string (e.g., "edit" to allow editing any file) or with file restrictions (e.g., ["edit", { fileRegex: "\\.md$", description: "Markdown files only" }] to only allow editing markdown files) - The following fields are optional but highly recommended: + * description: A short, human-readable description of what this mode does (5 words) * whenToUse: A clear description of when this mode should be selected and what types of tasks it's best suited for. This helps the Orchestrator mode make better decisions. * customInstructions: Additional instructions for how the mode should operate @@ -36,6 +37,7 @@ Both files should follow this structure (in YAML format): customModes: - slug: designer # Required: unique slug with lowercase letters, numbers, and hyphens name: Designer # Required: mode display name + description: UI/UX design systems expert # Optional but recommended: short description (5 words) roleDefinition: >- You are Roo, a UI/UX expert specializing in design systems and frontend development. Your expertise includes: - Creating and maintaining design systems @@ -43,8 +45,8 @@ customModes: - Working with CSS, HTML, and modern frontend frameworks - Ensuring consistent user experiences across platforms # Required: non-empty whenToUse: >- - Use this mode when creating or modifying UI components, implementing design systems, - or ensuring responsive web interfaces. This mode is especially effective with CSS, + Use this mode when creating or modifying UI components, implementing design systems, + or ensuring responsive web interfaces. 
This mode is especially effective with CSS, HTML, and modern frontend frameworks. # Optional but recommended groups: # Required: array of tool groups (can be empty) - read # Read files group (read_file, fetch_instructions, search_files, list_files, list_code_definition_names) diff --git a/src/core/prompts/sections/__tests__/custom-instructions.spec.ts b/src/core/prompts/sections/__tests__/custom-instructions.spec.ts index 111cefaf271e..9c8e0031432b 100644 --- a/src/core/prompts/sections/__tests__/custom-instructions.spec.ts +++ b/src/core/prompts/sections/__tests__/custom-instructions.spec.ts @@ -221,6 +221,106 @@ describe("loadRuleFiles", () => { expect(readFileMock).toHaveBeenCalledWith(expectedFile2Path, "utf-8") }) + it("should filter out cache files from .roo/rules/ directory", async () => { + // Simulate .roo/rules directory exists + statMock.mockResolvedValueOnce({ + isDirectory: vi.fn().mockReturnValue(true), + } as any) + + // Simulate listing files including cache files + readdirMock.mockResolvedValueOnce([ + { name: "rule1.txt", isFile: () => true, isSymbolicLink: () => false, parentPath: "/fake/path/.roo/rules" }, + { name: ".DS_Store", isFile: () => true, isSymbolicLink: () => false, parentPath: "/fake/path/.roo/rules" }, + { name: "Thumbs.db", isFile: () => true, isSymbolicLink: () => false, parentPath: "/fake/path/.roo/rules" }, + { name: "rule2.md", isFile: () => true, isSymbolicLink: () => false, parentPath: "/fake/path/.roo/rules" }, + { name: "cache.log", isFile: () => true, isSymbolicLink: () => false, parentPath: "/fake/path/.roo/rules" }, + { + name: "backup.bak", + isFile: () => true, + isSymbolicLink: () => false, + parentPath: "/fake/path/.roo/rules", + }, + { name: "temp.tmp", isFile: () => true, isSymbolicLink: () => false, parentPath: "/fake/path/.roo/rules" }, + { + name: "script.pyc", + isFile: () => true, + isSymbolicLink: () => false, + parentPath: "/fake/path/.roo/rules", + }, + ] as any) + + statMock.mockImplementation((path) => { + return Promise.resolve({ + isFile: vi.fn().mockReturnValue(true), + }) as any + }) + + readFileMock.mockImplementation((filePath: PathLike) => { + const pathStr = filePath.toString() + const normalizedPath = pathStr.replace(/\\/g, "/") + + // Only rule files should be read - cache files should be skipped + if (normalizedPath === "/fake/path/.roo/rules/rule1.txt") { + return Promise.resolve("rule 1 content") + } + if (normalizedPath === "/fake/path/.roo/rules/rule2.md") { + return Promise.resolve("rule 2 content") + } + + // Cache files should not be read due to filtering + // If they somehow are read, return recognizable content + if (normalizedPath === "/fake/path/.roo/rules/.DS_Store") { + return Promise.resolve("DS_STORE_BINARY_CONTENT") + } + if (normalizedPath === "/fake/path/.roo/rules/Thumbs.db") { + return Promise.resolve("THUMBS_DB_CONTENT") + } + if (normalizedPath === "/fake/path/.roo/rules/backup.bak") { + return Promise.resolve("BACKUP_CONTENT") + } + if (normalizedPath === "/fake/path/.roo/rules/cache.log") { + return Promise.resolve("LOG_CONTENT") + } + if (normalizedPath === "/fake/path/.roo/rules/temp.tmp") { + return Promise.resolve("TEMP_CONTENT") + } + if (normalizedPath === "/fake/path/.roo/rules/script.pyc") { + return Promise.resolve("PYTHON_BYTECODE") + } + + return Promise.reject({ code: "ENOENT" }) + }) + + const result = await loadRuleFiles("/fake/path") + + // Should contain rule files + expect(result).toContain("rule 1 content") + expect(result).toContain("rule 2 content") + + // Should NOT contain 
cache file content - they should be filtered out + expect(result).not.toContain("DS_STORE_BINARY_CONTENT") + expect(result).not.toContain("THUMBS_DB_CONTENT") + expect(result).not.toContain("BACKUP_CONTENT") + expect(result).not.toContain("LOG_CONTENT") + expect(result).not.toContain("TEMP_CONTENT") + expect(result).not.toContain("PYTHON_BYTECODE") + + // Verify cache files are not read at all + const expectedCacheFiles = [ + "/fake/path/.roo/rules/.DS_Store", + "/fake/path/.roo/rules/Thumbs.db", + "/fake/path/.roo/rules/backup.bak", + "/fake/path/.roo/rules/cache.log", + "/fake/path/.roo/rules/temp.tmp", + "/fake/path/.roo/rules/script.pyc", + ] + + for (const cacheFile of expectedCacheFiles) { + const expectedPath = process.platform === "win32" ? cacheFile.replace(/\//g, "\\") : cacheFile + expect(readFileMock).not.toHaveBeenCalledWith(expectedPath, "utf-8") + } + }) + it("should fall back to .roorules when .roo/rules/ is empty", async () => { // Simulate .roo/rules directory exists statMock.mockResolvedValueOnce({ diff --git a/src/core/prompts/sections/custom-instructions.ts b/src/core/prompts/sections/custom-instructions.ts index 0e1ddfd24fb9..3c8558a57f4a 100644 --- a/src/core/prompts/sections/custom-instructions.ts +++ b/src/core/prompts/sections/custom-instructions.ts @@ -123,6 +123,10 @@ async function readTextFilesFromDirectory(dirPath: string): Promise item !== null) } catch (err) { return [] @@ -297,3 +301,44 @@ The following additional instructions are provided by the user, and should be fo ${joinedSections}` : "" } + +/** + * Check if a file should be included in rule compilation. + * Excludes cache files and system files that shouldn't be processed as rules. + */ +function shouldIncludeRuleFile(filename: string): boolean { + const basename = path.basename(filename) + + const cachePatterns = [ + "*.DS_Store", + "*.bak", + "*.cache", + "*.crdownload", + "*.db", + "*.dmp", + "*.dump", + "*.eslintcache", + "*.lock", + "*.log", + "*.old", + "*.part", + "*.partial", + "*.pyc", + "*.pyo", + "*.stackdump", + "*.swo", + "*.swp", + "*.temp", + "*.tmp", + "Thumbs.db", + ] + + return !cachePatterns.some((pattern) => { + if (pattern.startsWith("*.")) { + const extension = pattern.slice(1) + return basename.endsWith(extension) + } else { + return basename === pattern + } + }) +} diff --git a/src/core/prompts/system.ts b/src/core/prompts/system.ts index 61fd9df81e19..be3b91f1466a 100644 --- a/src/core/prompts/system.ts +++ b/src/core/prompts/system.ts @@ -1,7 +1,7 @@ import * as vscode from "vscode" import * as os from "os" -import type { ModeConfig, PromptComponent, CustomModePrompts } from "@roo-code/types" +import type { ModeConfig, PromptComponent, CustomModePrompts, TodoItem } from "@roo-code/types" import { Mode, modes, defaultModeSlug, getModeBySlug, getGroupName, getModeSelection } from "../../shared/modes" import { DiffStrategy } from "../../shared/tools" @@ -44,6 +44,7 @@ async function generatePrompt( rooIgnoreInstructions?: string, partialReadsEnabled?: boolean, settings?: Record, + todoList?: TodoItem[], ): Promise { if (!context) { throw new Error("Extension context is required for generating system prompt") @@ -122,6 +123,7 @@ export const SYSTEM_PROMPT = async ( rooIgnoreInstructions?: string, partialReadsEnabled?: boolean, settings?: Record, + todoList?: TodoItem[], ): Promise => { if (!context) { throw new Error("Extension context is required for generating system prompt") @@ -195,5 +197,6 @@ ${customInstructions}` rooIgnoreInstructions, partialReadsEnabled, settings, + 
todoList, ) } diff --git a/src/core/prompts/tools/index.ts b/src/core/prompts/tools/index.ts index 736c716a2797..3fd5a636a4f7 100644 --- a/src/core/prompts/tools/index.ts +++ b/src/core/prompts/tools/index.ts @@ -22,6 +22,7 @@ import { getAccessMcpResourceDescription } from "./access-mcp-resource" import { getSwitchModeDescription } from "./switch-mode" import { getNewTaskDescription } from "./new-task" import { getCodebaseSearchDescription } from "./codebase-search" +import { getUpdateTodoListDescription } from "./update-todo-list" import { CodeIndexManager } from "../../../services/code-index/manager" // Map of tool names to their description functions @@ -45,6 +46,7 @@ const toolDescriptionMap: Record string | undefined> search_and_replace: (args) => getSearchAndReplaceDescription(args), apply_diff: (args) => args.diffStrategy ? args.diffStrategy.getToolDescription({ cwd: args.cwd, toolOptions: args.toolOptions }) : "", + update_todo_list: (args) => getUpdateTodoListDescription(args), } export function getToolDescriptionsForMode( diff --git a/src/core/prompts/tools/update-todo-list.ts b/src/core/prompts/tools/update-todo-list.ts new file mode 100644 index 000000000000..528d5a1b512e --- /dev/null +++ b/src/core/prompts/tools/update-todo-list.ts @@ -0,0 +1,76 @@ +import { ToolArgs } from "./types" + +/** + * Get the description for the update_todo_list tool. + */ +export function getUpdateTodoListDescription(args?: ToolArgs): string { + return `## update_todo_list + +**Description:** +Replace the entire TODO list with an updated checklist reflecting the current state. Always provide the full list; the system will overwrite the previous one. This tool is designed for step-by-step task tracking, allowing you to confirm completion of each step before updating, update multiple task statuses at once (e.g., mark one as completed and start the next), and dynamically add new todos discovered during long or complex tasks. + +**Checklist Format:** +- Use a single-level markdown checklist (no nesting or subtasks). +- List todos in the intended execution order. +- Status options: + - [ ] Task description (pending) + - [x] Task description (completed) + - [-] Task description (in progress) + +**Status Rules:** +- [ ] = pending (not started) +- [x] = completed (fully finished, no unresolved issues) +- [-] = in_progress (currently being worked on) + +**Core Principles:** +- Before updating, always confirm which todos have been completed since the last update. +- You may update multiple statuses in a single update (e.g., mark the previous as completed and the next as in progress). +- When a new actionable item is discovered during a long or complex task, add it to the todo list immediately. +- Do not remove any unfinished todos unless explicitly instructed. +- Always retain all unfinished tasks, updating their status as needed. +- Only mark a task as completed when it is fully accomplished (no partials, no unresolved dependencies). +- If a task is blocked, keep it as in_progress and add a new todo describing what needs to be resolved. +- Remove tasks only if they are no longer relevant or if the user requests deletion. 
+ +**Usage Example:** + + +[x] Analyze requirements +[x] Design architecture +[-] Implement core logic +[ ] Write tests +[ ] Update documentation + + + +*After completing "Implement core logic" and starting "Write tests":* + + +[x] Analyze requirements +[x] Design architecture +[x] Implement core logic +[-] Write tests +[ ] Update documentation +[ ] Add performance benchmarks + + + +**When to Use:** +- The task involves multiple steps or requires ongoing tracking. +- You need to update the status of several todos at once. +- New actionable items are discovered during task execution. +- The user requests a todo list or provides multiple tasks. +- The task is complex and benefits from clear, stepwise progress tracking. + +**When NOT to Use:** +- There is only a single, trivial task. +- The task can be completed in one or two simple steps. +- The request is purely conversational or informational. + +**Task Management Guidelines:** +- Mark task as completed immediately after all work of the current task is done. +- Start the next task by marking it as in_progress. +- Add new todos as soon as they are identified. +- Use clear, descriptive task names. +` +} diff --git a/src/core/prompts/utilities/mermaid.ts b/src/core/prompts/utilities/mermaid.ts new file mode 100644 index 000000000000..9c90ffb84ba2 --- /dev/null +++ b/src/core/prompts/utilities/mermaid.ts @@ -0,0 +1,31 @@ +/** + * Prompts for Mermaid diagram-related tasks + */ + +/** + * Generate a prompt for fixing invalid Mermaid diagram syntax + * @param error - The error message from Mermaid parser + * @param invalidCode - The invalid Mermaid code that needs fixing + * @returns The formatted prompt for the AI to fix the Mermaid syntax + */ +export const mermaidFixPrompt = (error: string, invalidCode: string): string => { + return `You are a Mermaid diagram syntax expert. Fix the following invalid Mermaid diagram syntax and return ONLY the corrected Mermaid code without any explanations or markdown formatting. + +Error: ${error} + +Invalid Mermaid code: +\`\`\` +${invalidCode} +\`\`\` + +Requirements: +1. Return ONLY the corrected Mermaid syntax +2. Do not include markdown code blocks or explanations +3. Ensure the syntax is valid according to Mermaid specifications +4. Enclose labels and edge labels in double quotes, even when it does not seem necessary, to keep the syntax robust +5. Do not point one edge at multiple nodes; use multiple edges instead +6. Preserve the original intent and structure as much as possible +7. 
If the diagram type is unclear, default to a flowchart + +Corrected Mermaid code:` +} diff --git a/src/core/protect/RooProtectedController.ts b/src/core/protect/RooProtectedController.ts index b74b6a9bb96b..86122c766084 100644 --- a/src/core/protect/RooProtectedController.ts +++ b/src/core/protect/RooProtectedController.ts @@ -18,6 +18,7 @@ export class RooProtectedController { ".roorules*", ".clinerules*", ".roo/**", + ".vscode/**", ".rooprotected", // For future use ] diff --git a/src/core/protect/__tests__/RooProtectedController.spec.ts b/src/core/protect/__tests__/RooProtectedController.spec.ts index 63d880928525..6c998e365a47 100644 --- a/src/core/protect/__tests__/RooProtectedController.spec.ts +++ b/src/core/protect/__tests__/RooProtectedController.spec.ts @@ -38,6 +38,12 @@ describe("RooProtectedController", () => { expect(controller.isWriteProtected(".clinerules.md")).toBe(true) }) + it("should protect files in .vscode directory", () => { + expect(controller.isWriteProtected(".vscode/settings.json")).toBe(true) + expect(controller.isWriteProtected(".vscode/launch.json")).toBe(true) + expect(controller.isWriteProtected(".vscode/tasks.json")).toBe(true) + }) + it("should not protect other files starting with .roo", () => { expect(controller.isWriteProtected(".roosettings")).toBe(false) expect(controller.isWriteProtected(".rooconfig")).toBe(false) @@ -134,6 +140,7 @@ describe("RooProtectedController", () => { ".roorules*", ".clinerules*", ".roo/**", + ".vscode/**", ".rooprotected", ]) }) diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts index 4f0d32c8c1e7..31260cd6fa26 100644 --- a/src/core/task/Task.ts +++ b/src/core/task/Task.ts @@ -20,6 +20,7 @@ import { type ToolProgressStatus, type HistoryItem, TelemetryEventName, + TodoItem, } from "@roo-code/types" import { TelemetryService } from "@roo-code/telemetry" import { CloudService } from "@roo-code/cloud" @@ -85,6 +86,7 @@ import { processUserContentMentions } from "../mentions/processUserContentMentio import { ApiMessage } from "../task-persistence/apiMessages" import { getMessagesSinceLastSummary, summarizeConversation } from "../condense" import { maybeRemoveImageBlocks } from "../../api/transform/image-cleaning" +import { restoreTodoListForTask } from "../tools/updateTodoListTool" // Constants const MAX_EXPONENTIAL_BACKOFF_SECONDS = 600 // 10 minutes @@ -122,6 +124,7 @@ export type TaskOptions = { } export class Task extends EventEmitter { + todoList?: TodoItem[] readonly taskId: string readonly instanceId: string @@ -370,6 +373,7 @@ export class Task extends EventEmitter { public async overwriteClineMessages(newMessages: ClineMessage[]) { this.clineMessages = newMessages + restoreTodoListForTask(this) await this.saveClineMessages() } @@ -1716,7 +1720,9 @@ export class Task extends EventEmitter { const contextWindow = modelInfo.contextWindow - const currentProfileId = state?.listApiConfigMeta.find((profile) => profile.name === state?.currentApiConfigName)?.id ?? "default"; + const currentProfileId = + state?.listApiConfigMeta.find((profile) => profile.name === state?.currentApiConfigName)?.id ?? 
+ "default" const truncateResult = await truncateConversationIfNeeded({ messages: this.apiConversationHistory, diff --git a/src/core/tools/accessMcpResourceTool.ts b/src/core/tools/accessMcpResourceTool.ts index 22b1aba90959..c8a40f9236d2 100644 --- a/src/core/tools/accessMcpResourceTool.ts +++ b/src/core/tools/accessMcpResourceTool.ts @@ -73,7 +73,11 @@ export async function accessMcpResourceTool( resourceResult?.contents.forEach((item) => { if (item.mimeType?.startsWith("image") && item.blob) { - images.push(item.blob) + if (item.blob.startsWith("data:")) { + images.push(item.blob) + } else { + images.push(`data:${item.mimeType};base64,` + item.blob) + } } }) diff --git a/src/core/tools/applyDiffTool.ts b/src/core/tools/applyDiffTool.ts index d4f7fd883f94..f5b4ab7dd3d0 100644 --- a/src/core/tools/applyDiffTool.ts +++ b/src/core/tools/applyDiffTool.ts @@ -145,11 +145,15 @@ export async function applyDiffToolLegacy( cline.diffViewProvider.editType = "modify" await cline.diffViewProvider.open(relPath) await cline.diffViewProvider.update(diffResult.content, true) - await cline.diffViewProvider.scrollToFirstDiff() + cline.diffViewProvider.scrollToFirstDiff() + + // Check if file is write-protected + const isWriteProtected = cline.rooProtectedController?.isWriteProtected(relPath) || false const completeMessage = JSON.stringify({ ...sharedMessageProps, diff: diffContent, + isProtected: isWriteProtected, } satisfies ClineSayTool) let toolProgressStatus @@ -158,7 +162,7 @@ export async function applyDiffToolLegacy( toolProgressStatus = cline.diffStrategy.getProgressStatus(block, diffResult) } - const didApprove = await askApproval("tool", completeMessage, toolProgressStatus) + const didApprove = await askApproval("tool", completeMessage, toolProgressStatus, isWriteProtected) if (!didApprove) { await cline.diffViewProvider.revertChanges() // Cline likely handles closing the diff view diff --git a/src/core/tools/multiApplyDiffTool.ts b/src/core/tools/multiApplyDiffTool.ts index e477008940d1..8057f7794958 100644 --- a/src/core/tools/multiApplyDiffTool.ts +++ b/src/core/tools/multiApplyDiffTool.ts @@ -159,7 +159,12 @@ Expected structure: Original error: ${errorMessage}` - throw new Error(detailedError) + cline.consecutiveMistakeCount++ + cline.recordToolError("apply_diff") + TelemetryService.instance.captureDiffApplicationError(cline.taskId, cline.consecutiveMistakeCount) + await cline.say("diff_error", `Failed to parse apply_diff XML: ${errorMessage}`) + pushToolResult(detailedError) + return } } else if (legacyPath && typeof legacyDiffContent === "string") { // Handle legacy parameters (old way) @@ -505,7 +510,7 @@ ${errorDetails ? 
`\nTechnical details:\n${errorDetails}\n` : ""} cline.diffViewProvider.editType = "modify" await cline.diffViewProvider.open(relPath) await cline.diffViewProvider.update(originalContent!, true) - await cline.diffViewProvider.scrollToFirstDiff() + cline.diffViewProvider.scrollToFirstDiff() // For batch operations, we've already gotten approval const isWriteProtected = cline.rooProtectedController?.isWriteProtected(relPath) || false diff --git a/src/core/tools/newTaskTool.ts b/src/core/tools/newTaskTool.ts index ab2519e9b463..7cc7063b4991 100644 --- a/src/core/tools/newTaskTool.ts +++ b/src/core/tools/newTaskTool.ts @@ -22,7 +22,7 @@ export async function newTaskTool( const partialMessage = JSON.stringify({ tool: "newTask", mode: removeClosingTag("mode", mode), - message: removeClosingTag("message", message), + content: removeClosingTag("message", message), }) await cline.ask("tool", partialMessage, block.partial).catch(() => {}) diff --git a/src/core/tools/updateTodoListTool.ts b/src/core/tools/updateTodoListTool.ts new file mode 100644 index 000000000000..cbb90338d370 --- /dev/null +++ b/src/core/tools/updateTodoListTool.ts @@ -0,0 +1,236 @@ +import { Task } from "../task/Task" +import { ToolUse, AskApproval, HandleError, PushToolResult, RemoveClosingTag } from "../../shared/tools" +import { formatResponse } from "../prompts/responses" + +import cloneDeep from "clone-deep" +import crypto from "crypto" +import { TodoItem, TodoStatus, todoStatusSchema } from "@roo-code/types" +import { getLatestTodo } from "../../shared/todo" + +let approvedTodoList: TodoItem[] | undefined = undefined + +/** + * Add a todo item to the task's todoList. + */ +export function addTodoToTask(cline: Task, content: string, status: TodoStatus = "pending", id?: string): TodoItem { + const todo: TodoItem = { + id: id ?? crypto.randomUUID(), + content, + status, + } + if (!cline.todoList) cline.todoList = [] + cline.todoList.push(todo) + return todo +} + +/** + * Update the status of a todo item by id. + */ +export function updateTodoStatusForTask(cline: Task, id: string, nextStatus: TodoStatus): boolean { + if (!cline.todoList) return false + const idx = cline.todoList.findIndex((t) => t.id === id) + if (idx === -1) return false + const current = cline.todoList[idx] + if ( + (current.status === "pending" && nextStatus === "in_progress") || + (current.status === "in_progress" && nextStatus === "completed") || + current.status === nextStatus + ) { + cline.todoList[idx] = { ...current, status: nextStatus } + return true + } + return false +} + +/** + * Remove a todo item by id. + */ +export function removeTodoFromTask(cline: Task, id: string): boolean { + if (!cline.todoList) return false + const idx = cline.todoList.findIndex((t) => t.id === id) + if (idx === -1) return false + cline.todoList.splice(idx, 1) + return true +} + +/** + * Get a copy of the todoList. + */ +export function getTodoListForTask(cline: Task): TodoItem[] | undefined { + return cline.todoList?.slice() +} + +/** + * Set the todoList for the task. + */ +export async function setTodoListForTask(cline?: Task, todos?: TodoItem[]) { + if (cline === undefined) return + cline.todoList = Array.isArray(todos) ? todos : [] +} + +/** + * Restore the todoList from argument or from clineMessages. + */ +export function restoreTodoListForTask(cline: Task, todoList?: TodoItem[]) { + if (todoList) { + cline.todoList = Array.isArray(todoList) ? 
todoList : [] + return + } + cline.todoList = getLatestTodo(cline.clineMessages) +} +/** + * Convert TodoItem[] to markdown checklist string. + * @param todos TodoItem array + * @returns markdown checklist string + */ +function todoListToMarkdown(todos: TodoItem[]): string { + return todos + .map((t) => { + let box = "[ ]" + if (t.status === "completed") box = "[x]" + else if (t.status === "in_progress") box = "[-]" + return `${box} ${t.content}` + }) + .join("\n") +} + +function normalizeStatus(status: string | undefined): TodoStatus { + if (status === "completed") return "completed" + if (status === "in_progress") return "in_progress" + return "pending" +} + +function parseMarkdownChecklist(md: string): TodoItem[] { + if (typeof md !== "string") return [] + const lines = md + .split(/\r?\n/) + .map((l) => l.trim()) + .filter(Boolean) + const todos: TodoItem[] = [] + for (const line of lines) { + const match = line.match(/^\[\s*([ xX\-~])\s*\]\s+(.+)$/) + if (!match) continue + let status: TodoStatus = "pending" + if (match[1] === "x" || match[1] === "X") status = "completed" + else if (match[1] === "-" || match[1] === "~") status = "in_progress" + const id = crypto + .createHash("md5") + .update(match[2] + status) + .digest("hex") + todos.push({ + id, + content: match[2], + status, + }) + } + return todos +} + +export function setPendingTodoList(todos: TodoItem[]) { + approvedTodoList = todos +} + +function validateTodos(todos: any[]): { valid: boolean; error?: string } { + if (!Array.isArray(todos)) return { valid: false, error: "todos must be an array" } + for (const [i, t] of todos.entries()) { + if (!t || typeof t !== "object") return { valid: false, error: `Item ${i + 1} is not an object` } + if (!t.id || typeof t.id !== "string") return { valid: false, error: `Item ${i + 1} is missing id` } + if (!t.content || typeof t.content !== "string") + return { valid: false, error: `Item ${i + 1} is missing content` } + if (t.status && !todoStatusSchema.options.includes(t.status as TodoStatus)) + return { valid: false, error: `Item ${i + 1} has invalid status` } + } + return { valid: true } +} + +/** + * Update the todo list for a task. 
+ * @param cline Task instance + * @param block ToolUse block + * @param askApproval AskApproval function + * @param handleError HandleError function + * @param pushToolResult PushToolResult function + * @param removeClosingTag RemoveClosingTag function + * @param userEdited If true, only show "User Edit Succeeded" and do nothing else + */ +export async function updateTodoListTool( + cline: Task, + block: ToolUse, + askApproval: AskApproval, + handleError: HandleError, + pushToolResult: PushToolResult, + removeClosingTag: RemoveClosingTag, + userEdited?: boolean, +) { + // If userEdited is true, only show "User Edit Succeeded" and do nothing else + if (userEdited === true) { + pushToolResult("User Edit Succeeded") + return + } + try { + const todosRaw = block.params.todos + + let todos: TodoItem[] + try { + todos = parseMarkdownChecklist(todosRaw || "") + } catch { + cline.consecutiveMistakeCount++ + cline.recordToolError("update_todo_list") + pushToolResult(formatResponse.toolError("The todos parameter is not a valid markdown checklist")) + return + } + + const { valid, error } = validateTodos(todos) + if (!valid && !block.partial) { + cline.consecutiveMistakeCount++ + cline.recordToolError("update_todo_list") + pushToolResult(formatResponse.toolError(error || "todos parameter validation failed")) + return + } + + let normalizedTodos: TodoItem[] = todos.map((t) => ({ + id: t.id, + content: t.content, + status: normalizeStatus(t.status), + })) + + const approvalMsg = JSON.stringify({ + tool: "updateTodoList", + todos: normalizedTodos, + }) + if (block.partial) { + await cline.ask("tool", approvalMsg, block.partial).catch(() => {}) + return + } + approvedTodoList = cloneDeep(normalizedTodos) + const didApprove = await askApproval("tool", approvalMsg) + if (!didApprove) { + pushToolResult("User declined to update the todoList.") + return + } + const isTodoListChanged = + approvedTodoList !== undefined && JSON.stringify(normalizedTodos) !== JSON.stringify(approvedTodoList) + if (isTodoListChanged) { + normalizedTodos = approvedTodoList ?? 
[] + cline.say( + "user_edit_todos", + JSON.stringify({ + tool: "updateTodoList", + todos: normalizedTodos, + }), + ) + } + + await setTodoListForTask(cline, normalizedTodos) + + // If todo list changed, output new todo list in markdown format + if (isTodoListChanged) { + const md = todoListToMarkdown(normalizedTodos) + pushToolResult(formatResponse.toolResult("User edits todo:\n\n" + md)) + } else { + pushToolResult(formatResponse.toolResult("Todo list updated successfully.")) + } + } catch (error) { + await handleError("update todo list", error) + } +} diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index 51cb9a275bbd..8eb2e70a8ce1 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -110,7 +110,7 @@ export class ClineProvider public isViewLaunched = false public settingsImportedAt?: number - public readonly latestAnnouncementId = "jun-17-2025-3-21" // Update for v3.21.0 announcement + public readonly latestAnnouncementId = "jul-02-2025-3-22-6" // Update for v3.22.6 announcement public readonly providerSettingsManager: ProviderSettingsManager public readonly customModesManager: CustomModesManager @@ -884,11 +884,6 @@ export class ClineProvider this.contextProxy.setProviderSettings(providerSettings), ]) - // Notify CodeIndexManager about the settings change - if (this.codeIndexManager) { - await this.codeIndexManager.handleExternalSettingsChange() - } - // Change the provider for the current task. // TODO: We should rename `buildApiHandler` for clarity (e.g. `getProviderClient`). const task = this.getCurrentCline() @@ -1352,6 +1347,7 @@ export class ClineProvider alwaysAllowMcp, alwaysAllowModeSwitch, alwaysAllowSubtasks, + alwaysAllowUpdateTodoList, allowedMaxRequests, autoCondenseContext, autoCondenseContextPercent, @@ -1411,6 +1407,8 @@ export class ClineProvider codebaseIndexConfig, codebaseIndexModels, profileThresholds, + alwaysAllowFollowupQuestions, + followupAutoApproveTimeoutMs, } = await this.getState() const telemetryKey = process.env.POSTHOG_API_KEY @@ -1436,6 +1434,7 @@ export class ClineProvider alwaysAllowMcp: alwaysAllowMcp ?? false, alwaysAllowModeSwitch: alwaysAllowModeSwitch ?? false, alwaysAllowSubtasks: alwaysAllowSubtasks ?? false, + alwaysAllowUpdateTodoList: alwaysAllowUpdateTodoList ?? false, allowedMaxRequests, autoCondenseContext: autoCondenseContext ?? true, autoCondenseContextPercent: autoCondenseContextPercent ?? 100, @@ -1521,6 +1520,8 @@ export class ClineProvider profileThresholds: profileThresholds ?? {}, cloudApiUrl: getRooCodeApiUrl(), hasOpenedModeSelector: this.getGlobalState("hasOpenedModeSelector") ?? false, + alwaysAllowFollowupQuestions: alwaysAllowFollowupQuestions ?? false, + followupAutoApproveTimeoutMs: followupAutoApproveTimeoutMs ?? 60000, } } @@ -1601,6 +1602,9 @@ export class ClineProvider alwaysAllowMcp: stateValues.alwaysAllowMcp ?? false, alwaysAllowModeSwitch: stateValues.alwaysAllowModeSwitch ?? false, alwaysAllowSubtasks: stateValues.alwaysAllowSubtasks ?? false, + alwaysAllowFollowupQuestions: stateValues.alwaysAllowFollowupQuestions ?? false, + alwaysAllowUpdateTodoList: stateValues.alwaysAllowUpdateTodoList ?? false, + followupAutoApproveTimeoutMs: stateValues.followupAutoApproveTimeoutMs ?? 60000, allowedMaxRequests: stateValues.allowedMaxRequests, autoCondenseContext: stateValues.autoCondenseContext ?? true, autoCondenseContextPercent: stateValues.autoCondenseContextPercent ?? 
100, diff --git a/src/core/webview/__tests__/webviewMessageHandler.spec.ts b/src/core/webview/__tests__/webviewMessageHandler.spec.ts index 46ace3ce85b6..2f356aef554a 100644 --- a/src/core/webview/__tests__/webviewMessageHandler.spec.ts +++ b/src/core/webview/__tests__/webviewMessageHandler.spec.ts @@ -14,8 +14,82 @@ const mockGetModels = getModels as Mock const mockClineProvider = { getState: vi.fn(), postMessageToWebview: vi.fn(), + customModesManager: { + getCustomModes: vi.fn(), + deleteCustomMode: vi.fn(), + }, + context: { + extensionPath: "/mock/extension/path", + globalStorageUri: { fsPath: "/mock/global/storage" }, + }, + contextProxy: { + context: { + extensionPath: "/mock/extension/path", + globalStorageUri: { fsPath: "/mock/global/storage" }, + }, + setValue: vi.fn(), + }, + log: vi.fn(), + postStateToWebview: vi.fn(), } as unknown as ClineProvider +import { t } from "../../../i18n" + +vi.mock("vscode", () => ({ + window: { + showInformationMessage: vi.fn(), + showErrorMessage: vi.fn(), + }, + workspace: { + workspaceFolders: [{ uri: { fsPath: "/mock/workspace" } }], + }, +})) + +vi.mock("../../../i18n", () => ({ + t: vi.fn((key: string, args?: Record) => { + // For the delete confirmation with rules, we need to return the interpolated string + if (key === "common:confirmation.delete_custom_mode_with_rules" && args) { + return `Are you sure you want to delete this ${args.scope} mode?\n\nThis will also delete the associated rules folder at:\n${args.rulesFolderPath}` + } + // Return the translated value for "Yes" + if (key === "common:answers.yes") { + return "Yes" + } + // Return the translated value for "Cancel" + if (key === "common:answers.cancel") { + return "Cancel" + } + return key + }), +})) + +vi.mock("fs/promises", () => { + const mockRm = vi.fn().mockResolvedValue(undefined) + const mockMkdir = vi.fn().mockResolvedValue(undefined) + + return { + default: { + rm: mockRm, + mkdir: mockMkdir, + }, + rm: mockRm, + mkdir: mockMkdir, + } +}) + +import * as vscode from "vscode" +import * as fs from "fs/promises" +import * as os from "os" +import * as path from "path" +import * as fsUtils from "../../../utils/fs" +import { getWorkspacePath } from "../../../utils/path" +import { ensureSettingsDirectoryExists } from "../../../utils/globalContext" +import type { ModeConfig } from "@roo-code/types" + +vi.mock("../../../utils/fs") +vi.mock("../../../utils/path") +vi.mock("../../../utils/globalContext") + describe("webviewMessageHandler - requestRouterModels", () => { beforeEach(() => { vi.clearAllMocks() @@ -295,3 +369,116 @@ describe("webviewMessageHandler - requestRouterModels", () => { }) }) }) + +describe("webviewMessageHandler - deleteCustomMode", () => { + beforeEach(() => { + vi.clearAllMocks() + vi.mocked(getWorkspacePath).mockReturnValue("/mock/workspace") + vi.mocked(vscode.window.showErrorMessage).mockResolvedValue(undefined) + vi.mocked(ensureSettingsDirectoryExists).mockResolvedValue("/mock/global/storage/.roo") + }) + + it("should delete a project mode and its rules folder", async () => { + const slug = "test-project-mode" + const rulesFolderPath = path.join("/mock/workspace", ".roo", `rules-${slug}`) + + vi.mocked(mockClineProvider.customModesManager.getCustomModes).mockResolvedValue([ + { + name: "Test Project Mode", + slug, + roleDefinition: "Test Role", + groups: [], + source: "project", + } as ModeConfig, + ]) + vi.mocked(fsUtils.fileExistsAtPath).mockResolvedValue(true) + 
vi.mocked(mockClineProvider.customModesManager.deleteCustomMode).mockResolvedValue(undefined) + + await webviewMessageHandler(mockClineProvider, { type: "deleteCustomMode", slug }) + + // The confirmation dialog is now handled in the webview, so we don't expect showInformationMessage to be called + expect(vscode.window.showInformationMessage).not.toHaveBeenCalled() + expect(mockClineProvider.customModesManager.deleteCustomMode).toHaveBeenCalledWith(slug) + expect(fs.rm).toHaveBeenCalledWith(rulesFolderPath, { recursive: true, force: true }) + }) + + it("should delete a global mode and its rules folder", async () => { + const slug = "test-global-mode" + const homeDir = os.homedir() + const rulesFolderPath = path.join(homeDir, ".roo", `rules-${slug}`) + + vi.mocked(mockClineProvider.customModesManager.getCustomModes).mockResolvedValue([ + { + name: "Test Global Mode", + slug, + roleDefinition: "Test Role", + groups: [], + source: "global", + } as ModeConfig, + ]) + vi.mocked(fsUtils.fileExistsAtPath).mockResolvedValue(true) + vi.mocked(mockClineProvider.customModesManager.deleteCustomMode).mockResolvedValue(undefined) + + await webviewMessageHandler(mockClineProvider, { type: "deleteCustomMode", slug }) + + // The confirmation dialog is now handled in the webview, so we don't expect showInformationMessage to be called + expect(vscode.window.showInformationMessage).not.toHaveBeenCalled() + expect(mockClineProvider.customModesManager.deleteCustomMode).toHaveBeenCalledWith(slug) + expect(fs.rm).toHaveBeenCalledWith(rulesFolderPath, { recursive: true, force: true }) + }) + + it("should only delete the mode when rules folder does not exist", async () => { + const slug = "test-mode-no-rules" + vi.mocked(mockClineProvider.customModesManager.getCustomModes).mockResolvedValue([ + { + name: "Test Mode No Rules", + slug, + roleDefinition: "Test Role", + groups: [], + source: "project", + } as ModeConfig, + ]) + vi.mocked(fsUtils.fileExistsAtPath).mockResolvedValue(false) + vi.mocked(mockClineProvider.customModesManager.deleteCustomMode).mockResolvedValue(undefined) + + await webviewMessageHandler(mockClineProvider, { type: "deleteCustomMode", slug }) + + // The confirmation dialog is now handled in the webview, so we don't expect showInformationMessage to be called + expect(vscode.window.showInformationMessage).not.toHaveBeenCalled() + expect(mockClineProvider.customModesManager.deleteCustomMode).toHaveBeenCalledWith(slug) + expect(fs.rm).not.toHaveBeenCalled() + }) + + it("should handle errors when deleting rules folder", async () => { + const slug = "test-mode-error" + const rulesFolderPath = path.join("/mock/workspace", ".roo", `rules-${slug}`) + const error = new Error("Permission denied") + + vi.mocked(mockClineProvider.customModesManager.getCustomModes).mockResolvedValue([ + { + name: "Test Mode Error", + slug, + roleDefinition: "Test Role", + groups: [], + source: "project", + } as ModeConfig, + ]) + vi.mocked(fsUtils.fileExistsAtPath).mockResolvedValue(true) + vi.mocked(mockClineProvider.customModesManager.deleteCustomMode).mockResolvedValue(undefined) + vi.mocked(fs.rm).mockRejectedValue(error) + + await webviewMessageHandler(mockClineProvider, { type: "deleteCustomMode", slug }) + + expect(mockClineProvider.customModesManager.deleteCustomMode).toHaveBeenCalledWith(slug) + expect(fs.rm).toHaveBeenCalledWith(rulesFolderPath, { recursive: true, force: true }) + // Verify error message is shown to the user + expect(vscode.window.showErrorMessage).toHaveBeenCalledWith( + 
t("common:errors.delete_rules_folder_failed", { + rulesFolderPath, + error: error.message, + }), + ) + // No error response is sent anymore - we just continue with deletion + expect(mockClineProvider.postMessageToWebview).not.toHaveBeenCalled() + }) +}) diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts index cac94aa0ce18..81e39ff94d35 100644 --- a/src/core/webview/webviewMessageHandler.ts +++ b/src/core/webview/webviewMessageHandler.ts @@ -1,8 +1,10 @@ import { safeWriteJson } from "../../utils/safeWriteJson" import * as path from "path" -import fs from "fs/promises" +import * as os from "os" +import * as fs from "fs/promises" import pWaitFor from "p-wait-for" import * as vscode from "vscode" +import * as yaml from "yaml" import { type Language, type ProviderSettings, type GlobalState, TelemetryEventName } from "@roo-code/types" import { CloudService } from "@roo-code/cloud" @@ -34,15 +36,18 @@ import { getVsCodeLmModels } from "../../api/providers/vscode-lm" import { openMention } from "../mentions" import { TelemetrySetting } from "../../shared/TelemetrySetting" import { getWorkspacePath } from "../../utils/path" +import { ensureSettingsDirectoryExists } from "../../utils/globalContext" import { Mode, defaultModeSlug } from "../../shared/modes" import { getModels, flushModels } from "../../api/providers/fetchers/modelCache" import { GetModelsOptions } from "../../shared/api" import { generateSystemPrompt } from "./generateSystemPrompt" import { getCommand } from "../../utils/commands" +import { mermaidFixPrompt } from "../prompts/utilities/mermaid" const ALLOWED_VSCODE_SETTINGS = new Set(["terminal.integrated.inheritEnv"]) import { MarketplaceManager, MarketplaceItemType } from "../../services/marketplace" +import { setPendingTodoList } from "../tools/updateTodoListTool" export const webviewMessageHandler = async ( provider: ClineProvider, @@ -182,6 +187,10 @@ export const webviewMessageHandler = async ( await updateGlobalState("alwaysAllowSubtasks", message.bool) await provider.postStateToWebview() break + case "alwaysAllowUpdateTodoList": + await updateGlobalState("alwaysAllowUpdateTodoList", message.bool) + await provider.postStateToWebview() + break case "askResponse": provider.getCurrentCline()?.handleWebviewAskResponse(message.askResponse!, message.text, message.images) break @@ -1100,8 +1109,31 @@ export const webviewMessageHandler = async ( await updateGlobalState("maxWorkspaceFiles", fileCount) await provider.postStateToWebview() break + case "alwaysAllowFollowupQuestions": + await updateGlobalState("alwaysAllowFollowupQuestions", message.bool ?? false) + await provider.postStateToWebview() + break + case "followupAutoApproveTimeoutMs": + await updateGlobalState("followupAutoApproveTimeoutMs", message.value) + await provider.postStateToWebview() + break case "browserToolEnabled": await updateGlobalState("browserToolEnabled", message.bool ?? true) + await provider.postStateToWebview() + break + case "codebaseIndexEnabled": + // Update the codebaseIndexConfig with the new enabled state + const currentCodebaseConfig = getGlobalState("codebaseIndexConfig") || {} + await updateGlobalState("codebaseIndexConfig", { + ...currentCodebaseConfig, + codebaseIndexEnabled: message.bool ?? 
false, + }) + + // Notify the code index manager about the change + if (provider.codeIndexManager) { + await provider.codeIndexManager.handleSettingsChange() + } + await provider.postStateToWebview() break case "language": @@ -1291,6 +1323,14 @@ export const webviewMessageHandler = async ( } break } + case "updateTodoList": { + const payload = message.payload as { todos?: any[] } + const todos = payload?.todos + if (Array.isArray(todos)) { + await setPendingTodoList(todos) + } + break + } case "saveApiConfiguration": if (message.text && message.apiConfiguration) { try { @@ -1485,22 +1525,261 @@ export const webviewMessageHandler = async ( break case "deleteCustomMode": if (message.slug) { - const answer = await vscode.window.showInformationMessage( - t("common:confirmation.delete_custom_mode"), - { modal: true }, - t("common:answers.yes"), - ) + // Get the mode details to determine source and rules folder path + const customModes = await provider.customModesManager.getCustomModes() + const modeToDelete = customModes.find((mode) => mode.slug === message.slug) - if (answer !== t("common:answers.yes")) { + if (!modeToDelete) { break } + // Determine the scope based on source (project or global) + const scope = modeToDelete.source || "global" + + // Determine the rules folder path + let rulesFolderPath: string + if (scope === "project") { + const workspacePath = getWorkspacePath() + if (workspacePath) { + rulesFolderPath = path.join(workspacePath, ".roo", `rules-${message.slug}`) + } else { + rulesFolderPath = path.join(".roo", `rules-${message.slug}`) + } + } else { + // Global scope - use OS home directory + const homeDir = os.homedir() + rulesFolderPath = path.join(homeDir, ".roo", `rules-${message.slug}`) + } + + // Check if the rules folder exists + const rulesFolderExists = await fileExistsAtPath(rulesFolderPath) + + // If this is a check request, send back the folder info + if (message.checkOnly) { + await provider.postMessageToWebview({ + type: "deleteCustomModeCheck", + slug: message.slug, + rulesFolderPath: rulesFolderExists ? rulesFolderPath : undefined, + }) + break + } + + // Delete the mode await provider.customModesManager.deleteCustomMode(message.slug) + + // Delete the rules folder if it exists + if (rulesFolderExists) { + try { + await fs.rm(rulesFolderPath, { recursive: true, force: true }) + provider.log(`Deleted rules folder for mode ${message.slug}: ${rulesFolderPath}`) + } catch (error) { + provider.log(`Failed to delete rules folder for mode ${message.slug}: ${error}`) + // Notify the user about the failure + vscode.window.showErrorMessage( + t("common:errors.delete_rules_folder_failed", { + rulesFolderPath, + error: error instanceof Error ? 
error.message : String(error), + }), + ) + // Continue with mode deletion even if folder deletion fails + } + } + // Switch back to default mode after deletion await updateGlobalState("mode", defaultModeSlug) await provider.postStateToWebview() } break + case "exportMode": + if (message.slug) { + try { + // Get custom mode prompts to check if built-in mode has been customized + const customModePrompts = getGlobalState("customModePrompts") || {} + const customPrompt = customModePrompts[message.slug] + + // Export the mode with any customizations merged directly + const result = await provider.customModesManager.exportModeWithRules(message.slug, customPrompt) + + if (result.success && result.yaml) { + // Get last used directory for export + const lastExportPath = getGlobalState("lastModeExportPath") + let defaultUri: vscode.Uri + + if (lastExportPath) { + // Use the directory from the last export + const lastDir = path.dirname(lastExportPath) + defaultUri = vscode.Uri.file(path.join(lastDir, `${message.slug}-export.yaml`)) + } else { + // Default to workspace or home directory + const workspaceFolders = vscode.workspace.workspaceFolders + if (workspaceFolders && workspaceFolders.length > 0) { + defaultUri = vscode.Uri.file( + path.join(workspaceFolders[0].uri.fsPath, `${message.slug}-export.yaml`), + ) + } else { + defaultUri = vscode.Uri.file(`${message.slug}-export.yaml`) + } + } + + // Show save dialog + const saveUri = await vscode.window.showSaveDialog({ + defaultUri, + filters: { + "YAML files": ["yaml", "yml"], + }, + title: "Save mode export", + }) + + if (saveUri && result.yaml) { + // Save the directory for next time + await updateGlobalState("lastModeExportPath", saveUri.fsPath) + + // Write the file to the selected location + await fs.writeFile(saveUri.fsPath, result.yaml, "utf-8") + + // Send success message to webview + provider.postMessageToWebview({ + type: "exportModeResult", + success: true, + slug: message.slug, + }) + + // Show info message + vscode.window.showInformationMessage(t("common:info.mode_exported", { mode: message.slug })) + } else { + // User cancelled the save dialog + provider.postMessageToWebview({ + type: "exportModeResult", + success: false, + error: "Export cancelled", + slug: message.slug, + }) + } + } else { + // Send error message to webview + provider.postMessageToWebview({ + type: "exportModeResult", + success: false, + error: result.error, + slug: message.slug, + }) + } + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : String(error) + provider.log(`Failed to export mode ${message.slug}: ${errorMessage}`) + + // Send error message to webview + provider.postMessageToWebview({ + type: "exportModeResult", + success: false, + error: errorMessage, + slug: message.slug, + }) + } + } + break + case "importMode": + try { + // Get last used directory for import + const lastImportPath = getGlobalState("lastModeImportPath") + let defaultUri: vscode.Uri | undefined + + if (lastImportPath) { + // Use the directory from the last import + const lastDir = path.dirname(lastImportPath) + defaultUri = vscode.Uri.file(lastDir) + } else { + // Default to workspace or home directory + const workspaceFolders = vscode.workspace.workspaceFolders + if (workspaceFolders && workspaceFolders.length > 0) { + defaultUri = vscode.Uri.file(workspaceFolders[0].uri.fsPath) + } + } + + // Show file picker to select YAML file + const fileUri = await vscode.window.showOpenDialog({ + canSelectFiles: true, + canSelectFolders: false, + canSelectMany: false, + defaultUri, + filters: { + "YAML files": ["yaml", "yml"], + }, + title: "Select mode export file to import", + }) + + if (fileUri && fileUri[0]) { + // Save the directory for next time + await updateGlobalState("lastModeImportPath", fileUri[0].fsPath) + + // Read the file content + const yamlContent = await fs.readFile(fileUri[0].fsPath, "utf-8") + + // Import the mode with the specified source level + const result = await provider.customModesManager.importModeWithRules( + yamlContent, + message.source || "project", // Default to project if not specified + ) + + if (result.success) { + // Update state after importing + const customModes = await provider.customModesManager.getCustomModes() + await updateGlobalState("customModes", customModes) + await provider.postStateToWebview() + + // Send success message to webview + provider.postMessageToWebview({ + type: "importModeResult", + success: true, + }) + + // Show success message + vscode.window.showInformationMessage(t("common:info.mode_imported")) + } else { + // Send error message to webview + provider.postMessageToWebview({ + type: "importModeResult", + success: false, + error: result.error, + }) + + // Show error message + vscode.window.showErrorMessage(t("common:errors.mode_import_failed", { error: result.error })) + } + } else { + // User cancelled the file dialog - reset the importing state + provider.postMessageToWebview({ + type: "importModeResult", + success: false, + error: "cancelled", + }) + } + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error) + provider.log(`Failed to import mode: ${errorMessage}`) + + // Send error message to webview + provider.postMessageToWebview({ + type: "importModeResult", + success: false, + error: errorMessage, + }) + + // Show error message + vscode.window.showErrorMessage(t("common:errors.mode_import_failed", { error: errorMessage })) + } + break + case "checkRulesDirectory": + if (message.slug) { + const hasContent = await provider.customModesManager.checkRulesDirectoryHasContent(message.slug) + + provider.postMessageToWebview({ + type: "checkRulesDirectoryResult", + slug: message.slug, + hasContent: hasContent, + }) + } + break case "humanRelayResponse": if (message.requestId && message.text) { vscode.commands.executeCommand(getCommand("handleHumanRelayResponse"), { @@ -1556,38 +1835,131 @@ export const webviewMessageHandler = async ( break } - case "codebaseIndexConfig": { - const codebaseIndexConfig = message.values ?? 
{ - codebaseIndexEnabled: false, - codebaseIndexQdrantUrl: "http://localhost:6333", - codebaseIndexEmbedderProvider: "openai", - codebaseIndexEmbedderBaseUrl: "", - codebaseIndexEmbedderModelId: "", + + case "saveCodeIndexSettingsAtomic": { + if (!message.codeIndexSettings) { + break } - await updateGlobalState("codebaseIndexConfig", codebaseIndexConfig) + + const settings = message.codeIndexSettings try { + // Check if embedder provider has changed + const currentConfig = getGlobalState("codebaseIndexConfig") || {} + const embedderProviderChanged = + currentConfig.codebaseIndexEmbedderProvider !== settings.codebaseIndexEmbedderProvider + + // Save global state settings atomically (without codebaseIndexEnabled which is now in global settings) + const globalStateConfig = { + ...currentConfig, + codebaseIndexQdrantUrl: settings.codebaseIndexQdrantUrl, + codebaseIndexEmbedderProvider: settings.codebaseIndexEmbedderProvider, + codebaseIndexEmbedderBaseUrl: settings.codebaseIndexEmbedderBaseUrl, + codebaseIndexEmbedderModelId: settings.codebaseIndexEmbedderModelId, + codebaseIndexOpenAiCompatibleBaseUrl: settings.codebaseIndexOpenAiCompatibleBaseUrl, + codebaseIndexOpenAiCompatibleModelDimension: settings.codebaseIndexOpenAiCompatibleModelDimension, + codebaseIndexSearchMaxResults: settings.codebaseIndexSearchMaxResults, + codebaseIndexSearchMinScore: settings.codebaseIndexSearchMinScore, + } + + // Save global state first + await updateGlobalState("codebaseIndexConfig", globalStateConfig) + + // Save secrets directly using context proxy + if (settings.codeIndexOpenAiKey !== undefined) { + await provider.contextProxy.storeSecret("codeIndexOpenAiKey", settings.codeIndexOpenAiKey) + } + if (settings.codeIndexQdrantApiKey !== undefined) { + await provider.contextProxy.storeSecret("codeIndexQdrantApiKey", settings.codeIndexQdrantApiKey) + } + if (settings.codebaseIndexOpenAiCompatibleApiKey !== undefined) { + await provider.contextProxy.storeSecret( + "codebaseIndexOpenAiCompatibleApiKey", + settings.codebaseIndexOpenAiCompatibleApiKey, + ) + } + if (settings.codebaseIndexGeminiApiKey !== undefined) { + await provider.contextProxy.storeSecret( + "codebaseIndexGeminiApiKey", + settings.codebaseIndexGeminiApiKey, + ) + } + + // Send success response first - settings are saved regardless of validation + await provider.postMessageToWebview({ + type: "codeIndexSettingsSaved", + success: true, + settings: globalStateConfig, + }) + + // Update webview state + await provider.postStateToWebview() + + // Then handle validation and initialization if (provider.codeIndexManager) { - await provider.codeIndexManager.handleExternalSettingsChange() + // If embedder provider changed, perform proactive validation + if (embedderProviderChanged) { + try { + // Force handleSettingsChange which will trigger validation + await provider.codeIndexManager.handleSettingsChange() + } catch (error) { + // Validation failed - the error state is already set by handleSettingsChange + provider.log( + `Embedder validation failed after provider change: ${error instanceof Error ? 
error.message : String(error)}`, + ) + // Send validation error to webview + await provider.postMessageToWebview({ + type: "indexingStatusUpdate", + values: provider.codeIndexManager.getCurrentStatus(), + }) + // Exit early - don't try to start indexing with invalid configuration + break + } + } else { + // No provider change, just handle settings normally + try { + await provider.codeIndexManager.handleSettingsChange() + } catch (error) { + // Log but don't fail - settings are saved + provider.log( + `Settings change handling error: ${error instanceof Error ? error.message : String(error)}`, + ) + } + } + + // Wait a bit more to ensure everything is ready + await new Promise((resolve) => setTimeout(resolve, 200)) - // If now configured and enabled, start indexing automatically + // Auto-start indexing if now enabled and configured if (provider.codeIndexManager.isFeatureEnabled && provider.codeIndexManager.isFeatureConfigured) { if (!provider.codeIndexManager.isInitialized) { - await provider.codeIndexManager.initialize(provider.contextProxy) + try { + await provider.codeIndexManager.initialize(provider.contextProxy) + provider.log(`Code index manager initialized after settings save`) + } catch (error) { + provider.log( + `Code index initialization failed: ${error instanceof Error ? error.message : String(error)}`, + ) + // Send error status to webview + await provider.postMessageToWebview({ + type: "indexingStatusUpdate", + values: provider.codeIndexManager.getCurrentStatus(), + }) + } } - // Start indexing in background (no await) - provider.codeIndexManager.startIndexing() } } } catch (error) { - provider.log( - `[CodeIndexManager] Error during background CodeIndexManager configuration/indexing: ${error.message || error}`, - ) + provider.log(`Error saving code index settings: ${error.message || error}`) + await provider.postMessageToWebview({ + type: "codeIndexSettingsSaved", + success: false, + error: error.message || "Failed to save settings", + }) } - - await provider.postStateToWebview() break } + case "requestIndexingStatus": { const status = provider.codeIndexManager!.getCurrentStatus() provider.postMessageToWebview({ @@ -1596,6 +1968,26 @@ export const webviewMessageHandler = async ( }) break } + case "requestCodeIndexSecretStatus": { + // Check if secrets are set using the VSCode context directly for async access + const hasOpenAiKey = !!(await provider.context.secrets.get("codeIndexOpenAiKey")) + const hasQdrantApiKey = !!(await provider.context.secrets.get("codeIndexQdrantApiKey")) + const hasOpenAiCompatibleApiKey = !!(await provider.context.secrets.get( + "codebaseIndexOpenAiCompatibleApiKey", + )) + const hasGeminiApiKey = !!(await provider.context.secrets.get("codebaseIndexGeminiApiKey")) + + provider.postMessageToWebview({ + type: "codeIndexSecretStatus", + values: { + hasOpenAiKey, + hasQdrantApiKey, + hasOpenAiCompatibleApiKey, + hasGeminiApiKey, + }, + }) + break + } case "startIndexing": { try { const manager = provider.codeIndexManager! 
@@ -1628,6 +2020,34 @@ export const webviewMessageHandler = async ( } break } + case "fixMermaidSyntax": + if (message.text && message.requestId) { + try { + const { apiConfiguration } = await provider.getState() + + const prompt = mermaidFixPrompt(message.values?.error || "Unknown syntax error", message.text) + + const fixedCode = await singleCompletionHandler(apiConfiguration, prompt) + + provider.postMessageToWebview({ + type: "mermaidFixResponse", + requestId: message.requestId, + success: true, + fixedCode: fixedCode?.trim() || null, + }) + } catch (error) { + const errorMessage = error instanceof Error ? error.message : "Failed to fix Mermaid syntax" + provider.log(`Error fixing Mermaid syntax: ${errorMessage}`) + + provider.postMessageToWebview({ + type: "mermaidFixResponse", + requestId: message.requestId, + success: false, + error: errorMessage, + }) + } + } + break case "focusPanelRequest": { // Execute the focusPanel command to focus the WebView await vscode.commands.executeCommand(getCommand("focusPanel")) diff --git a/src/i18n/locales/ca/common.json b/src/i18n/locales/ca/common.json index 3ff61fca0aea..91389a136d48 100644 --- a/src/i18n/locales/ca/common.json +++ b/src/i18n/locales/ca/common.json @@ -21,7 +21,7 @@ "confirmation": { "reset_state": "Estàs segur que vols restablir tots els estats i emmagatzematge secret a l'extensió? Això no es pot desfer.", "delete_config_profile": "Estàs segur que vols eliminar aquest perfil de configuració?", - "delete_custom_mode": "Estàs segur que vols eliminar aquest mode personalitzat?", + "delete_custom_mode_with_rules": "Esteu segur que voleu suprimir aquest mode {scope}?\n\nAixò també suprimirà la carpeta de regles associada a:\n{rulesFolderPath}", "delete_message": "Què vols eliminar?", "just_this_message": "Només aquest missatge", "this_and_subsequent": "Aquest i tots els missatges posteriors" @@ -74,13 +74,15 @@ "share_auth_required": "Es requereix autenticació. Si us plau, inicia sessió per compartir tasques.", "share_not_enabled": "La compartició de tasques no està habilitada per a aquesta organització.", "share_task_not_found": "Tasca no trobada o accés denegat.", + "delete_rules_folder_failed": "Error en eliminar la carpeta de regles: {{rulesFolderPath}}. Error: {{error}}", "claudeCode": { "processExited": "El procés Claude Code ha sortit amb codi {{exitCode}}.", "errorOutput": "Sortida d'error: {{output}}", "processExitedWithError": "El procés Claude Code ha sortit amb codi {{exitCode}}. Sortida d'error: {{output}}", "stoppedWithReason": "Claude Code s'ha aturat per la raó: {{reason}}", "apiKeyModelPlanMismatch": "Les claus API i els plans de subscripció permeten models diferents. Assegura't que el model seleccionat estigui inclòs al teu pla." - } + }, + "mode_import_failed": "Ha fallat la importació del mode: {{error}}" }, "warnings": { "no_terminal_content": "No s'ha seleccionat contingut de terminal", @@ -99,7 +101,9 @@ "image_copied_to_clipboard": "URI de dades de la imatge copiada al portapapers", "image_saved": "Imatge desada a {{path}}", "organization_share_link_copied": "Enllaç de compartició d'organització copiat al porta-retalls!", - "public_share_link_copied": "Enllaç de compartició pública copiat al porta-retalls!" 
+ "public_share_link_copied": "Enllaç de compartició pública copiat al porta-retalls!", + "mode_exported": "Mode '{{mode}}' exportat correctament", + "mode_imported": "Mode importat correctament" }, "answers": { "yes": "Sí", @@ -141,6 +145,10 @@ "resetFailed": "Error en restablir els modes personalitzats: {{error}}", "modeNotFound": "Error d'escriptura: Mode no trobat", "noWorkspaceForProject": "No s'ha trobat cap carpeta d'espai de treball per al mode específic del projecte" + }, + "scope": { + "project": "projecte", + "global": "global" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "Has d'estar autenticat amb el compte de Roo Code Cloud de la teva organització.", "verification_failed": "No s'ha pogut verificar l'autenticació de l'organització." } + }, + "prompts": { + "deleteMode": { + "title": "Suprimeix el mode personalitzat", + "description": "Esteu segur que voleu suprimir aquest mode {{scope}}? Això també suprimirà la carpeta de regles associada a: {{rulesFolderPath}}", + "descriptionNoRules": "Esteu segur que voleu suprimir aquest mode personalitzat?", + "cancel": "Cancel·la", + "confirm": "Suprimeix" + } } } diff --git a/src/i18n/locales/ca/embeddings.json b/src/i18n/locales/ca/embeddings.json index 3302ff7acd4c..35be4089d459 100644 --- a/src/i18n/locales/ca/embeddings.json +++ b/src/i18n/locales/ca/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "No s'ha pogut llegir el cos de l'error", "requestFailed": "La sol·licitud de l'API d'Ollama ha fallat amb l'estat {{status}} {{statusText}}: {{errorBody}}", "invalidResponseStructure": "Estructura de resposta no vàlida de l'API d'Ollama: no s'ha trobat la matriu \"embeddings\" o no és una matriu.", - "embeddingFailed": "La incrustació d'Ollama ha fallat: {{message}}" + "embeddingFailed": "La incrustació d'Ollama ha fallat: {{message}}", + "serviceNotRunning": "El servei d'Ollama no s'està executant a {{baseUrl}}", + "serviceUnavailable": "El servei d'Ollama no està disponible (estat: {{status}})", + "modelNotFound": "No s'ha trobat el model d'Ollama: {{modelId}}", + "modelNotEmbeddingCapable": "El model d'Ollama no és capaç de fer incrustacions: {{modelId}}", + "hostNotFound": "No s'ha trobat l'amfitrió d'Ollama: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "Error desconegut en processar el fitxer {{filePath}}", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "No s'ha pogut connectar a la base de dades vectorial Qdrant. Assegura't que Qdrant estigui funcionant i sigui accessible a {{qdrantUrl}}. Error: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "Ha fallat l'autenticació. Comproveu la vostra clau d'API a la configuració.", + "connectionFailed": "No s'ha pogut connectar al servei d'incrustació. Comproveu la vostra configuració de connexió i assegureu-vos que el servei estigui funcionant.", + "modelNotAvailable": "El model especificat no està disponible. Comproveu la vostra configuració de model.", + "configurationError": "Configuració d'incrustació no vàlida. Reviseu la vostra configuració.", + "serviceUnavailable": "El servei d'incrustació no està disponible. Assegureu-vos que estigui funcionant i sigui accessible.", + "invalidEndpoint": "Punt final d'API no vàlid. Comproveu la vostra configuració d'URL.", + "invalidEmbedderConfig": "Configuració d'incrustació no vàlida. Comproveu la vostra configuració.", + "invalidApiKey": "Clau d'API no vàlida. Comproveu la vostra configuració de clau d'API.", + "invalidBaseUrl": "URL base no vàlida. 
Comproveu la vostra configuració d'URL.", + "invalidModel": "Model no vàlid. Comproveu la vostra configuració de model.", + "invalidResponse": "Resposta no vàlida del servei d'incrustació. Comproveu la vostra configuració." } } diff --git a/src/i18n/locales/de/common.json b/src/i18n/locales/de/common.json index cd16ba2b59b2..e43c88a956cd 100644 --- a/src/i18n/locales/de/common.json +++ b/src/i18n/locales/de/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "Möchtest du wirklich alle Zustände und geheimen Speicher in der Erweiterung zurücksetzen? Dies kann nicht rückgängig gemacht werden.", "delete_config_profile": "Möchtest du dieses Konfigurationsprofil wirklich löschen?", - "delete_custom_mode": "Möchtest du diesen benutzerdefinierten Modus wirklich löschen?", + "delete_custom_mode_with_rules": "Bist du sicher, dass du diesen {scope}-Modus löschen möchtest?\n\nDadurch wird auch der zugehörige Regelordner unter folgender Adresse gelöscht:\n{rulesFolderPath}", "delete_message": "Was möchtest du löschen?", "just_this_message": "Nur diese Nachricht", "this_and_subsequent": "Diese und alle nachfolgenden Nachrichten" @@ -70,6 +70,8 @@ "share_auth_required": "Authentifizierung erforderlich. Bitte melde dich an, um Aufgaben zu teilen.", "share_not_enabled": "Aufgabenfreigabe ist für diese Organisation nicht aktiviert.", "share_task_not_found": "Aufgabe nicht gefunden oder Zugriff verweigert.", + "mode_import_failed": "Fehler beim Importieren des Modus: {{error}}", + "delete_rules_folder_failed": "Fehler beim Löschen des Regelordners: {{rulesFolderPath}}. Fehler: {{error}}", "claudeCode": { "processExited": "Claude Code Prozess wurde mit Code {{exitCode}} beendet.", "errorOutput": "Fehlerausgabe: {{output}}", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "Bild-Daten-URI in die Zwischenablage kopiert", "image_saved": "Bild gespeichert unter {{path}}", "organization_share_link_copied": "Organisations-Freigabelink in die Zwischenablage kopiert!", - "public_share_link_copied": "Öffentlicher Freigabelink in die Zwischenablage kopiert!" + "public_share_link_copied": "Öffentlicher Freigabelink in die Zwischenablage kopiert!", + "mode_exported": "Modus '{{mode}}' erfolgreich exportiert", + "mode_imported": "Modus erfolgreich importiert" }, "answers": { "yes": "Ja", @@ -141,6 +145,10 @@ "resetFailed": "Fehler beim Zurücksetzen der benutzerdefinierten Modi: {{error}}", "modeNotFound": "Schreibfehler: Modus nicht gefunden", "noWorkspaceForProject": "Kein Arbeitsbereich-Ordner für projektspezifischen Modus gefunden" + }, + "scope": { + "project": "projekt", + "global": "global" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "Du musst mit dem Roo Code Cloud-Konto deiner Organisation authentifiziert sein.", "verification_failed": "Die Organisationsauthentifizierung konnte nicht verifiziert werden." } + }, + "prompts": { + "deleteMode": { + "title": "Benutzerdefinierten Modus löschen", + "description": "Bist du sicher, dass du diesen {{scope}}-Modus löschen möchtest? 
Dadurch wird auch der zugehörige Regelordner unter {{rulesFolderPath}} gelöscht", + "descriptionNoRules": "Bist du sicher, dass du diesen benutzerdefinierten Modus löschen möchtest?", + "cancel": "Abbrechen", + "confirm": "Löschen" + } } } diff --git a/src/i18n/locales/de/embeddings.json b/src/i18n/locales/de/embeddings.json index 300899fd1b74..7d96ddb511a9 100644 --- a/src/i18n/locales/de/embeddings.json +++ b/src/i18n/locales/de/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "Fehlerinhalt konnte nicht gelesen werden", "requestFailed": "Ollama API-Anfrage fehlgeschlagen mit Status {{status}} {{statusText}}: {{errorBody}}", "invalidResponseStructure": "Ungültige Antwortstruktur von Ollama API: \"embeddings\" Array nicht gefunden oder kein Array.", - "embeddingFailed": "Ollama Einbettung fehlgeschlagen: {{message}}" + "embeddingFailed": "Ollama Einbettung fehlgeschlagen: {{message}}", + "serviceNotRunning": "Ollama-Dienst wird unter {{baseUrl}} nicht ausgeführt", + "serviceUnavailable": "Ollama-Dienst ist nicht verfügbar (Status: {{status}})", + "modelNotFound": "Ollama-Modell nicht gefunden: {{modelId}}", + "modelNotEmbeddingCapable": "Ollama-Modell ist nicht für Einbettungen geeignet: {{modelId}}", + "hostNotFound": "Ollama-Host nicht gefunden: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "Unbekannter Fehler beim Verarbeiten der Datei {{filePath}}", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Verbindung zur Qdrant-Vektordatenbank fehlgeschlagen. Stelle sicher, dass Qdrant läuft und unter {{qdrantUrl}} erreichbar ist. Fehler: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "Authentifizierung fehlgeschlagen. Bitte überprüfe deinen API-Schlüssel in den Einstellungen.", + "connectionFailed": "Verbindung zum Embedder-Dienst fehlgeschlagen. Bitte überprüfe deine Verbindungseinstellungen und stelle sicher, dass der Dienst läuft.", + "modelNotAvailable": "Das angegebene Modell ist nicht verfügbar. Bitte überprüfe deine Modellkonfiguration.", + "configurationError": "Ungültige Embedder-Konfiguration. Bitte überprüfe deine Einstellungen.", + "serviceUnavailable": "Der Embedder-Dienst ist nicht verfügbar. Bitte stelle sicher, dass er läuft und erreichbar ist.", + "invalidEndpoint": "Ungültiger API-Endpunkt. Bitte überprüfe deine URL-Konfiguration.", + "invalidEmbedderConfig": "Ungültige Embedder-Konfiguration. Bitte überprüfe deine Einstellungen.", + "invalidApiKey": "Ungültiger API-Schlüssel. Bitte überprüfe deine API-Schlüssel-Konfiguration.", + "invalidBaseUrl": "Ungültige Basis-URL. Bitte überprüfe deine URL-Konfiguration.", + "invalidModel": "Ungültiges Modell. Bitte überprüfe deine Modellkonfiguration.", + "invalidResponse": "Ungültige Antwort vom Embedder-Dienst. Bitte überprüfe deine Konfiguration." } } diff --git a/src/i18n/locales/en/common.json b/src/i18n/locales/en/common.json index 386986316be4..7197cc1fe49e 100644 --- a/src/i18n/locales/en/common.json +++ b/src/i18n/locales/en/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "Are you sure you want to reset all state and secret storage in the extension? 
This cannot be undone.", "delete_config_profile": "Are you sure you want to delete this configuration profile?", - "delete_custom_mode": "Are you sure you want to delete this custom mode?", + "delete_custom_mode_with_rules": "Are you sure you want to delete this {scope} mode?\n\nThis will also delete the associated rules folder at:\n{rulesFolderPath}", "delete_message": "What would you like to delete?", "just_this_message": "Just this message", "this_and_subsequent": "This and all subsequent messages" @@ -70,6 +70,8 @@ "share_auth_required": "Authentication required. Please sign in to share tasks.", "share_not_enabled": "Task sharing is not enabled for this organization.", "share_task_not_found": "Task not found or access denied.", + "mode_import_failed": "Failed to import mode: {{error}}", + "delete_rules_folder_failed": "Failed to delete rules folder: {{rulesFolderPath}}. Error: {{error}}", "claudeCode": { "processExited": "Claude Code process exited with code {{exitCode}}.", "errorOutput": "Error output: {{output}}", @@ -95,7 +97,9 @@ "organization_share_link_copied": "Organization share link copied to clipboard!", "public_share_link_copied": "Public share link copied to clipboard!", "image_copied_to_clipboard": "Image data URI copied to clipboard", - "image_saved": "Image saved to {{path}}" + "image_saved": "Image saved to {{path}}", + "mode_exported": "Mode '{{mode}}' exported successfully", + "mode_imported": "Mode imported successfully" }, "answers": { "yes": "Yes", @@ -130,6 +134,10 @@ "resetFailed": "Failed to reset custom modes: {{error}}", "modeNotFound": "Write error: Mode not found", "noWorkspaceForProject": "No workspace folder found for project-specific mode" + }, + "scope": { + "project": "project", + "global": "global" } }, "mdm": { @@ -138,5 +146,14 @@ "organization_mismatch": "You must be authenticated with your organization's Roo Code Cloud account.", "verification_failed": "Unable to verify organization authentication." } + }, + "prompts": { + "deleteMode": { + "title": "Delete Custom Mode", + "description": "Are you sure you want to delete this {{scope}} mode? This will also delete the associated rules folder at: {{rulesFolderPath}}", + "descriptionNoRules": "Are you sure you want to delete this custom mode?", + "cancel": "Cancel", + "confirm": "Delete" + } } } diff --git a/src/i18n/locales/en/embeddings.json b/src/i18n/locales/en/embeddings.json index e57f3de0e8fb..012b2323cfa9 100644 --- a/src/i18n/locales/en/embeddings.json +++ b/src/i18n/locales/en/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "Could not read error body", "requestFailed": "Ollama API request failed with status {{status}} {{statusText}}: {{errorBody}}", "invalidResponseStructure": "Invalid response structure from Ollama API: \"embeddings\" array not found or not an array.", - "embeddingFailed": "Ollama embedding failed: {{message}}" + "embeddingFailed": "Ollama embedding failed: {{message}}", + "serviceNotRunning": "Ollama service is not running at {{baseUrl}}", + "serviceUnavailable": "Ollama service is unavailable (status: {{status}})", + "modelNotFound": "Ollama model not found: {{modelId}}", + "modelNotEmbeddingCapable": "Ollama model is not embedding capable: {{modelId}}", + "hostNotFound": "Ollama host not found: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "Unknown error processing file {{filePath}}", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Failed to connect to Qdrant vector database. 
Please ensure Qdrant is running and accessible at {{qdrantUrl}}. Error: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "Authentication failed. Please check your API key in the settings.", + "connectionFailed": "Failed to connect to the embedder service. Please check your connection settings and ensure the service is running.", + "modelNotAvailable": "The specified model is not available. Please check your model configuration.", + "configurationError": "Invalid embedder configuration. Please review your settings.", + "serviceUnavailable": "The embedder service is not available. Please ensure it is running and accessible.", + "invalidEndpoint": "Invalid API endpoint. Please check your URL configuration.", + "invalidEmbedderConfig": "Invalid embedder configuration. Please check your settings.", + "invalidApiKey": "Invalid API key. Please check your API key configuration.", + "invalidBaseUrl": "Invalid base URL. Please check your URL configuration.", + "invalidModel": "Invalid model. Please check your model configuration.", + "invalidResponse": "Invalid response from embedder service. Please check your configuration." } } diff --git a/src/i18n/locales/es/common.json b/src/i18n/locales/es/common.json index 20ffda84dd95..4225aa4743d6 100644 --- a/src/i18n/locales/es/common.json +++ b/src/i18n/locales/es/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "¿Estás seguro de que deseas restablecer todo el estado y el almacenamiento secreto en la extensión? Esta acción no se puede deshacer.", "delete_config_profile": "¿Estás seguro de que deseas eliminar este perfil de configuración?", - "delete_custom_mode": "¿Estás seguro de que deseas eliminar este modo personalizado?", + "delete_custom_mode_with_rules": "¿Estás seguro de que quieres eliminar este modo {scope}?\n\nEsto también eliminará la carpeta de reglas asociada en:\n{rulesFolderPath}", "delete_message": "¿Qué deseas eliminar?", "just_this_message": "Solo este mensaje", "this_and_subsequent": "Este y todos los mensajes posteriores" @@ -70,6 +70,8 @@ "share_auth_required": "Se requiere autenticación. Por favor, inicia sesión para compartir tareas.", "share_not_enabled": "La compartición de tareas no está habilitada para esta organización.", "share_task_not_found": "Tarea no encontrada o acceso denegado.", + "mode_import_failed": "Error al importar el modo: {{error}}", + "delete_rules_folder_failed": "Error al eliminar la carpeta de reglas: {{rulesFolderPath}}. Error: {{error}}", "claudeCode": { "processExited": "El proceso de Claude Code terminó con código {{exitCode}}.", "errorOutput": "Salida de error: {{output}}", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "URI de datos de imagen copiada al portapapeles", "image_saved": "Imagen guardada en {{path}}", "organization_share_link_copied": "¡Enlace de compartición de organización copiado al portapapeles!", - "public_share_link_copied": "¡Enlace de compartición pública copiado al portapapeles!" 
+ "public_share_link_copied": "¡Enlace de compartición pública copiado al portapapeles!", + "mode_exported": "Modo '{{mode}}' exportado correctamente", + "mode_imported": "Modo importado correctamente" }, "answers": { "yes": "Sí", @@ -141,6 +145,10 @@ "resetFailed": "Error al restablecer modos personalizados: {{error}}", "modeNotFound": "Error de escritura: Modo no encontrado", "noWorkspaceForProject": "No se encontró carpeta de espacio de trabajo para modo específico del proyecto" + }, + "scope": { + "project": "proyecto", + "global": "global" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "Debes estar autenticado con la cuenta de Roo Code Cloud de tu organización.", "verification_failed": "No se pudo verificar la autenticación de la organización." } + }, + "prompts": { + "deleteMode": { + "title": "Eliminar modo personalizado", + "description": "¿Estás seguro de que quieres eliminar este modo {{scope}}? Esto también eliminará la carpeta de reglas asociada en: {{rulesFolderPath}}", + "descriptionNoRules": "¿Estás seguro de que quieres eliminar este modo personalizado?", + "cancel": "Cancelar", + "confirm": "Eliminar" + } } } diff --git a/src/i18n/locales/es/embeddings.json b/src/i18n/locales/es/embeddings.json index c2d779536213..5fa46f9c45ec 100644 --- a/src/i18n/locales/es/embeddings.json +++ b/src/i18n/locales/es/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "No se pudo leer el cuerpo del error", "requestFailed": "La solicitud de la API de Ollama falló con estado {{status}} {{statusText}}: {{errorBody}}", "invalidResponseStructure": "Estructura de respuesta inválida de la API de Ollama: array \"embeddings\" no encontrado o no es un array.", - "embeddingFailed": "Incrustación de Ollama falló: {{message}}" + "embeddingFailed": "Incrustación de Ollama falló: {{message}}", + "serviceNotRunning": "El servicio Ollama no se está ejecutando en {{baseUrl}}", + "serviceUnavailable": "El servicio Ollama no está disponible (estado: {{status}})", + "modelNotFound": "No se encuentra el modelo Ollama: {{modelId}}", + "modelNotEmbeddingCapable": "El modelo Ollama no es capaz de realizar incrustaciones: {{modelId}}", + "hostNotFound": "No se encuentra el host de Ollama: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "Error desconocido procesando archivo {{filePath}}", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Error al conectar con la base de datos vectorial Qdrant. Asegúrate de que Qdrant esté funcionando y sea accesible en {{qdrantUrl}}. Error: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "Error de autenticación. Comprueba tu clave de API en los ajustes.", + "connectionFailed": "Error al conectar con el servicio de embedder. Comprueba los ajustes de conexión y asegúrate de que el servicio esté funcionando.", + "modelNotAvailable": "El modelo especificado no está disponible. Comprueba la configuración de tu modelo.", + "configurationError": "Configuración de embedder no válida. Revisa tus ajustes.", + "serviceUnavailable": "El servicio de embedder no está disponible. Asegúrate de que esté funcionando y sea accesible.", + "invalidEndpoint": "Punto de conexión de API no válido. Comprueba la configuración de tu URL.", + "invalidEmbedderConfig": "Configuración de embedder no válida. Comprueba tus ajustes.", + "invalidApiKey": "Clave de API no válida. Comprueba la configuración de tu clave de API.", + "invalidBaseUrl": "URL base no válida. Comprueba la configuración de tu URL.", + "invalidModel": "Modelo no válido. 
Comprueba la configuración de tu modelo.", + "invalidResponse": "Respuesta no válida del servicio de embedder. Comprueba tu configuración." } } diff --git a/src/i18n/locales/fr/common.json b/src/i18n/locales/fr/common.json index 3c7c9b0a67f3..96f1fdfd9f96 100644 --- a/src/i18n/locales/fr/common.json +++ b/src/i18n/locales/fr/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "Êtes-vous sûr de vouloir réinitialiser le global state et le stockage de secrets de l'extension ? Cette action est irréversible.", "delete_config_profile": "Êtes-vous sûr de vouloir supprimer ce profil de configuration ?", - "delete_custom_mode": "Êtes-vous sûr de vouloir supprimer ce mode personnalisé ?", + "delete_custom_mode_with_rules": "Êtes-vous sûr de vouloir supprimer ce mode {scope} ?\n\nCela supprimera également le dossier de règles associé à l'adresse :\n{rulesFolderPath}", "delete_message": "Que souhaitez-vous supprimer ?", "just_this_message": "Uniquement ce message", "this_and_subsequent": "Ce message et tous les messages suivants" @@ -70,6 +70,8 @@ "share_auth_required": "Authentification requise. Veuillez vous connecter pour partager des tâches.", "share_not_enabled": "Le partage de tâches n'est pas activé pour cette organisation.", "share_task_not_found": "Tâche non trouvée ou accès refusé.", + "mode_import_failed": "Échec de l'importation du mode : {{error}}", + "delete_rules_folder_failed": "Échec de la suppression du dossier de règles : {{rulesFolderPath}}. Erreur : {{error}}", "claudeCode": { "processExited": "Le processus Claude Code s'est terminé avec le code {{exitCode}}.", "errorOutput": "Sortie d'erreur : {{output}}", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "URI de données d'image copiée dans le presse-papiers", "image_saved": "Image enregistrée dans {{path}}", "organization_share_link_copied": "Lien de partage d'organisation copié dans le presse-papiers !", - "public_share_link_copied": "Lien de partage public copié dans le presse-papiers !" + "public_share_link_copied": "Lien de partage public copié dans le presse-papiers !", + "mode_exported": "Mode '{{mode}}' exporté avec succès", + "mode_imported": "Mode importé avec succès" }, "answers": { "yes": "Oui", @@ -141,6 +145,10 @@ "resetFailed": "Échec de la réinitialisation des modes personnalisés : {{error}}", "modeNotFound": "Erreur d'écriture : Mode non trouvé", "noWorkspaceForProject": "Aucun dossier d'espace de travail trouvé pour le mode spécifique au projet" + }, + "scope": { + "project": "projet", + "global": "global" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "Vous devez être authentifié avec le compte Roo Code Cloud de votre organisation.", "verification_failed": "Impossible de vérifier l'authentification de l'organisation." } + }, + "prompts": { + "deleteMode": { + "title": "Supprimer le mode personnalisé", + "description": "Êtes-vous sûr de vouloir supprimer ce mode {{scope}} ? 
Cela supprimera également le dossier de règles associé à l'adresse : {{rulesFolderPath}}", + "descriptionNoRules": "Êtes-vous sûr de vouloir supprimer ce mode personnalisé ?", + "cancel": "Annuler", + "confirm": "Supprimer" + } } } diff --git a/src/i18n/locales/fr/embeddings.json b/src/i18n/locales/fr/embeddings.json index 4dbbe6218bba..b6ef0d8786dc 100644 --- a/src/i18n/locales/fr/embeddings.json +++ b/src/i18n/locales/fr/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "Impossible de lire le corps de l'erreur", "requestFailed": "Échec de la requête API Ollama avec le statut {{status}} {{statusText}} : {{errorBody}}", "invalidResponseStructure": "Structure de réponse invalide de l'API Ollama : tableau \"embeddings\" non trouvé ou n'est pas un tableau.", - "embeddingFailed": "Échec de l'embedding Ollama : {{message}}" + "embeddingFailed": "Échec de l'embedding Ollama : {{message}}", + "serviceNotRunning": "Le service Ollama n'est pas en cours d'exécution sur {{baseUrl}}", + "serviceUnavailable": "Le service Ollama est indisponible (statut : {{status}})", + "modelNotFound": "Modèle Ollama introuvable : {{modelId}}", + "modelNotEmbeddingCapable": "Le modèle Ollama n'est pas capable d'intégrer : {{modelId}}", + "hostNotFound": "Hôte Ollama introuvable : {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "Erreur inconnue lors du traitement du fichier {{filePath}}", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Échec de la connexion à la base de données vectorielle Qdrant. Veuillez vous assurer que Qdrant fonctionne et est accessible à {{qdrantUrl}}. Erreur : {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "Échec de l'authentification. Veuillez vérifier votre clé API dans les paramètres.", + "connectionFailed": "Échec de la connexion au service d'embedding. Veuillez vérifier vos paramètres de connexion et vous assurer que le service est en cours d'exécution.", + "modelNotAvailable": "Le modèle spécifié n'est pas disponible. Veuillez vérifier la configuration de votre modèle.", + "configurationError": "Configuration de l'embedder invalide. Veuillez vérifier vos paramètres.", + "serviceUnavailable": "Le service d'embedding n'est pas disponible. Veuillez vous assurer qu'il est en cours d'exécution et accessible.", + "invalidEndpoint": "Point de terminaison d'API invalide. Veuillez vérifier votre configuration d'URL.", + "invalidEmbedderConfig": "Configuration de l'embedder invalide. Veuillez vérifier vos paramètres.", + "invalidApiKey": "Clé API invalide. Veuillez vérifier votre configuration de clé API.", + "invalidBaseUrl": "URL de base invalide. Veuillez vérifier votre configuration d'URL.", + "invalidModel": "Modèle invalide. Veuillez vérifier votre configuration de modèle.", + "invalidResponse": "Réponse invalide du service d'embedder. Veuillez vérifier votre configuration." } } diff --git a/src/i18n/locales/hi/common.json b/src/i18n/locales/hi/common.json index 16698d4ddd2c..0e72c8374bac 100644 --- a/src/i18n/locales/hi/common.json +++ b/src/i18n/locales/hi/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "क्या आप वाकई एक्सटेंशन में सभी स्टेट और गुप्त स्टोरेज रीसेट करना चाहते हैं? 
इसे पूर्ववत नहीं किया जा सकता है।", "delete_config_profile": "क्या आप वाकई इस कॉन्फ़िगरेशन प्रोफ़ाइल को हटाना चाहते हैं?", - "delete_custom_mode": "क्या आप वाकई इस कस्टम मोड को हटाना चाहते हैं?", + "delete_custom_mode_with_rules": "क्या आप वाकई इस {scope} मोड को हटाना चाहते हैं?\n\nयह संबंधित नियम फ़ोल्डर को भी यहाँ हटा देगा:\n{rulesFolderPath}", "delete_message": "आप क्या हटाना चाहते हैं?", "just_this_message": "सिर्फ यह संदेश", "this_and_subsequent": "यह और सभी बाद के संदेश" @@ -70,6 +70,8 @@ "share_auth_required": "प्रमाणीकरण आवश्यक है। कार्य साझा करने के लिए कृपया साइन इन करें।", "share_not_enabled": "इस संगठन के लिए कार्य साझाकरण सक्षम नहीं है।", "share_task_not_found": "कार्य नहीं मिला या पहुंच अस्वीकृत।", + "mode_import_failed": "मोड आयात करने में विफल: {{error}}", + "delete_rules_folder_failed": "नियम फ़ोल्डर हटाने में विफल: {{rulesFolderPath}}। त्रुटि: {{error}}", "claudeCode": { "processExited": "Claude Code प्रक्रिया कोड {{exitCode}} के साथ समाप्त हुई।", "errorOutput": "त्रुटि आउटपुट: {{output}}", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "छवि डेटा URI क्लिपबोर्ड में कॉपी की गई", "image_saved": "छवि {{path}} में सहेजी गई", "organization_share_link_copied": "संगठन साझाकरण लिंक क्लिपबोर्ड में कॉपी किया गया!", - "public_share_link_copied": "सार्वजनिक साझाकरण लिंक क्लिपबोर्ड में कॉपी किया गया!" + "public_share_link_copied": "सार्वजनिक साझाकरण लिंक क्लिपबोर्ड में कॉपी किया गया!", + "mode_exported": "मोड '{{mode}}' सफलतापूर्वक निर्यात किया गया", + "mode_imported": "मोड सफलतापूर्वक आयात किया गया" }, "answers": { "yes": "हां", @@ -126,7 +130,7 @@ "getGroqApiKey": "ग्रोक एपीआई कुंजी प्राप्त करें", "claudeCode": { "pathLabel": "क्लाउड कोड पाथ", - "description": "आपके क्लाउड कोड CLI का वैकल्पिक पाथ। सेट न होने पर डिफ़ॉल्ट रूप से 'claude'。", + "description": "आपके क्लाउड कोड CLI का वैकल्पिक पाथ। सेट न होने पर डिफ़ॉल्ट रूप से 'claude'।", "placeholder": "डिफ़ॉल्ट: claude" } } @@ -141,6 +145,10 @@ "resetFailed": "कस्टम मोड रीसेट विफल: {{error}}", "modeNotFound": "लेखन त्रुटि: मोड नहीं मिला", "noWorkspaceForProject": "प्रोजेक्ट-विशिष्ट मोड के लिए वर्कस्पेस फ़ोल्डर नहीं मिला" + }, + "scope": { + "project": "परियोजना", + "global": "वैश्विक" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "आपको अपने संगठन के Roo Code Cloud खाते से प्रमाणित होना होगा।", "verification_failed": "संगठन प्रमाणीकरण सत्यापित करने में असमर्थ।" } + }, + "prompts": { + "deleteMode": { + "title": "कस्टम मोड हटाएं", + "description": "क्या आप वाकई इस {{scope}} मोड को हटाना चाहते हैं? 
यह संबंधित नियम फ़ोल्डर को भी {{rulesFolderPath}} पर हटा देगा", + "descriptionNoRules": "क्या आप वाकई इस कस्टम मोड को हटाना चाहते हैं?", + "cancel": "रद्द करें", + "confirm": "हटाएं" + } } } diff --git a/src/i18n/locales/hi/embeddings.json b/src/i18n/locales/hi/embeddings.json index 312d42e69cc8..5ec34e56245d 100644 --- a/src/i18n/locales/hi/embeddings.json +++ b/src/i18n/locales/hi/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "त्रुटि सामग्री पढ़ नहीं सका", "requestFailed": "Ollama API अनुरोध स्थिति {{status}} {{statusText}} के साथ विफल: {{errorBody}}", "invalidResponseStructure": "Ollama API से अमान्य प्रतिक्रिया संरचना: \"embeddings\" सरणी नहीं मिली या सरणी नहीं है।", - "embeddingFailed": "Ollama एम्बेडिंग विफल: {{message}}" + "embeddingFailed": "Ollama एम्बेडिंग विफल: {{message}}", + "serviceNotRunning": "ओलामा सेवा {{baseUrl}} पर नहीं चल रही है", + "serviceUnavailable": "ओलामा सेवा अनुपलब्ध है (स्थिति: {{status}})", + "modelNotFound": "ओलामा मॉडल नहीं मिला: {{modelId}}", + "modelNotEmbeddingCapable": "ओलामा मॉडल एम्बेडिंग में सक्षम नहीं है: {{modelId}}", + "hostNotFound": "ओलामा होस्ट नहीं मिला: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "फ़ाइल {{filePath}} प्रसंस्करण में अज्ञात त्रुटि", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Qdrant वेक्टर डेटाबेस से कनेक्ट करने में विफल। कृपया सुनिश्चित करें कि Qdrant चल रहा है और {{qdrantUrl}} पर पहुंच योग्य है। त्रुटि: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "प्रमाणीकरण विफल। कृपया सेटिंग्स में अपनी एपीआई कुंजी जांचें।", + "connectionFailed": "एम्बेडर सेवा से कनेक्ट करने में विफल। कृपया अपनी कनेक्शन सेटिंग्स जांचें और सुनिश्चित करें कि सेवा चल रही है।", + "modelNotAvailable": "निर्दिष्ट मॉडल उपलब्ध नहीं है। कृपया अपनी मॉडल कॉन्फ़िगरेशन जांचें।", + "configurationError": "अमान्य एम्बेडर कॉन्फ़िगरेशन। कृपया अपनी सेटिंग्स की समीक्षा करें।", + "serviceUnavailable": "एम्बेडर सेवा उपलब्ध नहीं है। कृपया सुनिश्चित करें कि यह चल रहा है और पहुंच योग्य है।", + "invalidEndpoint": "अमान्य एपीआई एंडपॉइंट। कृपया अपनी यूआरएल कॉन्फ़िगरेशन जांचें।", + "invalidEmbedderConfig": "अमान्य एम्बेडर कॉन्फ़िगरेशन। कृपया अपनी सेटिंग्स जांचें।", + "invalidApiKey": "अमान्य एपीआई कुंजी। कृपया अपनी एपीआई कुंजी कॉन्फ़िगरेशन जांचें।", + "invalidBaseUrl": "अमान्य बेस यूआरएल। कृपया अपनी यूआरएल कॉन्फ़िगरेशन जांचें।", + "invalidModel": "अमान्य मॉडल। कृपया अपनी मॉडल कॉन्फ़िगरेशन जांचें।", + "invalidResponse": "एम्बेडर सेवा से अमान्य प्रतिक्रिया। कृपया अपनी कॉन्फ़िगरेशन जांचें।" } } diff --git a/src/i18n/locales/id/common.json b/src/i18n/locales/id/common.json index f1512095bc9c..3298515dd978 100644 --- a/src/i18n/locales/id/common.json +++ b/src/i18n/locales/id/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "Apakah kamu yakin ingin mereset semua state dan secret storage di ekstensi? Ini tidak dapat dibatalkan.", "delete_config_profile": "Apakah kamu yakin ingin menghapus profil konfigurasi ini?", - "delete_custom_mode": "Apakah kamu yakin ingin menghapus mode kustom ini?", + "delete_custom_mode_with_rules": "Anda yakin ingin menghapus mode {scope} ini?\n\nIni juga akan menghapus folder aturan terkait di:\n{rulesFolderPath}", "delete_message": "Apa yang ingin kamu hapus?", "just_this_message": "Hanya pesan ini", "this_and_subsequent": "Ini dan semua pesan selanjutnya" @@ -70,6 +70,8 @@ "share_auth_required": "Autentikasi diperlukan. 
Silakan masuk untuk berbagi tugas.", "share_not_enabled": "Berbagi tugas tidak diaktifkan untuk organisasi ini.", "share_task_not_found": "Tugas tidak ditemukan atau akses ditolak.", + "mode_import_failed": "Gagal mengimpor mode: {{error}}", + "delete_rules_folder_failed": "Gagal menghapus folder aturan: {{rulesFolderPath}}. Error: {{error}}", "claudeCode": { "processExited": "Proses Claude Code keluar dengan kode {{exitCode}}.", "errorOutput": "Output error: {{output}}", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "Data URI gambar disalin ke clipboard", "image_saved": "Gambar disimpan ke {{path}}", "organization_share_link_copied": "Tautan berbagi organisasi disalin ke clipboard!", - "public_share_link_copied": "Tautan berbagi publik disalin ke clipboard!" + "public_share_link_copied": "Tautan berbagi publik disalin ke clipboard!", + "mode_exported": "Mode '{{mode}}' berhasil diekspor", + "mode_imported": "Mode berhasil diimpor" }, "answers": { "yes": "Ya", @@ -141,6 +145,10 @@ "resetFailed": "Gagal mereset mode kustom: {{error}}", "modeNotFound": "Kesalahan tulis: Mode tidak ditemukan", "noWorkspaceForProject": "Tidak ditemukan folder workspace untuk mode khusus proyek" + }, + "scope": { + "project": "proyek", + "global": "global" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "Kamu harus diautentikasi dengan akun Roo Code Cloud organisasi kamu.", "verification_failed": "Tidak dapat memverifikasi autentikasi organisasi." } + }, + "prompts": { + "deleteMode": { + "title": "Hapus Mode Kustom", + "description": "Anda yakin ingin menghapus mode {{scope}} ini? Ini juga akan menghapus folder aturan terkait di: {{rulesFolderPath}}", + "descriptionNoRules": "Anda yakin ingin menghapus mode kustom ini?", + "cancel": "Batal", + "confirm": "Hapus" + } } } diff --git a/src/i18n/locales/id/embeddings.json b/src/i18n/locales/id/embeddings.json index abfa9cb354fd..0082ec8dcfa0 100644 --- a/src/i18n/locales/id/embeddings.json +++ b/src/i18n/locales/id/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "Tidak dapat membaca body error", "requestFailed": "Permintaan API Ollama gagal dengan status {{status}} {{statusText}}: {{errorBody}}", "invalidResponseStructure": "Struktur respons tidak valid dari API Ollama: array \"embeddings\" tidak ditemukan atau bukan array.", - "embeddingFailed": "Embedding Ollama gagal: {{message}}" + "embeddingFailed": "Embedding Ollama gagal: {{message}}", + "serviceNotRunning": "Layanan Ollama tidak berjalan di {{baseUrl}}", + "serviceUnavailable": "Layanan Ollama tidak tersedia (status: {{status}})", + "modelNotFound": "Model Ollama tidak ditemukan: {{modelId}}", + "modelNotEmbeddingCapable": "Model Ollama tidak mampu melakukan embedding: {{modelId}}", + "hostNotFound": "Host Ollama tidak ditemukan: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "Error tidak dikenal saat memproses file {{filePath}}", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Gagal terhubung ke database vektor Qdrant. Pastikan Qdrant berjalan dan dapat diakses di {{qdrantUrl}}. Error: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "Autentikasi gagal. Silakan periksa kunci API Anda di pengaturan.", + "connectionFailed": "Gagal terhubung ke layanan embedder. Silakan periksa pengaturan koneksi Anda dan pastikan layanan berjalan.", + "modelNotAvailable": "Model yang ditentukan tidak tersedia. Silakan periksa konfigurasi model Anda.", + "configurationError": "Konfigurasi embedder tidak valid. 
Harap tinjau pengaturan Anda.", + "serviceUnavailable": "Layanan embedder tidak tersedia. Pastikan layanan tersebut berjalan dan dapat diakses.", + "invalidEndpoint": "Endpoint API tidak valid. Silakan periksa konfigurasi URL Anda.", + "invalidEmbedderConfig": "Konfigurasi embedder tidak valid. Silakan periksa pengaturan Anda.", + "invalidApiKey": "Kunci API tidak valid. Silakan periksa konfigurasi kunci API Anda.", + "invalidBaseUrl": "URL dasar tidak valid. Silakan periksa konfigurasi URL Anda.", + "invalidModel": "Model tidak valid. Silakan periksa konfigurasi model Anda.", + "invalidResponse": "Respons tidak valid dari layanan embedder. Silakan periksa konfigurasi Anda." } } diff --git a/src/i18n/locales/it/common.json b/src/i18n/locales/it/common.json index 96408088c37e..6bc5b026d95a 100644 --- a/src/i18n/locales/it/common.json +++ b/src/i18n/locales/it/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "Sei sicuro di voler reimpostare tutti gli stati e l'archiviazione segreta nell'estensione? Questa azione non può essere annullata.", "delete_config_profile": "Sei sicuro di voler eliminare questo profilo di configurazione?", - "delete_custom_mode": "Sei sicuro di voler eliminare questa modalità personalizzata?", + "delete_custom_mode_with_rules": "Sei sicuro di voler eliminare questa modalità {scope}?\n\nQuesto eliminerà anche la cartella delle regole associata in:\n{rulesFolderPath}", "delete_message": "Cosa desideri eliminare?", "just_this_message": "Solo questo messaggio", "this_and_subsequent": "Questo e tutti i messaggi successivi" @@ -70,6 +70,8 @@ "share_auth_required": "Autenticazione richiesta. Accedi per condividere le attività.", "share_not_enabled": "La condivisione delle attività non è abilitata per questa organizzazione.", "share_task_not_found": "Attività non trovata o accesso negato.", + "mode_import_failed": "Importazione della modalità non riuscita: {{error}}", + "delete_rules_folder_failed": "Impossibile eliminare la cartella delle regole: {{rulesFolderPath}}. Errore: {{error}}", "claudeCode": { "processExited": "Il processo Claude Code è terminato con codice {{exitCode}}.", "errorOutput": "Output di errore: {{output}}", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "URI dati dell'immagine copiato negli appunti", "image_saved": "Immagine salvata in {{path}}", "organization_share_link_copied": "Link di condivisione organizzazione copiato negli appunti!", - "public_share_link_copied": "Link di condivisione pubblica copiato negli appunti!" + "public_share_link_copied": "Link di condivisione pubblica copiato negli appunti!", + "mode_exported": "Modalità '{{mode}}' esportata con successo", + "mode_imported": "Modalità importata con successo" }, "answers": { "yes": "Sì", @@ -141,6 +145,10 @@ "resetFailed": "Reset modalità personalizzate fallito: {{error}}", "modeNotFound": "Errore di scrittura: Modalità non trovata", "noWorkspaceForProject": "Nessuna cartella workspace trovata per la modalità specifica del progetto" + }, + "scope": { + "project": "progetto", + "global": "globale" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "Devi essere autenticato con l'account Roo Code Cloud della tua organizzazione.", "verification_failed": "Impossibile verificare l'autenticazione dell'organizzazione." } + }, + "prompts": { + "deleteMode": { + "title": "Elimina Modalità Personalizzata", + "description": "Sei sicuro di voler eliminare questa modalità {{scope}}? 
Questo eliminerà anche la cartella delle regole associata a: {{rulesFolderPath}}", + "descriptionNoRules": "Sei sicuro di voler eliminare questa modalità personalizzata?", + "cancel": "Annulla", + "confirm": "Elimina" + } } } diff --git a/src/i18n/locales/it/embeddings.json b/src/i18n/locales/it/embeddings.json index 5bd716488626..19e6af332f1f 100644 --- a/src/i18n/locales/it/embeddings.json +++ b/src/i18n/locales/it/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "Impossibile leggere il corpo dell'errore", "requestFailed": "Richiesta API Ollama fallita con stato {{status}} {{statusText}}: {{errorBody}}", "invalidResponseStructure": "Struttura di risposta non valida dall'API Ollama: array \"embeddings\" non trovato o non è un array.", - "embeddingFailed": "Embedding Ollama fallito: {{message}}" + "embeddingFailed": "Embedding Ollama fallito: {{message}}", + "serviceNotRunning": "Il servizio Ollama non è in esecuzione su {{baseUrl}}", + "serviceUnavailable": "Il servizio Ollama non è disponibile (stato: {{status}})", + "modelNotFound": "Modello Ollama non trovato: {{modelId}}", + "modelNotEmbeddingCapable": "Il modello Ollama non è in grado di eseguire l'embedding: {{modelId}}", + "hostNotFound": "Host Ollama non trovato: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "Errore sconosciuto nell'elaborazione del file {{filePath}}", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Impossibile connettersi al database vettoriale Qdrant. Assicurati che Qdrant sia in esecuzione e accessibile su {{qdrantUrl}}. Errore: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "Autenticazione fallita. Controlla la tua chiave API nelle impostazioni.", + "connectionFailed": "Connessione al servizio di embedder fallita. Controlla le impostazioni di connessione e assicurati che il servizio sia in esecuzione.", + "modelNotAvailable": "Il modello specificato non è disponibile. Controlla la configurazione del tuo modello.", + "configurationError": "Configurazione dell'embedder non valida. Rivedi le tue impostazioni.", + "serviceUnavailable": "Il servizio di embedder non è disponibile. Assicurati che sia in esecuzione e accessibile.", + "invalidEndpoint": "Endpoint API non valido. Controlla la configurazione del tuo URL.", + "invalidEmbedderConfig": "Configurazione dell'embedder non valida. Controlla le tue impostazioni.", + "invalidApiKey": "Chiave API non valida. Controlla la configurazione della tua chiave API.", + "invalidBaseUrl": "URL di base non valido. Controlla la configurazione del tuo URL.", + "invalidModel": "Modello non valido. Controlla la configurazione del tuo modello.", + "invalidResponse": "Risposta non valida dal servizio embedder. Controlla la tua configurazione." 
} } diff --git a/src/i18n/locales/ja/common.json b/src/i18n/locales/ja/common.json index bdd7f5bbc8b9..79bba7c96555 100644 --- a/src/i18n/locales/ja/common.json +++ b/src/i18n/locales/ja/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "拡張機能のすべての状態とシークレットストレージをリセットしてもよろしいですか?この操作は元に戻せません。", "delete_config_profile": "この設定プロファイルを削除してもよろしいですか?", - "delete_custom_mode": "このカスタムモードを削除してもよろしいですか?", + "delete_custom_mode_with_rules": "この{scope}モードを削除してもよろしいですか?\n\nこれにより、関連するルールフォルダも次の場所で削除されます:\n{rulesFolderPath}", "delete_message": "何を削除しますか?", "just_this_message": "このメッセージのみ", "this_and_subsequent": "これ以降のすべてのメッセージ" @@ -70,6 +70,8 @@ "share_auth_required": "認証が必要です。タスクを共有するにはサインインしてください。", "share_not_enabled": "この組織ではタスク共有が有効になっていません。", "share_task_not_found": "タスクが見つからないか、アクセスが拒否されました。", + "mode_import_failed": "モードのインポートに失敗しました:{{error}}", + "delete_rules_folder_failed": "ルールフォルダの削除に失敗しました:{{rulesFolderPath}}。エラー:{{error}}", "claudeCode": { "processExited": "Claude Code プロセスがコード {{exitCode}} で終了しました。", "errorOutput": "エラー出力:{{output}}", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "画像データURIがクリップボードにコピーされました", "image_saved": "画像を{{path}}に保存しました", "organization_share_link_copied": "組織共有リンクがクリップボードにコピーされました!", - "public_share_link_copied": "公開共有リンクがクリップボードにコピーされました!" + "public_share_link_copied": "公開共有リンクがクリップボードにコピーされました!", + "mode_exported": "モード「{{mode}}」が正常にエクスポートされました", + "mode_imported": "モードが正常にインポートされました" }, "answers": { "yes": "はい", @@ -141,6 +145,10 @@ "resetFailed": "カスタムモードのリセットに失敗しました:{{error}}", "modeNotFound": "書き込みエラー:モードが見つかりません", "noWorkspaceForProject": "プロジェクト固有モード用のワークスペースフォルダーが見つかりません" + }, + "scope": { + "project": "プロジェクト", + "global": "グローバル" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "組織の Roo Code Cloud アカウントで認証する必要があります。", "verification_failed": "組織認証の確認ができませんでした。" } + }, + "prompts": { + "deleteMode": { + "title": "カスタムモードの削除", + "description": "この{{scope}}モードを削除してもよろしいですか?これにより、関連するルールフォルダーも{{rulesFolderPath}}で削除されます", + "descriptionNoRules": "このカスタムモードを削除してもよろしいですか?", + "cancel": "キャンセル", + "confirm": "削除" + } } } diff --git a/src/i18n/locales/ja/embeddings.json b/src/i18n/locales/ja/embeddings.json index 862270a364e9..fcf426a14cfa 100644 --- a/src/i18n/locales/ja/embeddings.json +++ b/src/i18n/locales/ja/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "エラー本文を読み取れませんでした", "requestFailed": "Ollama APIリクエストが失敗しました。ステータス {{status}} {{statusText}}: {{errorBody}}", "invalidResponseStructure": "Ollama APIからの無効な応答構造:\"embeddings\"配列が見つからないか、配列ではありません。", - "embeddingFailed": "Ollama埋め込みが失敗しました:{{message}}" + "embeddingFailed": "Ollama埋め込みが失敗しました:{{message}}", + "serviceNotRunning": "Ollamaサービスは{{baseUrl}}で実行されていません", + "serviceUnavailable": "Ollamaサービスは利用できません(ステータス:{{status}})", + "modelNotFound": "Ollamaモデルが見つかりません:{{modelId}}", + "modelNotEmbeddingCapable": "Ollamaモデルは埋め込みに対応していません:{{modelId}}", + "hostNotFound": "Ollamaホストが見つかりません:{{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "ファイル{{filePath}}の処理中に不明なエラーが発生しました", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Qdrantベクターデータベースへの接続に失敗しました。Qdrantが実行中で{{qdrantUrl}}でアクセス可能であることを確認してください。エラー:{{errorMessage}}" + }, + "validation": { + "authenticationFailed": "認証に失敗しました。設定でAPIキーを確認してください。", + "connectionFailed": "エンベッダーサービスへの接続に失敗しました。接続設定を確認し、サービスが実行されていることを確認してください。", + "modelNotAvailable": "指定されたモデルは利用できません。モデル構成を確認してください。", + "configurationError": "無効なエンベッダー構成です。設定を確認してください。", + "serviceUnavailable": "エンベッダーサービスは利用できません。実行中でアクセス可能であることを確認してください。", 
+ "invalidEndpoint": "無効なAPIエンドポイントです。URL構成を確認してください。", + "invalidEmbedderConfig": "無効なエンベッダー構成です。設定を確認してください。", + "invalidApiKey": "無効なAPIキーです。APIキー構成を確認してください。", + "invalidBaseUrl": "無効なベースURLです。URL構成を確認してください。", + "invalidModel": "無効なモデルです。モデル構成を確認してください。", + "invalidResponse": "エンベッダーサービスからの無効な応答です。設定を確認してください。" } } diff --git a/src/i18n/locales/ko/common.json b/src/i18n/locales/ko/common.json index 205c49b6c38a..fffe82742fb3 100644 --- a/src/i18n/locales/ko/common.json +++ b/src/i18n/locales/ko/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "확장 프로그램의 모든 상태와 보안 저장소를 재설정하시겠습니까? 이 작업은 취소할 수 없습니다.", "delete_config_profile": "이 구성 프로필을 삭제하시겠습니까?", - "delete_custom_mode": "이 사용자 지정 모드를 삭제하시겠습니까?", + "delete_custom_mode_with_rules": "이 {scope} 모드를 삭제하시겠습니까?\n\n이렇게 하면 연결된 규칙 폴더도 다음 위치에서 삭제됩니다:\n{rulesFolderPath}", "delete_message": "무엇을 삭제하시겠습니까?", "just_this_message": "이 메시지만", "this_and_subsequent": "이 메시지와 모든 후속 메시지" @@ -70,6 +70,8 @@ "share_auth_required": "인증이 필요합니다. 작업을 공유하려면 로그인하세요.", "share_not_enabled": "이 조직에서는 작업 공유가 활성화되지 않았습니다.", "share_task_not_found": "작업을 찾을 수 없거나 액세스가 거부되었습니다.", + "mode_import_failed": "모드 가져오기 실패: {{error}}", + "delete_rules_folder_failed": "규칙 폴더 삭제 실패: {{rulesFolderPath}}. 오류: {{error}}", "claudeCode": { "processExited": "Claude Code 프로세스가 코드 {{exitCode}}로 종료되었습니다.", "errorOutput": "오류 출력: {{output}}", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "이미지 데이터 URI가 클립보드에 복사되었습니다", "image_saved": "이미지가 {{path}}에 저장되었습니다", "organization_share_link_copied": "조직 공유 링크가 클립보드에 복사되었습니다!", - "public_share_link_copied": "공개 공유 링크가 클립보드에 복사되었습니다!" + "public_share_link_copied": "공개 공유 링크가 클립보드에 복사되었습니다!", + "mode_exported": "'{{mode}}' 모드가 성공적으로 내보내졌습니다", + "mode_imported": "모드를 성공적으로 가져왔습니다" }, "answers": { "yes": "예", @@ -141,6 +145,10 @@ "resetFailed": "사용자 정의 모드 재설정 실패: {{error}}", "modeNotFound": "쓰기 오류: 모드를 찾을 수 없습니다", "noWorkspaceForProject": "프로젝트별 모드용 작업 공간 폴더를 찾을 수 없습니다" + }, + "scope": { + "project": "프로젝트", + "global": "글로벌" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "조직의 Roo Code Cloud 계정으로 인증해야 합니다.", "verification_failed": "조직 인증을 확인할 수 없습니다." } + }, + "prompts": { + "deleteMode": { + "title": "사용자 정의 모드 삭제", + "description": "이 {{scope}} 모드를 삭제하시겠습니까? 이렇게 하면 {{rulesFolderPath}}의 관련 규칙 폴더도 삭제됩니다.", + "descriptionNoRules": "이 사용자 정의 모드를 삭제하시겠습니까?", + "cancel": "취소", + "confirm": "삭제" + } } } diff --git a/src/i18n/locales/ko/embeddings.json b/src/i18n/locales/ko/embeddings.json index 37877bfa9722..16d119c959c7 100644 --- a/src/i18n/locales/ko/embeddings.json +++ b/src/i18n/locales/ko/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "오류 본문을 읽을 수 없습니다", "requestFailed": "Ollama API 요청이 실패했습니다. 상태 {{status}} {{statusText}}: {{errorBody}}", "invalidResponseStructure": "Ollama API에서 잘못된 응답 구조: \"embeddings\" 배열을 찾을 수 없거나 배열이 아닙니다.", - "embeddingFailed": "Ollama 임베딩 실패: {{message}}" + "embeddingFailed": "Ollama 임베딩 실패: {{message}}", + "serviceNotRunning": "Ollama 서비스가 {{baseUrl}}에서 실행되고 있지 않습니다", + "serviceUnavailable": "Ollama 서비스를 사용할 수 없습니다 (상태: {{status}})", + "modelNotFound": "Ollama 모델을 찾을 수 없습니다: {{modelId}}", + "modelNotEmbeddingCapable": "Ollama 모델은 임베딩이 불가능합니다: {{modelId}}", + "hostNotFound": "Ollama 호스트를 찾을 수 없습니다: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "파일 {{filePath}} 처리 중 알 수 없는 오류", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Qdrant 벡터 데이터베이스에 연결하지 못했습니다. Qdrant가 실행 중이고 {{qdrantUrl}}에서 접근 가능한지 확인하세요. 
오류: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "인증에 실패했습니다. 설정에서 API 키를 확인하세요.", + "connectionFailed": "임베더 서비스에 연결하지 못했습니다. 연결 설정을 확인하고 서비스가 실행 중인지 확인하세요.", + "modelNotAvailable": "지정된 모델을 사용할 수 없습니다. 모델 구성을 확인하세요.", + "configurationError": "잘못된 임베더 구성입니다. 설정을 검토하세요.", + "serviceUnavailable": "임베더 서비스를 사용할 수 없습니다. 실행 중이고 액세스 가능한지 확인하세요.", + "invalidEndpoint": "잘못된 API 엔드포인트입니다. URL 구성을 확인하세요.", + "invalidEmbedderConfig": "잘못된 임베더 구성입니다. 설정을 확인하세요.", + "invalidApiKey": "잘못된 API 키입니다. API 키 구성을 확인하세요.", + "invalidBaseUrl": "잘못된 기본 URL입니다. URL 구성을 확인하세요.", + "invalidModel": "잘못된 모델입니다. 모델 구성을 확인하세요.", + "invalidResponse": "임베더 서비스에서 잘못된 응답이 왔습니다. 구성을 확인하세요." } } diff --git a/src/i18n/locales/nl/common.json b/src/i18n/locales/nl/common.json index 8f2be38bd53e..fd9e37b16a74 100644 --- a/src/i18n/locales/nl/common.json +++ b/src/i18n/locales/nl/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "Weet je zeker dat je alle status en geheime opslag in de extensie wilt resetten? Dit kan niet ongedaan worden gemaakt.", "delete_config_profile": "Weet je zeker dat je dit configuratieprofiel wilt verwijderen?", - "delete_custom_mode": "Weet je zeker dat je deze aangepaste modus wilt verwijderen?", + "delete_custom_mode_with_rules": "Weet je zeker dat je deze {scope}-modus wilt verwijderen?\n\nDit verwijdert ook de bijbehorende regelsmap op:\n{rulesFolderPath}", "delete_message": "Wat wil je verwijderen?", "just_this_message": "Alleen dit bericht", "this_and_subsequent": "Dit en alle volgende berichten" @@ -70,6 +70,8 @@ "share_auth_required": "Authenticatie vereist. Log in om taken te delen.", "share_not_enabled": "Taken delen is niet ingeschakeld voor deze organisatie.", "share_task_not_found": "Taak niet gevonden of toegang geweigerd.", + "mode_import_failed": "Importeren van modus mislukt: {{error}}", + "delete_rules_folder_failed": "Kan regelmap niet verwijderen: {{rulesFolderPath}}. Fout: {{error}}", "claudeCode": { "processExited": "Claude Code proces beëindigd met code {{exitCode}}.", "errorOutput": "Foutuitvoer: {{output}}", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "Afbeelding data-URI gekopieerd naar klembord", "image_saved": "Afbeelding opgeslagen naar {{path}}", "organization_share_link_copied": "Organisatie deel-link gekopieerd naar klembord!", - "public_share_link_copied": "Openbare deel-link gekopieerd naar klembord!" + "public_share_link_copied": "Openbare deel-link gekopieerd naar klembord!", + "mode_exported": "Modus '{{mode}}' succesvol geëxporteerd", + "mode_imported": "Modus succesvol geïmporteerd" }, "answers": { "yes": "Ja", @@ -141,6 +145,10 @@ "resetFailed": "Aangepaste modi resetten mislukt: {{error}}", "modeNotFound": "Schrijffout: Modus niet gevonden", "noWorkspaceForProject": "Geen workspace map gevonden voor projectspecifieke modus" + }, + "scope": { + "project": "project", + "global": "globaal" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "Je moet geauthenticeerd zijn met het Roo Code Cloud-account van je organisatie.", "verification_failed": "Kan organisatie-authenticatie niet verifiëren." } + }, + "prompts": { + "deleteMode": { + "title": "Aangepaste modus verwijderen", + "description": "Weet je zeker dat je deze {{scope}}-modus wilt verwijderen? 
Dit zal ook de bijbehorende regelsmap op {{rulesFolderPath}} verwijderen", + "descriptionNoRules": "Weet je zeker dat je deze aangepaste modus wilt verwijderen?", + "cancel": "Annuleren", + "confirm": "Verwijderen" + } } } diff --git a/src/i18n/locales/nl/embeddings.json b/src/i18n/locales/nl/embeddings.json index 7256b0973bf9..9eeb5a04eaf1 100644 --- a/src/i18n/locales/nl/embeddings.json +++ b/src/i18n/locales/nl/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "Kon foutinhoud niet lezen", "requestFailed": "Ollama API-verzoek mislukt met status {{status}} {{statusText}}: {{errorBody}}", "invalidResponseStructure": "Ongeldige responsstructuur van Ollama API: \"embeddings\" array niet gevonden of is geen array.", - "embeddingFailed": "Ollama insluiting mislukt: {{message}}" + "embeddingFailed": "Ollama insluiting mislukt: {{message}}", + "serviceNotRunning": "Ollama-service draait niet op {{baseUrl}}", + "serviceUnavailable": "Ollama-service is niet beschikbaar (status: {{status}})", + "modelNotFound": "Ollama-model niet gevonden: {{modelId}}", + "modelNotEmbeddingCapable": "Ollama-model is niet in staat tot insluiten: {{modelId}}", + "hostNotFound": "Ollama-host niet gevonden: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "Onbekende fout bij verwerken van bestand {{filePath}}", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Kan geen verbinding maken met Qdrant vectordatabase. Zorg ervoor dat Qdrant draait en toegankelijk is op {{qdrantUrl}}. Fout: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "Authenticatie mislukt. Controleer je API-sleutel in de instellingen.", + "connectionFailed": "Verbinding met de embedder-service mislukt. Controleer je verbindingsinstellingen en zorg ervoor dat de service draait.", + "modelNotAvailable": "Het opgegeven model is niet beschikbaar. Controleer je modelconfiguratie.", + "configurationError": "Ongeldige embedder-configuratie. Controleer je instellingen.", + "serviceUnavailable": "De embedder-service is niet beschikbaar. Zorg ervoor dat deze draait en toegankelijk is.", + "invalidEndpoint": "Ongeldig API-eindpunt. Controleer je URL-configuratie.", + "invalidEmbedderConfig": "Ongeldige embedder-configuratie. Controleer je instellingen.", + "invalidApiKey": "Ongeldige API-sleutel. Controleer je API-sleutelconfiguratie.", + "invalidBaseUrl": "Ongeldige basis-URL. Controleer je URL-configuratie.", + "invalidModel": "Ongeldig model. Controleer je modelconfiguratie.", + "invalidResponse": "Ongeldige reactie van embedder-service. Controleer je configuratie." } } diff --git a/src/i18n/locales/pl/common.json b/src/i18n/locales/pl/common.json index be7f6e8ee709..4163eb4fc72e 100644 --- a/src/i18n/locales/pl/common.json +++ b/src/i18n/locales/pl/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "Czy na pewno chcesz zresetować wszystkie stany i tajne magazyny w rozszerzeniu? Tej operacji nie można cofnąć.", "delete_config_profile": "Czy na pewno chcesz usunąć ten profil konfiguracyjny?", - "delete_custom_mode": "Czy na pewno chcesz usunąć ten niestandardowy tryb?", + "delete_custom_mode_with_rules": "Czy na pewno chcesz usunąć ten tryb {scope}?\n\nSpowoduje to również usunięcie powiązanego folderu reguł pod adresem:\n{rulesFolderPath}", "delete_message": "Co chcesz usunąć?", "just_this_message": "Tylko tę wiadomość", "this_and_subsequent": "Tę i wszystkie kolejne wiadomości" @@ -70,6 +70,8 @@ "share_auth_required": "Wymagana autoryzacja. 
Zaloguj się, aby udostępniać zadania.", "share_not_enabled": "Udostępnianie zadań nie jest włączone dla tej organizacji.", "share_task_not_found": "Zadanie nie znalezione lub dostęp odmówiony.", + "mode_import_failed": "Import trybu nie powiódł się: {{error}}", + "delete_rules_folder_failed": "Nie udało się usunąć folderu reguł: {{rulesFolderPath}}. Błąd: {{error}}", "claudeCode": { "processExited": "Proces Claude Code zakończył się kodem {{exitCode}}.", "errorOutput": "Wyjście błędu: {{output}}", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "URI danych obrazu skopiowane do schowka", "image_saved": "Obraz zapisany w {{path}}", "organization_share_link_copied": "Link udostępniania organizacji skopiowany do schowka!", - "public_share_link_copied": "Publiczny link udostępniania skopiowany do schowka!" + "public_share_link_copied": "Publiczny link udostępniania skopiowany do schowka!", + "mode_exported": "Tryb '{{mode}}' pomyślnie wyeksportowany", + "mode_imported": "Tryb pomyślnie zaimportowany" }, "answers": { "yes": "Tak", @@ -141,6 +145,10 @@ "resetFailed": "Resetowanie trybów niestandardowych nie powiodło się: {{error}}", "modeNotFound": "Błąd zapisu: Tryb nie został znaleziony", "noWorkspaceForProject": "Nie znaleziono folderu obszaru roboczego dla trybu specyficznego dla projektu" + }, + "scope": { + "project": "projekt", + "global": "globalny" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "Musisz być uwierzytelniony kontem Roo Code Cloud swojej organizacji.", "verification_failed": "Nie można zweryfikować uwierzytelnienia organizacji." } + }, + "prompts": { + "deleteMode": { + "title": "Usuń tryb niestandardowy", + "description": "Czy na pewno chcesz usunąć ten tryb {{scope}}? Spowoduje to również usunięcie powiązanego folderu z regułami w {{rulesFolderPath}}", + "descriptionNoRules": "Czy na pewno chcesz usunąć ten tryb niestandardowy?", + "cancel": "Anuluj", + "confirm": "Usuń" + } } } diff --git a/src/i18n/locales/pl/embeddings.json b/src/i18n/locales/pl/embeddings.json index c3e160869b5b..dd10c1ec4c8a 100644 --- a/src/i18n/locales/pl/embeddings.json +++ b/src/i18n/locales/pl/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "Nie można odczytać treści błędu", "requestFailed": "Żądanie API Ollama nie powiodło się ze statusem {{status}} {{statusText}}: {{errorBody}}", "invalidResponseStructure": "Nieprawidłowa struktura odpowiedzi z API Ollama: tablica \"embeddings\" nie została znaleziona lub nie jest tablicą.", - "embeddingFailed": "Osadzenie Ollama nie powiodło się: {{message}}" + "embeddingFailed": "Osadzenie Ollama nie powiodło się: {{message}}", + "serviceNotRunning": "Usługa Ollama nie jest uruchomiona pod adresem {{baseUrl}}", + "serviceUnavailable": "Usługa Ollama jest niedostępna (status: {{status}})", + "modelNotFound": "Nie znaleziono modelu Ollama: {{modelId}}", + "modelNotEmbeddingCapable": "Model Ollama nie jest zdolny do osadzania: {{modelId}}", + "hostNotFound": "Nie znaleziono hosta Ollama: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "Nieznany błąd podczas przetwarzania pliku {{filePath}}", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Nie udało się połączyć z bazą danych wektorowych Qdrant. Upewnij się, że Qdrant jest uruchomiony i dostępny pod adresem {{qdrantUrl}}. Błąd: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "Uwierzytelnianie nie powiodło się. Sprawdź swój klucz API w ustawieniach.", + "connectionFailed": "Nie udało się połączyć z usługą embeddera. 
Sprawdź ustawienia połączenia i upewnij się, że usługa jest uruchomiona.", + "modelNotAvailable": "Określony model jest niedostępny. Sprawdź konfigurację modelu.", + "configurationError": "Nieprawidłowa konfiguracja embeddera. Sprawdź swoje ustawienia.", + "serviceUnavailable": "Usługa embeddera jest niedostępna. Upewnij się, że jest uruchomiona i dostępna.", + "invalidEndpoint": "Nieprawidłowy punkt końcowy API. Sprawdź konfigurację adresu URL.", + "invalidEmbedderConfig": "Nieprawidłowa konfiguracja embeddera. Sprawdź swoje ustawienia.", + "invalidApiKey": "Nieprawidłowy klucz API. Sprawdź konfigurację klucza API.", + "invalidBaseUrl": "Nieprawidłowy podstawowy adres URL. Sprawdź konfigurację adresu URL.", + "invalidModel": "Nieprawidłowy model. Sprawdź konfigurację modelu.", + "invalidResponse": "Nieprawidłowa odpowiedź z usługi embedder. Sprawdź swoją konfigurację." } } diff --git a/src/i18n/locales/pt-BR/common.json b/src/i18n/locales/pt-BR/common.json index d98b90325a45..3c23671f3c76 100644 --- a/src/i18n/locales/pt-BR/common.json +++ b/src/i18n/locales/pt-BR/common.json @@ -21,7 +21,7 @@ "confirmation": { "reset_state": "Tem certeza de que deseja redefinir todo o estado e armazenamento secreto na extensão? Isso não pode ser desfeito.", "delete_config_profile": "Tem certeza de que deseja excluir este perfil de configuração?", - "delete_custom_mode": "Tem certeza de que deseja excluir este modo personalizado?", + "delete_custom_mode_with_rules": "Tem certeza de que deseja excluir este modo {scope}?\n\nIsso também excluirá a pasta de regras associada em:\n{rulesFolderPath}", "delete_message": "O que você gostaria de excluir?", "just_this_message": "Apenas esta mensagem", "this_and_subsequent": "Esta e todas as mensagens subsequentes" @@ -74,6 +74,8 @@ "share_auth_required": "Autenticação necessária. Faça login para compartilhar tarefas.", "share_not_enabled": "O compartilhamento de tarefas não está habilitado para esta organização.", "share_task_not_found": "Tarefa não encontrada ou acesso negado.", + "mode_import_failed": "Falha ao importar o modo: {{error}}", + "delete_rules_folder_failed": "Falha ao excluir pasta de regras: {{rulesFolderPath}}. Erro: {{error}}", "claudeCode": { "processExited": "O processo Claude Code saiu com código {{exitCode}}.", "errorOutput": "Saída de erro: {{output}}", @@ -99,7 +101,9 @@ "image_copied_to_clipboard": "URI de dados da imagem copiada para a área de transferência", "image_saved": "Imagem salva em {{path}}", "organization_share_link_copied": "Link de compartilhamento da organização copiado para a área de transferência!", - "public_share_link_copied": "Link de compartilhamento público copiado para a área de transferência!" + "public_share_link_copied": "Link de compartilhamento público copiado para a área de transferência!", + "mode_exported": "Modo '{{mode}}' exportado com sucesso", + "mode_imported": "Modo importado com sucesso" }, "answers": { "yes": "Sim", @@ -141,6 +145,10 @@ "resetFailed": "Falha ao redefinir modos personalizados: {{error}}", "modeNotFound": "Erro de escrita: Modo não encontrado", "noWorkspaceForProject": "Nenhuma pasta de workspace encontrada para modo específico do projeto" + }, + "scope": { + "project": "projeto", + "global": "global" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "Você deve estar autenticado com a conta Roo Code Cloud da sua organização.", "verification_failed": "Não foi possível verificar a autenticação da organização." 
} + }, + "prompts": { + "deleteMode": { + "title": "Excluir Modo Personalizado", + "description": "Tem certeza de que deseja excluir este modo {{scope}}? Isso também excluirá a pasta de regras associada em: {{rulesFolderPath}}", + "descriptionNoRules": "Tem certeza de que deseja excluir este modo personalizado?", + "cancel": "Cancelar", + "confirm": "Excluir" + } } } diff --git a/src/i18n/locales/pt-BR/embeddings.json b/src/i18n/locales/pt-BR/embeddings.json index 6b9747526518..ec1db0711342 100644 --- a/src/i18n/locales/pt-BR/embeddings.json +++ b/src/i18n/locales/pt-BR/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "Não foi possível ler o corpo do erro", "requestFailed": "Solicitação da API Ollama falhou com status {{status}} {{statusText}}: {{errorBody}}", "invalidResponseStructure": "Estrutura de resposta inválida da API Ollama: array \"embeddings\" não encontrado ou não é um array.", - "embeddingFailed": "Embedding Ollama falhou: {{message}}" + "embeddingFailed": "Embedding Ollama falhou: {{message}}", + "serviceNotRunning": "O serviço Ollama não está em execução em {{baseUrl}}", + "serviceUnavailable": "O serviço Ollama não está disponível (status: {{status}})", + "modelNotFound": "Modelo Ollama não encontrado: {{modelId}}", + "modelNotEmbeddingCapable": "O modelo Ollama não é capaz de embedding: {{modelId}}", + "hostNotFound": "Host Ollama não encontrado: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "Erro desconhecido ao processar arquivo {{filePath}}", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Falha ao conectar com o banco de dados vetorial Qdrant. Certifique-se de que o Qdrant esteja rodando e acessível em {{qdrantUrl}}. Erro: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "Falha na autenticação. Verifique sua chave de API nas configurações.", + "connectionFailed": "Falha ao conectar ao serviço do embedder. Verifique suas configurações de conexão e garanta que o serviço esteja em execução.", + "modelNotAvailable": "O modelo especificado não está disponível. Verifique a configuração do seu modelo.", + "configurationError": "Configuração do embedder inválida. Revise suas configurações.", + "serviceUnavailable": "O serviço do embedder não está disponível. Garanta que ele esteja em execução e acessível.", + "invalidEndpoint": "Endpoint de API inválido. Verifique sua configuração de URL.", + "invalidEmbedderConfig": "Configuração do embedder inválida. Verifique suas configurações.", + "invalidApiKey": "Chave de API inválida. Verifique sua configuração de chave de API.", + "invalidBaseUrl": "URL base inválida. Verifique sua configuração de URL.", + "invalidModel": "Modelo inválido. Verifique a configuração do seu modelo.", + "invalidResponse": "Resposta inválida do serviço de embedder. Verifique sua configuração." } } diff --git a/src/i18n/locales/ru/common.json b/src/i18n/locales/ru/common.json index 037957f046b9..bcd28e3e9277 100644 --- a/src/i18n/locales/ru/common.json +++ b/src/i18n/locales/ru/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "Вы уверены, что хотите сбросить все состояние и секретное хранилище в расширении? 
Это действие нельзя отменить.", "delete_config_profile": "Вы уверены, что хотите удалить этот профиль конфигурации?", - "delete_custom_mode": "Вы уверены, что хотите удалить этот пользовательский режим?", + "delete_custom_mode_with_rules": "Вы уверены, что хотите удалить этот режим {scope}?\n\nЭто также приведет к удалению соответствующей папки правил по адресу:\n{rulesFolderPath}", "delete_message": "Что вы хотите удалить?", "just_this_message": "Только это сообщение", "this_and_subsequent": "Это и все последующие сообщения" @@ -70,6 +70,8 @@ "share_auth_required": "Требуется аутентификация. Войдите в систему для совместного доступа к задачам.", "share_not_enabled": "Совместный доступ к задачам не включен для этой организации.", "share_task_not_found": "Задача не найдена или доступ запрещен.", + "mode_import_failed": "Не удалось импортировать режим: {{error}}", + "delete_rules_folder_failed": "Не удалось удалить папку правил: {{rulesFolderPath}}. Ошибка: {{error}}", "claudeCode": { "processExited": "Процесс Claude Code завершился с кодом {{exitCode}}.", "errorOutput": "Вывод ошибки: {{output}}", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "URI данных изображения скопирован в буфер обмена", "image_saved": "Изображение сохранено в {{path}}", "organization_share_link_copied": "Ссылка для совместного доступа организации скопирована в буфер обмена!", - "public_share_link_copied": "Публичная ссылка для совместного доступа скопирована в буфер обмена!" + "public_share_link_copied": "Публичная ссылка для совместного доступа скопирована в буфер обмена!", + "mode_exported": "Режим '{{mode}}' успешно экспортирован", + "mode_imported": "Режим успешно импортирован" }, "answers": { "yes": "Да", @@ -141,6 +145,10 @@ "resetFailed": "Не удалось сбросить пользовательские режимы: {{error}}", "modeNotFound": "Ошибка записи: Режим не найден", "noWorkspaceForProject": "Не найдена папка рабочего пространства для режима, специфичного для проекта" + }, + "scope": { + "project": "проект", + "global": "глобальный" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "Вы должны быть аутентифицированы с учетной записью Roo Code Cloud вашей организации.", "verification_failed": "Не удается проверить аутентификацию организации." } + }, + "prompts": { + "deleteMode": { + "title": "Удалить пользовательский режим", + "description": "Вы уверены, что хотите удалить этот режим {{scope}}? 
Это также удалит связанную папку правил по адресу: {{rulesFolderPath}}", + "descriptionNoRules": "Вы уверены, что хотите удалить этот пользовательский режим?", + "cancel": "Отмена", + "confirm": "Удалить" + } } } diff --git a/src/i18n/locales/ru/embeddings.json b/src/i18n/locales/ru/embeddings.json index c6143816e821..931dca5bbd63 100644 --- a/src/i18n/locales/ru/embeddings.json +++ b/src/i18n/locales/ru/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "Не удалось прочитать тело ошибки", "requestFailed": "Запрос к API Ollama не удался со статусом {{status}} {{statusText}}: {{errorBody}}", "invalidResponseStructure": "Неверная структура ответа от API Ollama: массив \"embeddings\" не найден или не является массивом.", - "embeddingFailed": "Вложение Ollama не удалось: {{message}}" + "embeddingFailed": "Вложение Ollama не удалось: {{message}}", + "serviceNotRunning": "Сервис Ollama не запущен по адресу {{baseUrl}}", + "serviceUnavailable": "Сервис Ollama недоступен (статус: {{status}})", + "modelNotFound": "Модель Ollama не найдена: {{modelId}}", + "modelNotEmbeddingCapable": "Модель Ollama не способна к вложению: {{modelId}}", + "hostNotFound": "Хост Ollama не найден: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "Неизвестная ошибка при обработке файла {{filePath}}", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Не удалось подключиться к векторной базе данных Qdrant. Убедитесь, что Qdrant запущен и доступен по адресу {{qdrantUrl}}. Ошибка: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "Ошибка аутентификации. Проверьте свой ключ API в настройках.", + "connectionFailed": "Не удалось подключиться к службе эмбеддера. Проверьте настройки подключения и убедитесь, что служба запущена.", + "modelNotAvailable": "Указанная модель недоступна. Проверьте конфигурацию модели.", + "configurationError": "Неверная конфигурация эмбеддера. Проверьте свои настройки.", + "serviceUnavailable": "Служба эмбеддера недоступна. Убедитесь, что она запущена и доступна.", + "invalidEndpoint": "Неверная конечная точка API. Проверьте конфигурацию URL.", + "invalidEmbedderConfig": "Неверная конфигурация эмбеддера. Проверьте свои настройки.", + "invalidApiKey": "Неверный ключ API. Проверьте конфигурацию ключа API.", + "invalidBaseUrl": "Неверный базовый URL. Проверьте конфигурацию URL.", + "invalidModel": "Неверная модель. Проверьте конфигурацию модели.", + "invalidResponse": "Неверный ответ от службы embedder. Проверьте вашу конфигурацию." } } diff --git a/src/i18n/locales/tr/common.json b/src/i18n/locales/tr/common.json index b77b8a4e1c3c..e8863bc1093c 100644 --- a/src/i18n/locales/tr/common.json +++ b/src/i18n/locales/tr/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "Uzantıdaki tüm durumları ve gizli depolamayı sıfırlamak istediğinizden emin misiniz? Bu işlem geri alınamaz.", "delete_config_profile": "Bu yapılandırma profilini silmek istediğinizden emin misiniz?", - "delete_custom_mode": "Bu özel modu silmek istediğinizden emin misiniz?", + "delete_custom_mode_with_rules": "Bu {scope} modunu silmek istediğinizden emin misiniz?\n\nBu işlem, ilişkili kurallar klasörünü de şu konumdan silecektir:\n{rulesFolderPath}", "delete_message": "Neyi silmek istersiniz?", "just_this_message": "Sadece bu mesajı", "this_and_subsequent": "Bu ve sonraki tüm mesajları" @@ -70,6 +70,8 @@ "share_auth_required": "Kimlik doğrulama gerekli. 
Görevleri paylaşmak için lütfen giriş yapın.", "share_not_enabled": "Bu kuruluş için görev paylaşımı etkinleştirilmemiş.", "share_task_not_found": "Görev bulunamadı veya erişim reddedildi.", + "mode_import_failed": "Mod içe aktarılamadı: {{error}}", + "delete_rules_folder_failed": "Kurallar klasörü silinemedi: {{rulesFolderPath}}. Hata: {{error}}", "claudeCode": { "processExited": "Claude Code işlemi {{exitCode}} koduyla çıktı.", "errorOutput": "Hata çıktısı: {{output}}", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "Resim veri URI'si panoya kopyalandı", "image_saved": "Resim {{path}} konumuna kaydedildi", "organization_share_link_copied": "Kuruluş paylaşım bağlantısı panoya kopyalandı!", - "public_share_link_copied": "Herkese açık paylaşım bağlantısı panoya kopyalandı!" + "public_share_link_copied": "Herkese açık paylaşım bağlantısı panoya kopyalandı!", + "mode_exported": "'{{mode}}' modu başarıyla dışa aktarıldı", + "mode_imported": "Mod başarıyla içe aktarıldı" }, "answers": { "yes": "Evet", @@ -141,6 +145,10 @@ "resetFailed": "Özel modları sıfırlama başarısız: {{error}}", "modeNotFound": "Yazma hatası: Mod bulunamadı", "noWorkspaceForProject": "Proje özel modu için çalışma alanı klasörü bulunamadı" + }, + "scope": { + "project": "proje", + "global": "küresel" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "Kuruluşunuzun Roo Code Cloud hesabıyla kimlik doğrulaması yapmalısınız.", "verification_failed": "Kuruluş kimlik doğrulaması doğrulanamıyor." } + }, + "prompts": { + "deleteMode": { + "title": "Özel Modu Sil", + "description": "Bu {{scope}} modunu silmek istediğinizden emin misiniz? Bu, {{rulesFolderPath}} adresindeki ilişkili kurallar klasörünü de silecektir", + "descriptionNoRules": "Bu özel modu silmek istediğinizden emin misiniz?", + "cancel": "İptal", + "confirm": "Sil" + } } } diff --git a/src/i18n/locales/tr/embeddings.json b/src/i18n/locales/tr/embeddings.json index 10ad965f0fa5..8ff94b0dae93 100644 --- a/src/i18n/locales/tr/embeddings.json +++ b/src/i18n/locales/tr/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "Hata gövdesi okunamadı", "requestFailed": "Ollama API isteği {{status}} {{statusText}} durumuyla başarısız oldu: {{errorBody}}", "invalidResponseStructure": "Ollama API'den geçersiz yanıt yapısı: \"embeddings\" dizisi bulunamadı veya dizi değil.", - "embeddingFailed": "Ollama gömülmesi başarısız oldu: {{message}}" + "embeddingFailed": "Ollama gömülmesi başarısız oldu: {{message}}", + "serviceNotRunning": "Ollama hizmeti {{baseUrl}} adresinde çalışmıyor", + "serviceUnavailable": "Ollama hizmeti kullanılamıyor (durum: {{status}})", + "modelNotFound": "Ollama modeli bulunamadı: {{modelId}}", + "modelNotEmbeddingCapable": "Ollama modeli gömme yeteneğine sahip değil: {{modelId}}", + "hostNotFound": "Ollama ana bilgisayarı bulunamadı: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "{{filePath}} dosyası işlenirken bilinmeyen hata", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Qdrant vektör veritabanına bağlanılamadı. Qdrant'ın çalıştığından ve {{qdrantUrl}} adresinde erişilebilir olduğundan emin olun. Hata: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "Kimlik doğrulama başarısız oldu. Lütfen ayarlardan API anahtarınızı kontrol edin.", + "connectionFailed": "Gömücü hizmetine bağlanılamadı. Lütfen bağlantı ayarlarınızı kontrol edin ve hizmetin çalıştığından emin olun.", + "modelNotAvailable": "Belirtilen model mevcut değil. 
Lütfen model yapılandırmanızı kontrol edin.", + "configurationError": "Geçersiz gömücü yapılandırması. Lütfen ayarlarınızı gözden geçirin.", + "serviceUnavailable": "Gömücü hizmeti mevcut değil. Lütfen çalıştığından ve erişilebilir olduğundan emin olun.", + "invalidEndpoint": "Geçersiz API uç noktası. Lütfen URL yapılandırmanızı kontrol edin.", + "invalidEmbedderConfig": "Geçersiz gömücü yapılandırması. Lütfen ayarlarınızı kontrol edin.", + "invalidApiKey": "Geçersiz API anahtarı. Lütfen API anahtarı yapılandırmanızı kontrol edin.", + "invalidBaseUrl": "Geçersiz temel URL. Lütfen URL yapılandırmanızı kontrol edin.", + "invalidModel": "Geçersiz model. Lütfen model yapılandırmanızı kontrol edin.", + "invalidResponse": "Embedder hizmetinden geçersiz yanıt. Lütfen yapılandırmanızı kontrol edin." } } diff --git a/src/i18n/locales/vi/common.json b/src/i18n/locales/vi/common.json index 321a7d71e48e..b930e962c3c0 100644 --- a/src/i18n/locales/vi/common.json +++ b/src/i18n/locales/vi/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "Bạn có chắc chắn muốn đặt lại tất cả trạng thái và lưu trữ bí mật trong tiện ích mở rộng không? Hành động này không thể hoàn tác.", "delete_config_profile": "Bạn có chắc chắn muốn xóa hồ sơ cấu hình này không?", - "delete_custom_mode": "Bạn có chắc chắn muốn xóa chế độ tùy chỉnh này không?", + "delete_custom_mode_with_rules": "Bạn có chắc chắn muốn xóa chế độ {scope} này không?\n\nThao tác này cũng sẽ xóa thư mục quy tắc liên quan tại:\n{rulesFolderPath}", "delete_message": "Bạn muốn xóa gì?", "just_this_message": "Chỉ tin nhắn này", "this_and_subsequent": "Tin nhắn này và tất cả tin nhắn tiếp theo" @@ -70,6 +70,8 @@ "share_auth_required": "Cần xác thực. Vui lòng đăng nhập để chia sẻ nhiệm vụ.", "share_not_enabled": "Chia sẻ nhiệm vụ không được bật cho tổ chức này.", "share_task_not_found": "Không tìm thấy nhiệm vụ hoặc truy cập bị từ chối.", + "mode_import_failed": "Nhập chế độ thất bại: {{error}}", + "delete_rules_folder_failed": "Không thể xóa thư mục quy tắc: {{rulesFolderPath}}. Lỗi: {{error}}", "claudeCode": { "processExited": "Tiến trình Claude Code thoát với mã {{exitCode}}.", "errorOutput": "Đầu ra lỗi: {{output}}", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "URI dữ liệu hình ảnh đã được sao chép vào clipboard", "image_saved": "Hình ảnh đã được lưu vào {{path}}", "organization_share_link_copied": "Liên kết chia sẻ tổ chức đã được sao chép vào clipboard!", - "public_share_link_copied": "Liên kết chia sẻ công khai đã được sao chép vào clipboard!" + "public_share_link_copied": "Liên kết chia sẻ công khai đã được sao chép vào clipboard!", + "mode_exported": "Chế độ '{{mode}}' đã được xuất thành công", + "mode_imported": "Chế độ đã được nhập thành công" }, "answers": { "yes": "Có", @@ -141,6 +145,10 @@ "resetFailed": "Đặt lại chế độ tùy chỉnh thất bại: {{error}}", "modeNotFound": "Lỗi ghi: Không tìm thấy chế độ", "noWorkspaceForProject": "Không tìm thấy thư mục workspace cho chế độ dành riêng cho dự án" + }, + "scope": { + "project": "dự án", + "global": "toàn cầu" } }, "mdm": { @@ -149,5 +157,21 @@ "organization_mismatch": "Bạn phải được xác thực bằng tài khoản Roo Code Cloud của tổ chức.", "verification_failed": "Không thể xác minh xác thực tổ chức." } + }, + "prompts": { + "deleteMode": { + "title": "Xóa chế độ tùy chỉnh", + "description": "Bạn có chắc chắn muốn xóa chế độ {{scope}} này không? 
Thao tác này cũng sẽ xóa thư mục quy tắc liên quan tại {{rulesFolderPath}}", + "translations": { + "title": "Xóa chế độ", + "description": "Bạn có chắc chắn muốn xóa chế độ này không?", + "deleteMessage": "Chỉ chế độ này", + "rulesFolderMessage": "Thư mục quy tắc cũng sẽ bị xóa nếu tồn tại.", + "deleteConfirmation": "Chắc chắn xóa chế độ này?" + }, + "descriptionNoRules": "Bạn có chắc chắn muốn xóa chế độ tùy chỉnh này không?", + "cancel": "Hủy", + "confirm": "Xóa" + } } } diff --git a/src/i18n/locales/vi/embeddings.json b/src/i18n/locales/vi/embeddings.json index a533aaac0711..5988219aedd2 100644 --- a/src/i18n/locales/vi/embeddings.json +++ b/src/i18n/locales/vi/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "Không thể đọc nội dung lỗi", "requestFailed": "Yêu cầu API Ollama thất bại với trạng thái {{status}} {{statusText}}: {{errorBody}}", "invalidResponseStructure": "Cấu trúc phản hồi không hợp lệ từ API Ollama: không tìm thấy mảng \"embeddings\" hoặc không phải là mảng.", - "embeddingFailed": "Nhúng Ollama thất bại: {{message}}" + "embeddingFailed": "Nhúng Ollama thất bại: {{message}}", + "serviceNotRunning": "Dịch vụ Ollama không chạy tại {{baseUrl}}", + "serviceUnavailable": "Dịch vụ Ollama không khả dụng (trạng thái: {{status}})", + "modelNotFound": "Không tìm thấy mô hình Ollama: {{modelId}}", + "modelNotEmbeddingCapable": "Mô hình Ollama không có khả năng nhúng: {{modelId}}", + "hostNotFound": "Không tìm thấy máy chủ Ollama: {{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "Lỗi không xác định khi xử lý tệp {{filePath}}", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "Không thể kết nối với cơ sở dữ liệu vector Qdrant. Vui lòng đảm bảo Qdrant đang chạy và có thể truy cập tại {{qdrantUrl}}. Lỗi: {{errorMessage}}" + }, + "validation": { + "authenticationFailed": "Xác thực không thành công. Vui lòng kiểm tra khóa API của bạn trong cài đặt.", + "connectionFailed": "Không thể kết nối với dịch vụ nhúng. Vui lòng kiểm tra cài đặt kết nối của bạn và đảm bảo dịch vụ đang chạy.", + "modelNotAvailable": "Mô hình được chỉ định không có sẵn. Vui lòng kiểm tra cấu hình mô hình của bạn.", + "configurationError": "Cấu hình nhúng không hợp lệ. Vui lòng xem lại cài đặt của bạn.", + "serviceUnavailable": "Dịch vụ nhúng không có sẵn. Vui lòng đảm bảo nó đang chạy và có thể truy cập được.", + "invalidEndpoint": "Điểm cuối API không hợp lệ. Vui lòng kiểm tra cấu hình URL của bạn.", + "invalidEmbedderConfig": "Cấu hình nhúng không hợp lệ. Vui lòng kiểm tra cài đặt của bạn.", + "invalidApiKey": "Khóa API không hợp lệ. Vui lòng kiểm tra cấu hình khóa API của bạn.", + "invalidBaseUrl": "URL cơ sở không hợp lệ. Vui lòng kiểm tra cấu hình URL của bạn.", + "invalidModel": "Mô hình không hợp lệ. Vui lòng kiểm tra cấu hình mô hình của bạn.", + "invalidResponse": "Phản hồi không hợp lệ từ dịch vụ embedder. Vui lòng kiểm tra cấu hình của bạn."
} } diff --git a/src/i18n/locales/zh-CN/common.json b/src/i18n/locales/zh-CN/common.json index e716bc26a1a6..081f9888c1f1 100644 --- a/src/i18n/locales/zh-CN/common.json +++ b/src/i18n/locales/zh-CN/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "您确定要重置扩展中的所有状态和密钥存储吗?此操作无法撤消。", "delete_config_profile": "您确定要删除此配置文件吗?", - "delete_custom_mode": "您确定要删除此自定义模式吗?", + "delete_custom_mode_with_rules": "您确定要删除此 {scope} 模式吗?\n\n这也将删除位于以下位置的关联规则文件夹:\n{rulesFolderPath}", "delete_message": "您想删除什么?", "just_this_message": "仅此消息", "this_and_subsequent": "此消息及所有后续消息" @@ -75,6 +75,8 @@ "share_auth_required": "需要身份验证。请登录以分享任务。", "share_not_enabled": "此组织未启用任务分享功能。", "share_task_not_found": "未找到任务或访问被拒绝。", + "mode_import_failed": "导入模式失败:{{error}}", + "delete_rules_folder_failed": "删除规则文件夹失败:{{rulesFolderPath}}。错误:{{error}}", "claudeCode": { "processExited": "Claude Code 进程退出,退出码:{{exitCode}}。", "errorOutput": "错误输出:{{output}}", @@ -100,7 +102,9 @@ "image_copied_to_clipboard": "图片数据 URI 已复制到剪贴板", "image_saved": "图片已保存到 {{path}}", "organization_share_link_copied": "组织分享链接已复制到剪贴板!", - "public_share_link_copied": "公开分享链接已复制到剪贴板!" + "public_share_link_copied": "公开分享链接已复制到剪贴板!", + "mode_exported": "模式 '{{mode}}' 已成功导出", + "mode_imported": "模式已成功导入" }, "answers": { "yes": "是", @@ -146,6 +150,10 @@ "resetFailed": "重置自定义模式失败:{{error}}", "modeNotFound": "写入错误:未找到模式", "noWorkspaceForProject": "未找到项目特定模式的工作区文件夹" + }, + "scope": { + "project": "项目", + "global": "全局" } }, "mdm": { @@ -154,5 +162,14 @@ "organization_mismatch": "您必须使用组织的 Roo Code Cloud 账户进行身份验证。", "verification_failed": "无法验证组织身份验证。" } + }, + "prompts": { + "deleteMode": { + "title": "删除自定义模式", + "description": "您确定要删除此 {{scope}} 模式吗?这也将删除位于 {{rulesFolderPath}} 的关联规则文件夹", + "descriptionNoRules": "您确定要删除此自定义模式吗?", + "cancel": "取消", + "confirm": "删除" + } } } diff --git a/src/i18n/locales/zh-CN/embeddings.json b/src/i18n/locales/zh-CN/embeddings.json index dba52828447d..68d41a2f4cb3 100644 --- a/src/i18n/locales/zh-CN/embeddings.json +++ b/src/i18n/locales/zh-CN/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "无法读取错误内容", "requestFailed": "Ollama API 请求失败,状态码 {{status}} {{statusText}}:{{errorBody}}", "invalidResponseStructure": "Ollama API 响应结构无效:未找到 \"embeddings\" 数组或不是数组。", - "embeddingFailed": "Ollama 嵌入失败:{{message}}" + "embeddingFailed": "Ollama 嵌入失败:{{message}}", + "serviceNotRunning": "Ollama 服务未在 {{baseUrl}} 运行", + "serviceUnavailable": "Ollama 服务不可用(状态:{{status}})", + "modelNotFound": "未找到 Ollama 模型:{{modelId}}", + "modelNotEmbeddingCapable": "Ollama 模型不具备嵌入能力:{{modelId}}", + "hostNotFound": "未找到 Ollama 主机:{{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "处理文件 {{filePath}} 时出现未知错误", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "连接 Qdrant 向量数据库失败。请确保 Qdrant 正在运行并可在 {{qdrantUrl}} 访问。错误:{{errorMessage}}" + }, + "validation": { + "authenticationFailed": "身份验证失败。请在设置中检查您的 API 密钥。", + "connectionFailed": "连接嵌入器服务失败。请检查您的连接设置并确保服务正在运行。", + "modelNotAvailable": "指定的模型不可用。请检查您的模型配置。", + "configurationError": "嵌入器配置无效。请查看您的设置。", + "serviceUnavailable": "嵌入器服务不可用。请确保它正在运行且可访问。", + "invalidEndpoint": "API 端点无效。请检查您的 URL 配置。", + "invalidEmbedderConfig": "嵌入器配置无效。请检查您的设置。", + "invalidApiKey": "API 密钥无效。请检查您的 API 密钥配置。", + "invalidBaseUrl": "基础 URL 无效。请检查您的 URL 配置。", + "invalidModel": "模型无效。请检查您的模型配置。", + "invalidResponse": "嵌入服务响应无效。请检查您的配置。" } } diff --git a/src/i18n/locales/zh-TW/common.json b/src/i18n/locales/zh-TW/common.json index 6ecece8835e0..60e806cab829 100644 --- 
a/src/i18n/locales/zh-TW/common.json +++ b/src/i18n/locales/zh-TW/common.json @@ -17,7 +17,7 @@ "confirmation": { "reset_state": "您確定要重設擴充套件中的所有狀態和金鑰儲存嗎?此操作無法復原。", "delete_config_profile": "您確定要刪除此設定檔案嗎?", - "delete_custom_mode": "您確定要刪除此自訂模式嗎?", + "delete_custom_mode_with_rules": "您確定要刪除此 {scope} 模式嗎?\n\n這也將刪除位於以下位置的關聯規則資料夾:\n{rulesFolderPath}", "delete_message": "您想刪除哪些內容?", "just_this_message": "僅這則訊息", "this_and_subsequent": "這則訊息及所有後續訊息" @@ -70,13 +70,15 @@ "share_auth_required": "需要身份驗證。請登入以分享工作。", "share_not_enabled": "此組織未啟用工作分享功能。", "share_task_not_found": "未找到工作或存取被拒絕。", + "delete_rules_folder_failed": "刪除規則資料夾失敗: {{rulesFolderPath}}。錯誤: {{error}}", "claudeCode": { "processExited": "Claude Code 程序退出,退出碼:{{exitCode}}。", "errorOutput": "錯誤輸出:{{output}}", "processExitedWithError": "Claude Code 程序退出,退出碼:{{exitCode}}。錯誤輸出:{{output}}", "stoppedWithReason": "Claude Code 停止,原因:{{reason}}", "apiKeyModelPlanMismatch": "API 金鑰和訂閱方案允許不同的模型。請確保所選模型包含在您的方案中。" - } + }, + "mode_import_failed": "匯入模式失敗:{{error}}" }, "warnings": { "no_terminal_content": "沒有選擇終端機內容", @@ -95,7 +97,9 @@ "image_copied_to_clipboard": "圖片資料 URI 已複製到剪貼簿", "image_saved": "圖片已儲存至 {{path}}", "organization_share_link_copied": "組織分享連結已複製到剪貼簿!", - "public_share_link_copied": "公開分享連結已複製到剪貼簿!" + "public_share_link_copied": "公開分享連結已複製到剪貼簿!", + "mode_exported": "模式 '{{mode}}' 已成功匯出", + "mode_imported": "模式已成功匯入" }, "answers": { "yes": "是", @@ -141,6 +145,10 @@ "resetFailed": "重設自訂模式失敗:{{error}}", "modeNotFound": "寫入錯誤:未找到模式", "noWorkspaceForProject": "未找到專案特定模式的工作區資料夾" + }, + "scope": { + "project": "專案", + "global": "全域" } }, "mdm": { @@ -149,5 +157,14 @@ "organization_mismatch": "您必須使用組織的 Roo Code Cloud 帳戶進行身份驗證。", "verification_failed": "無法驗證組織身份驗證。" } + }, + "prompts": { + "deleteMode": { + "title": "刪除自訂模式", + "description": "您確定要刪除此 {{scope}} 模式嗎?這也將刪除位於 {{rulesFolderPath}} 的關聯規則資料夾", + "descriptionNoRules": "您確定要刪除此自訂模式嗎?", + "cancel": "取消", + "confirm": "刪除" + } } } diff --git a/src/i18n/locales/zh-TW/embeddings.json b/src/i18n/locales/zh-TW/embeddings.json index 71a5a482f234..2b9967a93066 100644 --- a/src/i18n/locales/zh-TW/embeddings.json +++ b/src/i18n/locales/zh-TW/embeddings.json @@ -10,7 +10,12 @@ "couldNotReadErrorBody": "無法讀取錯誤內容", "requestFailed": "Ollama API 請求失敗,狀態碼 {{status}} {{statusText}}:{{errorBody}}", "invalidResponseStructure": "Ollama API 回應結構無效:未找到 \"embeddings\" 陣列或不是陣列。", - "embeddingFailed": "Ollama 內嵌失敗:{{message}}" + "embeddingFailed": "Ollama 內嵌失敗:{{message}}", + "serviceNotRunning": "Ollama 服務未在 {{baseUrl}} 執行", + "serviceUnavailable": "Ollama 服務不可用(狀態:{{status}})", + "modelNotFound": "找不到 Ollama 模型:{{modelId}}", + "modelNotEmbeddingCapable": "Ollama 模型不具備內嵌能力:{{modelId}}", + "hostNotFound": "找不到 Ollama 主機:{{baseUrl}}" }, "scanner": { "unknownErrorProcessingFile": "處理檔案 {{filePath}} 時發生未知錯誤", @@ -19,5 +24,18 @@ }, "vectorStore": { "qdrantConnectionFailed": "連接 Qdrant 向量資料庫失敗。請確保 Qdrant 正在執行並可在 {{qdrantUrl}} 存取。錯誤:{{errorMessage}}" + }, + "validation": { + "authenticationFailed": "驗證失敗。請在設定中檢查您的 API 金鑰。", + "connectionFailed": "連線至內嵌服務失敗。請檢查您的連線設定並確保服務正在執行。", + "modelNotAvailable": "指定的模型不可用。請檢查您的模型組態。", + "configurationError": "無效的內嵌程式組態。請檢閱您的設定。", + "serviceUnavailable": "內嵌服務不可用。請確保它正在執行且可存取。", + "invalidEndpoint": "無效的 API 端點。請檢查您的 URL 組態。", + "invalidEmbedderConfig": "無效的內嵌程式組態。請檢查您的設定。", + "invalidApiKey": "無效的 API 金鑰。請檢查您的 API 金鑰組態。", + "invalidBaseUrl": "無效的基礎 URL。請檢查您的 URL 組態。", + "invalidModel": "無效的模型。請檢查您的模型組態。", + "invalidResponse": "內嵌服務回應無效。請檢查您的組態。" } } diff --git 
a/src/integrations/editor/DiffViewProvider.ts b/src/integrations/editor/DiffViewProvider.ts index b38c55c3e4c4..225e076297e3 100644 --- a/src/integrations/editor/DiffViewProvider.ts +++ b/src/integrations/editor/DiffViewProvider.ts @@ -122,7 +122,7 @@ export class DiffViewProvider { } // Place cursor at the beginning of the diff editor to keep it out of - // the way of the stream animation. + // the way of the stream animation, but do this without stealing focus const beginningOfDocument = new vscode.Position(0, 0) diffEditor.selection = new vscode.Selection(beginningOfDocument, beginningOfDocument) @@ -137,7 +137,7 @@ export class DiffViewProvider { // Update decorations. this.activeLineController.setActiveLine(endLine) this.fadedOverlayController.updateOverlayAfterLine(endLine, document.lineCount) - // Scroll to the current line. + // Scroll to the current line without stealing focus. const ranges = this.activeDiffEditor?.visibleRanges if (ranges && ranges.length > 0 && ranges[0].start.line < endLine && ranges[0].end.line > endLine) { this.scrollEditorToLine(endLine) @@ -504,7 +504,7 @@ export class DiffViewProvider { // Pre-open the file as a text document to ensure it doesn't open in preview mode // This fixes issues with files that have custom editor associations (like markdown preview) vscode.window - .showTextDocument(uri, { preview: false, viewColumn: vscode.ViewColumn.Active }) + .showTextDocument(uri, { preview: false, viewColumn: vscode.ViewColumn.Active, preserveFocus: true }) .then(() => { // Execute the diff command after ensuring the file is open as text return vscode.commands.executeCommand( @@ -552,7 +552,7 @@ export class DiffViewProvider { for (const part of diffs) { if (part.added || part.removed) { - // Found the first diff, scroll to it. + // Found the first diff, scroll to it without stealing focus. 
this.activeDiffEditor.revealRange( new vscode.Range(lineCount, 0, lineCount, 0), vscode.TextEditorRevealType.InCenter, diff --git a/src/integrations/editor/__tests__/DiffViewProvider.spec.ts b/src/integrations/editor/__tests__/DiffViewProvider.spec.ts index 44b3aaba2aec..ad1950345bdc 100644 --- a/src/integrations/editor/__tests__/DiffViewProvider.spec.ts +++ b/src/integrations/editor/__tests__/DiffViewProvider.spec.ts @@ -176,7 +176,7 @@ describe("DiffViewProvider", () => { // Mock showTextDocument to track when it's called vi.mocked(vscode.window.showTextDocument).mockImplementation(async (uri, options) => { callOrder.push("showTextDocument") - expect(options).toEqual({ preview: false, viewColumn: vscode.ViewColumn.Active }) + expect(options).toEqual({ preview: false, viewColumn: vscode.ViewColumn.Active, preserveFocus: true }) return mockEditor as any }) @@ -208,10 +208,10 @@ describe("DiffViewProvider", () => { // Verify that showTextDocument was called before executeCommand expect(callOrder).toEqual(["showTextDocument", "executeCommand"]) - // Verify that showTextDocument was called with preview: false + // Verify that showTextDocument was called with preview: false and preserveFocus: true expect(vscode.window.showTextDocument).toHaveBeenCalledWith( expect.objectContaining({ fsPath: `${mockCwd}/test.md` }), - { preview: false, viewColumn: vscode.ViewColumn.Active }, + { preview: false, viewColumn: vscode.ViewColumn.Active, preserveFocus: true }, ) // Verify that the diff command was executed diff --git a/src/package.json b/src/package.json index 4232bf2be772..30168c951c4b 100644 --- a/src/package.json +++ b/src/package.json @@ -3,7 +3,7 @@ "displayName": "%extension.displayName%", "description": "%extension.description%", "publisher": "RooVeterinaryInc", - "version": "3.22.5", + "version": "3.22.6", "icon": "assets/icons/icon.png", "galleryBanner": { "color": "#617A91", diff --git a/src/services/code-index/__tests__/config-manager.spec.ts b/src/services/code-index/__tests__/config-manager.spec.ts index f5a759c158ce..994f228e5204 100644 --- a/src/services/code-index/__tests__/config-manager.spec.ts +++ b/src/services/code-index/__tests__/config-manager.spec.ts @@ -9,11 +9,26 @@ describe("CodeIndexConfigManager", () => { mockContextProxy = { getGlobalState: vitest.fn(), getSecret: vitest.fn().mockReturnValue(undefined), + refreshSecrets: vitest.fn().mockResolvedValue(undefined), } configManager = new CodeIndexConfigManager(mockContextProxy) }) + // Helper function to setup secret mocking + const setupSecretMocks = (secrets: Record<string, string>) => { + // Mock sync secret access + mockContextProxy.getSecret.mockImplementation((key: string) => { + return secrets[key] || undefined + }) + + // Mock refreshSecrets to update the getSecret mock with new values + mockContextProxy.refreshSecrets.mockImplementation(async () => { + // In real implementation, this would refresh from VSCode storage + // For tests, we just keep the existing mock behavior + }) + } + describe("constructor", () => { it("should initialize with ContextProxy", () => { expect(configManager).toBeDefined() @@ -52,10 +67,11 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderModelId: "text-embedding-3-large", } mockContextProxy.getGlobalState.mockReturnValue(mockGlobalState) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexOpenAiKey") return "test-openai-key" - if (key === "codeIndexQdrantApiKey") return "test-qdrant-key" - return undefined + + // Mock both sync and async secret
access + setupSecretMocks({ + codeIndexOpenAiKey: "test-openai-key", + codeIndexQdrantApiKey: "test-qdrant-key", }) const result = await configManager.loadConfiguration() @@ -80,16 +96,16 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai-compatible", codebaseIndexEmbedderBaseUrl: "", codebaseIndexEmbedderModelId: "text-embedding-3-large", + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", } mockContextProxy.getGlobalState.mockImplementation((key: string) => { if (key === "codebaseIndexConfig") return mockGlobalState - if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" return undefined }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexQdrantApiKey") return "test-qdrant-key" - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "test-openai-compatible-key" - return undefined + + setupSecretMocks({ + codeIndexQdrantApiKey: "test-qdrant-key", + codebaseIndexOpenAiCompatibleApiKey: "test-openai-compatible-key", }) const result = await configManager.loadConfiguration() @@ -118,17 +134,16 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai-compatible", codebaseIndexEmbedderBaseUrl: "", codebaseIndexEmbedderModelId: "custom-model", + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 1024, } mockContextProxy.getGlobalState.mockImplementation((key: string) => { if (key === "codebaseIndexConfig") return mockGlobalState - if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" - if (key === "codebaseIndexOpenAiCompatibleModelDimension") return 1024 return undefined }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexQdrantApiKey") return "test-qdrant-key" - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "test-openai-compatible-key" - return undefined + setupSecretMocks({ + codeIndexQdrantApiKey: "test-qdrant-key", + codebaseIndexOpenAiCompatibleApiKey: "test-openai-compatible-key", }) const result = await configManager.loadConfiguration() @@ -158,17 +173,16 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai-compatible", codebaseIndexEmbedderBaseUrl: "", codebaseIndexEmbedderModelId: "custom-model", + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", + // modelDimension is not set } mockContextProxy.getGlobalState.mockImplementation((key: string) => { if (key === "codebaseIndexConfig") return mockGlobalState - if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" - if (key === "codebaseIndexOpenAiCompatibleModelDimension") return undefined return undefined }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexQdrantApiKey") return "test-qdrant-key" - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "test-openai-compatible-key" - return undefined + setupSecretMocks({ + codeIndexQdrantApiKey: "test-qdrant-key", + codebaseIndexOpenAiCompatibleApiKey: "test-openai-compatible-key", }) const result = await configManager.loadConfiguration() @@ -183,6 +197,7 @@ describe("CodeIndexConfigManager", () => { openAiCompatibleOptions: { baseUrl: "https://api.example.com/v1", apiKey: "test-openai-compatible-key", + // modelDimension is undefined when not set }, qdrantUrl: "http://qdrant.local", qdrantApiKey: "test-qdrant-key", @@ -197,17 +212,16 @@ 
describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai-compatible", codebaseIndexEmbedderBaseUrl: "", codebaseIndexEmbedderModelId: "custom-model", + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: "invalid-dimension", // Invalid type } mockContextProxy.getGlobalState.mockImplementation((key: string) => { if (key === "codebaseIndexConfig") return mockGlobalState - if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" - if (key === "codebaseIndexOpenAiCompatibleModelDimension") return "invalid-dimension" return undefined }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexQdrantApiKey") return "test-qdrant-key" - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "test-openai-compatible-key" - return undefined + setupSecretMocks({ + codeIndexQdrantApiKey: "test-qdrant-key", + codebaseIndexOpenAiCompatibleApiKey: "test-openai-compatible-key", }) const result = await configManager.loadConfiguration() @@ -238,9 +252,8 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderModelId: "text-embedding-3-large", }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexOpenAiKey") return "test-openai-key" - return undefined + setupSecretMocks({ + codeIndexOpenAiKey: "test-openai-key", }) await configManager.loadConfiguration() @@ -266,7 +279,10 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderModelId: "text-embedding-3-small", }) - mockContextProxy.getSecret.mockReturnValue("test-key") + setupSecretMocks({ + codeIndexOpenAiKey: "test-key", + codeIndexQdrantApiKey: "test-key", + }) await configManager.loadConfiguration() @@ -290,9 +306,8 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderModelId: "text-embedding-3-small", }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexOpenAiKey") return "test-key" - return undefined + setupSecretMocks({ + codeIndexOpenAiKey: "test-key", }) await configManager.loadConfiguration() @@ -324,7 +339,10 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderModelId: "text-embedding-3-small", }) - mockContextProxy.getSecret.mockReturnValue("test-key") + setupSecretMocks({ + codeIndexOpenAiKey: "test-key", + codeIndexQdrantApiKey: "test-key", + }) const result = await configManager.loadConfiguration() expect(result.requiresRestart).toBe(true) @@ -339,14 +357,17 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderModelId: "text-embedding-3-small", }) - mockContextProxy.getSecret.mockReturnValue("old-key") + setupSecretMocks({ + codeIndexOpenAiKey: "old-key", + codeIndexQdrantApiKey: "old-key", + }) await configManager.loadConfiguration() // Change API key - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexOpenAiKey") return "new-key" - return undefined + setupSecretMocks({ + codeIndexOpenAiKey: "new-key", + codeIndexQdrantApiKey: "old-key", }) const result = await configManager.loadConfiguration() @@ -361,7 +382,10 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderModelId: "text-embedding-3-small", }) - 
mockContextProxy.getSecret.mockReturnValue("test-key") + setupSecretMocks({ + codeIndexOpenAiKey: "test-key", + codeIndexQdrantApiKey: "test-key", + }) await configManager.loadConfiguration() @@ -385,7 +409,10 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderModelId: "text-embedding-3-small", }) - mockContextProxy.getSecret.mockReturnValue("test-key") + setupSecretMocks({ + codeIndexOpenAiKey: "test-key", + codeIndexQdrantApiKey: "test-key", + }) await configManager.loadConfiguration() @@ -435,14 +462,14 @@ describe("CodeIndexConfigManager", () => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai-compatible", codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexOpenAiCompatibleBaseUrl: "https://old-api.example.com/v1", } } - if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://old-api.example.com/v1" return undefined }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "old-api-key" - return undefined + setupSecretMocks({ + codebaseIndexOpenAiCompatibleApiKey: "old-api-key", + codeIndexQdrantApiKey: "test-key", }) await configManager.loadConfiguration() @@ -455,9 +482,9 @@ describe("CodeIndexConfigManager", () => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai-compatible", codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexOpenAiCompatibleBaseUrl: "https://new-api.example.com/v1", } } - if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://new-api.example.com/v1" return undefined }) @@ -474,22 +501,22 @@ describe("CodeIndexConfigManager", () => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai-compatible", codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", } } - if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" return undefined }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "old-api-key" - return undefined + setupSecretMocks({ + codebaseIndexOpenAiCompatibleApiKey: "old-api-key", + codeIndexQdrantApiKey: "test-key", }) await configManager.loadConfiguration() // Change OpenAI Compatible API key - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "new-api-key" - return undefined + setupSecretMocks({ + codebaseIndexOpenAiCompatibleApiKey: "new-api-key", + codeIndexQdrantApiKey: "test-key", }) const result = await configManager.loadConfiguration() @@ -505,15 +532,15 @@ describe("CodeIndexConfigManager", () => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai-compatible", codebaseIndexEmbedderModelId: "custom-model", + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 1024, } } - if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" - if (key === "codebaseIndexOpenAiCompatibleModelDimension") return 1024 return undefined }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "test-api-key" - return undefined + setupSecretMocks({ + codebaseIndexOpenAiCompatibleApiKey: "test-api-key", + codeIndexQdrantApiKey: 
"test-key", }) await configManager.loadConfiguration() @@ -526,10 +553,10 @@ describe("CodeIndexConfigManager", () => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai-compatible", codebaseIndexEmbedderModelId: "custom-model", + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 2048, } } - if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" - if (key === "codebaseIndexOpenAiCompatibleModelDimension") return 2048 return undefined }) @@ -552,9 +579,9 @@ describe("CodeIndexConfigManager", () => { if (key === "codebaseIndexOpenAiCompatibleModelDimension") return 1024 return undefined }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "test-api-key" - return undefined + setupSecretMocks({ + codebaseIndexOpenAiCompatibleApiKey: "test-api-key", + codeIndexQdrantApiKey: "test-key", }) await configManager.loadConfiguration() @@ -588,15 +615,15 @@ describe("CodeIndexConfigManager", () => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai-compatible", codebaseIndexEmbedderModelId: "custom-model", + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", + // modelDimension not set initially } } - if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" - if (key === "codebaseIndexOpenAiCompatibleModelDimension") return undefined return undefined }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "test-api-key" - return undefined + setupSecretMocks({ + codebaseIndexOpenAiCompatibleApiKey: "test-api-key", + codeIndexQdrantApiKey: "test-key", }) await configManager.loadConfiguration() @@ -609,10 +636,10 @@ describe("CodeIndexConfigManager", () => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai-compatible", codebaseIndexEmbedderModelId: "custom-model", + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 1024, } } - if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" - if (key === "codebaseIndexOpenAiCompatibleModelDimension") return 1024 return undefined }) @@ -629,15 +656,15 @@ describe("CodeIndexConfigManager", () => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai-compatible", codebaseIndexEmbedderModelId: "custom-model", + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", + codebaseIndexOpenAiCompatibleModelDimension: 1024, } } - if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" - if (key === "codebaseIndexOpenAiCompatibleModelDimension") return 1024 return undefined }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "test-api-key" - return undefined + setupSecretMocks({ + codebaseIndexOpenAiCompatibleApiKey: "test-api-key", + codeIndexQdrantApiKey: "test-key", }) await configManager.loadConfiguration() @@ -650,10 +677,10 @@ describe("CodeIndexConfigManager", () => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai-compatible", codebaseIndexEmbedderModelId: "custom-model", + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", + // modelDimension removed } } - if (key 
=== "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" - if (key === "codebaseIndexOpenAiCompatibleModelDimension") return undefined return undefined }) @@ -668,9 +695,8 @@ describe("CodeIndexConfigManager", () => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai", }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexOpenAiKey") return "test-key" - return undefined + setupSecretMocks({ + codeIndexOpenAiKey: "test-key", }) await configManager.loadConfiguration() @@ -694,7 +720,7 @@ describe("CodeIndexConfigManager", () => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai", }) - mockContextProxy.getSecret.mockReturnValue(undefined) + setupSecretMocks({}) await configManager.loadConfiguration() @@ -709,6 +735,208 @@ describe("CodeIndexConfigManager", () => { const result = await configManager.loadConfiguration() expect(result.requiresRestart).toBe(false) }) + + describe("currentSearchMinScore priority system", () => { + it("should return user-configured score when set", async () => { + mockContextProxy.getGlobalState.mockReturnValue({ + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "openai", + codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexSearchMinScore: 0.8, // User setting + }) + mockContextProxy.getSecret.mockImplementation((key: string) => { + if (key === "codeIndexOpenAiKey") return "test-key" + return undefined + }) + + await configManager.loadConfiguration() + expect(configManager.currentSearchMinScore).toBe(0.8) + }) + + it("should fall back to model-specific threshold when user setting is undefined", async () => { + mockContextProxy.getGlobalState.mockReturnValue({ + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "ollama", + codebaseIndexEmbedderModelId: "nomic-embed-code", + // No codebaseIndexSearchMinScore - user hasn't configured it + }) + + await configManager.loadConfiguration() + // nomic-embed-code has a specific threshold of 0.15 + expect(configManager.currentSearchMinScore).toBe(0.15) + }) + + it("should fall back to default DEFAULT_SEARCH_MIN_SCORE when neither user setting nor model threshold exists", async () => { + mockContextProxy.getGlobalState.mockReturnValue({ + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "openai", + codebaseIndexEmbedderModelId: "unknown-model", // Model not in profiles + // No codebaseIndexSearchMinScore + }) + mockContextProxy.getSecret.mockImplementation((key: string) => { + if (key === "codeIndexOpenAiKey") return "test-key" + return undefined + }) + + await configManager.loadConfiguration() + // Should fall back to default DEFAULT_SEARCH_MIN_SCORE (0.4) + expect(configManager.currentSearchMinScore).toBe(0.4) + }) + + it("should respect user setting of 0 (edge case)", async () => { + mockContextProxy.getGlobalState.mockReturnValue({ + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "ollama", + codebaseIndexEmbedderModelId: "nomic-embed-code", + codebaseIndexSearchMinScore: 0, // User explicitly sets 0 + }) + + await configManager.loadConfiguration() + // Should return 0, not fall back to model threshold (0.15) + expect(configManager.currentSearchMinScore).toBe(0) + }) + + it("should use model-specific threshold with 
openai-compatible provider", async () => { + mockContextProxy.getGlobalState.mockImplementation((key: string) => { + if (key === "codebaseIndexConfig") { + return { + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "openai-compatible", + codebaseIndexEmbedderModelId: "nomic-embed-code", + // No codebaseIndexSearchMinScore + } + } + if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" + return undefined + }) + mockContextProxy.getSecret.mockImplementation((key: string) => { + if (key === "codebaseIndexOpenAiCompatibleApiKey") return "test-api-key" + return undefined + }) + + await configManager.loadConfiguration() + // openai-compatible provider also has nomic-embed-code with 0.15 threshold + expect(configManager.currentSearchMinScore).toBe(0.15) + }) + + it("should use default model ID when modelId is not specified", async () => { + mockContextProxy.getGlobalState.mockReturnValue({ + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "openai", + // No modelId specified + // No codebaseIndexSearchMinScore + }) + mockContextProxy.getSecret.mockImplementation((key: string) => { + if (key === "codeIndexOpenAiKey") return "test-key" + return undefined + }) + + await configManager.loadConfiguration() + // Should use default model (text-embedding-3-small) threshold (0.4) + expect(configManager.currentSearchMinScore).toBe(0.4) + }) + + it("should handle priority correctly: user > model > default", async () => { + // Test 1: User setting takes precedence + mockContextProxy.getGlobalState.mockReturnValue({ + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "ollama", + codebaseIndexEmbedderModelId: "nomic-embed-code", // Has 0.15 threshold + codebaseIndexSearchMinScore: 0.9, // User overrides + }) + + await configManager.loadConfiguration() + expect(configManager.currentSearchMinScore).toBe(0.9) // User setting wins + + // Test 2: Model threshold when no user setting + mockContextProxy.getGlobalState.mockReturnValue({ + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "ollama", + codebaseIndexEmbedderModelId: "nomic-embed-code", + // No user setting + }) + + const newManager = new CodeIndexConfigManager(mockContextProxy) + await newManager.loadConfiguration() + expect(newManager.currentSearchMinScore).toBe(0.15) // Model threshold + + // Test 3: Default when neither exists + mockContextProxy.getGlobalState.mockReturnValue({ + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "openai", + codebaseIndexEmbedderModelId: "custom-unknown-model", + // No user setting, unknown model + }) + + const anotherManager = new CodeIndexConfigManager(mockContextProxy) + await anotherManager.loadConfiguration() + expect(anotherManager.currentSearchMinScore).toBe(0.4) // Default + }) + }) + + describe("currentSearchMaxResults", () => { + it("should return user setting when provided, otherwise default", async () => { + // Test 1: User setting takes precedence + mockContextProxy.getGlobalState.mockReturnValue({ + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "openai", + codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexSearchMaxResults: 150, // User setting + }) + + await configManager.loadConfiguration() + 
expect(configManager.currentSearchMaxResults).toBe(150) // User setting + + // Test 2: Default when no user setting + mockContextProxy.getGlobalState.mockReturnValue({ + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "openai", + codebaseIndexEmbedderModelId: "text-embedding-3-small", + // No user setting + }) + + const newManager = new CodeIndexConfigManager(mockContextProxy) + await newManager.loadConfiguration() + expect(newManager.currentSearchMaxResults).toBe(50) // Default (DEFAULT_MAX_SEARCH_RESULTS) + + // Test 3: Boundary values + mockContextProxy.getGlobalState.mockReturnValue({ + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "openai", + codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexSearchMaxResults: 10, // Minimum allowed + }) + + const minManager = new CodeIndexConfigManager(mockContextProxy) + await minManager.loadConfiguration() + expect(minManager.currentSearchMaxResults).toBe(10) + + // Test 4: Maximum value + mockContextProxy.getGlobalState.mockReturnValue({ + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "openai", + codebaseIndexEmbedderModelId: "text-embedding-3-small", + codebaseIndexSearchMaxResults: 200, // Maximum allowed + }) + + const maxManager = new CodeIndexConfigManager(mockContextProxy) + await maxManager.loadConfiguration() + expect(maxManager.currentSearchMaxResults).toBe(200) + }) + }) }) describe("empty/missing API key handling", () => { @@ -720,7 +948,7 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderModelId: "text-embedding-3-small", }) - mockContextProxy.getSecret.mockReturnValue(undefined) + setupSecretMocks({}) await configManager.loadConfiguration() @@ -745,12 +973,15 @@ describe("CodeIndexConfigManager", () => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai", }) - mockContextProxy.getSecret.mockReturnValue(undefined) + setupSecretMocks({}) await configManager.loadConfiguration() // Change to empty string API keys (simulating what happens when secrets return "") - mockContextProxy.getSecret.mockReturnValue("") + setupSecretMocks({ + codeIndexOpenAiKey: "", + codeIndexQdrantApiKey: "", + }) const result = await configManager.loadConfiguration() // Should NOT require restart since undefined and "" are both "empty" @@ -764,14 +995,17 @@ describe("CodeIndexConfigManager", () => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai", }) - mockContextProxy.getSecret.mockReturnValue("") + setupSecretMocks({ + codeIndexOpenAiKey: "", + codeIndexQdrantApiKey: "", + }) await configManager.loadConfiguration() // Add actual API key - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexOpenAiKey") return "actual-api-key" - return "" + setupSecretMocks({ + codeIndexOpenAiKey: "actual-api-key", + codeIndexQdrantApiKey: "", }) const result = await configManager.loadConfiguration() @@ -789,7 +1023,10 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderModelId: "text-embedding-3-small", }) - mockContextProxy.getSecret.mockReturnValue("test-key") + setupSecretMocks({ + codeIndexOpenAiKey: "test-key", + codeIndexQdrantApiKey: "test-key", + }) await configManager.loadConfiguration() @@ -818,9 +1055,9 @@ describe("CodeIndexConfigManager", 
() => { codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai", }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexOpenAiKey") return "test-key" - return undefined + setupSecretMocks({ + codeIndexOpenAiKey: "test-key", + codeIndexQdrantApiKey: "test-key", }) await configManager.loadConfiguration() @@ -846,14 +1083,14 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEnabled: true, codebaseIndexQdrantUrl: "http://qdrant.local", codebaseIndexEmbedderProvider: "openai-compatible", + codebaseIndexOpenAiCompatibleBaseUrl: "https://api.example.com/v1", } } - if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" return undefined }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "test-api-key" - return undefined + setupSecretMocks({ + codebaseIndexOpenAiCompatibleApiKey: "test-api-key", + codeIndexQdrantApiKey: "test-key", }) await configManager.loadConfiguration() @@ -872,9 +1109,8 @@ describe("CodeIndexConfigManager", () => { if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "" return undefined }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "test-api-key" - return undefined + setupSecretMocks({ + codebaseIndexOpenAiCompatibleApiKey: "test-api-key", }) await configManager.loadConfiguration() @@ -893,8 +1129,47 @@ describe("CodeIndexConfigManager", () => { if (key === "codebaseIndexOpenAiCompatibleBaseUrl") return "https://api.example.com/v1" return undefined }) + setupSecretMocks({ + codebaseIndexOpenAiCompatibleApiKey: "", + }) + + await configManager.loadConfiguration() + expect(configManager.isFeatureConfigured).toBe(false) + }) + + it("should validate Gemini configuration correctly", async () => { + mockContextProxy.getGlobalState.mockImplementation((key: string) => { + if (key === "codebaseIndexConfig") { + return { + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "gemini", + } + } + return undefined + }) mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codebaseIndexOpenAiCompatibleApiKey") return "" + if (key === "codebaseIndexGeminiApiKey") return "test-gemini-key" + return undefined + }) + + await configManager.loadConfiguration() + expect(configManager.isFeatureConfigured).toBe(true) + }) + + it("should return false when Gemini API key is missing", async () => { + mockContextProxy.getGlobalState.mockImplementation((key: string) => { + if (key === "codebaseIndexConfig") { + return { + codebaseIndexEnabled: true, + codebaseIndexQdrantUrl: "http://qdrant.local", + codebaseIndexEmbedderProvider: "gemini", + } + } + return undefined + }) + mockContextProxy.getSecret.mockImplementation((key: string) => { + if (key === "codebaseIndexGeminiApiKey") return "" return undefined }) @@ -921,10 +1196,9 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderModelId: "text-embedding-3-large", }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexOpenAiKey") return "test-openai-key" - if (key === "codeIndexQdrantApiKey") return "test-qdrant-key" - return undefined + setupSecretMocks({ + codeIndexOpenAiKey: "test-openai-key", + codeIndexQdrantApiKey: "test-qdrant-key", }) await configManager.loadConfiguration() @@ -939,9 +1213,12 @@ 
describe("CodeIndexConfigManager", () => { modelId: "text-embedding-3-large", openAiOptions: { openAiNativeApiKey: "test-openai-key" }, ollamaOptions: { ollamaBaseUrl: undefined }, + geminiOptions: undefined, + openAiCompatibleOptions: undefined, qdrantUrl: "http://qdrant.local", qdrantApiKey: "test-qdrant-key", searchMinScore: 0.4, + searchMaxResults: 50, }) }) @@ -974,9 +1251,8 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderModelId: "text-embedding-3-small", }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexOpenAiKey") return "test-key" - return undefined + setupSecretMocks({ + codeIndexOpenAiKey: "test-key", }) // First load - this will initialize the config manager with current state @@ -995,9 +1271,8 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderModelId: "text-embedding-3-small", }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexOpenAiKey") return "test-key" - return undefined + setupSecretMocks({ + codeIndexOpenAiKey: "test-key", }) // Create a new config manager (simulating what happens in CodeIndexManager.initialize) @@ -1009,8 +1284,8 @@ describe("CodeIndexConfigManager", () => { }) it("should not require restart when settings are saved but code indexing config unchanged", async () => { - // This test simulates the original issue: handleExternalSettingsChange() being called - // when other settings are saved, but code indexing settings haven't changed + // This test simulates the scenario where handleSettingsChange() is called + // but code indexing settings haven't actually changed // Setup initial state - enabled and configured mockContextProxy.getGlobalState.mockReturnValue({ @@ -1019,9 +1294,8 @@ describe("CodeIndexConfigManager", () => { codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderModelId: "text-embedding-3-small", }) - mockContextProxy.getSecret.mockImplementation((key: string) => { - if (key === "codeIndexOpenAiKey") return "test-key" - return undefined + setupSecretMocks({ + codeIndexOpenAiKey: "test-key", }) // First load to establish baseline diff --git a/src/services/code-index/__tests__/manager.spec.ts b/src/services/code-index/__tests__/manager.spec.ts index 2aaeb1d3747a..ae473e3870e9 100644 --- a/src/services/code-index/__tests__/manager.spec.ts +++ b/src/services/code-index/__tests__/manager.spec.ts @@ -1,19 +1,38 @@ import { CodeIndexManager } from "../manager" +import { CodeIndexServiceFactory } from "../service-factory" +import type { MockedClass } from "vitest" + +// Mock vscode module +vi.mock("vscode", () => ({ + workspace: { + workspaceFolders: [ + { + uri: { fsPath: "/test/workspace" }, + name: "test", + index: 0, + }, + ], + }, +})) // Mock only the essential dependencies -vitest.mock("../../../utils/path", () => ({ - getWorkspacePath: vitest.fn(() => "/test/workspace"), +vi.mock("../../../utils/path", () => ({ + getWorkspacePath: vi.fn(() => "/test/workspace"), })) -vitest.mock("../state-manager", () => ({ - CodeIndexStateManager: vitest.fn().mockImplementation(() => ({ - onProgressUpdate: vitest.fn(), - getCurrentStatus: vitest.fn(), - dispose: vitest.fn(), +vi.mock("../state-manager", () => ({ + CodeIndexStateManager: vi.fn().mockImplementation(() => ({ + onProgressUpdate: vi.fn(), + getCurrentStatus: vi.fn(), + dispose: vi.fn(), + setSystemState: vi.fn(), })), })) -describe("CodeIndexManager - handleExternalSettingsChange 
regression", () => { +vi.mock("../service-factory") +const MockedCodeIndexServiceFactory = CodeIndexServiceFactory as MockedClass + +describe("CodeIndexManager - handleSettingsChange regression", () => { let mockContext: any let manager: CodeIndexManager @@ -27,7 +46,7 @@ describe("CodeIndexManager - handleExternalSettingsChange regression", () => { globalState: {} as any, extensionUri: {} as any, extensionPath: "/test/extension", - asAbsolutePath: vitest.fn(), + asAbsolutePath: vi.fn(), storageUri: {} as any, storagePath: "/test/storage", globalStorageUri: {} as any, @@ -48,9 +67,9 @@ describe("CodeIndexManager - handleExternalSettingsChange regression", () => { CodeIndexManager.disposeAll() }) - describe("handleExternalSettingsChange", () => { + describe("handleSettingsChange", () => { it("should not throw when called on uninitialized manager (regression test)", async () => { - // This is the core regression test: handleExternalSettingsChange() should not throw + // This is the core regression test: handleSettingsChange() should not throw // when called before the manager is initialized (during first-time configuration) // Ensure manager is not initialized @@ -58,50 +77,149 @@ describe("CodeIndexManager - handleExternalSettingsChange regression", () => { // Mock a minimal config manager that simulates first-time configuration const mockConfigManager = { - loadConfiguration: vitest.fn().mockResolvedValue({ requiresRestart: true }), + loadConfiguration: vi.fn().mockResolvedValue({ requiresRestart: true }), + isFeatureConfigured: true, + isFeatureEnabled: true, + getConfig: vi.fn().mockReturnValue({ + isEnabled: true, + isConfigured: true, + embedderProvider: "openai", + modelId: "text-embedding-3-small", + openAiOptions: { openAiNativeApiKey: "test-key" }, + qdrantUrl: "http://localhost:6333", + qdrantApiKey: "test-key", + searchMinScore: 0.4, + }), } ;(manager as any)._configManager = mockConfigManager + // Mock cache manager + const mockCacheManager = { + initialize: vi.fn(), + clearCacheFile: vi.fn(), + } + ;(manager as any)._cacheManager = mockCacheManager + // Mock the feature state to simulate valid configuration that would normally trigger restart - vitest.spyOn(manager, "isFeatureEnabled", "get").mockReturnValue(true) - vitest.spyOn(manager, "isFeatureConfigured", "get").mockReturnValue(true) + vi.spyOn(manager, "isFeatureEnabled", "get").mockReturnValue(true) + vi.spyOn(manager, "isFeatureConfigured", "get").mockReturnValue(true) + + // Mock service factory to handle _recreateServices call + const mockServiceFactoryInstance = { + configManager: mockConfigManager, + workspacePath: "/test/workspace", + cacheManager: mockCacheManager, + createEmbedder: vi.fn().mockReturnValue({ embedderInfo: { name: "openai" } }), + createVectorStore: vi.fn().mockReturnValue({}), + createDirectoryScanner: vi.fn().mockReturnValue({}), + createFileWatcher: vi.fn().mockReturnValue({ + onDidStartBatchProcessing: vi.fn(), + onBatchProgressUpdate: vi.fn(), + watch: vi.fn(), + stopWatcher: vi.fn(), + dispose: vi.fn(), + }), + createServices: vi.fn().mockReturnValue({ + embedder: { embedderInfo: { name: "openai" } }, + vectorStore: {}, + scanner: {}, + fileWatcher: { + onDidStartBatchProcessing: vi.fn(), + onBatchProgressUpdate: vi.fn(), + watch: vi.fn(), + stopWatcher: vi.fn(), + dispose: vi.fn(), + }, + }), + validateEmbedder: vi.fn().mockResolvedValue({ valid: true }), + } + MockedCodeIndexServiceFactory.mockImplementation(() => mockServiceFactoryInstance as any) // The key test: this should NOT throw 
"CodeIndexManager not initialized" error - await expect(manager.handleExternalSettingsChange()).resolves.not.toThrow() + await expect(manager.handleSettingsChange()).resolves.not.toThrow() // Verify that loadConfiguration was called (the method should still work) expect(mockConfigManager.loadConfiguration).toHaveBeenCalled() }) it("should work normally when manager is initialized", async () => { - // Mock a minimal config manager + // Mock a complete config manager with all required properties const mockConfigManager = { - loadConfiguration: vitest.fn().mockResolvedValue({ requiresRestart: true }), + loadConfiguration: vi.fn().mockResolvedValue({ requiresRestart: true }), + isFeatureConfigured: true, + isFeatureEnabled: true, + getConfig: vi.fn().mockReturnValue({ + isEnabled: true, + isConfigured: true, + embedderProvider: "openai", + modelId: "text-embedding-3-small", + openAiOptions: { openAiNativeApiKey: "test-key" }, + qdrantUrl: "http://localhost:6333", + qdrantApiKey: "test-key", + searchMinScore: 0.4, + }), } ;(manager as any)._configManager = mockConfigManager + // Mock cache manager + const mockCacheManager = { + initialize: vi.fn(), + clearCacheFile: vi.fn(), + } + ;(manager as any)._cacheManager = mockCacheManager + // Simulate an initialized manager by setting the required properties - ;(manager as any)._orchestrator = { stopWatcher: vitest.fn() } + ;(manager as any)._orchestrator = { stopWatcher: vi.fn() } ;(manager as any)._searchService = {} - ;(manager as any)._cacheManager = {} // Verify manager is considered initialized expect(manager.isInitialized).toBe(true) - // Mock the methods that would be called during restart - const recreateServicesSpy = vitest.spyOn(manager as any, "_recreateServices").mockImplementation(() => {}) - const startIndexingSpy = vitest.spyOn(manager, "startIndexing").mockResolvedValue() - // Mock the feature state - vitest.spyOn(manager, "isFeatureEnabled", "get").mockReturnValue(true) - vitest.spyOn(manager, "isFeatureConfigured", "get").mockReturnValue(true) + vi.spyOn(manager, "isFeatureEnabled", "get").mockReturnValue(true) + vi.spyOn(manager, "isFeatureConfigured", "get").mockReturnValue(true) - await manager.handleExternalSettingsChange() + // Mock service factory to handle _recreateServices call + const mockServiceFactoryInstance = { + configManager: mockConfigManager, + workspacePath: "/test/workspace", + cacheManager: mockCacheManager, + createEmbedder: vi.fn().mockReturnValue({ embedderInfo: { name: "openai" } }), + createVectorStore: vi.fn().mockReturnValue({}), + createDirectoryScanner: vi.fn().mockReturnValue({}), + createFileWatcher: vi.fn().mockReturnValue({ + onDidStartBatchProcessing: vi.fn(), + onBatchProgressUpdate: vi.fn(), + watch: vi.fn(), + stopWatcher: vi.fn(), + dispose: vi.fn(), + }), + createServices: vi.fn().mockReturnValue({ + embedder: { embedderInfo: { name: "openai" } }, + vectorStore: {}, + scanner: {}, + fileWatcher: { + onDidStartBatchProcessing: vi.fn(), + onBatchProgressUpdate: vi.fn(), + watch: vi.fn(), + stopWatcher: vi.fn(), + dispose: vi.fn(), + }, + }), + validateEmbedder: vi.fn().mockResolvedValue({ valid: true }), + } + MockedCodeIndexServiceFactory.mockImplementation(() => mockServiceFactoryInstance as any) + + // Mock the methods that would be called during restart + const recreateServicesSpy = vi.spyOn(manager as any, "_recreateServices") + + await manager.handleSettingsChange() // Verify that the restart sequence was called expect(mockConfigManager.loadConfiguration).toHaveBeenCalled() + // 
_recreateServices should be called when requiresRestart is true expect(recreateServicesSpy).toHaveBeenCalled() - expect(startIndexingSpy).toHaveBeenCalled() + // Note: startIndexing is NOT called by handleSettingsChange - it's only called by initialize() }) it("should handle case when config manager is not set", async () => { @@ -109,7 +227,138 @@ describe("CodeIndexManager - handleExternalSettingsChange regression", () => { ;(manager as any)._configManager = undefined // This should not throw an error - await expect(manager.handleExternalSettingsChange()).resolves.not.toThrow() + await expect(manager.handleSettingsChange()).resolves.not.toThrow() + }) + }) + + describe("embedder validation integration", () => { + let mockServiceFactoryInstance: any + let mockStateManager: any + let mockEmbedder: any + let mockVectorStore: any + let mockScanner: any + let mockFileWatcher: any + + beforeEach(() => { + // Mock service factory objects + mockEmbedder = { embedderInfo: { name: "openai" } } + mockVectorStore = {} + mockScanner = {} + mockFileWatcher = { + onDidStartBatchProcessing: vi.fn(), + onBatchProgressUpdate: vi.fn(), + watch: vi.fn(), + stopWatcher: vi.fn(), + dispose: vi.fn(), + } + + // Mock service factory instance + mockServiceFactoryInstance = { + createServices: vi.fn().mockReturnValue({ + embedder: mockEmbedder, + vectorStore: mockVectorStore, + scanner: mockScanner, + fileWatcher: mockFileWatcher, + }), + validateEmbedder: vi.fn(), + } + + // Mock the ServiceFactory constructor + MockedCodeIndexServiceFactory.mockImplementation(() => mockServiceFactoryInstance) + + // Mock state manager methods directly on the existing instance + mockStateManager = (manager as any)._stateManager + mockStateManager.setSystemState = vi.fn() + + // Mock config manager + const mockConfigManager = { + loadConfiguration: vi.fn().mockResolvedValue({ requiresRestart: false }), + isFeatureConfigured: true, + isFeatureEnabled: true, + getConfig: vi.fn().mockReturnValue({ + isEnabled: true, + isConfigured: true, + embedderProvider: "openai", + modelId: "text-embedding-3-small", + openAiOptions: { openAiNativeApiKey: "test-key" }, + qdrantUrl: "http://localhost:6333", + qdrantApiKey: "test-key", + searchMinScore: 0.4, + }), + } + ;(manager as any)._configManager = mockConfigManager + }) + + it("should validate embedder during _recreateServices when validation succeeds", async () => { + // Arrange + mockServiceFactoryInstance.validateEmbedder.mockResolvedValue({ valid: true }) + + // Act - directly call the private method for testing + await (manager as any)._recreateServices() + + // Assert + expect(mockServiceFactoryInstance.createServices).toHaveBeenCalled() + const createdEmbedder = mockServiceFactoryInstance.createServices.mock.results[0].value.embedder + expect(mockServiceFactoryInstance.validateEmbedder).toHaveBeenCalledWith(createdEmbedder) + expect(mockStateManager.setSystemState).not.toHaveBeenCalledWith("Error", expect.any(String)) + }) + + it("should set error state when embedder validation fails", async () => { + // Arrange + mockServiceFactoryInstance.validateEmbedder.mockResolvedValue({ + valid: false, + error: "embeddings:validation.authenticationFailed", + }) + + // Act & Assert + await expect((manager as any)._recreateServices()).rejects.toThrow( + "embeddings:validation.authenticationFailed", + ) + + // Assert other expectations + expect(mockServiceFactoryInstance.createServices).toHaveBeenCalled() + const createdEmbedder =
mockServiceFactoryInstance.createServices.mock.results[0].value.embedder + expect(mockServiceFactoryInstance.validateEmbedder).toHaveBeenCalledWith(createdEmbedder) + expect(mockStateManager.setSystemState).toHaveBeenCalledWith( + "Error", + "embeddings:validation.authenticationFailed", + ) + }) + + it("should set generic error state when embedder validation throws", async () => { + // Arrange + // Since the real service factory catches exceptions, we should mock it to resolve with an error + mockServiceFactoryInstance.validateEmbedder.mockResolvedValue({ + valid: false, + error: "embeddings:validation.configurationError", + }) + + // Act & Assert + await expect((manager as any)._recreateServices()).rejects.toThrow( + "embeddings:validation.configurationError", + ) + + // Assert other expectations + expect(mockServiceFactoryInstance.createServices).toHaveBeenCalled() + const createdEmbedder = mockServiceFactoryInstance.createServices.mock.results[0].value.embedder + expect(mockServiceFactoryInstance.validateEmbedder).toHaveBeenCalledWith(createdEmbedder) + expect(mockStateManager.setSystemState).toHaveBeenCalledWith( + "Error", + "embeddings:validation.configurationError", + ) + }) + + it("should handle embedder creation failure", async () => { + // Arrange + mockServiceFactoryInstance.createServices.mockImplementation(() => { + throw new Error("Invalid configuration") + }) + + // Act & Assert - should throw the error + await expect((manager as any)._recreateServices()).rejects.toThrow("Invalid configuration") + + // Should not attempt validation if embedder creation fails + expect(mockServiceFactoryInstance.validateEmbedder).not.toHaveBeenCalled() }) }) }) diff --git a/src/services/code-index/__tests__/service-factory.spec.ts b/src/services/code-index/__tests__/service-factory.spec.ts index a539549bad94..c2b56a046336 100644 --- a/src/services/code-index/__tests__/service-factory.spec.ts +++ b/src/services/code-index/__tests__/service-factory.spec.ts @@ -3,12 +3,14 @@ import { CodeIndexServiceFactory } from "../service-factory" import { OpenAiEmbedder } from "../embedders/openai" import { CodeIndexOllamaEmbedder } from "../embedders/ollama" import { OpenAICompatibleEmbedder } from "../embedders/openai-compatible" +import { GeminiEmbedder } from "../embedders/gemini" import { QdrantVectorStore } from "../vector-store/qdrant-client" // Mock the embedders and vector store vitest.mock("../embedders/openai") vitest.mock("../embedders/ollama") vitest.mock("../embedders/openai-compatible") +vitest.mock("../embedders/gemini") vitest.mock("../vector-store/qdrant-client") // Mock the embedding models module @@ -20,6 +22,7 @@ vitest.mock("../../../shared/embeddingModels", () => ({ const MockedOpenAiEmbedder = OpenAiEmbedder as MockedClass const MockedCodeIndexOllamaEmbedder = CodeIndexOllamaEmbedder as MockedClass const MockedOpenAICompatibleEmbedder = OpenAICompatibleEmbedder as MockedClass +const MockedGeminiEmbedder = GeminiEmbedder as MockedClass const MockedQdrantVectorStore = QdrantVectorStore as MockedClass // Import the mocked functions @@ -259,6 +262,49 @@ describe("CodeIndexServiceFactory", () => { ) }) + it("should create GeminiEmbedder when using Gemini provider", () => { + // Arrange + const testConfig = { + embedderProvider: "gemini", + geminiOptions: { + apiKey: "test-gemini-api-key", + }, + } + mockConfigManager.getConfig.mockReturnValue(testConfig as any) + + // Act + factory.createEmbedder() + + // Assert + expect(MockedGeminiEmbedder).toHaveBeenCalledWith("test-gemini-api-key") + 
}) + + it("should throw error when Gemini API key is missing", () => { + // Arrange + const testConfig = { + embedderProvider: "gemini", + geminiOptions: { + apiKey: undefined, + }, + } + mockConfigManager.getConfig.mockReturnValue(testConfig as any) + + // Act & Assert + expect(() => factory.createEmbedder()).toThrow("Gemini configuration missing for embedder creation") + }) + + it("should throw error when Gemini options are missing", () => { + // Arrange + const testConfig = { + embedderProvider: "gemini", + geminiOptions: undefined, + } + mockConfigManager.getConfig.mockReturnValue(testConfig as any) + + // Act & Assert + expect(() => factory.createEmbedder()).toThrow("Gemini configuration missing for embedder creation") + }) + it("should throw error for invalid embedder provider", () => { // Arrange const testConfig = { @@ -454,6 +500,30 @@ describe("CodeIndexServiceFactory", () => { ) }) + it("should use fixed dimension 768 for Gemini provider", () => { + // Arrange + const testConfig = { + embedderProvider: "gemini", + modelId: "text-embedding-004", // This is ignored by Gemini + qdrantUrl: "http://localhost:6333", + qdrantApiKey: "test-key", + } + mockConfigManager.getConfig.mockReturnValue(testConfig as any) + + // Act + factory.createVectorStore() + + // Assert + // getModelDimension should not be called for Gemini + expect(mockGetModelDimension).not.toHaveBeenCalled() + expect(MockedQdrantVectorStore).toHaveBeenCalledWith( + "/test/workspace", + "http://localhost:6333", + 768, // Fixed dimension for Gemini + "test-key", + ) + }) + it("should use default model when config.modelId is undefined", () => { // Arrange const testConfig = { @@ -510,4 +580,187 @@ describe("CodeIndexServiceFactory", () => { expect(() => factory.createVectorStore()).toThrow("Qdrant URL missing for vector store creation") }) }) + + describe("validateEmbedder", () => { + let mockEmbedderInstance: any + + beforeEach(() => { + mockEmbedderInstance = { + validateConfiguration: vitest.fn(), + } + }) + + it("should validate OpenAI embedder successfully", async () => { + // Arrange + const testConfig = { + embedderProvider: "openai", + modelId: "text-embedding-3-small", + openAiOptions: { + openAiNativeApiKey: "test-api-key", + }, + } + mockConfigManager.getConfig.mockReturnValue(testConfig as any) + MockedOpenAiEmbedder.mockImplementation(() => mockEmbedderInstance) + mockEmbedderInstance.validateConfiguration.mockResolvedValue({ valid: true }) + + // Act + const embedder = factory.createEmbedder() + const result = await factory.validateEmbedder(embedder) + + // Assert + expect(result).toEqual({ valid: true }) + expect(mockEmbedderInstance.validateConfiguration).toHaveBeenCalled() + }) + + it("should return validation error from OpenAI embedder", async () => { + // Arrange + const testConfig = { + embedderProvider: "openai", + modelId: "text-embedding-3-small", + openAiOptions: { + openAiNativeApiKey: "invalid-key", + }, + } + mockConfigManager.getConfig.mockReturnValue(testConfig as any) + MockedOpenAiEmbedder.mockImplementation(() => mockEmbedderInstance) + mockEmbedderInstance.validateConfiguration.mockResolvedValue({ + valid: false, + error: "embeddings:validation.authenticationFailed", + }) + + // Act + const embedder = factory.createEmbedder() + const result = await factory.validateEmbedder(embedder) + + // Assert + expect(result).toEqual({ + valid: false, + error: "embeddings:validation.authenticationFailed", + }) + }) + + it("should validate Ollama embedder successfully", async () => { + // Arrange + const 
testConfig = { + embedderProvider: "ollama", + modelId: "nomic-embed-text", + ollamaOptions: { + ollamaBaseUrl: "http://localhost:11434", + }, + } + mockConfigManager.getConfig.mockReturnValue(testConfig as any) + MockedCodeIndexOllamaEmbedder.mockImplementation(() => mockEmbedderInstance) + mockEmbedderInstance.validateConfiguration.mockResolvedValue({ valid: true }) + + // Act + const embedder = factory.createEmbedder() + const result = await factory.validateEmbedder(embedder) + + // Assert + expect(result).toEqual({ valid: true }) + expect(mockEmbedderInstance.validateConfiguration).toHaveBeenCalled() + }) + + it("should validate OpenAI Compatible embedder successfully", async () => { + // Arrange + const testConfig = { + embedderProvider: "openai-compatible", + modelId: "custom-model", + openAiCompatibleOptions: { + baseUrl: "https://api.example.com/v1", + apiKey: "test-api-key", + }, + } + mockConfigManager.getConfig.mockReturnValue(testConfig as any) + MockedOpenAICompatibleEmbedder.mockImplementation(() => mockEmbedderInstance) + mockEmbedderInstance.validateConfiguration.mockResolvedValue({ valid: true }) + + // Act + const embedder = factory.createEmbedder() + const result = await factory.validateEmbedder(embedder) + + // Assert + expect(result).toEqual({ valid: true }) + expect(mockEmbedderInstance.validateConfiguration).toHaveBeenCalled() + }) + + it("should validate Gemini embedder successfully", async () => { + // Arrange + const testConfig = { + embedderProvider: "gemini", + geminiOptions: { + apiKey: "test-gemini-api-key", + }, + } + mockConfigManager.getConfig.mockReturnValue(testConfig as any) + MockedGeminiEmbedder.mockImplementation(() => mockEmbedderInstance) + mockEmbedderInstance.validateConfiguration.mockResolvedValue({ valid: true }) + + // Act + const embedder = factory.createEmbedder() + const result = await factory.validateEmbedder(embedder) + + // Assert + expect(result).toEqual({ valid: true }) + expect(mockEmbedderInstance.validateConfiguration).toHaveBeenCalled() + }) + + it("should handle validation exceptions", async () => { + // Arrange + const testConfig = { + embedderProvider: "openai", + modelId: "text-embedding-3-small", + openAiOptions: { + openAiNativeApiKey: "test-api-key", + }, + } + mockConfigManager.getConfig.mockReturnValue(testConfig as any) + MockedOpenAiEmbedder.mockImplementation(() => mockEmbedderInstance) + const networkError = new Error("Network error") + mockEmbedderInstance.validateConfiguration.mockRejectedValue(networkError) + + // Act + const embedder = factory.createEmbedder() + const result = await factory.validateEmbedder(embedder) + + // Assert + expect(result).toEqual({ + valid: false, + error: "Network error", + }) + expect(mockEmbedderInstance.validateConfiguration).toHaveBeenCalled() + }) + + it("should return error for invalid embedder configuration", async () => { + // Arrange + const testConfig = { + embedderProvider: "openai", + modelId: "text-embedding-3-small", + openAiOptions: { + openAiNativeApiKey: undefined, // Missing API key + }, + } + mockConfigManager.getConfig.mockReturnValue(testConfig as any) + + // Act & Assert + // This should throw when trying to create the embedder + await expect(async () => { + const embedder = factory.createEmbedder() + await factory.validateEmbedder(embedder) + }).rejects.toThrow("OpenAI configuration missing for embedder creation") + }) + + it("should return error for unknown embedder provider", async () => { + // Arrange + const testConfig = { + embedderProvider: "unknown-provider", 
+ modelId: "some-model", + } + mockConfigManager.getConfig.mockReturnValue(testConfig as any) + + // Act & Assert + // This should throw when trying to create the embedder + expect(() => factory.createEmbedder()).toThrow("Invalid embedder type configured: unknown-provider") + }) + }) }) diff --git a/src/services/code-index/config-manager.ts b/src/services/code-index/config-manager.ts index 678cec36a12c..26ea0e2a48e3 100644 --- a/src/services/code-index/config-manager.ts +++ b/src/services/code-index/config-manager.ts @@ -2,8 +2,8 @@ import { ApiHandlerOptions } from "../../shared/api" import { ContextProxy } from "../../core/config/ContextProxy" import { EmbedderProvider } from "./interfaces/manager" import { CodeIndexConfig, PreviousConfigSnapshot } from "./interfaces/config" -import { SEARCH_MIN_SCORE } from "./constants" -import { getDefaultModelId, getModelDimension } from "../../shared/embeddingModels" +import { DEFAULT_SEARCH_MIN_SCORE, DEFAULT_MAX_SEARCH_RESULTS } from "./constants" +import { getDefaultModelId, getModelDimension, getModelScoreThreshold } from "../../shared/embeddingModels" /** * Manages configuration state and validation for the code indexing feature. @@ -16,15 +16,24 @@ export class CodeIndexConfigManager { private openAiOptions?: ApiHandlerOptions private ollamaOptions?: ApiHandlerOptions private openAiCompatibleOptions?: { baseUrl: string; apiKey: string; modelDimension?: number } + private geminiOptions?: { apiKey: string } private qdrantUrl?: string = "http://localhost:6333" private qdrantApiKey?: string private searchMinScore?: number + private searchMaxResults?: number constructor(private readonly contextProxy: ContextProxy) { // Initialize with current configuration to avoid false restart triggers this._loadAndSetConfiguration() } + /** + * Gets the context proxy instance + */ + public getContextProxy(): ContextProxy { + return this.contextProxy + } + /** * Private method that handles loading configuration from storage and updating instance variables. * This eliminates code duplication between initializeWithCurrentConfig() and loadConfiguration(). @@ -34,10 +43,11 @@ export class CodeIndexConfigManager { const codebaseIndexConfig = this.contextProxy?.getGlobalState("codebaseIndexConfig") ?? { codebaseIndexEnabled: false, codebaseIndexQdrantUrl: "http://localhost:6333", - codebaseIndexSearchMinScore: 0.4, codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderBaseUrl: "", codebaseIndexEmbedderModelId: "", + codebaseIndexSearchMinScore: undefined, + codebaseIndexSearchMaxResults: undefined, } const { @@ -46,28 +56,35 @@ export class CodeIndexConfigManager { codebaseIndexEmbedderProvider, codebaseIndexEmbedderBaseUrl, codebaseIndexEmbedderModelId, + codebaseIndexSearchMinScore, + codebaseIndexSearchMaxResults, } = codebaseIndexConfig const openAiKey = this.contextProxy?.getSecret("codeIndexOpenAiKey") ?? "" const qdrantApiKey = this.contextProxy?.getSecret("codeIndexQdrantApiKey") ?? "" - const openAiCompatibleBaseUrl = this.contextProxy?.getGlobalState("codebaseIndexOpenAiCompatibleBaseUrl") ?? "" + // Fix: Read OpenAI Compatible settings from the correct location within codebaseIndexConfig + const openAiCompatibleBaseUrl = codebaseIndexConfig.codebaseIndexOpenAiCompatibleBaseUrl ?? "" const openAiCompatibleApiKey = this.contextProxy?.getSecret("codebaseIndexOpenAiCompatibleApiKey") ?? 
"" - const openAiCompatibleModelDimension = this.contextProxy?.getGlobalState( - "codebaseIndexOpenAiCompatibleModelDimension", - ) as number | undefined + const openAiCompatibleModelDimension = codebaseIndexConfig.codebaseIndexOpenAiCompatibleModelDimension as + | number + | undefined + const geminiApiKey = this.contextProxy?.getSecret("codebaseIndexGeminiApiKey") ?? "" // Update instance variables with configuration this.isEnabled = codebaseIndexEnabled || false this.qdrantUrl = codebaseIndexQdrantUrl this.qdrantApiKey = qdrantApiKey ?? "" + this.searchMinScore = codebaseIndexSearchMinScore + this.searchMaxResults = codebaseIndexSearchMaxResults this.openAiOptions = { openAiNativeApiKey: openAiKey } - this.searchMinScore = SEARCH_MIN_SCORE // Set embedder provider with support for openai-compatible if (codebaseIndexEmbedderProvider === "ollama") { this.embedderProvider = "ollama" } else if (codebaseIndexEmbedderProvider === "openai-compatible") { this.embedderProvider = "openai-compatible" + } else if (codebaseIndexEmbedderProvider === "gemini") { + this.embedderProvider = "gemini" } else { this.embedderProvider = "openai" } @@ -86,6 +103,8 @@ export class CodeIndexConfigManager { modelDimension: openAiCompatibleModelDimension, } : undefined + + this.geminiOptions = geminiApiKey ? { apiKey: geminiApiKey } : undefined } /** @@ -101,6 +120,7 @@ export class CodeIndexConfigManager { openAiOptions?: ApiHandlerOptions ollamaOptions?: ApiHandlerOptions openAiCompatibleOptions?: { baseUrl: string; apiKey: string } + geminiOptions?: { apiKey: string } qdrantUrl?: string qdrantApiKey?: string searchMinScore?: number @@ -118,10 +138,14 @@ export class CodeIndexConfigManager { openAiCompatibleBaseUrl: this.openAiCompatibleOptions?.baseUrl ?? "", openAiCompatibleApiKey: this.openAiCompatibleOptions?.apiKey ?? "", openAiCompatibleModelDimension: this.openAiCompatibleOptions?.modelDimension, + geminiApiKey: this.geminiOptions?.apiKey ?? "", qdrantUrl: this.qdrantUrl ?? "", qdrantApiKey: this.qdrantApiKey ?? 
"", } + // Refresh secrets from VSCode storage to ensure we have the latest values + await this.contextProxy.refreshSecrets() + // Load new configuration from storage and update instance variables this._loadAndSetConfiguration() @@ -137,9 +161,10 @@ export class CodeIndexConfigManager { openAiOptions: this.openAiOptions, ollamaOptions: this.ollamaOptions, openAiCompatibleOptions: this.openAiCompatibleOptions, + geminiOptions: this.geminiOptions, qdrantUrl: this.qdrantUrl, qdrantApiKey: this.qdrantApiKey, - searchMinScore: this.searchMinScore, + searchMinScore: this.currentSearchMinScore, }, requiresRestart, } @@ -152,39 +177,56 @@ export class CodeIndexConfigManager { if (this.embedderProvider === "openai") { const openAiKey = this.openAiOptions?.openAiNativeApiKey const qdrantUrl = this.qdrantUrl - const isConfigured = !!(openAiKey && qdrantUrl) - return isConfigured + return !!(openAiKey && qdrantUrl) } else if (this.embedderProvider === "ollama") { // Ollama model ID has a default, so only base URL is strictly required for config const ollamaBaseUrl = this.ollamaOptions?.ollamaBaseUrl const qdrantUrl = this.qdrantUrl - const isConfigured = !!(ollamaBaseUrl && qdrantUrl) - return isConfigured + return !!(ollamaBaseUrl && qdrantUrl) } else if (this.embedderProvider === "openai-compatible") { const baseUrl = this.openAiCompatibleOptions?.baseUrl const apiKey = this.openAiCompatibleOptions?.apiKey const qdrantUrl = this.qdrantUrl - return !!(baseUrl && apiKey && qdrantUrl) + const isConfigured = !!(baseUrl && apiKey && qdrantUrl) + return isConfigured + } else if (this.embedderProvider === "gemini") { + const apiKey = this.geminiOptions?.apiKey + const qdrantUrl = this.qdrantUrl + const isConfigured = !!(apiKey && qdrantUrl) + return isConfigured } return false // Should not happen if embedderProvider is always set correctly } /** * Determines if a configuration change requires restarting the indexing process. + * Simplified logic: only restart for critical changes that affect service functionality. + * + * CRITICAL CHANGES (require restart): + * - Provider changes (openai -> ollama, etc.) + * - Authentication changes (API keys, base URLs) + * - Vector dimension changes (model changes that affect embedding size) + * - Qdrant connection changes (URL, API key) + * - Feature enable/disable transitions + * + * MINOR CHANGES (no restart needed): + * - Search minimum score adjustments + * - UI-only settings + * - Non-functional configuration tweaks */ doesConfigChangeRequireRestart(prev: PreviousConfigSnapshot): boolean { const nowConfigured = this.isConfigured() - // Handle null/undefined values safely - use empty strings for consistency with loaded config + // Handle null/undefined values safely const prevEnabled = prev?.enabled ?? false const prevConfigured = prev?.configured ?? false const prevProvider = prev?.embedderProvider ?? "openai" - const prevModelId = prev?.modelId ?? undefined const prevOpenAiKey = prev?.openAiKey ?? "" const prevOllamaBaseUrl = prev?.ollamaBaseUrl ?? "" const prevOpenAiCompatibleBaseUrl = prev?.openAiCompatibleBaseUrl ?? "" const prevOpenAiCompatibleApiKey = prev?.openAiCompatibleApiKey ?? "" const prevOpenAiCompatibleModelDimension = prev?.openAiCompatibleModelDimension + const prevGeminiApiKey = prev?.geminiApiKey ?? "" const prevQdrantUrl = prev?.qdrantUrl ?? "" const prevQdrantApiKey = prev?.qdrantApiKey ?? "" @@ -203,52 +245,53 @@ export class CodeIndexConfigManager { return false } - // 4. 
Check for changes in relevant settings if the feature is enabled (or was enabled) + // 4. CRITICAL CHANGES - Always restart for these if (this.isEnabled || prevEnabled) { // Provider change if (prevProvider !== this.embedderProvider) { return true } - if (this._hasVectorDimensionChanged(prevProvider, prevModelId)) { + // Authentication changes (API keys) + const currentOpenAiKey = this.openAiOptions?.openAiNativeApiKey ?? "" + const currentOllamaBaseUrl = this.ollamaOptions?.ollamaBaseUrl ?? "" + const currentOpenAiCompatibleBaseUrl = this.openAiCompatibleOptions?.baseUrl ?? "" + const currentOpenAiCompatibleApiKey = this.openAiCompatibleOptions?.apiKey ?? "" + const currentOpenAiCompatibleModelDimension = this.openAiCompatibleOptions?.modelDimension + const currentGeminiApiKey = this.geminiOptions?.apiKey ?? "" + const currentQdrantUrl = this.qdrantUrl ?? "" + const currentQdrantApiKey = this.qdrantApiKey ?? "" + + if (prevOpenAiKey !== currentOpenAiKey) { return true } - // Authentication changes - if (this.embedderProvider === "openai") { - const currentOpenAiKey = this.openAiOptions?.openAiNativeApiKey ?? "" - if (prevOpenAiKey !== currentOpenAiKey) { - return true - } + if (prevOllamaBaseUrl !== currentOllamaBaseUrl) { + return true } - if (this.embedderProvider === "ollama") { - const currentOllamaBaseUrl = this.ollamaOptions?.ollamaBaseUrl ?? "" - if (prevOllamaBaseUrl !== currentOllamaBaseUrl) { - return true - } + if ( + prevOpenAiCompatibleBaseUrl !== currentOpenAiCompatibleBaseUrl || + prevOpenAiCompatibleApiKey !== currentOpenAiCompatibleApiKey + ) { + return true } - if (this.embedderProvider === "openai-compatible") { - const currentOpenAiCompatibleBaseUrl = this.openAiCompatibleOptions?.baseUrl ?? "" - const currentOpenAiCompatibleApiKey = this.openAiCompatibleOptions?.apiKey ?? "" - const currentOpenAiCompatibleModelDimension = this.openAiCompatibleOptions?.modelDimension - if ( - prevOpenAiCompatibleBaseUrl !== currentOpenAiCompatibleBaseUrl || - prevOpenAiCompatibleApiKey !== currentOpenAiCompatibleApiKey || - prevOpenAiCompatibleModelDimension !== currentOpenAiCompatibleModelDimension - ) { + // Check for OpenAI Compatible modelDimension changes + if (this.embedderProvider === "openai-compatible" || prevProvider === "openai-compatible") { + if (prevOpenAiCompatibleModelDimension !== currentOpenAiCompatibleModelDimension) { return true } } - // Qdrant configuration changes - const currentQdrantUrl = this.qdrantUrl ?? "" - const currentQdrantApiKey = this.qdrantApiKey ?? "" - if (prevQdrantUrl !== currentQdrantUrl || prevQdrantApiKey !== currentQdrantApiKey) { return true } + + // Vector dimension changes (still important for compatibility) + if (this._hasVectorDimensionChanged(prevProvider, prev?.modelId)) { + return true + } } return false @@ -292,9 +335,11 @@ export class CodeIndexConfigManager { openAiOptions: this.openAiOptions, ollamaOptions: this.ollamaOptions, openAiCompatibleOptions: this.openAiCompatibleOptions, + geminiOptions: this.geminiOptions, qdrantUrl: this.qdrantUrl, qdrantApiKey: this.qdrantApiKey, - searchMinScore: this.searchMinScore, + searchMinScore: this.currentSearchMinScore, + searchMaxResults: this.currentSearchMaxResults, } } @@ -337,9 +382,26 @@ export class CodeIndexConfigManager { } /** - * Gets the configured minimum search score. + * Gets the configured minimum search score based on user setting, model-specific threshold, or fallback. + * Priority: 1) User setting, 2) Model-specific threshold, 3) Default DEFAULT_SEARCH_MIN_SCORE constant. 
+ */ + public get currentSearchMinScore(): number { + // First check if user has configured a custom score threshold + if (this.searchMinScore !== undefined) { + return this.searchMinScore + } + + // Fall back to model-specific threshold + const currentModelId = this.modelId ?? getDefaultModelId(this.embedderProvider) + const modelSpecificThreshold = getModelScoreThreshold(this.embedderProvider, currentModelId) + return modelSpecificThreshold ?? DEFAULT_SEARCH_MIN_SCORE + } + + /** + * Gets the configured maximum search results. + * Returns user setting if configured, otherwise returns default. */ - public get currentSearchMinScore(): number | undefined { - return this.searchMinScore + public get currentSearchMaxResults(): number { + return this.searchMaxResults ?? DEFAULT_MAX_SEARCH_RESULTS } } diff --git a/src/services/code-index/constants/index.ts b/src/services/code-index/constants/index.ts index cbf694181781..c2567f5635b4 100644 --- a/src/services/code-index/constants/index.ts +++ b/src/services/code-index/constants/index.ts @@ -1,12 +1,14 @@ +import { CODEBASE_INDEX_DEFAULTS } from "@roo-code/types" + /**Parser */ export const MAX_BLOCK_CHARS = 1000 -export const MIN_BLOCK_CHARS = 100 +export const MIN_BLOCK_CHARS = 50 export const MIN_CHUNK_REMAINDER_CHARS = 200 // Minimum characters for the *next* chunk after a split export const MAX_CHARS_TOLERANCE_FACTOR = 1.15 // 15% tolerance for max chars /**Search */ -export const SEARCH_MIN_SCORE = 0.4 -export const MAX_SEARCH_RESULTS = 50 // Maximum number of search results to return +export const DEFAULT_SEARCH_MIN_SCORE = CODEBASE_INDEX_DEFAULTS.DEFAULT_SEARCH_MIN_SCORE +export const DEFAULT_MAX_SEARCH_RESULTS = CODEBASE_INDEX_DEFAULTS.DEFAULT_SEARCH_RESULTS /**File Watcher */ export const QDRANT_CODE_BLOCK_NAMESPACE = "f47ac10b-58cc-4372-a567-0e02b2c3d479" @@ -23,3 +25,6 @@ export const PARSING_CONCURRENCY = 10 export const MAX_BATCH_TOKENS = 100000 export const MAX_ITEM_TOKENS = 8191 export const BATCH_PROCESSING_CONCURRENCY = 10 + +/**Gemini Embedder */ +export const GEMINI_MAX_ITEM_TOKENS = 2048 diff --git a/src/services/code-index/embedders/__tests__/gemini.spec.ts b/src/services/code-index/embedders/__tests__/gemini.spec.ts new file mode 100644 index 000000000000..856f5bf7c62c --- /dev/null +++ b/src/services/code-index/embedders/__tests__/gemini.spec.ts @@ -0,0 +1,108 @@ +import { vitest, describe, it, expect, beforeEach } from "vitest" +import type { MockedClass } from "vitest" +import { GeminiEmbedder } from "../gemini" +import { OpenAICompatibleEmbedder } from "../openai-compatible" + +// Mock the OpenAICompatibleEmbedder +vitest.mock("../openai-compatible") + +const MockedOpenAICompatibleEmbedder = OpenAICompatibleEmbedder as MockedClass + +describe("GeminiEmbedder", () => { + let embedder: GeminiEmbedder + + beforeEach(() => { + vitest.clearAllMocks() + }) + + describe("constructor", () => { + it("should create an instance with correct fixed values passed to OpenAICompatibleEmbedder", () => { + // Arrange + const apiKey = "test-gemini-api-key" + + // Act + embedder = new GeminiEmbedder(apiKey) + + // Assert + expect(MockedOpenAICompatibleEmbedder).toHaveBeenCalledWith( + "https://generativelanguage.googleapis.com/v1beta/openai/", + apiKey, + "text-embedding-004", + 2048, + ) + }) + + it("should throw error when API key is not provided", () => { + // Act & Assert + expect(() => new GeminiEmbedder("")).toThrow("API key is required for Gemini embedder") + expect(() => new GeminiEmbedder(null as any)).toThrow("API key is required 
for Gemini embedder") + expect(() => new GeminiEmbedder(undefined as any)).toThrow("API key is required for Gemini embedder") + }) + }) + + describe("embedderInfo", () => { + it("should return correct embedder info with dimension 768", () => { + // Arrange + embedder = new GeminiEmbedder("test-api-key") + + // Act + const info = embedder.embedderInfo + + // Assert + expect(info).toEqual({ + name: "gemini", + }) + expect(GeminiEmbedder.dimension).toBe(768) + }) + }) + + describe("validateConfiguration", () => { + let mockValidateConfiguration: any + + beforeEach(() => { + mockValidateConfiguration = vitest.fn() + MockedOpenAICompatibleEmbedder.prototype.validateConfiguration = mockValidateConfiguration + }) + + it("should delegate validation to OpenAICompatibleEmbedder", async () => { + // Arrange + embedder = new GeminiEmbedder("test-api-key") + mockValidateConfiguration.mockResolvedValue({ valid: true }) + + // Act + const result = await embedder.validateConfiguration() + + // Assert + expect(mockValidateConfiguration).toHaveBeenCalled() + expect(result).toEqual({ valid: true }) + }) + + it("should pass through validation errors from OpenAICompatibleEmbedder", async () => { + // Arrange + embedder = new GeminiEmbedder("test-api-key") + mockValidateConfiguration.mockResolvedValue({ + valid: false, + error: "embeddings:validation.authenticationFailed", + }) + + // Act + const result = await embedder.validateConfiguration() + + // Assert + expect(mockValidateConfiguration).toHaveBeenCalled() + expect(result).toEqual({ + valid: false, + error: "embeddings:validation.authenticationFailed", + }) + }) + + it("should handle validation exceptions", async () => { + // Arrange + embedder = new GeminiEmbedder("test-api-key") + mockValidateConfiguration.mockRejectedValue(new Error("Validation failed")) + + // Act & Assert + await expect(embedder.validateConfiguration()).rejects.toThrow("Validation failed") + }) + }) +}) diff --git a/src/services/code-index/embedders/__tests__/ollama.spec.ts b/src/services/code-index/embedders/__tests__/ollama.spec.ts new file mode 100644 index 000000000000..30e605738870 --- /dev/null +++ b/src/services/code-index/embedders/__tests__/ollama.spec.ts @@ -0,0 +1,238 @@ +import { vitest, describe, it, expect, beforeEach, afterEach } from "vitest" +import type { MockedFunction } from "vitest" +import { CodeIndexOllamaEmbedder } from "../ollama" + +// Mock fetch +global.fetch = vitest.fn() as MockedFunction + +// Mock i18n +vitest.mock("../../../../i18n", () => ({ + t: (key: string, params?: Record) => { + const translations: Record = { + "embeddings:validation.serviceUnavailable": + "The embedder service is not available. Please ensure it is running and accessible.", + "embeddings:validation.modelNotAvailable": + "The specified model is not available. Please check your model configuration.", + "embeddings:validation.connectionFailed": + "Failed to connect to the embedder service. Please check your connection settings and ensure the service is running.", + "embeddings:validation.configurationError": "Invalid embedder configuration. Please review your settings.", + "embeddings:errors.ollama.serviceNotRunning": + "Ollama service is not running at {{baseUrl}}. Please start Ollama first.", + "embeddings:errors.ollama.serviceUnavailable": + "Ollama service is unavailable at {{baseUrl}}. HTTP status: {{status}}", + "embeddings:errors.ollama.modelNotFound": + "Model '{{model}}' not found. 
Available models: {{availableModels}}", + "embeddings:errors.ollama.modelNotEmbedding": "Model '{{model}}' is not embedding capable", + "embeddings:errors.ollama.hostNotFound": "Ollama host not found: {{baseUrl}}", + "embeddings:errors.ollama.connectionTimeout": "Connection to Ollama timed out at {{baseUrl}}", + } + // Handle parameter substitution + let result = translations[key] || key + if (params) { + Object.entries(params).forEach(([param, value]) => { + result = result.replace(new RegExp(`{{${param}}}`, "g"), String(value)) + }) + } + return result + }, +})) + +// Mock console methods +const consoleMocks = { + error: vitest.spyOn(console, "error").mockImplementation(() => {}), +} + +describe("CodeIndexOllamaEmbedder", () => { + let embedder: CodeIndexOllamaEmbedder + let mockFetch: MockedFunction + + beforeEach(() => { + vitest.clearAllMocks() + consoleMocks.error.mockClear() + + mockFetch = global.fetch as MockedFunction + + embedder = new CodeIndexOllamaEmbedder({ + ollamaModelId: "nomic-embed-text", + ollamaBaseUrl: "http://localhost:11434", + }) + }) + + afterEach(() => { + vitest.clearAllMocks() + }) + + describe("constructor", () => { + it("should initialize with provided options", () => { + expect(embedder.embedderInfo.name).toBe("ollama") + }) + + it("should use default values when not provided", () => { + const embedderWithDefaults = new CodeIndexOllamaEmbedder({}) + expect(embedderWithDefaults.embedderInfo.name).toBe("ollama") + }) + }) + + describe("validateConfiguration", () => { + it("should validate successfully when service is available and model exists", async () => { + // Mock successful /api/tags call + mockFetch.mockImplementationOnce(() => + Promise.resolve({ + ok: true, + status: 200, + json: () => + Promise.resolve({ + models: [{ name: "nomic-embed-text:latest" }, { name: "llama2:latest" }], + }), + } as Response), + ) + + // Mock successful /api/embed test call + mockFetch.mockImplementationOnce(() => + Promise.resolve({ + ok: true, + status: 200, + json: () => + Promise.resolve({ + embeddings: [[0.1, 0.2, 0.3]], + }), + } as Response), + ) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(true) + expect(result.error).toBeUndefined() + expect(mockFetch).toHaveBeenCalledTimes(2) + + // Check first call (GET /api/tags) + const firstCall = mockFetch.mock.calls[0] + expect(firstCall[0]).toBe("http://localhost:11434/api/tags") + expect(firstCall[1]?.method).toBe("GET") + expect(firstCall[1]?.headers).toEqual({ "Content-Type": "application/json" }) + expect(firstCall[1]?.signal).toBeDefined() // AbortSignal for timeout + + // Check second call (POST /api/embed) + const secondCall = mockFetch.mock.calls[1] + expect(secondCall[0]).toBe("http://localhost:11434/api/embed") + expect(secondCall[1]?.method).toBe("POST") + expect(secondCall[1]?.headers).toEqual({ "Content-Type": "application/json" }) + expect(secondCall[1]?.body).toBe(JSON.stringify({ model: "nomic-embed-text", input: ["test"] })) + expect(secondCall[1]?.signal).toBeDefined() // AbortSignal for timeout + }) + + it("should fail validation when service is not available", async () => { + mockFetch.mockRejectedValueOnce(new Error("ECONNREFUSED")) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("Connection to Ollama timed out at http://localhost:11434") + }) + + it("should fail validation when tags endpoint returns 404", async () => { + mockFetch.mockImplementationOnce(() => + Promise.resolve({ + ok: 
false, + status: 404, + } as Response), + ) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe( + "Ollama service is not running at http://localhost:11434. Please start Ollama first.", + ) + }) + + it("should fail validation when tags endpoint returns other error", async () => { + mockFetch.mockImplementationOnce(() => + Promise.resolve({ + ok: false, + status: 500, + } as Response), + ) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("Ollama service is unavailable at http://localhost:11434. HTTP status: 500") + }) + + it("should fail validation when model does not exist", async () => { + // Mock successful /api/tags call with different models + mockFetch.mockImplementationOnce(() => + Promise.resolve({ + ok: true, + status: 200, + json: () => + Promise.resolve({ + models: [{ name: "llama2:latest" }, { name: "mistral:latest" }], + }), + } as Response), + ) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe( + "Model 'nomic-embed-text' not found. Available models: llama2:latest, mistral:latest", + ) + }) + + it("should fail validation when model exists but doesn't support embeddings", async () => { + // Mock successful /api/tags call + mockFetch.mockImplementationOnce(() => + Promise.resolve({ + ok: true, + status: 200, + json: () => + Promise.resolve({ + models: [{ name: "nomic-embed-text" }], + }), + } as Response), + ) + + // Mock failed /api/embed test call + mockFetch.mockImplementationOnce(() => + Promise.resolve({ + ok: false, + status: 400, + } as Response), + ) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("Model 'nomic-embed-text' is not embedding capable") + }) + + it("should handle ECONNREFUSED errors", async () => { + mockFetch.mockRejectedValueOnce(new Error("ECONNREFUSED")) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("Connection to Ollama timed out at http://localhost:11434") + }) + + it("should handle ENOTFOUND errors", async () => { + mockFetch.mockRejectedValueOnce(new Error("ENOTFOUND")) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("Ollama host not found: http://localhost:11434") + }) + + it("should handle generic network errors", async () => { + mockFetch.mockRejectedValueOnce(new Error("Network timeout")) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("Network timeout") + }) + }) +}) diff --git a/src/services/code-index/embedders/__tests__/openai-compatible.spec.ts b/src/services/code-index/embedders/__tests__/openai-compatible.spec.ts index 271d68cc205a..d1f45d75cab6 100644 --- a/src/services/code-index/embedders/__tests__/openai-compatible.spec.ts +++ b/src/services/code-index/embedders/__tests__/openai-compatible.spec.ts @@ -6,6 +6,9 @@ import { MAX_ITEM_TOKENS, INITIAL_RETRY_DELAY_MS } from "../../constants" // Mock the OpenAI SDK vitest.mock("openai") +// Mock global fetch +global.fetch = vitest.fn() + // Mock i18n vitest.mock("../../../../i18n", () => ({ t: (key: string, params?: Record) => { @@ -613,5 +616,399 @@ describe("OpenAICompatibleEmbedder", () => { expect(returnedArray).toEqual([0.25, 0.5, 0.75, 1.0]) }) }) + + /** + * Test Azure 
OpenAI compatibility with helper functions for conciseness + */ + describe("Azure OpenAI compatibility", () => { + const azureUrl = + "https://myresource.openai.azure.com/openai/deployments/mymodel/embeddings?api-version=2024-02-01" + const baseUrl = "https://api.openai.com/v1" + + // Helper to create mock fetch response + const createMockResponse = (data: any, status = 200, ok = true) => ({ + ok, + status, + json: vitest.fn().mockResolvedValue(data), + text: vitest.fn().mockResolvedValue(status === 200 ? "" : "Error message"), + }) + + // Helper to create base64 embedding + const createBase64Embedding = (values: number[]) => { + const embedding = new Float32Array(values) + return Buffer.from(embedding.buffer).toString("base64") + } + + // Helper to verify embedding values with floating-point tolerance + const expectEmbeddingValues = (actual: number[], expected: number[]) => { + expect(actual).toHaveLength(expected.length) + expected.forEach((val, i) => expect(actual[i]).toBeCloseTo(val, 5)) + } + + beforeEach(() => { + vitest.clearAllMocks() + ;(global.fetch as MockedFunction).mockReset() + }) + + describe("URL detection", () => { + it.each([ + [ + "https://myresource.openai.azure.com/openai/deployments/mymodel/embeddings?api-version=2024-02-01", + true, + ], + ["https://myresource.openai.azure.com/openai/deployments/text-embedding-ada-002/embeddings", true], + ["https://api.openai.com/v1", false], + ["https://api.example.com", false], + ["http://localhost:8080", false], + ])("should detect URL type correctly: %s -> %s", (url, expected) => { + const embedder = new OpenAICompatibleEmbedder(url, testApiKey, testModelId) + const isFullUrl = (embedder as any).isFullEndpointUrl(url) + expect(isFullUrl).toBe(expected) + }) + + // Edge cases where 'embeddings' or 'deployments' appear in non-endpoint contexts + it("should return false for URLs with 'embeddings' in non-endpoint contexts", () => { + const testUrls = [ + "https://api.example.com/embeddings-service/v1", + "https://embeddings.example.com/api", + "https://api.example.com/v1/embeddings-api", + "https://my-embeddings-provider.com/v1", + ] + + testUrls.forEach((url) => { + const embedder = new OpenAICompatibleEmbedder(url, testApiKey, testModelId) + const isFullUrl = (embedder as any).isFullEndpointUrl(url) + expect(isFullUrl).toBe(false) + }) + }) + + it("should return false for URLs with 'deployments' in non-endpoint contexts", () => { + const testUrls = [ + "https://deployments.example.com/api", + "https://api.deployments.com/v1", + "https://my-deployments-service.com/api/v1", + "https://deployments-manager.example.com", + ] + + testUrls.forEach((url) => { + const embedder = new OpenAICompatibleEmbedder(url, testApiKey, testModelId) + const isFullUrl = (embedder as any).isFullEndpointUrl(url) + expect(isFullUrl).toBe(false) + }) + }) + + it("should correctly identify actual endpoint URLs", () => { + const endpointUrls = [ + "https://api.example.com/v1/embeddings", + "https://api.example.com/v1/embeddings?api-version=2024", + "https://myresource.openai.azure.com/openai/deployments/mymodel/embeddings", + "https://api.example.com/embed", + "https://api.example.com/embed?version=1", + ] + + endpointUrls.forEach((url) => { + const embedder = new OpenAICompatibleEmbedder(url, testApiKey, testModelId) + const isFullUrl = (embedder as any).isFullEndpointUrl(url) + expect(isFullUrl).toBe(true) + }) + }) + }) + + describe("direct HTTP requests", () => { + it("should use direct fetch for Azure URLs and SDK for base URLs", async () => { + const 
testTexts = ["Test text"] + const base64String = createBase64Embedding([0.1, 0.2, 0.3]) + + // Test Azure URL (direct fetch) + const azureEmbedder = new OpenAICompatibleEmbedder(azureUrl, testApiKey, testModelId) + const mockFetchResponse = createMockResponse({ + data: [{ embedding: base64String }], + usage: { prompt_tokens: 10, total_tokens: 15 }, + }) + ;(global.fetch as MockedFunction).mockResolvedValue(mockFetchResponse as any) + + const azureResult = await azureEmbedder.createEmbeddings(testTexts) + expect(global.fetch).toHaveBeenCalledWith( + azureUrl, + expect.objectContaining({ + method: "POST", + headers: expect.objectContaining({ + "api-key": testApiKey, + Authorization: `Bearer ${testApiKey}`, + }), + }), + ) + expect(mockEmbeddingsCreate).not.toHaveBeenCalled() + expectEmbeddingValues(azureResult.embeddings[0], [0.1, 0.2, 0.3]) + + // Reset and test base URL (SDK) + vitest.clearAllMocks() + const baseEmbedder = new OpenAICompatibleEmbedder(baseUrl, testApiKey, testModelId) + mockEmbeddingsCreate.mockResolvedValue({ + data: [{ embedding: [0.4, 0.5, 0.6] }], + usage: { prompt_tokens: 10, total_tokens: 15 }, + }) + + const baseResult = await baseEmbedder.createEmbeddings(testTexts) + expect(mockEmbeddingsCreate).toHaveBeenCalled() + expect(global.fetch).not.toHaveBeenCalled() + expect(baseResult.embeddings[0]).toEqual([0.4, 0.5, 0.6]) + }) + + it.each([ + [401, "Authentication failed. Please check your API key."], + [500, "Failed to create embeddings after 3 attempts"], + ])("should handle HTTP errors: %d", async (status, expectedMessage) => { + const embedder = new OpenAICompatibleEmbedder(azureUrl, testApiKey, testModelId) + const mockResponse = createMockResponse({}, status, false) + ;(global.fetch as MockedFunction).mockResolvedValue(mockResponse as any) + + await expect(embedder.createEmbeddings(["test"])).rejects.toThrow(expectedMessage) + }) + + it("should handle rate limiting with retries", async () => { + vitest.useFakeTimers() + const embedder = new OpenAICompatibleEmbedder(azureUrl, testApiKey, testModelId) + const base64String = createBase64Embedding([0.1, 0.2, 0.3]) + + ;(global.fetch as MockedFunction) + .mockResolvedValueOnce(createMockResponse({}, 429, false) as any) + .mockResolvedValueOnce(createMockResponse({}, 429, false) as any) + .mockResolvedValueOnce( + createMockResponse({ + data: [{ embedding: base64String }], + usage: { prompt_tokens: 10, total_tokens: 15 }, + }) as any, + ) + + const resultPromise = embedder.createEmbeddings(["test"]) + await vitest.advanceTimersByTimeAsync(INITIAL_RETRY_DELAY_MS * 3) + const result = await resultPromise + + expect(global.fetch).toHaveBeenCalledTimes(3) + expect(console.warn).toHaveBeenCalledWith(expect.stringContaining("Rate limit hit")) + expectEmbeddingValues(result.embeddings[0], [0.1, 0.2, 0.3]) + vitest.useRealTimers() + }) + + it("should handle multiple embeddings and network errors", async () => { + const embedder = new OpenAICompatibleEmbedder(azureUrl, testApiKey, testModelId) + + // Test multiple embeddings + const base64_1 = createBase64Embedding([0.25, 0.5]) + const base64_2 = createBase64Embedding([0.75, 1.0]) + const mockResponse = createMockResponse({ + data: [{ embedding: base64_1 }, { embedding: base64_2 }], + usage: { prompt_tokens: 20, total_tokens: 30 }, + }) + ;(global.fetch as MockedFunction).mockResolvedValue(mockResponse as any) + + const result = await embedder.createEmbeddings(["test1", "test2"]) + expect(result.embeddings).toHaveLength(2) + expectEmbeddingValues(result.embeddings[0], [0.25, 
0.5]) + expectEmbeddingValues(result.embeddings[1], [0.75, 1.0]) + + // Test network error + const networkError = new Error("Network failed") + ;(global.fetch as MockedFunction).mockRejectedValue(networkError) + await expect(embedder.createEmbeddings(["test"])).rejects.toThrow( + "Failed to create embeddings after 3 attempts", + ) + }) + }) + }) + }) + + describe("URL detection", () => { + it("should detect Azure deployment URLs as full endpoints", async () => { + const embedder = new OpenAICompatibleEmbedder( + "https://myinstance.openai.azure.com/openai/deployments/my-deployment/embeddings?api-version=2023-05-15", + "test-key", + ) + + // The private method is tested indirectly through the createEmbeddings behavior + // If it's detected as a full URL, it will make a direct HTTP request + const mockFetch = vitest.fn().mockResolvedValue({ + ok: true, + json: async () => ({ + data: [{ embedding: [0.1, 0.2] }], + usage: { prompt_tokens: 10, total_tokens: 15 }, + }), + }) + global.fetch = mockFetch + + await embedder.createEmbeddings(["test"]) + + // Should make direct HTTP request to the full URL + expect(mockFetch).toHaveBeenCalledWith( + "https://myinstance.openai.azure.com/openai/deployments/my-deployment/embeddings?api-version=2023-05-15", + expect.any(Object), + ) + }) + + it("should detect /embed endpoints as full URLs", async () => { + const embedder = new OpenAICompatibleEmbedder("https://api.example.com/v1/embed", "test-key") + + const mockFetch = vitest.fn().mockResolvedValue({ + ok: true, + json: async () => ({ + data: [{ embedding: [0.1, 0.2] }], + usage: { prompt_tokens: 10, total_tokens: 15 }, + }), + }) + global.fetch = mockFetch + + await embedder.createEmbeddings(["test"]) + + // Should make direct HTTP request to the full URL + expect(mockFetch).toHaveBeenCalledWith("https://api.example.com/v1/embed", expect.any(Object)) + }) + + it("should treat base URLs without endpoint patterns as SDK URLs", async () => { + const embedder = new OpenAICompatibleEmbedder("https://api.openai.com/v1", "test-key") + + // Mock the OpenAI SDK's embeddings.create method + const mockCreate = vitest.fn().mockResolvedValue({ + data: [{ embedding: [0.1, 0.2] }], + usage: { prompt_tokens: 10, total_tokens: 15 }, + }) + embedder["embeddingsClient"].embeddings = { + create: mockCreate, + } as any + + await embedder.createEmbeddings(["test"]) + + // Should use SDK which will append /embeddings + expect(mockCreate).toHaveBeenCalled() + }) + }) + + describe("validateConfiguration", () => { + let embedder: OpenAICompatibleEmbedder + let mockFetch: MockedFunction + + beforeEach(() => { + vitest.clearAllMocks() + // Reset and re-assign the global fetch mock + global.fetch = vitest.fn() + mockFetch = global.fetch as MockedFunction + }) + + it("should validate successfully with valid configuration and base URL", async () => { + embedder = new OpenAICompatibleEmbedder(testBaseUrl, testApiKey, testModelId) + + const mockResponse = { + data: [{ embedding: [0.1, 0.2, 0.3] }], + usage: { prompt_tokens: 2, total_tokens: 2 }, + } + mockEmbeddingsCreate.mockResolvedValue(mockResponse) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(true) + expect(result.error).toBeUndefined() + expect(mockEmbeddingsCreate).toHaveBeenCalledWith({ + input: ["test"], + model: testModelId, + encoding_format: "base64", + }) + }) + + it("should validate successfully with full endpoint URL", async () => { + const fullUrl = "https://api.example.com/v1/embeddings" + embedder = new 
OpenAICompatibleEmbedder(fullUrl, testApiKey, testModelId) + + mockFetch.mockResolvedValueOnce({ + ok: true, + status: 200, + json: async () => ({ + data: [{ embedding: [0.1, 0.2, 0.3] }], + usage: { prompt_tokens: 2, total_tokens: 2 }, + }), + text: async () => "", + } as any) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(true) + expect(result.error).toBeUndefined() + expect(mockFetch).toHaveBeenCalledWith( + fullUrl, + expect.objectContaining({ + method: "POST", + headers: expect.objectContaining({ + Authorization: `Bearer ${testApiKey}`, + }), + }), + ) + }) + + it("should fail validation with authentication error", async () => { + embedder = new OpenAICompatibleEmbedder(testBaseUrl, testApiKey, testModelId) + + const authError = new Error("Invalid API key") + ;(authError as any).status = 401 + mockEmbeddingsCreate.mockRejectedValue(authError) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("embeddings:validation.authenticationFailed") + }) + + it("should fail validation with connection error", async () => { + embedder = new OpenAICompatibleEmbedder(testBaseUrl, testApiKey, testModelId) + + const connectionError = new Error("ECONNREFUSED") + mockEmbeddingsCreate.mockRejectedValue(connectionError) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("embeddings:validation.connectionFailed") + }) + + it("should fail validation with invalid endpoint for full URL", async () => { + const fullUrl = "https://api.example.com/v1/embeddings" + embedder = new OpenAICompatibleEmbedder(fullUrl, testApiKey, testModelId) + + mockFetch.mockResolvedValueOnce({ + ok: false, + status: 404, + json: async () => ({ error: "Not found" }), + text: async () => "Not found", + } as any) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("embeddings:validation.invalidEndpoint") + }) + + it("should fail validation with rate limit error", async () => { + embedder = new OpenAICompatibleEmbedder(testBaseUrl, testApiKey, testModelId) + + const rateLimitError = new Error("Rate limit exceeded") + ;(rateLimitError as any).status = 429 + mockEmbeddingsCreate.mockRejectedValue(rateLimitError) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("embeddings:validation.serviceUnavailable") + }) + + it("should fail validation with generic error", async () => { + embedder = new OpenAICompatibleEmbedder(testBaseUrl, testApiKey, testModelId) + + const genericError = new Error("Unknown error") + ;(genericError as any).status = 500 + mockEmbeddingsCreate.mockRejectedValue(genericError) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("embeddings:validation.configurationError") + }) }) }) diff --git a/src/services/code-index/embedders/__tests__/openai.spec.ts b/src/services/code-index/embedders/__tests__/openai.spec.ts index c93c0498442e..3f46fc248b64 100644 --- a/src/services/code-index/embedders/__tests__/openai.spec.ts +++ b/src/services/code-index/embedders/__tests__/openai.spec.ts @@ -464,4 +464,66 @@ describe("OpenAiEmbedder", () => { }) }) }) + + describe("validateConfiguration", () => { + it("should validate successfully with valid configuration", async () => { + const mockResponse = { + data: [{ embedding: [0.1, 0.2, 0.3] }], + 
usage: { prompt_tokens: 2, total_tokens: 2 }, + } + mockEmbeddingsCreate.mockResolvedValue(mockResponse) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(true) + expect(result.error).toBeUndefined() + expect(mockEmbeddingsCreate).toHaveBeenCalledWith({ + input: ["test"], + model: "text-embedding-3-small", + }) + }) + + it("should fail validation with authentication error", async () => { + const authError = new Error("Invalid API key") + ;(authError as any).status = 401 + mockEmbeddingsCreate.mockRejectedValue(authError) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("embeddings:validation.authenticationFailed") + }) + + it("should fail validation with rate limit error", async () => { + const rateLimitError = new Error("Rate limit exceeded") + ;(rateLimitError as any).status = 429 + mockEmbeddingsCreate.mockRejectedValue(rateLimitError) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("embeddings:validation.serviceUnavailable") + }) + + it("should fail validation with connection error", async () => { + const connectionError = new Error("ECONNREFUSED") + mockEmbeddingsCreate.mockRejectedValue(connectionError) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("embeddings:validation.connectionFailed") + }) + + it("should fail validation with generic error", async () => { + const genericError = new Error("Unknown error") + ;(genericError as any).status = 500 + mockEmbeddingsCreate.mockRejectedValue(genericError) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("embeddings:validation.configurationError") + }) + }) }) diff --git a/src/services/code-index/embedders/gemini.ts b/src/services/code-index/embedders/gemini.ts new file mode 100644 index 000000000000..f99ae4c1d7ad --- /dev/null +++ b/src/services/code-index/embedders/gemini.ts @@ -0,0 +1,74 @@ +import { OpenAICompatibleEmbedder } from "./openai-compatible" +import { IEmbedder, EmbeddingResponse, EmbedderInfo } from "../interfaces/embedder" +import { GEMINI_MAX_ITEM_TOKENS } from "../constants" + +/** + * Gemini embedder implementation that wraps the OpenAI Compatible embedder + * with fixed configuration for Google's Gemini embedding API. 
+ * + * Fixed values: + * - Base URL: https://generativelanguage.googleapis.com/v1beta/openai/ + * - Model: text-embedding-004 + * - Dimension: 768 + */ +export class GeminiEmbedder implements IEmbedder { + private readonly openAICompatibleEmbedder: OpenAICompatibleEmbedder + private static readonly GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/" + private static readonly GEMINI_MODEL = "text-embedding-004" + private static readonly GEMINI_DIMENSION = 768 + + /** + * Creates a new Gemini embedder + * @param apiKey The Gemini API key for authentication + */ + constructor(apiKey: string) { + if (!apiKey) { + throw new Error("API key is required for Gemini embedder") + } + + // Create an OpenAI Compatible embedder with Gemini's fixed configuration + this.openAICompatibleEmbedder = new OpenAICompatibleEmbedder( + GeminiEmbedder.GEMINI_BASE_URL, + apiKey, + GeminiEmbedder.GEMINI_MODEL, + GEMINI_MAX_ITEM_TOKENS, + ) + } + + /** + * Creates embeddings for the given texts using Gemini's embedding API + * @param texts Array of text strings to embed + * @param model Optional model identifier (ignored - always uses text-embedding-004) + * @returns Promise resolving to embedding response + */ + async createEmbeddings(texts: string[], model?: string): Promise { + // Always use the fixed Gemini model, ignoring any passed model parameter + return this.openAICompatibleEmbedder.createEmbeddings(texts, GeminiEmbedder.GEMINI_MODEL) + } + + /** + * Validates the Gemini embedder configuration by delegating to the underlying OpenAI-compatible embedder + * @returns Promise resolving to validation result with success status and optional error message + */ + async validateConfiguration(): Promise<{ valid: boolean; error?: string }> { + // Delegate validation to the OpenAI-compatible embedder + // The error messages will be specific to Gemini since we're using Gemini's base URL + return this.openAICompatibleEmbedder.validateConfiguration() + } + + /** + * Returns information about this embedder + */ + get embedderInfo(): EmbedderInfo { + return { + name: "gemini", + } + } + + /** + * Gets the fixed dimension for Gemini embeddings + */ + static get dimension(): number { + return GeminiEmbedder.GEMINI_DIMENSION + } +} diff --git a/src/services/code-index/embedders/ollama.ts b/src/services/code-index/embedders/ollama.ts index 748ed188a409..f9001a743ed7 100644 --- a/src/services/code-index/embedders/ollama.ts +++ b/src/services/code-index/embedders/ollama.ts @@ -1,6 +1,9 @@ import { ApiHandlerOptions } from "../../../shared/api" import { EmbedderInfo, EmbeddingResponse, IEmbedder } from "../interfaces" +import { getModelQueryPrefix } from "../../../shared/embeddingModels" +import { MAX_ITEM_TOKENS } from "../constants" import { t } from "../../../i18n" +import { withValidationErrorHandling } from "../shared/validation-helpers" /** * Implements the IEmbedder interface using a local Ollama instance. @@ -25,6 +28,31 @@ export class CodeIndexOllamaEmbedder implements IEmbedder { const modelToUse = model || this.defaultModelId const url = `${this.baseUrl}/api/embed` // Endpoint as specified + // Apply model-specific query prefix if required + const queryPrefix = getModelQueryPrefix("ollama", modelToUse) + const processedTexts = queryPrefix + ? 
texts.map((text, index) => { + // Prevent double-prefixing + if (text.startsWith(queryPrefix)) { + return text + } + const prefixedText = `${queryPrefix}${text}` + const estimatedTokens = Math.ceil(prefixedText.length / 4) + if (estimatedTokens > MAX_ITEM_TOKENS) { + console.warn( + t("embeddings:textWithPrefixExceedsTokenLimit", { + index, + estimatedTokens, + maxTokens: MAX_ITEM_TOKENS, + }), + ) + // Return original text if adding prefix would exceed limit + return text + } + return prefixedText + }) + : texts + try { // Note: Standard Ollama API uses 'prompt' for single text, not 'input' for array. // Implementing based on user's specific request structure. @@ -35,7 +63,7 @@ export class CodeIndexOllamaEmbedder implements IEmbedder { }, body: JSON.stringify({ model: modelToUse, - input: texts, // Using 'input' as requested + input: processedTexts, // Using 'input' as requested }), }) @@ -74,6 +102,127 @@ export class CodeIndexOllamaEmbedder implements IEmbedder { } } + /** + * Validates the Ollama embedder configuration by checking service availability and model existence + * @returns Promise resolving to validation result with success status and optional error message + */ + async validateConfiguration(): Promise<{ valid: boolean; error?: string }> { + return withValidationErrorHandling( + async () => { + // First check if Ollama service is running by trying to list models + const modelsUrl = `${this.baseUrl}/api/tags` + + // Add timeout to prevent indefinite hanging + const controller = new AbortController() + const timeoutId = setTimeout(() => controller.abort(), 5000) // 5 second timeout + + const modelsResponse = await fetch(modelsUrl, { + method: "GET", + headers: { + "Content-Type": "application/json", + }, + signal: controller.signal, + }) + clearTimeout(timeoutId) + + if (!modelsResponse.ok) { + if (modelsResponse.status === 404) { + return { + valid: false, + error: t("embeddings:errors.ollama.serviceNotRunning", { baseUrl: this.baseUrl }), + } + } + return { + valid: false, + error: t("embeddings:errors.ollama.serviceUnavailable", { + baseUrl: this.baseUrl, + status: modelsResponse.status, + }), + } + } + + // Check if the specific model exists + const modelsData = await modelsResponse.json() + const models = modelsData.models || [] + + // Check both with and without :latest suffix + const modelExists = models.some((m: any) => { + const modelName = m.name || "" + return ( + modelName === this.defaultModelId || + modelName === `${this.defaultModelId}:latest` || + modelName === this.defaultModelId.replace(":latest", "") + ) + }) + + if (!modelExists) { + const availableModels = models.map((m: any) => m.name).join(", ") + return { + valid: false, + error: t("embeddings:errors.ollama.modelNotFound", { + model: this.defaultModelId, + availableModels, + }), + } + } + + // Try a test embedding to ensure the model works for embeddings + const testUrl = `${this.baseUrl}/api/embed` + + // Add timeout for test request too + const testController = new AbortController() + const testTimeoutId = setTimeout(() => testController.abort(), 5000) + + const testResponse = await fetch(testUrl, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ + model: this.defaultModelId, + input: ["test"], + }), + signal: testController.signal, + }) + clearTimeout(testTimeoutId) + + if (!testResponse.ok) { + return { + valid: false, + error: t("embeddings:errors.ollama.modelNotEmbedding", { model: this.defaultModelId }), + } + } + + return { valid: true } + }, + 
"ollama", + { + beforeStandardHandling: (error: any) => { + // Handle Ollama-specific connection errors + if (error?.message === "ECONNREFUSED") { + return { + valid: false, + error: t("embeddings:errors.ollama.connectionTimeout", { baseUrl: this.baseUrl }), + } + } else if (error?.message === "ENOTFOUND") { + return { + valid: false, + error: t("embeddings:errors.ollama.hostNotFound", { baseUrl: this.baseUrl }), + } + } else if (error?.name === "AbortError") { + // Handle timeout + return { + valid: false, + error: t("embeddings:errors.ollama.connectionTimeout", { baseUrl: this.baseUrl }), + } + } + // Let standard handling take over + return undefined + }, + }, + ) + } + get embedderInfo(): EmbedderInfo { return { name: "ollama", diff --git a/src/services/code-index/embedders/openai-compatible.ts b/src/services/code-index/embedders/openai-compatible.ts index 0983cc297f7b..b378bbe7acdd 100644 --- a/src/services/code-index/embedders/openai-compatible.ts +++ b/src/services/code-index/embedders/openai-compatible.ts @@ -6,8 +6,9 @@ import { MAX_BATCH_RETRIES as MAX_RETRIES, INITIAL_RETRY_DELAY_MS as INITIAL_DELAY_MS, } from "../constants" -import { getDefaultModelId } from "../../../shared/embeddingModels" +import { getDefaultModelId, getModelQueryPrefix } from "../../../shared/embeddingModels" import { t } from "../../../i18n" +import { withValidationErrorHandling, HttpError, formatEmbeddingError } from "../shared/validation-helpers" interface EmbeddingItem { embedding: string | number[] @@ -26,17 +27,23 @@ interface OpenAIEmbeddingResponse { * OpenAI Compatible implementation of the embedder interface with batching and rate limiting. * This embedder allows using any OpenAI-compatible API endpoint by specifying a custom baseURL. */ + export class OpenAICompatibleEmbedder implements IEmbedder { private embeddingsClient: OpenAI private readonly defaultModelId: string + private readonly baseUrl: string + private readonly apiKey: string + private readonly isFullUrl: boolean + private readonly maxItemTokens: number /** * Creates a new OpenAI Compatible embedder * @param baseUrl The base URL for the OpenAI-compatible API endpoint * @param apiKey The API key for authentication * @param modelId Optional model identifier (defaults to "text-embedding-3-small") + * @param maxItemTokens Optional maximum tokens per item (defaults to MAX_ITEM_TOKENS) */ - constructor(baseUrl: string, apiKey: string, modelId?: string) { + constructor(baseUrl: string, apiKey: string, modelId?: string, maxItemTokens?: number) { if (!baseUrl) { throw new Error("Base URL is required for OpenAI Compatible embedder") } @@ -44,11 +51,16 @@ export class OpenAICompatibleEmbedder implements IEmbedder { throw new Error("API key is required for OpenAI Compatible embedder") } + this.baseUrl = baseUrl + this.apiKey = apiKey this.embeddingsClient = new OpenAI({ baseURL: baseUrl, apiKey: apiKey, }) this.defaultModelId = modelId || getDefaultModelId("openai-compatible") + // Cache the URL type check for performance + this.isFullUrl = this.isFullEndpointUrl(baseUrl) + this.maxItemTokens = maxItemTokens || MAX_ITEM_TOKENS } /** @@ -59,9 +71,35 @@ export class OpenAICompatibleEmbedder implements IEmbedder { */ async createEmbeddings(texts: string[], model?: string): Promise { const modelToUse = model || this.defaultModelId + + // Apply model-specific query prefix if required + const queryPrefix = getModelQueryPrefix("openai-compatible", modelToUse) + const processedTexts = queryPrefix + ? 
texts.map((text, index) => { + // Prevent double-prefixing + if (text.startsWith(queryPrefix)) { + return text + } + const prefixedText = `${queryPrefix}${text}` + const estimatedTokens = Math.ceil(prefixedText.length / 4) + if (estimatedTokens > MAX_ITEM_TOKENS) { + console.warn( + t("embeddings:textWithPrefixExceedsTokenLimit", { + index, + estimatedTokens, + maxTokens: MAX_ITEM_TOKENS, + }), + ) + // Return original text if adding prefix would exceed limit + return text + } + return prefixedText + }) + : texts + const allEmbeddings: number[][] = [] const usage = { promptTokens: 0, totalTokens: 0 } - const remainingTexts = [...texts] + const remainingTexts = [...processedTexts] while (remainingTexts.length > 0) { const currentBatch: string[] = [] @@ -72,12 +110,12 @@ export class OpenAICompatibleEmbedder implements IEmbedder { const text = remainingTexts[i] const itemTokens = Math.ceil(text.length / 4) - if (itemTokens > MAX_ITEM_TOKENS) { + if (itemTokens > this.maxItemTokens) { console.warn( t("embeddings:textExceedsTokenLimit", { index: i, itemTokens, - maxTokens: MAX_ITEM_TOKENS, + maxTokens: this.maxItemTokens, }), ) processedIndices.push(i) @@ -109,6 +147,82 @@ export class OpenAICompatibleEmbedder implements IEmbedder { return { embeddings: allEmbeddings, usage } } + /** + * Determines if the provided URL is a full endpoint URL or a base URL that needs the endpoint appended by the SDK. + * Uses smart pattern matching for known providers while accepting we can't cover all possible patterns. + * @param url The URL to check + * @returns true if it's a full endpoint URL, false if it's a base URL + */ + private isFullEndpointUrl(url: string): boolean { + // Known patterns for major providers + const patterns = [ + // Azure OpenAI: /deployments/{deployment-name}/embeddings + /\/deployments\/[^\/]+\/embeddings(\?|$)/, + // Direct endpoints: ends with /embeddings (before query params) + /\/embeddings(\?|$)/, + // Some providers use /embed instead of /embeddings + /\/embed(\?|$)/, + ] + + return patterns.some((pattern) => pattern.test(url)) + } + + /** + * Makes a direct HTTP request to the embeddings endpoint + * Used when the user provides a full endpoint URL (e.g., Azure OpenAI with query parameters) + * @param url The full endpoint URL + * @param batchTexts Array of texts to embed + * @param model Model identifier to use + * @returns Promise resolving to OpenAI-compatible response + */ + private async makeDirectEmbeddingRequest( + url: string, + batchTexts: string[], + model: string, + ): Promise { + const response = await fetch(url, { + method: "POST", + headers: { + "Content-Type": "application/json", + // Azure OpenAI uses 'api-key' header, while OpenAI uses 'Authorization' + // We'll try 'api-key' first for Azure compatibility + "api-key": this.apiKey, + Authorization: `Bearer ${this.apiKey}`, + }, + body: JSON.stringify({ + input: batchTexts, + model: model, + encoding_format: "base64", + }), + }) + + if (!response || !response.ok) { + const status = response?.status || 0 + let errorText = "No response" + try { + if (response && typeof response.text === "function") { + errorText = await response.text() + } else if (response) { + errorText = `Error ${status}` + } + } catch { + // Ignore text parsing errors + errorText = `Error ${status}` + } + const error = new Error(`HTTP ${status}: ${errorText}`) as HttpError + error.status = status || response?.status || 0 + throw error + } + + try { + return await response.json() + } catch (e) { + const error = new Error(`Failed to parse 
response JSON`) as HttpError + error.status = response.status + throw error + } + } + /** * Helper method to handle batch embedding with retries and exponential backoff * @param batchTexts Array of texts to embed in this batch @@ -119,16 +233,27 @@ export class OpenAICompatibleEmbedder implements IEmbedder { batchTexts: string[], model: string, ): Promise<{ embeddings: number[][]; usage: { promptTokens: number; totalTokens: number } }> { + // Use cached value for performance + const isFullUrl = this.isFullUrl + for (let attempts = 0; attempts < MAX_RETRIES; attempts++) { try { - const response = (await this.embeddingsClient.embeddings.create({ - input: batchTexts, - model: model, - // OpenAI package (as of v4.78.1) has a parsing issue that truncates embedding dimensions to 256 - // when processing numeric arrays, which breaks compatibility with models using larger dimensions. - // By requesting base64 encoding, we bypass the package's parser and handle decoding ourselves. - encoding_format: "base64", - })) as OpenAIEmbeddingResponse + let response: OpenAIEmbeddingResponse + + if (isFullUrl) { + // Use direct HTTP request for full endpoint URLs + response = await this.makeDirectEmbeddingRequest(this.baseUrl, batchTexts, model) + } else { + // Use OpenAI SDK for base URLs + response = (await this.embeddingsClient.embeddings.create({ + input: batchTexts, + model: model, + // OpenAI package (as of v4.78.1) has a parsing issue that truncates embedding dimensions to 256 + // when processing numeric arrays, which breaks compatibility with models using larger dimensions. + // By requesting base64 encoding, we bypass the package's parser and handle decoding ourselves. + encoding_format: "base64", + })) as OpenAIEmbeddingResponse + } // Convert base64 embeddings to float32 arrays const processedEmbeddings = response.data.map((item: EmbeddingItem) => { @@ -158,11 +283,12 @@ export class OpenAICompatibleEmbedder implements IEmbedder { totalTokens: response.usage?.total_tokens || 0, }, } - } catch (error: any) { - const isRateLimitError = error?.status === 429 + } catch (error) { const hasMoreAttempts = attempts < MAX_RETRIES - 1 - if (isRateLimitError && hasMoreAttempts) { + // Check if it's a rate limit error + const httpError = error as HttpError + if (httpError?.status === 429 && hasMoreAttempts) { const delayMs = INITIAL_DELAY_MS * Math.pow(2, attempts) console.warn( t("embeddings:rateLimitRetry", { @@ -178,35 +304,48 @@ export class OpenAICompatibleEmbedder implements IEmbedder { // Log the error for debugging console.error(`OpenAI Compatible embedder error (attempt ${attempts + 1}/${MAX_RETRIES}):`, error) - // Provide more context in the error message using robust error extraction - let errorMessage = t("embeddings:unknownError") - if (error?.message) { - errorMessage = error.message - } else if (typeof error === "string") { - errorMessage = error - } else if (error && typeof error.toString === "function") { - try { - errorMessage = error.toString() - } catch { - errorMessage = t("embeddings:unknownError") - } - } + // Format and throw the error + throw formatEmbeddingError(error, MAX_RETRIES) + } + } - const statusCode = error?.status || error?.response?.status + throw new Error(t("embeddings:failedMaxAttempts", { attempts: MAX_RETRIES })) + } - if (statusCode === 401) { - throw new Error(t("embeddings:authenticationFailed")) - } else if (statusCode) { - throw new Error( - t("embeddings:failedWithStatus", { attempts: MAX_RETRIES, statusCode, errorMessage }), - ) - } else { - throw new 
Error(t("embeddings:failedWithError", { attempts: MAX_RETRIES, errorMessage })) + /** + * Validates the OpenAI-compatible embedder configuration by testing endpoint connectivity and API key + * @returns Promise resolving to validation result with success status and optional error message + */ + async validateConfiguration(): Promise<{ valid: boolean; error?: string }> { + return withValidationErrorHandling(async () => { + // Test with a minimal embedding request + const testTexts = ["test"] + const modelToUse = this.defaultModelId + + let response: OpenAIEmbeddingResponse + + if (this.isFullUrl) { + // Test direct HTTP request for full endpoint URLs + response = await this.makeDirectEmbeddingRequest(this.baseUrl, testTexts, modelToUse) + } else { + // Test using OpenAI SDK for base URLs + response = (await this.embeddingsClient.embeddings.create({ + input: testTexts, + model: modelToUse, + encoding_format: "base64", + })) as OpenAIEmbeddingResponse + } + + // Check if we got a valid response + if (!response?.data || response.data.length === 0) { + return { + valid: false, + error: "embeddings:validation.invalidResponse", } } - } - throw new Error(t("embeddings:failedMaxAttempts", { attempts: MAX_RETRIES })) + return { valid: true } + }, "openai-compatible") } /** diff --git a/src/services/code-index/embedders/openai.ts b/src/services/code-index/embedders/openai.ts index d0dc132df7a7..a620edc307a1 100644 --- a/src/services/code-index/embedders/openai.ts +++ b/src/services/code-index/embedders/openai.ts @@ -8,7 +8,9 @@ import { MAX_BATCH_RETRIES as MAX_RETRIES, INITIAL_RETRY_DELAY_MS as INITIAL_DELAY_MS, } from "../constants" +import { getModelQueryPrefix } from "../../../shared/embeddingModels" import { t } from "../../../i18n" +import { withValidationErrorHandling, formatEmbeddingError, HttpError } from "../shared/validation-helpers" /** * OpenAI implementation of the embedder interface with batching and rate limiting @@ -36,9 +38,35 @@ export class OpenAiEmbedder extends OpenAiNativeHandler implements IEmbedder { */ async createEmbeddings(texts: string[], model?: string): Promise { const modelToUse = model || this.defaultModelId + + // Apply model-specific query prefix if required + const queryPrefix = getModelQueryPrefix("openai", modelToUse) + const processedTexts = queryPrefix + ? 
texts.map((text, index) => { + // Prevent double-prefixing + if (text.startsWith(queryPrefix)) { + return text + } + const prefixedText = `${queryPrefix}${text}` + const estimatedTokens = Math.ceil(prefixedText.length / 4) + if (estimatedTokens > MAX_ITEM_TOKENS) { + console.warn( + t("embeddings:textWithPrefixExceedsTokenLimit", { + index, + estimatedTokens, + maxTokens: MAX_ITEM_TOKENS, + }), + ) + // Return original text if adding prefix would exceed limit + return text + } + return prefixedText + }) + : texts + const allEmbeddings: number[][] = [] const usage = { promptTokens: 0, totalTokens: 0 } - const remainingTexts = [...texts] + const remainingTexts = [...processedTexts] while (remainingTexts.length > 0) { const currentBatch: string[] = [] @@ -111,10 +139,11 @@ export class OpenAiEmbedder extends OpenAiNativeHandler implements IEmbedder { }, } } catch (error: any) { - const isRateLimitError = error?.status === 429 const hasMoreAttempts = attempts < MAX_RETRIES - 1 - if (isRateLimitError && hasMoreAttempts) { + // Check if it's a rate limit error + const httpError = error as HttpError + if (httpError?.status === 429 && hasMoreAttempts) { const delayMs = INITIAL_DELAY_MS * Math.pow(2, attempts) console.warn( t("embeddings:rateLimitRetry", { @@ -130,35 +159,36 @@ export class OpenAiEmbedder extends OpenAiNativeHandler implements IEmbedder { // Log the error for debugging console.error(`OpenAI embedder error (attempt ${attempts + 1}/${MAX_RETRIES}):`, error) - // Provide more context in the error message using robust error extraction - let errorMessage = "Unknown error" - if (error?.message) { - errorMessage = error.message - } else if (typeof error === "string") { - errorMessage = error - } else if (error && typeof error.toString === "function") { - try { - errorMessage = error.toString() - } catch { - errorMessage = "Unknown error" - } - } + // Format and throw the error + throw formatEmbeddingError(error, MAX_RETRIES) + } + } - const statusCode = error?.status || error?.response?.status + throw new Error(t("embeddings:failedMaxAttempts", { attempts: MAX_RETRIES })) + } - if (statusCode === 401) { - throw new Error(t("embeddings:authenticationFailed")) - } else if (statusCode) { - throw new Error( - t("embeddings:failedWithStatus", { attempts: MAX_RETRIES, statusCode, errorMessage }), - ) - } else { - throw new Error(t("embeddings:failedWithError", { attempts: MAX_RETRIES, errorMessage })) + /** + * Validates the OpenAI embedder configuration by attempting a minimal embedding request + * @returns Promise resolving to validation result with success status and optional error message + */ + async validateConfiguration(): Promise<{ valid: boolean; error?: string }> { + return withValidationErrorHandling(async () => { + // Test with a minimal embedding request + const response = await this.embeddingsClient.embeddings.create({ + input: ["test"], + model: this.defaultModelId, + }) + + // Check if we got a valid response + if (!response.data || response.data.length === 0) { + return { + valid: false, + error: t("embeddings:openai.invalidResponseFormat"), } } - } - throw new Error(t("embeddings:failedMaxAttempts", { attempts: MAX_RETRIES })) + return { valid: true } + }, "openai") } get embedderInfo(): EmbedderInfo { diff --git a/src/services/code-index/interfaces/config.ts b/src/services/code-index/interfaces/config.ts index 0843120fd9f8..0600f29c2ae5 100644 --- a/src/services/code-index/interfaces/config.ts +++ b/src/services/code-index/interfaces/config.ts @@ -12,9 +12,11 @@ export 
interface CodeIndexConfig { openAiOptions?: ApiHandlerOptions ollamaOptions?: ApiHandlerOptions openAiCompatibleOptions?: { baseUrl: string; apiKey: string; modelDimension?: number } + geminiOptions?: { apiKey: string } qdrantUrl?: string qdrantApiKey?: string searchMinScore?: number + searchMaxResults?: number } /** @@ -30,6 +32,7 @@ export type PreviousConfigSnapshot = { openAiCompatibleBaseUrl?: string openAiCompatibleApiKey?: string openAiCompatibleModelDimension?: number + geminiApiKey?: string qdrantUrl?: string qdrantApiKey?: string } diff --git a/src/services/code-index/interfaces/embedder.ts b/src/services/code-index/interfaces/embedder.ts index 820fba9b8eaa..0a74446d5e74 100644 --- a/src/services/code-index/interfaces/embedder.ts +++ b/src/services/code-index/interfaces/embedder.ts @@ -10,6 +10,13 @@ export interface IEmbedder { * @returns Promise resolving to an EmbeddingResponse */ createEmbeddings(texts: string[], model?: string): Promise + + /** + * Validates the embedder configuration by testing connectivity and credentials. + * @returns Promise resolving to validation result with success status and optional error message + */ + validateConfiguration(): Promise<{ valid: boolean; error?: string }> + get embedderInfo(): EmbedderInfo } @@ -21,7 +28,7 @@ export interface EmbeddingResponse { } } -export type AvailableEmbedders = "openai" | "ollama" | "openai-compatible" +export type AvailableEmbedders = "openai" | "ollama" | "openai-compatible" | "gemini" export interface EmbedderInfo { name: AvailableEmbedders diff --git a/src/services/code-index/interfaces/manager.ts b/src/services/code-index/interfaces/manager.ts index f3d577d82f07..70e3fd976595 100644 --- a/src/services/code-index/interfaces/manager.ts +++ b/src/services/code-index/interfaces/manager.ts @@ -70,7 +70,7 @@ export interface ICodeIndexManager { } export type IndexingState = "Standby" | "Indexing" | "Indexed" | "Error" -export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" +export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" | "gemini" export interface IndexProgressUpdate { systemStatus: IndexingState diff --git a/src/services/code-index/interfaces/vector-store.ts b/src/services/code-index/interfaces/vector-store.ts index 1896942a01cf..dde602fb4d9a 100644 --- a/src/services/code-index/interfaces/vector-store.ts +++ b/src/services/code-index/interfaces/vector-store.ts @@ -23,10 +23,17 @@ export interface IVectorStore { /** * Searches for similar vectors * @param queryVector Vector to search for - * @param limit Maximum number of results to return + * @param directoryPrefix Optional directory prefix to filter results + * @param minScore Optional minimum score threshold + * @param maxResults Optional maximum number of results to return * @returns Promise resolving to search results */ - search(queryVector: number[], directoryPrefix?: string, minScore?: number): Promise + search( + queryVector: number[], + directoryPrefix?: string, + minScore?: number, + maxResults?: number, + ): Promise /** * Deletes points by file path diff --git a/src/services/code-index/manager.ts b/src/services/code-index/manager.ts index 735bcee6705a..7002283226f7 100644 --- a/src/services/code-index/manager.ts +++ b/src/services/code-index/manager.ts @@ -26,12 +26,17 @@ export class CodeIndexManager { private _cacheManager: CacheManager | undefined public static getInstance(context: vscode.ExtensionContext): CodeIndexManager | undefined { - const workspacePath = getWorkspacePath() // Assumes single 
workspace for now - - if (!workspacePath) { + // Use first workspace folder consistently + const workspaceFolders = vscode.workspace.workspaceFolders + if (!workspaceFolders || workspaceFolders.length === 0) { return undefined } + // Always use the first workspace folder for consistency across all indexing operations. + // This ensures that the same workspace context is used throughout the indexing pipeline, + // preventing path resolution errors in multi-workspace scenarios. + const workspacePath = workspaceFolders[0].uri.fsPath + if (!CodeIndexManager.instances.has(workspacePath)) { CodeIndexManager.instances.set(workspacePath, new CodeIndexManager(workspacePath, context)) } @@ -113,7 +118,14 @@ export class CodeIndexManager { return { requiresRestart } } - // 3. CacheManager Initialization + // 3. Check if workspace is available + const workspacePath = getWorkspacePath() + if (!workspacePath) { + this._stateManager.setSystemState("Standby", "No workspace folder open") + return { requiresRestart } + } + + // 4. CacheManager Initialization if (!this._cacheManager) { this._cacheManager = new CacheManager(this.context, this.workspacePath) await this._cacheManager.initialize() @@ -203,13 +215,16 @@ export class CodeIndexManager { /** * Private helper method to recreate services with current configuration. - * Used by both initialize() and handleExternalSettingsChange(). + * Used by both initialize() and handleSettingsChange(). */ private async _recreateServices(): Promise { // Stop watcher if it exists if (this._orchestrator) { this.stopWatcher() } + // Clear existing services to ensure clean state + this._orchestrator = undefined + this._searchService = undefined // (Re)Initialize service factory this._serviceFactory = new CodeIndexServiceFactory( @@ -219,7 +234,14 @@ export class CodeIndexManager { ) const ignoreInstance = ignore() - const ignorePath = path.join(getWorkspacePath(), ".gitignore") + const workspacePath = getWorkspacePath() + + if (!workspacePath) { + this._stateManager.setSystemState("Standby", "") + return + } + + const ignorePath = path.join(workspacePath, ".gitignore") try { const content = await fs.readFile(ignorePath, "utf8") ignoreInstance.add(content) @@ -236,6 +258,17 @@ export class CodeIndexManager { ignoreInstance, ) + // Validate embedder configuration before proceeding + const validationResult = await this._serviceFactory.validateEmbedder(embedder) + if (!validationResult.valid) { + // Set error state with clear message + this._stateManager.setSystemState( + "Error", + validationResult.error || "Embedder configuration validation failed", + ) + throw new Error(validationResult.error || "Invalid embedder configuration") + } + // (Re)Initialize orchestrator this._orchestrator = new CodeIndexOrchestrator( this._configManager!, @@ -254,28 +287,34 @@ export class CodeIndexManager { embedder, vectorStore, ) + + // Clear any error state after successful recreation + this._stateManager.setSystemState("Standby", "") } /** - * Handles external settings changes by reloading configuration. - * This method should be called when API provider settings are updated + * Handle code index settings changes. + * This method should be called when code index settings are updated * to ensure the CodeIndexConfigManager picks up the new configuration. * If the configuration changes require a restart, the service will be restarted. 
*/ - public async handleExternalSettingsChange(): Promise { + public async handleSettingsChange(): Promise { if (this._configManager) { const { requiresRestart } = await this._configManager.loadConfiguration() const isFeatureEnabled = this.isFeatureEnabled const isFeatureConfigured = this.isFeatureConfigured - // If configuration changes require a restart and the manager is initialized, restart the service - if (requiresRestart && isFeatureEnabled && isFeatureConfigured && this.isInitialized) { - // Recreate services with new configuration - await this._recreateServices() - - // Start indexing with new services - await this.startIndexing() + if (requiresRestart && isFeatureEnabled && isFeatureConfigured) { + try { + // Recreate services with new configuration + await this._recreateServices() + } catch (error) { + // Error state already set in _recreateServices + console.error("Failed to recreate services:", error) + // Re-throw the error so the caller knows validation failed + throw error + } } } } diff --git a/src/services/code-index/orchestrator.ts b/src/services/code-index/orchestrator.ts index 0d8151b5e121..948a86fae5d8 100644 --- a/src/services/code-index/orchestrator.ts +++ b/src/services/code-index/orchestrator.ts @@ -164,6 +164,31 @@ export class CodeIndexOrchestrator { } } + // Check for partial failures - if a significant portion of blocks failed + const failureRate = (cumulativeBlocksFoundSoFar - cumulativeBlocksIndexed) / cumulativeBlocksFoundSoFar + if (batchErrors.length > 0 && failureRate > 0.1) { + // More than 10% of blocks failed to index + const firstError = batchErrors[0] + throw new Error( + `Indexing partially failed: Only ${cumulativeBlocksIndexed} of ${cumulativeBlocksFoundSoFar} blocks were indexed. ${firstError.message}`, + ) + } + + // CRITICAL: If there were ANY batch errors and NO blocks were successfully indexed, + // this is a complete failure regardless of the failure rate calculation + if (batchErrors.length > 0 && cumulativeBlocksIndexed === 0) { + const firstError = batchErrors[0] + throw new Error(`Indexing failed completely: ${firstError.message}`) + } + + // Final sanity check: If we found blocks but indexed none and somehow no errors were reported, + // this is still a failure + if (cumulativeBlocksFoundSoFar > 0 && cumulativeBlocksIndexed === 0) { + throw new Error( + "Indexing failed: No code blocks were successfully indexed despite finding files to process. 
This indicates a critical embedder failure.", + ) + } + await this._startWatcher() this.stateManager.setSystemState("Indexed", "File watcher started.") diff --git a/src/services/code-index/processors/__tests__/parser.spec.ts b/src/services/code-index/processors/__tests__/parser.spec.ts index 76ce3ff46107..15b75e33819c 100644 --- a/src/services/code-index/processors/__tests__/parser.spec.ts +++ b/src/services/code-index/processors/__tests__/parser.spec.ts @@ -2,6 +2,7 @@ import { CodeParser, codeParser } from "../parser" import { loadRequiredLanguageParsers } from "../../../tree-sitter/languageParser" +import { parseMarkdown } from "../../../tree-sitter/markdownParser" import { readFile } from "fs/promises" import { Node } from "web-tree-sitter" @@ -23,6 +24,7 @@ vi.mock("fs/promises", () => ({ })) vi.mock("../../../tree-sitter/languageParser") +vi.mock("../../../tree-sitter/markdownParser") const mockLanguageParser = { js: { @@ -186,6 +188,50 @@ describe("CodeParser", () => { const result = await parser["_performFallbackChunking"]("test.js", shortContent, "hash", new Set()) expect(result).toEqual([]) }) + + it("should respect 50-character minimum threshold for all languages", async () => { + // Test content that is exactly 49 characters (should be filtered) + const shortContent = "function f() { return 1; } // Exactly 49 chars!!!" + expect(shortContent.length).toBe(49) + + // Test content that is exactly 50 characters (should be included) + const minContent = "function g() { return 42; } // Exactly 50 chars!!!" + expect(minContent.length).toBe(50) + + // Test content that is longer than 50 characters (should be included) + const longContent = "function calculate() { return 1 + 2 + 3; } // This is longer than 50 characters" + expect(longContent.length).toBeGreaterThan(50) + + // Mock the language parser to return captures for our test content + const mockCapture = (content: string, startLine: number = 0) => ({ + node: { + text: content, + startPosition: { row: startLine }, + endPosition: { row: startLine }, + type: "function_declaration", + childForFieldName: vi.fn().mockReturnValue(null), + children: [], + }, + name: "definition.function", + }) + + // Test short content (49 chars) - should be filtered out + mockLanguageParser.js.query.captures.mockReturnValue([mockCapture(shortContent)]) + const shortResult = await parser["parseContent"]("test.js", shortContent, "hash1") + expect(shortResult).toEqual([]) + + // Test minimum content (50 chars) - should be included + mockLanguageParser.js.query.captures.mockReturnValue([mockCapture(minContent)]) + const minResult = await parser["parseContent"]("test.js", minContent, "hash2") + expect(minResult.length).toBe(1) + expect(minResult[0].content).toBe(minContent) + + // Test longer content - should be included + mockLanguageParser.js.query.captures.mockReturnValue([mockCapture(longContent)]) + const longResult = await parser["parseContent"]("test.js", longContent, "hash3") + expect(longResult.length).toBe(1) + expect(longResult[0].content).toBe(longContent) + }) }) describe("_chunkLeafNodeByLines", () => { @@ -215,7 +261,7 @@ describe("CodeParser", () => { it("should handle oversized lines by splitting them", async () => { const longLine = "a".repeat(2000) const lines = ["normal", longLine, "normal"] - const result = await parser["_chunkTextByLines"](lines, "test.js", "hash", "test_type", new Set()) + const result = await parser["_chunkTextByLines"](lines, "test.js", "hash", "test_type", new Set(), 100) const segments = result.filter((r) => r.type 
=== "test_type_segment") expect(segments.length).toBeGreaterThan(1) @@ -225,7 +271,7 @@ describe("CodeParser", () => { const lines = Array(100) .fill("line with 10 chars") .map((_, i) => `${i}: line`) - const result = await parser["_chunkTextByLines"](lines, "test.js", "hash", "test_type", new Set()) + const result = await parser["_chunkTextByLines"](lines, "test.js", "hash", "test_type", new Set(), 100) result.forEach((chunk) => { expect(chunk.content.length).toBeGreaterThanOrEqual(100) @@ -242,4 +288,716 @@ describe("CodeParser", () => { expect(result2).toBeDefined() }) }) + + describe("Markdown Support", () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it("should generate unique segment hashes for each markdown block", async () => { + const markdownContent = `# Section One +This is a section with substantial content that meets the minimum character requirements. +It contains detailed information and multiple paragraphs to ensure proper indexing. +The content is comprehensive and provides valuable information for search functionality. + +## Section Two +Another section with different content but also meeting the minimum requirements. +This ensures we can test that different sections get different segment hashes. +Each section should have its own unique hash based on its content.` + + vi.mocked(parseMarkdown).mockReturnValue([ + { + node: { startPosition: { row: 0 }, endPosition: { row: 4 }, text: "Section One" }, + name: "name.definition.header.h1", + patternIndex: 0, + }, + { + node: { startPosition: { row: 0 }, endPosition: { row: 4 }, text: "Section One" }, + name: "definition.header.h1", + patternIndex: 0, + }, + { + node: { startPosition: { row: 5 }, endPosition: { row: 8 }, text: "Section Two" }, + name: "name.definition.header.h2", + patternIndex: 0, + }, + { + node: { startPosition: { row: 5 }, endPosition: { row: 8 }, text: "Section Two" }, + name: "definition.header.h2", + patternIndex: 0, + }, + ] as any) + + const result = await parser.parseFile("test.md", { content: markdownContent }) + + expect(result).toHaveLength(2) + + // Verify each block has unique segment hash + expect(result[0].segmentHash).toMatch(/^[a-f0-9]{64}$/) + expect(result[1].segmentHash).toMatch(/^[a-f0-9]{64}$/) + expect(result[0].segmentHash).not.toBe(result[1].segmentHash) + + // Verify file hash is consistent + expect(result[0].fileHash).toBe(result[1].fileHash) + expect(result[0].fileHash).toMatch(/^[a-f0-9]{64}$/) + }) + + it("should use fallback chunking for markdown files without headers", async () => { + const markdownContent = `This is a markdown file without any headers but with substantial content. +It contains multiple paragraphs and detailed information that should be indexed. +The content is long enough to meet the minimum character requirements for fallback chunking. +This ensures that even headerless markdown files can be properly indexed and searched. 
+Additional content to ensure we exceed the minimum block size requirements for proper indexing.` + + vi.mocked(parseMarkdown).mockReturnValue([]) + + const result = await parser.parseFile("test.md", { content: markdownContent }) + + expect(parseMarkdown).toHaveBeenCalledWith(markdownContent) + expect(result).toHaveLength(1) + expect(result[0].type).toBe("markdown_content") + expect(result[0].content).toBe(markdownContent) + expect(result[0].start_line).toBe(1) + + // Verify hash generation for fallback chunks + expect(result[0].segmentHash).toMatch(/^[a-f0-9]{64}$/) + expect(result[0].fileHash).toMatch(/^[a-f0-9]{64}$/) + }) + + it("should chunk large markdown files with no headers", async () => { + // Create a large markdown file without headers (2000+ chars) + const lines = [] + for (let i = 0; i < 80; i++) { + lines.push(`This is line ${i} with substantial content to ensure proper chunking behavior.`) + } + const largeMarkdownContent = lines.join("\n") // ~80 lines * ~78 chars = ~6240 chars + + vi.mocked(parseMarkdown).mockReturnValue([]) + + const result = await parser.parseFile("test.md", { content: largeMarkdownContent }) + + expect(parseMarkdown).toHaveBeenCalledWith(largeMarkdownContent) + // Should have multiple chunks due to size + expect(result.length).toBeGreaterThan(1) + // All chunks should be of type markdown_content + result.forEach((block) => { + expect(block.type).toBe("markdown_content") + expect(block.identifier).toBeNull() + // Each chunk should respect MAX_BLOCK_CHARS * MAX_CHARS_TOLERANCE_FACTOR + expect(block.content.length).toBeLessThanOrEqual(1150) + }) + // Verify chunks cover the entire content + const totalLines = result.reduce((acc, block) => { + return acc + (block.end_line - block.start_line + 1) + }, 0) + expect(totalLines).toBe(80) + }) + + it("should enforce MIN_BLOCK_CHARS for all markdown sections", async () => { + const markdownContent = `# Short +Small content. + +## Another Short +Also small. + +### Long Section +This section has substantial content that exceeds the minimum character requirements. +It includes multiple lines with detailed information to ensure proper indexing. 
+The content is comprehensive enough to be included in the search results.` + + vi.mocked(parseMarkdown).mockReturnValue([ + { + node: { startPosition: { row: 0 }, endPosition: { row: 1 }, text: "Short" }, + name: "name.definition.header.h1", + patternIndex: 0, + }, + { + node: { startPosition: { row: 0 }, endPosition: { row: 1 }, text: "Short" }, + name: "definition.header.h1", + patternIndex: 0, + }, + { + node: { startPosition: { row: 3 }, endPosition: { row: 4 }, text: "Another Short" }, + name: "name.definition.header.h2", + patternIndex: 0, + }, + { + node: { startPosition: { row: 3 }, endPosition: { row: 4 }, text: "Another Short" }, + name: "definition.header.h2", + patternIndex: 0, + }, + { + node: { startPosition: { row: 6 }, endPosition: { row: 9 }, text: "Long Section" }, + name: "name.definition.header.h3", + patternIndex: 0, + }, + { + node: { startPosition: { row: 6 }, endPosition: { row: 9 }, text: "Long Section" }, + name: "definition.header.h3", + patternIndex: 0, + }, + ] as any) + + const result = await parser.parseFile("test.md", { content: markdownContent }) + + // Only the long section should be included + expect(result).toHaveLength(1) + expect(result[0].identifier).toBe("Long Section") + expect(result[0].content.length).toBeGreaterThanOrEqual(100) // MIN_BLOCK_CHARS + }) + + it("should chunk large markdown sections and generate unique hashes for each chunk", async () => { + // Create content with multiple lines + const lines = [] + // Add header + lines.push("# Large Section Header") + // Add 50 lines of content, each ~30 chars = ~1500 chars total + for (let i = 0; i < 50; i++) { + lines.push(`This is line ${i} with some content.`) + } + + const markdownContent = lines.join("\n") + + // The mock should return sections that span the actual content + vi.mocked(parseMarkdown).mockReturnValue([ + { + node: { + startPosition: { row: 0 }, + endPosition: { row: 50 }, // Header + 50 lines of content + text: "Large Section Header", + }, + name: "name.definition.header.h1", + patternIndex: 0, + }, + { + node: { + startPosition: { row: 0 }, + endPosition: { row: 50 }, // Header + 50 lines of content + text: markdownContent, // Full section content + }, + name: "definition.header.h1", + patternIndex: 0, + }, + ] as any) + + const result = await parser.parseFile("test.md", { content: markdownContent }) + + // Large section should be chunked into multiple blocks + const h1Blocks = result.filter((r) => r.type === "markdown_header_h1") + expect(h1Blocks.length).toBeGreaterThan(1) + + // Each chunk should have a unique segment hash + const segmentHashes = h1Blocks.map((block) => block.segmentHash) + const uniqueHashes = new Set(segmentHashes) + expect(uniqueHashes.size).toBe(h1Blocks.length) + + // All chunks should preserve the header identifier + h1Blocks.forEach((block) => { + expect(block.identifier).toBe("Large Section Header") + // Each chunk should respect MAX_BLOCK_CHARS * MAX_CHARS_TOLERANCE_FACTOR + expect(block.content.length).toBeLessThanOrEqual(1150) + // Each chunk should have valid hashes + expect(block.segmentHash).toMatch(/^[a-f0-9]{64}$/) + expect(block.fileHash).toMatch(/^[a-f0-9]{64}$/) + }) + }) + + it("should handle markdown with very long single lines with chunking", async () => { + const veryLongLine = "a".repeat(2000) // Single line exceeding max chars + const markdownContent = `# Section with Long Line +Normal content here. 
+${veryLongLine} +More normal content.` + + vi.mocked(parseMarkdown).mockReturnValue([ + { + node: { startPosition: { row: 0 }, endPosition: { row: 3 }, text: "Section with Long Line" }, + name: "name.definition.header.h1", + patternIndex: 0, + }, + { + node: { startPosition: { row: 0 }, endPosition: { row: 3 }, text: markdownContent }, + name: "definition.header.h1", + patternIndex: 0, + }, + ] as any) + + const result = await parser.parseFile("test.md", { content: markdownContent }) + + // Should create multiple blocks due to chunking + expect(result.length).toBeGreaterThan(1) + // Should have segment blocks for the oversized line + const segmentBlocks = result.filter((r) => r.type === "markdown_header_h1_segment") + expect(segmentBlocks.length).toBeGreaterThan(0) + // All blocks should preserve the header identifier + result.forEach((block) => { + expect(block.identifier).toBe("Section with Long Line") + }) + }) + + it("should preserve header information when chunking large sections", async () => { + const largeContent = Array(100).fill("Line with substantial content to ensure proper handling.").join("\n") + const markdownContent = `### Deep Header Level 3 +${largeContent}` + + vi.mocked(parseMarkdown).mockReturnValue([ + { + node: { startPosition: { row: 0 }, endPosition: { row: 100 }, text: "Deep Header Level 3" }, + name: "name.definition.header.h3", + patternIndex: 0, + }, + { + node: { startPosition: { row: 0 }, endPosition: { row: 100 }, text: markdownContent }, + name: "definition.header.h3", + patternIndex: 0, + }, + ] as any) + + const result = await parser.parseFile("test.md", { content: markdownContent }) + + // Should have multiple blocks due to chunking + expect(result.length).toBeGreaterThan(1) + // All blocks should have the same type and identifier + result.forEach((block) => { + expect(block.type).toBe("markdown_header_h3") + expect(block.identifier).toBe("Deep Header Level 3") + }) + }) + + it("should apply chunking logic based on MAX_BLOCK_CHARS and re-balancing", async () => { + // Create content that will trigger re-balancing logic + // 60 lines * 30 chars = 1800 chars, which should trigger chunking + const lines = [] + for (let i = 0; i < 60; i++) { + lines.push(`Line ${i}: Some content here to test.`) // ~30 chars per line + } + const markdownContent = lines.join("\n") + + vi.mocked(parseMarkdown).mockReturnValue([]) + + const result = await parser.parseFile("test.md", { content: markdownContent }) + + // Should have multiple chunks due to size + expect(result.length).toBeGreaterThan(1) + + // Verify re-balancing: chunks should be roughly equal in size + const chunkSizes = result.map((block) => block.content.length) + const avgSize = chunkSizes.reduce((a, b) => a + b, 0) / chunkSizes.length + + chunkSizes.forEach((size) => { + // Each chunk should be within 30% of average size (re-balanced) + expect(Math.abs(size - avgSize) / avgSize).toBeLessThan(0.3) + // Each chunk should respect MIN_BLOCK_CHARS + expect(size).toBeGreaterThanOrEqual(50) + }) + + // Verify each chunk has unique segment hash + const segmentHashes = result.map((block) => block.segmentHash) + expect(new Set(segmentHashes).size).toBe(result.length) + }) + + it("should handle markdown content before the first header", async () => { + const preHeaderContent = `This is content before any headers that contains substantial information. +It has multiple lines and should be indexed because it meets the minimum size requirements. 
+This content contains important documentation that would be lost without proper handling. +We need to ensure that all content is captured, not just content within header sections. +This paragraph continues with more details to ensure we exceed the minimum block size.` + + const headerContent = `# First Header + +Content under the first header with enough text to be indexed properly. +This section contains multiple lines to ensure it meets the minimum character requirements. +We need at least 50 characters for a section to be included in the index. +This additional content ensures the header section will be processed correctly.` + + const markdownContent = `${preHeaderContent} + +${headerContent}` + + // Mock the parseMarkdown function to return headers + // The header section spans from line 6 to line 10 (5 lines total) + vi.mocked(parseMarkdown).mockReturnValue([ + { + node: { + startPosition: { row: 6 }, + endPosition: { row: 10 }, + text: "First Header", + }, + name: "name.definition.header.h1", + patternIndex: 0, + } as any, + { + node: { + startPosition: { row: 6 }, + endPosition: { row: 10 }, + text: "First Header", + }, + name: "definition.header.h1", + patternIndex: 0, + } as any, + ]) + + const result = await parser.parseFile("test.md", { content: markdownContent }) + + // With MIN_BLOCK_CHARS=50, content may be split into more blocks + expect(result.length).toBeGreaterThanOrEqual(2) + + // First block should be the content before the header + expect(result[0]).toMatchObject({ + file_path: "test.md", + type: "markdown_content", + start_line: 1, + end_line: 6, // Up to the header line + }) + expect(result[0].content).toContain("This is content before any headers") + + // Second block should be the header section + expect(result[1]).toMatchObject({ + file_path: "test.md", + identifier: "First Header", + type: "markdown_header_h1", + start_line: 7, + end_line: 11, + }) + }) + + it("should handle markdown content after the last header", async () => { + const markdownContent = `# Header + +Header content with enough text to meet the minimum requirements for proper indexing. +This header section needs to have at least 100 characters to be included in the results. +We're adding this extra line to ensure the header section meets the minimum size threshold. + +This is content after the last header that contains substantial documentation. +It has multiple lines and should be indexed because it's important information. +This content would be lost without proper handling of content outside header sections. +We're adding more content here to ensure we meet the minimum block size requirements. 
+This ensures that trailing content in markdown files is properly captured and indexed.` + + // Mock the parseMarkdown function to return headers + // The header section spans from line 0 to line 4 (5 lines) + vi.mocked(parseMarkdown).mockReturnValue([ + { + node: { + startPosition: { row: 0 }, + endPosition: { row: 4 }, + text: "Header", + }, + name: "name.definition.header.h1", + patternIndex: 0, + } as any, + { + node: { + startPosition: { row: 0 }, + endPosition: { row: 4 }, + text: "Header", + }, + name: "definition.header.h1", + patternIndex: 0, + } as any, + ]) + + const result = await parser.parseFile("test.md", { content: markdownContent }) + + // Should have exactly 2 blocks: header section and post-header content + expect(result.length).toBe(2) + + // First block should be the header section + expect(result[0]).toMatchObject({ + file_path: "test.md", + identifier: "Header", + type: "markdown_header_h1", + start_line: 1, + end_line: 5, + }) + + // Second block should be the content after the header + expect(result[1]).toMatchObject({ + file_path: "test.md", + type: "markdown_content", + start_line: 6, + }) + expect(result[1].content).toContain("This is content after the last header") + }) + + it("should handle very long paragraphs with chunking", async () => { + // Create a very long paragraph + const longParagraph = "This is a very long paragraph that contains substantial content. ".repeat(50) + const markdownContent = `# Introduction + +Some intro text. + +${longParagraph} + +## Conclusion + +Final thoughts that need to be long enough to meet the minimum character requirement. +This conclusion section contains multiple lines to ensure it exceeds 100 characters.` + + const lines = markdownContent.split("\n") + + vi.mocked(parseMarkdown).mockReturnValue([ + { + node: { + startPosition: { row: 0 }, + endPosition: { row: 0 }, + text: "Introduction", + }, + name: "name.definition.header.h1", + patternIndex: 0, + } as any, + { + node: { + startPosition: { row: 0 }, + endPosition: { row: 4 }, + text: "Introduction", + }, + name: "definition.header.h1", + patternIndex: 0, + } as any, + { + node: { + startPosition: { row: 6 }, + endPosition: { row: 6 }, + text: "Conclusion", + }, + name: "name.definition.header.h2", + patternIndex: 0, + } as any, + { + node: { + startPosition: { row: 6 }, + endPosition: { row: 9 }, + text: "Conclusion", + }, + name: "definition.header.h2", + patternIndex: 0, + } as any, + ]) + + const result = await parser.parseFile("test.md", { content: markdownContent }) + + // The introduction section should be chunked + const h1Blocks = result.filter( + (r) => r.type === "markdown_header_h1" || r.type === "markdown_header_h1_segment", + ) + expect(h1Blocks.length).toBeGreaterThan(1) + // All chunks should preserve the identifier + h1Blocks.forEach((block) => { + expect(block.identifier).toBe("Introduction") + }) + + // Conclusion should be a single block + const h2Blocks = result.filter((r) => r.type === "markdown_header_h2") + expect(h2Blocks.length).toBe(1) + }) + + it("should continue processing after encountering a very long line", async () => { + // Create a markdown file with a very long single line followed by more content + const veryLongLine = "a".repeat(5000) // 5000 characters - exceeds MAX_BLOCK_CHARS * MAX_CHARS_TOLERANCE_FACTOR + + // Create content that will be chunked + const markdownContent = `This is content before the very long line that should be properly indexed. +It contains multiple lines to ensure it meets the minimum character requirements. 
+We need enough content here to trigger the chunking behavior. + +${veryLongLine} + +This is content after the very long line that must also be properly indexed. +It's critical that this content is not ignored due to the oversized line bug. +We need to ensure all content is processed, not just content before the long line. +Adding more content to ensure we meet minimum block requirements.` + + // Mock parseMarkdown to return no headers (testing fallback chunking) + vi.mocked(parseMarkdown).mockReturnValue([]) + + const result = await parser.parseFile("test.md", { content: markdownContent }) + + // The content should be chunked due to the oversized line + expect(result.length).toBeGreaterThan(1) + + // Should have segment blocks for the oversized line + const segmentBlocks = result.filter((r) => r.type.includes("_segment")) + expect(segmentBlocks.length).toBeGreaterThan(0) + + // Verify that content after the long line is included + const lastBlock = result[result.length - 1] + expect(lastBlock.content).toContain("content after the very long line") + + // Verify all segments are from the oversized line + segmentBlocks.forEach((block) => { + expect(block.content).toMatch(/^a+$/) + }) + }) + + it("should handle multiple oversized lines in sequence", async () => { + // Test with multiple consecutive oversized lines + const longLine1 = "x".repeat(3000) + const longLine2 = "y".repeat(3000) + const longLine3 = "z".repeat(3000) + + const markdownContent = `# Test Multiple Long Lines +Normal content before the long lines. +${longLine1} +${longLine2} +${longLine3} +Normal content after the long lines that must be indexed. +This content verifies that processing continues after multiple oversized lines.` + + vi.mocked(parseMarkdown).mockReturnValue([ + { + node: { + startPosition: { row: 0 }, + endPosition: { row: 6 }, + text: "Test Multiple Long Lines", + }, + name: "name.definition.header.h1", + patternIndex: 0, + } as any, + { + node: { + startPosition: { row: 0 }, + endPosition: { row: 6 }, + text: "Test Multiple Long Lines", + }, + name: "definition.header.h1", + patternIndex: 0, + } as any, + ]) + + const result = await parser.parseFile("test.md", { content: markdownContent }) + + // Should have multiple segment blocks + const segmentBlocks = result.filter((r) => r.type === "markdown_header_h1_segment") + expect(segmentBlocks.length).toBeGreaterThan(6) // At least 3 segments per long line + + // Should also have regular blocks for the normal content + const regularBlocks = result.filter((r) => r.type === "markdown_header_h1" && !r.type.includes("_segment")) + expect(regularBlocks.length).toBeGreaterThan(0) + + // Verify the last block includes content after the long lines + const lastRegularBlock = regularBlocks[regularBlocks.length - 1] + expect(lastRegularBlock.content).toContain("Normal content after the long lines") + }) + }) + + describe("Edge case: Single oversized line in markdown", () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it("should properly chunk a markdown file with a single very long line", async () => { + const parser = new CodeParser() + const veryLongLine = "x".repeat(5000) // 5000 chars in a single line + + // Mock parseMarkdown to return empty array (no headers) + vi.mocked(parseMarkdown).mockReturnValue([]) + + const results = await parser["parseContent"]("test.md", veryLongLine, "test-hash") + + // Should create multiple segments + expect(results.length).toBeGreaterThan(1) + expect(results.length).toBe(5) // 5000 / 1000 = 5 segments + + // All chunks should be 
segments + const segments = results.filter((r) => r.type === "markdown_content_segment") + expect(segments.length).toBe(5) + + // Verify content is preserved + const reconstructed = results.map((r) => r.content).join("") + expect(reconstructed).toBe(veryLongLine) + + // Each segment (except possibly the last) should be MAX_BLOCK_CHARS (1000) + for (let i = 0; i < segments.length - 1; i++) { + expect(segments[i].content.length).toBe(1000) + } + + // Last segment should have the remainder + expect(segments[segments.length - 1].content.length).toBe(1000) + }) + + it("should handle markdown with headers followed by oversized lines", async () => { + const parser = new CodeParser() + const longLineA = "a".repeat(2000) + const longLineB = "b".repeat(3000) + const content = `# Header 1\n\n${longLineA}\n\n## Header 2\n\n${longLineB}` + + // Mock parseMarkdown to return headers + vi.mocked(parseMarkdown).mockReturnValue([ + { + node: { startPosition: { row: 0 }, endPosition: { row: 2 }, text: "Header 1" }, + name: "name.definition.header.h1", + patternIndex: 0, + }, + { + node: { startPosition: { row: 0 }, endPosition: { row: 2 }, text: "Header 1" }, + name: "definition.header.h1", + patternIndex: 0, + }, + { + node: { startPosition: { row: 4 }, endPosition: { row: 6 }, text: "Header 2" }, + name: "name.definition.header.h2", + patternIndex: 0, + }, + { + node: { startPosition: { row: 4 }, endPosition: { row: 6 }, text: "Header 2" }, + name: "definition.header.h2", + patternIndex: 0, + }, + ] as any) + + const results = await parser["parseContent"]("test.md", content, "test-hash") + + // Should create multiple chunks + expect(results.length).toBeGreaterThan(2) + + // Should have both header chunks and segments + const headers = results.filter((r) => r.type.startsWith("markdown_header")) + const segments = results.filter((r) => r.type.includes("_segment")) + + expect(headers.length).toBeGreaterThan(0) + expect(segments.length).toBeGreaterThan(0) + + // Verify segments were created for oversized lines + // 2000 chars = 2 segments, 3000 chars = 3 segments + expect(segments.length).toBeGreaterThanOrEqual(5) + }) + + it("should not chunk markdown files with lines under the threshold", async () => { + const parser = new CodeParser() + const normalContent = "This is a normal line.\n".repeat(50) // Multiple normal lines + const totalLength = normalContent.length + + // Mock parseMarkdown to return empty array (no headers) + vi.mocked(parseMarkdown).mockReturnValue([]) + + const results = await parser["parseContent"]("test.md", normalContent, "test-hash") + + // Since total content is 1150 chars (23 * 50), it's just over the threshold + // But no individual line is oversized, so it depends on total length + if (totalLength > 1150) { + // Content exceeds threshold, should be chunked + expect(results.length).toBeGreaterThan(1) + } else { + // Content is under threshold, should be single chunk + expect(results.length).toBe(1) + expect(results[0].type).toBe("markdown_content") + } + }) + + it("should return empty array for markdown content below MIN_BLOCK_CHARS threshold", async () => { + const parser = new CodeParser() + // Create content that is below the new MIN_BLOCK_CHARS threshold of 50 + const smallContent = "Small markdown.\nJust a bit.\nTiny." 
+ + // Mock parseMarkdown to return empty array (no headers) + vi.mocked(parseMarkdown).mockReturnValue([]) + + const results = await parser["parseContent"]("test.md", smallContent, "test-hash") + + // Should return empty array since content is below MIN_BLOCK_CHARS (50) + expect(results.length).toBe(0) + expect(smallContent.length).toBeLessThan(50) // Verify our test assumption + }) + }) }) diff --git a/src/services/code-index/processors/__tests__/scanner.spec.ts b/src/services/code-index/processors/__tests__/scanner.spec.ts index b3debb88a49a..2de7ebf23778 100644 --- a/src/services/code-index/processors/__tests__/scanner.spec.ts +++ b/src/services/code-index/processors/__tests__/scanner.spec.ts @@ -241,5 +241,141 @@ describe("DirectoryScanner", () => { // Verify the stats expect(mockCodeParser.parseFile).toHaveBeenCalledTimes(2) }) + + it("should process markdown files alongside code files", async () => { + const { listFiles } = await import("../../../glob/list-files") + vi.mocked(listFiles).mockResolvedValue([["test/README.md", "test/app.js", "docs/guide.markdown"], false]) + + const mockMarkdownBlocks: any[] = [ + { + file_path: "test/README.md", + content: "# Introduction\nThis is a comprehensive guide...", + start_line: 1, + end_line: 10, + identifier: "Introduction", + type: "markdown_header_h1", + fileHash: "md-hash", + segmentHash: "md-segment-hash", + }, + ] + + const mockJsBlocks: any[] = [ + { + file_path: "test/app.js", + content: "function main() { return 'hello'; }", + start_line: 1, + end_line: 3, + identifier: "main", + type: "function", + fileHash: "js-hash", + segmentHash: "js-segment-hash", + }, + ] + + const mockMarkdownBlocks2: any[] = [ + { + file_path: "docs/guide.markdown", + content: "## Getting Started\nFollow these steps...", + start_line: 1, + end_line: 8, + identifier: "Getting Started", + type: "markdown_header_h2", + fileHash: "markdown-hash", + segmentHash: "markdown-segment-hash", + }, + ] + + // Mock parseFile to return different blocks based on file extension + ;(mockCodeParser.parseFile as any).mockImplementation((filePath: string) => { + if (filePath.endsWith(".md")) { + return mockMarkdownBlocks + } else if (filePath.endsWith(".markdown")) { + return mockMarkdownBlocks2 + } else if (filePath.endsWith(".js")) { + return mockJsBlocks + } + return [] + }) + + const result = await scanner.scanDirectory("/test") + + // Verify all files were processed + expect(mockCodeParser.parseFile).toHaveBeenCalledTimes(3) + expect(mockCodeParser.parseFile).toHaveBeenCalledWith("test/README.md", expect.any(Object)) + expect(mockCodeParser.parseFile).toHaveBeenCalledWith("test/app.js", expect.any(Object)) + expect(mockCodeParser.parseFile).toHaveBeenCalledWith("docs/guide.markdown", expect.any(Object)) + + // Verify code blocks include both markdown and code content + expect(result.codeBlocks).toHaveLength(3) + expect(result.codeBlocks).toEqual( + expect.arrayContaining([ + expect.objectContaining({ type: "markdown_header_h1" }), + expect.objectContaining({ type: "function" }), + expect.objectContaining({ type: "markdown_header_h2" }), + ]), + ) + + expect(result.stats.processed).toBe(3) + }) + + it("should generate unique point IDs for each block from the same file", async () => { + const { listFiles } = await import("../../../glob/list-files") + vi.mocked(listFiles).mockResolvedValue([["test/large-doc.md"], false]) + + // Mock multiple blocks from the same file with different segmentHash values + const mockBlocks: any[] = [ + { + file_path: "test/large-doc.md", + 
content: "# Introduction\nThis is the intro section...", + start_line: 1, + end_line: 10, + identifier: "Introduction", + type: "markdown_header_h1", + fileHash: "same-file-hash", + segmentHash: "unique-segment-hash-1", + }, + { + file_path: "test/large-doc.md", + content: "## Getting Started\nHere's how to begin...", + start_line: 11, + end_line: 20, + identifier: "Getting Started", + type: "markdown_header_h2", + fileHash: "same-file-hash", + segmentHash: "unique-segment-hash-2", + }, + { + file_path: "test/large-doc.md", + content: "## Advanced Topics\nFor advanced users...", + start_line: 21, + end_line: 30, + identifier: "Advanced Topics", + type: "markdown_header_h2", + fileHash: "same-file-hash", + segmentHash: "unique-segment-hash-3", + }, + ] + + ;(mockCodeParser.parseFile as any).mockResolvedValue(mockBlocks) + + await scanner.scanDirectory("/test") + + // Verify that upsertPoints was called with unique IDs for each block + expect(mockVectorStore.upsertPoints).toHaveBeenCalledTimes(1) + const upsertCall = mockVectorStore.upsertPoints.mock.calls[0] + const points = upsertCall[0] + + // Extract the IDs from the points + const pointIds = points.map((point: any) => point.id) + + // Verify all IDs are unique + expect(pointIds).toHaveLength(3) + expect(new Set(pointIds).size).toBe(3) // All IDs should be unique + + // Verify that each point has the correct payload + expect(points[0].payload.segmentHash).toBe("unique-segment-hash-1") + expect(points[1].payload.segmentHash).toBe("unique-segment-hash-2") + expect(points[2].payload.segmentHash).toBe("unique-segment-hash-3") + }) }) }) diff --git a/src/services/code-index/processors/file-watcher.ts b/src/services/code-index/processors/file-watcher.ts index 9a1fc3c9af45..a12752bea6c1 100644 --- a/src/services/code-index/processors/file-watcher.ts +++ b/src/services/code-index/processors/file-watcher.ts @@ -464,7 +464,7 @@ export class FileWatcher implements IFileWatcher { } // Check if file should be ignored - const relativeFilePath = generateRelativeFilePath(filePath) + const relativeFilePath = generateRelativeFilePath(filePath, this.workspacePath) if ( !this.ignoreController.validateAccess(filePath) || (this.ignoreInstance && this.ignoreInstance.ignores(relativeFilePath)) @@ -512,7 +512,7 @@ export class FileWatcher implements IFileWatcher { const { embeddings } = await this.embedder.createEmbeddings(texts) pointsToUpsert = blocks.map((block, index) => { - const normalizedAbsolutePath = generateNormalizedAbsolutePath(block.file_path) + const normalizedAbsolutePath = generateNormalizedAbsolutePath(block.file_path, this.workspacePath) const stableName = `${normalizedAbsolutePath}:${block.start_line}` const pointId = uuidv5(stableName, QDRANT_CODE_BLOCK_NAMESPACE) @@ -520,7 +520,7 @@ export class FileWatcher implements IFileWatcher { id: pointId, vector: embeddings[index], payload: { - filePath: generateRelativeFilePath(normalizedAbsolutePath), + filePath: generateRelativeFilePath(normalizedAbsolutePath, this.workspacePath), codeChunk: block.content, startLine: block.start_line, endLine: block.end_line, diff --git a/src/services/code-index/processors/parser.ts b/src/services/code-index/processors/parser.ts index e911b20386b3..9632c0f60f67 100644 --- a/src/services/code-index/processors/parser.ts +++ b/src/services/code-index/processors/parser.ts @@ -3,6 +3,7 @@ import { createHash } from "crypto" import * as path from "path" import { Node } from "web-tree-sitter" import { LanguageParser, loadRequiredLanguageParsers } from 
"../../tree-sitter/languageParser" +import { parseMarkdown } from "../../tree-sitter/markdownParser" import { ICodeParser, CodeBlock } from "../interfaces" import { scannerExtensions } from "../shared/supported-extensions" import { MAX_BLOCK_CHARS, MIN_BLOCK_CHARS, MIN_CHUNK_REMAINDER_CHARS, MAX_CHARS_TOLERANCE_FACTOR } from "../constants" @@ -13,8 +14,8 @@ import { MAX_BLOCK_CHARS, MIN_BLOCK_CHARS, MIN_CHUNK_REMAINDER_CHARS, MAX_CHARS_ export class CodeParser implements ICodeParser { private loadedParsers: LanguageParser = {} private pendingLoads: Map> = new Map() - // Markdown files are excluded because the current parser logic cannot effectively handle - // potentially large Markdown sections without a tree-sitter-like child node structure for chunking + // Markdown files are now supported using the custom markdown parser + // which extracts headers and sections for semantic indexing /** * Parses a code file into code blocks @@ -87,6 +88,11 @@ export class CodeParser implements ICodeParser { const ext = path.extname(filePath).slice(1).toLowerCase() const seenSegmentHashes = new Set() + // Handle markdown files specially + if (ext === "md" || ext === "markdown") { + return this.parseMarkdownContent(filePath, content, fileHash, seenSegmentHashes) + } + // Check if we already have the parser loaded if (!this.loadedParsers[ext]) { const pendingLoad = this.pendingLoads.get(ext) @@ -155,8 +161,7 @@ export class CodeParser implements ICodeParser { // If it has children, process them instead queue.push(...currentNode.children.filter((child) => child !== null)) } else { - // If it's a leaf node, chunk it (passing MIN_BLOCK_CHARS as per Task 1 Step 5) - // Note: _chunkLeafNodeByLines logic might need further adjustment later + // If it's a leaf node, chunk it const chunkedBlocks = this._chunkLeafNodeByLines( currentNode, filePath, @@ -175,8 +180,9 @@ export class CodeParser implements ICodeParser { const start_line = currentNode.startPosition.row + 1 const end_line = currentNode.endPosition.row + 1 const content = currentNode.text + const contentPreview = content.slice(0, 100) const segmentHash = createHash("sha256") - .update(`${filePath}-${start_line}-${end_line}-${content}`) + .update(`${filePath}-${start_line}-${end_line}-${content.length}-${contentPreview}`) .digest("hex") if (!seenSegmentHashes.has(segmentHash)) { @@ -194,7 +200,7 @@ export class CodeParser implements ICodeParser { } } } - // Nodes smaller than MIN_BLOCK_CHARS are ignored + // Nodes smaller than minBlockChars are ignored } return results @@ -207,7 +213,6 @@ export class CodeParser implements ICodeParser { lines: string[], filePath: string, fileHash: string, - chunkType: string, seenSegmentHashes: Set, baseStartLine: number = 1, // 1-based start line of the *first* line in the `lines` array @@ -223,8 +228,9 @@ export class CodeParser implements ICodeParser { const chunkContent = currentChunkLines.join("\n") const startLine = baseStartLine + chunkStartLineIndex const endLine = baseStartLine + endLineIndex + const contentPreview = chunkContent.slice(0, 100) const segmentHash = createHash("sha256") - .update(`${filePath}-${startLine}-${endLine}-${chunkContent}`) + .update(`${filePath}-${startLine}-${endLine}-${chunkContent.length}-${contentPreview}`) .digest("hex") if (!seenSegmentHashes.has(segmentHash)) { @@ -247,8 +253,11 @@ export class CodeParser implements ICodeParser { } const createSegmentBlock = (segment: string, originalLineNumber: number, startCharIndex: number) => { + const segmentPreview = segment.slice(0, 100) 
const segmentHash = createHash("sha256") - .update(`${filePath}-${originalLineNumber}-${originalLineNumber}-${startCharIndex}-${segment}`) + .update( + `${filePath}-${originalLineNumber}-${originalLineNumber}-${startCharIndex}-${segment.length}-${segmentPreview}`, + ) .digest("hex") if (!seenSegmentHashes.has(segmentHash)) { @@ -287,6 +296,8 @@ export class CodeParser implements ICodeParser { createSegmentBlock(segment, originalLineNumber, currentSegmentStartChar) currentSegmentStartChar += MAX_BLOCK_CHARS } + // Update chunkStartLineIndex to continue processing from the next line + chunkStartLineIndex = i + 1 continue } @@ -370,6 +381,150 @@ export class CodeParser implements ICodeParser { baseStartLine, ) } + + /** + * Helper method to process markdown content sections with consistent chunking logic + */ + private processMarkdownSection( + lines: string[], + filePath: string, + fileHash: string, + type: string, + seenSegmentHashes: Set, + startLine: number, + identifier: string | null = null, + ): CodeBlock[] { + const content = lines.join("\n") + + if (content.trim().length < MIN_BLOCK_CHARS) { + return [] + } + + // Check if content needs chunking (either total size or individual line size) + const needsChunking = + content.length > MAX_BLOCK_CHARS * MAX_CHARS_TOLERANCE_FACTOR || + lines.some((line) => line.length > MAX_BLOCK_CHARS * MAX_CHARS_TOLERANCE_FACTOR) + + if (needsChunking) { + // Apply chunking for large content or oversized lines + const chunks = this._chunkTextByLines(lines, filePath, fileHash, type, seenSegmentHashes, startLine) + // Preserve identifier in all chunks if provided + if (identifier) { + chunks.forEach((chunk) => { + chunk.identifier = identifier + }) + } + return chunks + } + + // Create a single block for normal-sized content with no oversized lines + const endLine = startLine + lines.length - 1 + const contentPreview = content.slice(0, 100) + const segmentHash = createHash("sha256") + .update(`${filePath}-${startLine}-${endLine}-${content.length}-${contentPreview}`) + .digest("hex") + + if (!seenSegmentHashes.has(segmentHash)) { + seenSegmentHashes.add(segmentHash) + return [ + { + file_path: filePath, + identifier, + type, + start_line: startLine, + end_line: endLine, + content, + segmentHash, + fileHash, + }, + ] + } + + return [] + } + + private parseMarkdownContent( + filePath: string, + content: string, + fileHash: string, + seenSegmentHashes: Set, + ): CodeBlock[] { + const lines = content.split("\n") + const markdownCaptures = parseMarkdown(content) || [] + + if (markdownCaptures.length === 0) { + // No headers found, process entire content + return this.processMarkdownSection(lines, filePath, fileHash, "markdown_content", seenSegmentHashes, 1) + } + + const results: CodeBlock[] = [] + let lastProcessedLine = 0 + + // Process content before the first header + if (markdownCaptures.length > 0) { + const firstHeaderLine = markdownCaptures[0].node.startPosition.row + if (firstHeaderLine > 0) { + const preHeaderLines = lines.slice(0, firstHeaderLine) + const preHeaderBlocks = this.processMarkdownSection( + preHeaderLines, + filePath, + fileHash, + "markdown_content", + seenSegmentHashes, + 1, + ) + results.push(...preHeaderBlocks) + } + } + + // Process markdown captures (headers and sections) + for (let i = 0; i < markdownCaptures.length; i += 2) { + const nameCapture = markdownCaptures[i] + // Ensure we don't go out of bounds when accessing the next capture + if (i + 1 >= markdownCaptures.length) break + const definitionCapture = markdownCaptures[i + 
1] + + if (!definitionCapture) continue + + const startLine = definitionCapture.node.startPosition.row + 1 + const endLine = definitionCapture.node.endPosition.row + 1 + const sectionLines = lines.slice(startLine - 1, endLine) + + // Extract header level for type classification + const headerMatch = nameCapture.name.match(/\.h(\d)$/) + const headerLevel = headerMatch ? parseInt(headerMatch[1]) : 1 + const headerText = nameCapture.node.text + + const sectionBlocks = this.processMarkdownSection( + sectionLines, + filePath, + fileHash, + `markdown_header_h${headerLevel}`, + seenSegmentHashes, + startLine, + headerText, + ) + results.push(...sectionBlocks) + + lastProcessedLine = endLine + } + + // Process any remaining content after the last header section + if (lastProcessedLine < lines.length) { + const remainingLines = lines.slice(lastProcessedLine) + const remainingBlocks = this.processMarkdownSection( + remainingLines, + filePath, + fileHash, + "markdown_content", + seenSegmentHashes, + lastProcessedLine + 1, + ) + results.push(...remainingBlocks) + } + + return results + } } // Export a singleton instance for convenience diff --git a/src/services/code-index/processors/scanner.ts b/src/services/code-index/processors/scanner.ts index 40475636bc09..7b79a1c87d1a 100644 --- a/src/services/code-index/processors/scanner.ts +++ b/src/services/code-index/processors/scanner.ts @@ -4,6 +4,7 @@ import { RooIgnoreController } from "../../../core/ignore/RooIgnoreController" import { stat } from "fs/promises" import * as path from "path" import { generateNormalizedAbsolutePath, generateRelativeFilePath } from "../shared/get-relative-path" +import { getWorkspacePathForContext } from "../../../utils/path" import { scannerExtensions } from "../shared/supported-extensions" import * as vscode from "vscode" import { CodeBlock, ICodeParser, IEmbedder, IVectorStore, IDirectoryScanner } from "../interfaces" @@ -49,6 +50,9 @@ export class DirectoryScanner implements IDirectoryScanner { onFileParsed?: (fileBlockCount: number) => void, ): Promise<{ codeBlocks: CodeBlock[]; stats: { processed: number; skipped: number }; totalBlockCount: number }> { const directoryPath = directory + // Capture workspace context at scan start + const scanWorkspace = getWorkspacePathForContext(directoryPath) + // Get all files recursively (handles .gitignore automatically) const [allPaths, _] = await listFiles(directoryPath, true, MAX_LIST_FILES_LIMIT) @@ -66,7 +70,7 @@ export class DirectoryScanner implements IDirectoryScanner { // Filter by supported extensions, ignore patterns, and excluded directories const supportedPaths = allowedPaths.filter((filePath) => { const ext = path.extname(filePath).toLowerCase() - const relativeFilePath = generateRelativeFilePath(filePath) + const relativeFilePath = generateRelativeFilePath(filePath, scanWorkspace) // Check if file is in an ignored directory using the shared helper if (isPathInIgnoredDirectory(filePath)) { @@ -169,6 +173,7 @@ export class DirectoryScanner implements IDirectoryScanner { batchBlocks, batchTexts, batchFileInfos, + scanWorkspace, onError, onBlocksIndexed, ), @@ -185,12 +190,15 @@ export class DirectoryScanner implements IDirectoryScanner { await this.cacheManager.updateHash(filePath, currentFileHash) } } catch (error) { - console.error(`Error processing file ${filePath}:`, error) + console.error(`Error processing file ${filePath} in workspace ${scanWorkspace}:`, error) if (onError) { onError( error instanceof Error - ? 
error - : new Error(t("embeddings:scanner.unknownErrorProcessingFile", { filePath })), + ? new Error(`${error.message} (Workspace: ${scanWorkspace}, File: ${filePath})`) + : new Error( + t("embeddings:scanner.unknownErrorProcessingFile", { filePath }) + + ` (Workspace: ${scanWorkspace})`, + ), ) } } @@ -214,7 +222,7 @@ export class DirectoryScanner implements IDirectoryScanner { // Queue final batch processing const batchPromise = batchLimiter(() => - this.processBatch(batchBlocks, batchTexts, batchFileInfos, onError, onBlocksIndexed), + this.processBatch(batchBlocks, batchTexts, batchFileInfos, scanWorkspace, onError, onBlocksIndexed), ) activeBatchPromises.push(batchPromise) } finally { @@ -235,15 +243,20 @@ export class DirectoryScanner implements IDirectoryScanner { await this.qdrantClient.deletePointsByFilePath(cachedFilePath) await this.cacheManager.deleteHash(cachedFilePath) } catch (error) { - console.error(`[DirectoryScanner] Failed to delete points for ${cachedFilePath}:`, error) + console.error( + `[DirectoryScanner] Failed to delete points for ${cachedFilePath} in workspace ${scanWorkspace}:`, + error, + ) if (onError) { onError( error instanceof Error - ? error + ? new Error( + `${error.message} (Workspace: ${scanWorkspace}, File: ${cachedFilePath})`, + ) : new Error( t("embeddings:scanner.unknownErrorDeletingPoints", { filePath: cachedFilePath, - }), + }) + ` (Workspace: ${scanWorkspace})`, ), ) } @@ -267,6 +280,7 @@ export class DirectoryScanner implements IDirectoryScanner { batchBlocks: CodeBlock[], batchTexts: string[], batchFileInfos: { filePath: string; fileHash: string; isNew: boolean }[], + scanWorkspace: string, onError?: (error: Error) => void, onBlocksIndexed?: (indexedCount: number) => void, ): Promise { @@ -292,11 +306,14 @@ export class DirectoryScanner implements IDirectoryScanner { await this.qdrantClient.deletePointsByMultipleFilePaths(uniqueFilePaths) } catch (deleteError) { console.error( - `[DirectoryScanner] Failed to delete points for ${uniqueFilePaths.length} files before upsert:`, + `[DirectoryScanner] Failed to delete points for ${uniqueFilePaths.length} files before upsert in workspace ${scanWorkspace}:`, deleteError, ) - // Re-throw the error to stop processing this batch attempt - throw deleteError + // Re-throw the error with workspace context + throw new Error( + `Failed to delete points for ${uniqueFilePaths.length} files. Workspace: ${scanWorkspace}. ${deleteError instanceof Error ? 
deleteError.message : String(deleteError)}`, + { cause: deleteError }, + ) } } // --- End Deletion Step --- @@ -306,19 +323,20 @@ export class DirectoryScanner implements IDirectoryScanner { // Prepare points for Qdrant const points = batchBlocks.map((block, index) => { - const normalizedAbsolutePath = generateNormalizedAbsolutePath(block.file_path) + const normalizedAbsolutePath = generateNormalizedAbsolutePath(block.file_path, scanWorkspace) - const stableName = `${normalizedAbsolutePath}:${block.start_line}` - const pointId = uuidv5(stableName, QDRANT_CODE_BLOCK_NAMESPACE) + // Use segmentHash for unique ID generation to handle multiple segments from same line + const pointId = uuidv5(block.segmentHash, QDRANT_CODE_BLOCK_NAMESPACE) return { id: pointId, vector: embeddings[index], payload: { - filePath: generateRelativeFilePath(normalizedAbsolutePath), + filePath: generateRelativeFilePath(normalizedAbsolutePath, scanWorkspace), codeChunk: block.content, startLine: block.start_line, endLine: block.end_line, + segmentHash: block.segmentHash, }, } }) @@ -334,7 +352,10 @@ export class DirectoryScanner implements IDirectoryScanner { success = true } catch (error) { lastError = error as Error - console.error(`[DirectoryScanner] Error processing batch (attempt ${attempts}):`, error) + console.error( + `[DirectoryScanner] Error processing batch (attempt ${attempts}) in workspace ${scanWorkspace}:`, + error, + ) if (attempts < MAX_BATCH_RETRIES) { const delay = INITIAL_RETRY_DELAY_MS * Math.pow(2, attempts - 1) diff --git a/src/services/code-index/search-service.ts b/src/services/code-index/search-service.ts index acf6afbf7e6f..6370ec409419 100644 --- a/src/services/code-index/search-service.ts +++ b/src/services/code-index/search-service.ts @@ -30,6 +30,7 @@ export class CodeIndexSearchService { } const minScore = this.configManager.currentSearchMinScore + const maxResults = this.configManager.currentSearchMaxResults const currentState = this.stateManager.getCurrentStatus().systemStatus if (currentState !== "Indexed" && currentState !== "Indexing") { @@ -52,7 +53,7 @@ export class CodeIndexSearchService { } // Perform search - const results = await this.vectorStore.search(vector, normalizedPrefix, minScore) + const results = await this.vectorStore.search(vector, normalizedPrefix, minScore, maxResults) return results } catch (error) { console.error("[CodeIndexSearchService] Error during search:", error) diff --git a/src/services/code-index/service-factory.ts b/src/services/code-index/service-factory.ts index afd083b20476..a9c84481a630 100644 --- a/src/services/code-index/service-factory.ts +++ b/src/services/code-index/service-factory.ts @@ -2,6 +2,7 @@ import * as vscode from "vscode" import { OpenAiEmbedder } from "./embedders/openai" import { CodeIndexOllamaEmbedder } from "./embedders/ollama" import { OpenAICompatibleEmbedder } from "./embedders/openai-compatible" +import { GeminiEmbedder } from "./embedders/gemini" import { EmbedderProvider, getDefaultModelId, getModelDimension } from "../../shared/embeddingModels" import { QdrantVectorStore } from "./vector-store/qdrant-client" import { codeParser, DirectoryScanner, FileWatcher } from "./processors" @@ -29,7 +30,9 @@ export class CodeIndexServiceFactory { const provider = config.embedderProvider as EmbedderProvider if (provider === "openai") { - if (!config.openAiOptions?.openAiNativeApiKey) { + const apiKey = config.openAiOptions?.openAiNativeApiKey + + if (!apiKey) { throw new Error("OpenAI configuration missing for embedder creation") } 
return new OpenAiEmbedder({ @@ -53,11 +56,33 @@ export class CodeIndexServiceFactory { config.openAiCompatibleOptions.apiKey, config.modelId, ) + } else if (provider === "gemini") { + if (!config.geminiOptions?.apiKey) { + throw new Error("Gemini configuration missing for embedder creation") + } + return new GeminiEmbedder(config.geminiOptions.apiKey) } throw new Error(`Invalid embedder type configured: ${config.embedderProvider}`) } + /** + * Validates an embedder instance to ensure it's properly configured. + * @param embedder The embedder instance to validate + * @returns Promise resolving to validation result + */ + public async validateEmbedder(embedder: IEmbedder): Promise<{ valid: boolean; error?: string }> { + try { + return await embedder.validateConfiguration() + } catch (error) { + // If validation throws an exception, preserve the original error message + return { + valid: false, + error: error instanceof Error ? error.message : "embeddings:validation.configurationError", + } + } + } + /** * Creates a vector store instance using the current configuration. */ @@ -78,6 +103,9 @@ export class CodeIndexServiceFactory { // Fallback if not provided or invalid in openAiCompatibleOptions vectorSize = getModelDimension(provider, modelId) } + } else if (provider === "gemini") { + // Gemini's text-embedding-004 has a fixed dimension of 768 + vectorSize = 768 } else { vectorSize = getModelDimension(provider, modelId) } diff --git a/src/services/code-index/shared/__tests__/get-relative-path.spec.ts b/src/services/code-index/shared/__tests__/get-relative-path.spec.ts new file mode 100644 index 000000000000..5226fbf6e6c8 --- /dev/null +++ b/src/services/code-index/shared/__tests__/get-relative-path.spec.ts @@ -0,0 +1,64 @@ +import { describe, it, expect } from "vitest" +import path from "path" +import { generateNormalizedAbsolutePath, generateRelativeFilePath } from "../get-relative-path" + +describe("get-relative-path", () => { + describe("generateNormalizedAbsolutePath", () => { + it("should use provided workspace root", () => { + const filePath = "src/file.ts" + const workspaceRoot = path.join(path.sep, "custom", "workspace") + const result = generateNormalizedAbsolutePath(filePath, workspaceRoot) + // On Windows, path.resolve adds the drive letter, so we need to use path.resolve for the expected value + expect(result).toBe(path.resolve(workspaceRoot, filePath)) + }) + + it("should handle absolute paths", () => { + const filePath = path.join(path.sep, "absolute", "path", "file.ts") + const workspaceRoot = path.join(path.sep, "custom", "workspace") + const result = generateNormalizedAbsolutePath(filePath, workspaceRoot) + // When an absolute path is provided, it should be resolved to include drive letter on Windows + expect(result).toBe(path.resolve(filePath)) + }) + + it("should normalize paths with . and .. 
segments", () => { + const filePath = "./src/../src/file.ts" + const workspaceRoot = path.join(path.sep, "custom", "workspace") + const result = generateNormalizedAbsolutePath(filePath, workspaceRoot) + // Use path.resolve to get the expected normalized absolute path + expect(result).toBe(path.resolve(workspaceRoot, "src", "file.ts")) + }) + }) + + describe("generateRelativeFilePath", () => { + it("should use provided workspace root", () => { + const workspaceRoot = path.join(path.sep, "custom", "workspace") + const absolutePath = path.join(workspaceRoot, "src", "file.ts") + const result = generateRelativeFilePath(absolutePath, workspaceRoot) + expect(result).toBe(path.join("src", "file.ts")) + }) + + it("should handle paths outside workspace", () => { + const absolutePath = path.join(path.sep, "outside", "workspace", "file.ts") + const workspaceRoot = path.join(path.sep, "custom", "workspace") + const result = generateRelativeFilePath(absolutePath, workspaceRoot) + // The result will have .. segments to navigate outside + expect(result).toContain("..") + }) + + it("should handle same path as workspace", () => { + const workspaceRoot = path.join(path.sep, "custom", "workspace") + const absolutePath = workspaceRoot + const result = generateRelativeFilePath(absolutePath, workspaceRoot) + expect(result).toBe(".") + }) + + it("should handle multi-workspace scenarios", () => { + // Simulate the error scenario from the issue + const workspaceRoot = path.join(path.sep, "Users", "test", "project") + const absolutePath = path.join(path.sep, "Users", "test", "admin", ".prettierrc.json") + const result = generateRelativeFilePath(absolutePath, workspaceRoot) + // Should generate a valid relative path, not throw an error + expect(result).toBe(path.join("..", "admin", ".prettierrc.json")) + }) + }) +}) diff --git a/src/services/code-index/shared/get-relative-path.ts b/src/services/code-index/shared/get-relative-path.ts index 564afee955f6..642a20720869 100644 --- a/src/services/code-index/shared/get-relative-path.ts +++ b/src/services/code-index/shared/get-relative-path.ts @@ -1,16 +1,14 @@ import path from "path" -import { getWorkspacePath } from "../../../utils/path" /** * Generates a normalized absolute path from a given file path and workspace root. * Handles path resolution and normalization to ensure consistent absolute paths. * * @param filePath - The file path to normalize (can be relative or absolute) - * @param workspaceRoot - The root directory of the workspace + * @param workspaceRoot - The root directory of the workspace (required) * @returns The normalized absolute path */ -export function generateNormalizedAbsolutePath(filePath: string): string { - const workspaceRoot = getWorkspacePath() +export function generateNormalizedAbsolutePath(filePath: string, workspaceRoot: string): string { // Resolve the path to make it absolute if it's relative const resolvedPath = path.resolve(workspaceRoot, filePath) // Normalize to handle any . or .. segments and duplicate slashes @@ -22,11 +20,10 @@ export function generateNormalizedAbsolutePath(filePath: string): string { * Ensures consistent relative path generation across different platforms. 
* * @param normalizedAbsolutePath - The normalized absolute path to convert - * @param workspaceRoot - The root directory of the workspace + * @param workspaceRoot - The root directory of the workspace (required) * @returns The relative path from workspaceRoot to the file */ -export function generateRelativeFilePath(normalizedAbsolutePath: string): string { - const workspaceRoot = getWorkspacePath() +export function generateRelativeFilePath(normalizedAbsolutePath: string, workspaceRoot: string): string { // Generate the relative path const relativePath = path.relative(workspaceRoot, normalizedAbsolutePath) // Normalize to ensure consistent path separators diff --git a/src/services/code-index/shared/supported-extensions.ts b/src/services/code-index/shared/supported-extensions.ts index 91e3d29c83f3..a5205631a674 100644 --- a/src/services/code-index/shared/supported-extensions.ts +++ b/src/services/code-index/shared/supported-extensions.ts @@ -1,4 +1,4 @@ import { extensions as allExtensions } from "../../tree-sitter" -// Filter out markdown extensions for the scanner -export const scannerExtensions = allExtensions.filter((ext) => ext !== ".md" && ext !== ".markdown") +// Include all extensions including markdown for the scanner +export const scannerExtensions = allExtensions diff --git a/src/services/code-index/shared/validation-helpers.ts b/src/services/code-index/shared/validation-helpers.ts new file mode 100644 index 000000000000..c210c8ec1751 --- /dev/null +++ b/src/services/code-index/shared/validation-helpers.ts @@ -0,0 +1,187 @@ +import { t } from "../../../i18n" +import { serializeError } from "serialize-error" + +/** + * HTTP error interface for embedder errors + */ +export interface HttpError extends Error { + status?: number + response?: { + status?: number + } +} + +/** + * Common error types that can occur during embedder validation + */ +export interface ValidationError { + status?: number + message?: string + name?: string + code?: string +} + +/** + * Maps HTTP status codes to appropriate error messages + */ +export function getErrorMessageForStatus(status: number | undefined, embedderType: string): string | undefined { + switch (status) { + case 401: + case 403: + return "embeddings:validation.authenticationFailed" + case 404: + return embedderType === "openai" + ? 
"embeddings:validation.modelNotAvailable" + : "embeddings:validation.invalidEndpoint" + case 429: + return "embeddings:validation.serviceUnavailable" + default: + if (status && status >= 400 && status < 600) { + return "embeddings:validation.configurationError" + } + return undefined + } +} + +/** + * Extracts status code from various error formats + */ +export function extractStatusCode(error: any): number | undefined { + // Direct status property + if (error?.status) return error.status + + // Response status property + if (error?.response?.status) return error.response.status + + // Extract from error message (e.g., "HTTP 404: Not Found") + if (error?.message) { + const match = error.message.match(/HTTP (\d+):/) + if (match) { + return parseInt(match[1], 10) + } + } + + // Use serialize-error as fallback for complex objects + const serialized = serializeError(error) + if (serialized?.status) return serialized.status + if (serialized?.response?.status) return serialized.response.status + + return undefined +} + +/** + * Extracts error message from various error formats + */ +export function extractErrorMessage(error: any): string { + if (error?.message) { + return error.message + } + + if (typeof error === "string") { + return error + } + + if (error && typeof error === "object" && "toString" in error) { + try { + return String(error) + } catch { + return "Unknown error" + } + } + + // Use serialize-error as fallback for complex objects + const serialized = serializeError(error) + if (serialized?.message) { + return serialized.message + } + + return "Unknown error" +} + +/** + * Standard validation error handler for embedder configuration validation + * Returns a consistent error response based on the error type + */ +export function handleValidationError( + error: any, + embedderType: string, + customHandlers?: { + beforeStandardHandling?: (error: any) => { valid: boolean; error: string } | undefined + }, +): { valid: boolean; error: string } { + // Serialize the error to ensure we have access to all properties + const serializedError = serializeError(error) + + // Allow custom handling first (pass original error for backward compatibility) + if (customHandlers?.beforeStandardHandling) { + const customResult = customHandlers.beforeStandardHandling(error) + if (customResult) return customResult + } + + // Extract status code and error message from serialized error + const statusCode = extractStatusCode(serializedError) + const errorMessage = extractErrorMessage(serializedError) + + // Check for status-based errors first + const statusError = getErrorMessageForStatus(statusCode, embedderType) + if (statusError) { + return { valid: false, error: statusError } + } + + // Check for connection errors + if (errorMessage) { + if ( + errorMessage.includes("ENOTFOUND") || + errorMessage.includes("ECONNREFUSED") || + errorMessage.includes("ETIMEDOUT") || + errorMessage === "AbortError" || + errorMessage.includes("HTTP 0:") || + errorMessage === "No response" + ) { + return { valid: false, error: "embeddings:validation.connectionFailed" } + } + + if (errorMessage.includes("Failed to parse response JSON")) { + return { valid: false, error: "embeddings:validation.invalidResponse" } + } + } + + // For generic errors, preserve the original error message if it's not a standard one + if (errorMessage && errorMessage !== "Unknown error") { + return { valid: false, error: errorMessage } + } + + // Fallback to generic error + return { valid: false, error: "embeddings:validation.configurationError" } +} + 
+/** + * Wraps an async validation function with standard error handling + */ +export async function withValidationErrorHandling( + validationFn: () => Promise, + embedderType: string, + customHandlers?: Parameters[2], +): Promise<{ valid: boolean; error?: string }> { + try { + return await validationFn() + } catch (error) { + return handleValidationError(error, embedderType, customHandlers) + } +} + +/** + * Formats an embedding error message based on the error type and context + */ +export function formatEmbeddingError(error: any, maxRetries: number): Error { + const errorMessage = extractErrorMessage(error) + const statusCode = extractStatusCode(error) + + if (statusCode === 401) { + return new Error(t("embeddings:authenticationFailed")) + } else if (statusCode) { + return new Error(t("embeddings:failedWithStatus", { attempts: maxRetries, statusCode, errorMessage })) + } else { + return new Error(t("embeddings:failedWithError", { attempts: maxRetries, errorMessage })) + } +} diff --git a/src/services/code-index/vector-store/__tests__/qdrant-client.spec.ts b/src/services/code-index/vector-store/__tests__/qdrant-client.spec.ts index 7ed9afb179be..bc4381edf28c 100644 --- a/src/services/code-index/vector-store/__tests__/qdrant-client.spec.ts +++ b/src/services/code-index/vector-store/__tests__/qdrant-client.spec.ts @@ -3,7 +3,7 @@ import { createHash } from "crypto" import { QdrantVectorStore } from "../qdrant-client" import { getWorkspacePath } from "../../../../utils/path" -import { MAX_SEARCH_RESULTS, SEARCH_MIN_SCORE } from "../../constants" +import { DEFAULT_MAX_SEARCH_RESULTS, DEFAULT_SEARCH_MIN_SCORE } from "../../constants" // Mocks vitest.mock("@qdrant/js-client-rest") @@ -1005,8 +1005,8 @@ describe("QdrantVectorStore", () => { expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { query: queryVector, filter: undefined, - score_threshold: SEARCH_MIN_SCORE, - limit: MAX_SEARCH_RESULTS, + score_threshold: DEFAULT_SEARCH_MIN_SCORE, + limit: DEFAULT_MAX_SEARCH_RESULTS, params: { hnsw_ef: 128, exact: false, @@ -1056,8 +1056,8 @@ describe("QdrantVectorStore", () => { }, ], }, - score_threshold: SEARCH_MIN_SCORE, - limit: MAX_SEARCH_RESULTS, + score_threshold: DEFAULT_SEARCH_MIN_SCORE, + limit: DEFAULT_MAX_SEARCH_RESULTS, params: { hnsw_ef: 128, exact: false, @@ -1083,7 +1083,31 @@ describe("QdrantVectorStore", () => { query: queryVector, filter: undefined, score_threshold: customMinScore, - limit: MAX_SEARCH_RESULTS, + limit: DEFAULT_MAX_SEARCH_RESULTS, + params: { + hnsw_ef: 128, + exact: false, + }, + with_payload: { + include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"], + }, + }) + }) + + it("should use custom maxResults when provided", async () => { + const queryVector = [0.1, 0.2, 0.3] + const customMaxResults = 100 + const mockQdrantResults = { points: [] } + + mockQdrantClientInstance.query.mockResolvedValue(mockQdrantResults) + + await vectorStore.search(queryVector, undefined, undefined, customMaxResults) + + expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { + query: queryVector, + filter: undefined, + score_threshold: DEFAULT_SEARCH_MIN_SCORE, + limit: customMaxResults, params: { hnsw_ef: 128, exact: false, @@ -1229,8 +1253,8 @@ describe("QdrantVectorStore", () => { }, ], }, - score_threshold: SEARCH_MIN_SCORE, - limit: MAX_SEARCH_RESULTS, + score_threshold: DEFAULT_SEARCH_MIN_SCORE, + limit: DEFAULT_MAX_SEARCH_RESULTS, params: { hnsw_ef: 128, exact: false, @@ -1254,7 +1278,7 @@ 
describe("QdrantVectorStore", () => { ;(console.error as any).mockRestore() }) - it("should use constants MAX_SEARCH_RESULTS and SEARCH_MIN_SCORE correctly", async () => { + it("should use constants DEFAULT_MAX_SEARCH_RESULTS and DEFAULT_SEARCH_MIN_SCORE correctly", async () => { const queryVector = [0.1, 0.2, 0.3] const mockQdrantResults = { points: [] } @@ -1263,8 +1287,8 @@ describe("QdrantVectorStore", () => { await vectorStore.search(queryVector) const callArgs = mockQdrantClientInstance.query.mock.calls[0][1] - expect(callArgs.limit).toBe(MAX_SEARCH_RESULTS) - expect(callArgs.score_threshold).toBe(SEARCH_MIN_SCORE) + expect(callArgs.limit).toBe(DEFAULT_MAX_SEARCH_RESULTS) + expect(callArgs.score_threshold).toBe(DEFAULT_SEARCH_MIN_SCORE) }) }) }) diff --git a/src/services/code-index/vector-store/qdrant-client.ts b/src/services/code-index/vector-store/qdrant-client.ts index c0f19af28b78..c8883959d55e 100644 --- a/src/services/code-index/vector-store/qdrant-client.ts +++ b/src/services/code-index/vector-store/qdrant-client.ts @@ -4,7 +4,7 @@ import * as path from "path" import { getWorkspacePath } from "../../../utils/path" import { IVectorStore } from "../interfaces/vector-store" import { Payload, VectorStoreSearchResult } from "../interfaces" -import { MAX_SEARCH_RESULTS, SEARCH_MIN_SCORE } from "../constants" +import { DEFAULT_MAX_SEARCH_RESULTS, DEFAULT_SEARCH_MIN_SCORE } from "../constants" import { t } from "../../../i18n" /** @@ -271,13 +271,16 @@ export class QdrantVectorStore implements IVectorStore { /** * Searches for similar vectors * @param queryVector Vector to search for - * @param limit Maximum number of results to return + * @param directoryPrefix Optional directory prefix to filter results + * @param minScore Optional minimum score threshold + * @param maxResults Optional maximum number of results to return * @returns Promise resolving to search results */ async search( queryVector: number[], directoryPrefix?: string, minScore?: number, + maxResults?: number, ): Promise { try { let filter = undefined @@ -296,8 +299,8 @@ export class QdrantVectorStore implements IVectorStore { const searchRequest = { query: queryVector, filter, - score_threshold: SEARCH_MIN_SCORE, - limit: MAX_SEARCH_RESULTS, + score_threshold: minScore ?? DEFAULT_SEARCH_MIN_SCORE, + limit: maxResults ?? 
DEFAULT_MAX_SEARCH_RESULTS, params: { hnsw_ef: 128, exact: false, @@ -307,10 +310,6 @@ export class QdrantVectorStore implements IVectorStore { }, } - if (minScore !== undefined) { - searchRequest.score_threshold = minScore - } - const operationResult = await this.client.query(this.collectionName, searchRequest) const filteredPoints = operationResult.points.filter((p) => this.isPayloadValid(p.payload)) diff --git a/src/services/glob/__mocks__/list-files.ts b/src/services/glob/__mocks__/list-files.ts index 945452fed117..d798762691a4 100644 --- a/src/services/glob/__mocks__/list-files.ts +++ b/src/services/glob/__mocks__/list-files.ts @@ -30,7 +30,12 @@ const mockResolve = (dirPath: string): string => { * @param limit - Maximum number of files to return * @returns Promise resolving to [file paths, limit reached flag] */ -export const listFiles = vi.fn((dirPath: string, _recursive: boolean, _limit: number) => { +export const listFiles = vi.fn((dirPath: string, _recursive: boolean, limit: number) => { + // Early return for limit of 0 - matches the actual implementation + if (limit === 0) { + return Promise.resolve([[], false]) + } + // Special case: Root or home directories // Prevents tests from trying to list all files in these directories if (dirPath === "/" || dirPath === "/root" || dirPath === "/home/user") { diff --git a/src/services/glob/__tests__/list-files.spec.ts b/src/services/glob/__tests__/list-files.spec.ts index 7797e4072672..2a1542483076 100644 --- a/src/services/glob/__tests__/list-files.spec.ts +++ b/src/services/glob/__tests__/list-files.spec.ts @@ -1,123 +1,18 @@ -import { vi, describe, it, expect, beforeEach } from "vitest" -import * as path from "path" - -// Mock ripgrep to avoid filesystem dependencies -vi.mock("../../ripgrep", () => ({ - getBinPath: vi.fn().mockResolvedValue("/mock/path/to/rg"), -})) - -// Mock vscode -vi.mock("vscode", () => ({ - env: { - appRoot: "/mock/app/root", - }, -})) - -// Mock filesystem operations -vi.mock("fs", () => ({ - promises: { - access: vi.fn().mockRejectedValue(new Error("Not found")), - readFile: vi.fn().mockResolvedValue(""), - readdir: vi.fn().mockResolvedValue([]), - }, -})) - -vi.mock("child_process", () => ({ - spawn: vi.fn(), -})) - -vi.mock("../../path", () => ({ - arePathsEqual: vi.fn().mockReturnValue(false), -})) - +import { describe, it, expect, vi } from "vitest" import { listFiles } from "../list-files" -import * as childProcess from "child_process" - -describe("list-files symlink support", () => { - beforeEach(() => { - vi.clearAllMocks() - }) - - it("should include --follow flag in ripgrep arguments", async () => { - const mockSpawn = vi.mocked(childProcess.spawn) - const mockProcess = { - stdout: { - on: vi.fn((event, callback) => { - if (event === "data") { - // Simulate some output to complete the process - setTimeout(() => callback("test-file.txt\n"), 10) - } - }), - }, - stderr: { - on: vi.fn(), - }, - on: vi.fn((event, callback) => { - if (event === "close") { - setTimeout(() => callback(0), 20) - } - if (event === "error") { - // No error simulation - } - }), - kill: vi.fn(), - } - mockSpawn.mockReturnValue(mockProcess as any) - - // Call listFiles to trigger ripgrep execution - await listFiles("/test/dir", false, 100) - - // Verify that spawn was called with --follow flag (the critical fix) - const [rgPath, args] = mockSpawn.mock.calls[0] - expect(rgPath).toBe("/mock/path/to/rg") - expect(args).toContain("--files") - expect(args).toContain("--hidden") - expect(args).toContain("--follow") // This is the 
critical assertion - the fix should add this flag - - // Platform-agnostic path check - verify the last argument is the resolved path - const expectedPath = path.resolve("/test/dir") - expect(args[args.length - 1]).toBe(expectedPath) - }) - - it("should include --follow flag for recursive listings too", async () => { - const mockSpawn = vi.mocked(childProcess.spawn) - const mockProcess = { - stdout: { - on: vi.fn((event, callback) => { - if (event === "data") { - setTimeout(() => callback("test-file.txt\n"), 10) - } - }), - }, - stderr: { - on: vi.fn(), - }, - on: vi.fn((event, callback) => { - if (event === "close") { - setTimeout(() => callback(0), 20) - } - if (event === "error") { - // No error simulation - } - }), - kill: vi.fn(), - } - - mockSpawn.mockReturnValue(mockProcess as any) - - // Call listFiles with recursive=true - await listFiles("/test/dir", true, 100) +vi.mock("../list-files", async () => { + const actual = await vi.importActual("../list-files") + return { + ...actual, + handleSpecialDirectories: vi.fn(), + } +}) - // Verify that spawn was called with --follow flag (the critical fix) - const [rgPath, args] = mockSpawn.mock.calls[0] - expect(rgPath).toBe("/mock/path/to/rg") - expect(args).toContain("--files") - expect(args).toContain("--hidden") - expect(args).toContain("--follow") // This should be present in recursive mode too +describe("listFiles", () => { + it("should return empty array immediately when limit is 0", async () => { + const result = await listFiles("/test/path", true, 0) - // Platform-agnostic path check - verify the last argument is the resolved path - const expectedPath = path.resolve("/test/dir") - expect(args[args.length - 1]).toBe(expectedPath) + expect(result).toEqual([[], false]) }) }) diff --git a/src/services/glob/list-files.ts b/src/services/glob/list-files.ts index 0dc2a06f1edc..3fb1b2e154c6 100644 --- a/src/services/glob/list-files.ts +++ b/src/services/glob/list-files.ts @@ -16,6 +16,11 @@ import { DIRS_TO_IGNORE } from "./constants" * @returns Tuple of [file paths array, whether the limit was reached] */ export async function listFiles(dirPath: string, recursive: boolean, limit: number): Promise<[string[], boolean]> { + // Early return for limit of 0 - no need to scan anything + if (limit === 0) { + return [[], false] + } + // Handle special directories const specialResult = await handleSpecialDirectories(dirPath) diff --git a/src/services/tree-sitter/__tests__/inspectGo.spec.ts b/src/services/tree-sitter/__tests__/inspectGo.spec.ts index 61f70cbd244f..b4d6f451d820 100644 --- a/src/services/tree-sitter/__tests__/inspectGo.spec.ts +++ b/src/services/tree-sitter/__tests__/inspectGo.spec.ts @@ -20,4 +20,46 @@ describe("Go Tree-sitter Parser", () => { const result = await testParseSourceCodeDefinitions("file.go", sampleGoContent, testOptions) expect(result).toBeDefined() }) + + // Test 3: Verify no duplicate captures for Go constructs + it("should not create duplicate captures for Go constructs", async () => { + const testOptions = { + language: "go", + wasmFile: "tree-sitter-go.wasm", + queryString: goQuery, + extKey: "go", + } + + const result = await testParseSourceCodeDefinitions("file.go", sampleGoContent, testOptions) + + // Check that we have results + expect(result).toBeDefined() + expect(typeof result).toBe("string") + expect(result!.length).toBeGreaterThan(0) + + // Parse the result to extract line ranges + const lines = result!.split("\n").filter((line) => line.trim() && !line.startsWith("#")) + + // Extract line ranges from the 
format "startLine--endLine | content" + const lineRanges = lines + .map((line) => { + const match = line.match(/^(\d+)--(\d+)/) + return match ? `${match[1]}-${match[2]}` : null + }) + .filter(Boolean) + + // Check for duplicate line ranges (which was the original problem) + const uniqueLineRanges = [...new Set(lineRanges)] + expect(lineRanges.length).toBe(uniqueLineRanges.length) + + // With the new query that captures full declarations, we expect the entire file + // to be captured as a single block containing all the declarations + expect(lines.length).toBeGreaterThan(0) + + // The line range should cover the entire sample file content + expect(lineRanges[0]).toBe("2-126") + + // The captured content should start with the package declaration + expect(result).toContain("// Package declaration test") + }) }) diff --git a/src/services/tree-sitter/__tests__/markdownIntegration.spec.ts b/src/services/tree-sitter/__tests__/markdownIntegration.spec.ts index de9f1eb1395e..9ada01a078fe 100644 --- a/src/services/tree-sitter/__tests__/markdownIntegration.spec.ts +++ b/src/services/tree-sitter/__tests__/markdownIntegration.spec.ts @@ -20,8 +20,9 @@ describe("Markdown Integration Tests", () => { vi.clearAllMocks() }) - it("should parse markdown files and extract headers", async () => { - // Mock markdown content + it("should parse markdown files and extract headers for definition listing", async () => { + // This test verifies that the tree-sitter integration correctly + // formats markdown headers for the definition listing feature const markdownContent = "# Main Header\n\nThis is some content under the main header.\nIt spans multiple lines to meet the minimum section length.\n\n## Section 1\n\nThis is content for section 1.\nIt also spans multiple lines.\n\n### Subsection 1.1\n\nThis is a subsection with enough lines\nto meet the minimum section length requirement.\n\n## Section 2\n\nFinal section content.\nWith multiple lines.\n" @@ -34,7 +35,7 @@ describe("Markdown Integration Tests", () => { // Verify fs.readFile was called with the correct path expect(fs.readFile).toHaveBeenCalledWith("test.md", "utf8") - // Check the result + // Check the result formatting for definition listing expect(result).toBeDefined() expect(result).toContain("# test.md") expect(result).toContain("1--5 | # Main Header") @@ -43,8 +44,8 @@ describe("Markdown Integration Tests", () => { expect(result).toContain("16--20 | ## Section 2") }) - it("should handle markdown files with no headers", async () => { - // Mock markdown content with no headers + it("should return undefined for markdown files with no extractable definitions", async () => { + // This test verifies behavior when no headers meet the minimum requirements const markdownContent = "This is just some text.\nNo headers here.\nJust plain text." 
// Mock fs.readFile to return our markdown content @@ -56,45 +57,7 @@ describe("Markdown Integration Tests", () => { // Verify fs.readFile was called with the correct path expect(fs.readFile).toHaveBeenCalledWith("no-headers.md", "utf8") - // Check the result + // Check the result - should be undefined since no definitions found expect(result).toBeUndefined() }) - - it("should handle markdown files with headers that don't meet minimum section length", async () => { - // Mock markdown content with headers but short sections - const markdownContent = "# Header 1\nShort section\n\n# Header 2\nAnother short section" - - // Mock fs.readFile to return our markdown content - ;(fs.readFile as Mock).mockImplementation(() => Promise.resolve(markdownContent)) - - // Call the function with a markdown file path - const result = await parseSourceCodeDefinitionsForFile("short-sections.md") - - // Verify fs.readFile was called with the correct path - expect(fs.readFile).toHaveBeenCalledWith("short-sections.md", "utf8") - - // Check the result - should be undefined since no sections meet the minimum length - expect(result).toBeUndefined() - }) - - it("should handle markdown files with mixed header styles", async () => { - // Mock markdown content with mixed header styles - const markdownContent = - "# ATX Header\nThis is content under an ATX header.\nIt spans multiple lines to meet the minimum section length.\n\nSetext Header\n============\nThis is content under a setext header.\nIt also spans multiple lines to meet the minimum section length.\n" - - // Mock fs.readFile to return our markdown content - ;(fs.readFile as Mock).mockImplementation(() => Promise.resolve(markdownContent)) - - // Call the function with a markdown file path - const result = await parseSourceCodeDefinitionsForFile("mixed-headers.md") - - // Verify fs.readFile was called with the correct path - expect(fs.readFile).toHaveBeenCalledWith("mixed-headers.md", "utf8") - - // Check the result - expect(result).toBeDefined() - expect(result).toContain("# mixed-headers.md") - expect(result).toContain("1--4 | # ATX Header") - expect(result).toContain("5--9 | Setext Header") - }) }) diff --git a/src/services/tree-sitter/__tests__/parseSourceCodeDefinitions.go.spec.ts b/src/services/tree-sitter/__tests__/parseSourceCodeDefinitions.go.spec.ts index d176c755d174..17f3b37ce7e9 100644 --- a/src/services/tree-sitter/__tests__/parseSourceCodeDefinitions.go.spec.ts +++ b/src/services/tree-sitter/__tests__/parseSourceCodeDefinitions.go.spec.ts @@ -37,59 +37,21 @@ describe("Go Source Code Definition Tests", () => { parseResult = result as string }) - it("should parse package declarations", () => { - expect(parseResult).toMatch(/\d+--\d+ \|\s*package main/) + it("should capture the entire Go file as a single block", () => { + // With the universal 50-character threshold, the entire file is captured as one block + expect(parseResult).toMatch(/2--126 \| \/\/ Package declaration test/) }) - it("should parse import declarations", () => { - expect(parseResult).toMatch(/\d+--\d+ \|\s*"fmt"/) - expect(parseResult).toMatch(/\d+--\d+ \|\s*"sync"/) - expect(parseResult).toMatch(/\d+--\d+ \|\s*"time"/) + it("should contain package declaration in the captured content", () => { + // The captured block should contain the package declaration + expect(parseResult).toContain("# file.go") + expect(parseResult).toContain("2--126") }) - it("should parse const declarations", () => { - expect(parseResult).toMatch(/\d+--\d+ \|\s*TestConstDefinition1 = "test1"/) - 
expect(parseResult).toMatch(/\d+--\d+ \|\s*TestConstDefinition2 = "test2"/) - }) - - it("should parse var declarations", () => { - expect(parseResult).toMatch(/\d+--\d+ \|\s*TestVarDefinition1 string = "var1"/) - expect(parseResult).toMatch(/\d+--\d+ \|\s*TestVarDefinition2 int\s*= 42/) - }) - - it("should parse interface declarations", () => { - expect(parseResult).toMatch(/\d+--\d+ \|\s*type TestInterfaceDefinition interface/) - }) - - it("should parse struct declarations", () => { - expect(parseResult).toMatch(/\d+--\d+ \|\s*type TestStructDefinition struct/) - }) - - it("should parse type declarations", () => { - expect(parseResult).toMatch(/\d+--\d+ \|\s*type TestTypeDefinition struct/) - }) - - it("should parse function declarations", () => { - expect(parseResult).toMatch(/\d+--\d+ \|\s*func TestFunctionDefinition\(/) - }) - - it("should parse method declarations", () => { - expect(parseResult).toMatch(/\d+--\d+ \|\s*func \(t \*TestStructDefinition\) TestMethodDefinition\(/) - }) - - it("should parse channel function declarations", () => { - expect(parseResult).toMatch(/\d+--\d+ \|\s*func TestChannelDefinition\(/) - }) - - it("should parse goroutine function declarations", () => { - expect(parseResult).toMatch(/\d+--\d+ \|\s*func TestGoroutineDefinition\(\)/) - }) - - it("should parse defer function declarations", () => { - expect(parseResult).toMatch(/\d+--\d+ \|\s*func TestDeferDefinition\(\)/) - }) - - it("should parse select function declarations", () => { - expect(parseResult).toMatch(/\d+--\d+ \|\s*func TestSelectDefinition\(/) + it("should not have duplicate captures", () => { + // Should only have one capture for the entire file + const lineRanges = parseResult.match(/\d+--\d+ \|/g) + expect(lineRanges).toBeDefined() + expect(lineRanges!.length).toBe(1) }) }) diff --git a/src/services/tree-sitter/queries/go.ts b/src/services/tree-sitter/queries/go.ts index 9ce82eb32174..3a80fdeb10a8 100644 --- a/src/services/tree-sitter/queries/go.ts +++ b/src/services/tree-sitter/queries/go.ts @@ -1,58 +1,26 @@ /* Go Tree-Sitter Query Patterns +Updated to capture full declarations instead of just identifiers */ export default ` -; Package declarations -(package_clause - (package_identifier) @name.definition.package) +; Function declarations - capture the entire declaration +(function_declaration) @name.definition.function -; Import declarations -(import_declaration - (import_spec_list - (import_spec path: (_) @name.definition.import))) +; Method declarations - capture the entire declaration +(method_declaration) @name.definition.method -; Const declarations -(const_declaration - (const_spec name: (identifier) @name.definition.const)) +; Type declarations (interfaces, structs, type aliases) - capture the entire declaration +(type_declaration) @name.definition.type -; Var declarations -(var_declaration - (var_spec name: (identifier) @name.definition.var)) +; Variable declarations - capture the entire declaration +(var_declaration) @name.definition.var -; Interface declarations -(type_declaration - (type_spec - name: (type_identifier) @name.definition.interface - type: (interface_type))) +; Constant declarations - capture the entire declaration +(const_declaration) @name.definition.const -; Struct declarations -(type_declaration - (type_spec - name: (type_identifier) @name.definition.struct - type: (struct_type))) +; Package clause +(package_clause) @name.definition.package -; Type declarations -(type_declaration - (type_spec - name: (type_identifier) @name.definition.type)) - -; Function 
declarations -(function_declaration - name: (identifier) @name.definition.function) - -; Method declarations -(method_declaration - name: (field_identifier) @name.definition.method) - -; Channel operations -(channel_type) @name.definition.channel - -; Goroutine declarations -(go_statement) @name.definition.goroutine - -; Defer statements -(defer_statement) @name.definition.defer - -; Select statements -(select_statement) @name.definition.select +; Import declarations - capture the entire import block +(import_declaration) @name.definition.import ` diff --git a/src/shared/ExtensionMessage.ts b/src/shared/ExtensionMessage.ts index 73ebf59d4c8f..cc93b9246729 100644 --- a/src/shared/ExtensionMessage.ts +++ b/src/shared/ExtensionMessage.ts @@ -73,6 +73,10 @@ export interface ExtensionMessage { | "autoApprovalEnabled" | "updateCustomMode" | "deleteCustomMode" + | "exportModeResult" + | "importModeResult" + | "checkRulesDirectoryResult" + | "deleteCustomModeCheck" | "currentCheckpointUpdated" | "showHumanRelayDialog" | "humanRelayResponse" @@ -99,6 +103,9 @@ export interface ExtensionMessage { | "marketplaceInstallResult" | "marketplaceData" | "shareTaskSuccess" + | "codeIndexSettingsSaved" + | "codeIndexSecretStatus" + | "mermaidFixResponse" text?: string payload?: any // Add a generic payload for now, can refine later action?: @@ -141,6 +148,7 @@ export interface ExtensionMessage { error?: string setting?: string value?: any + hasContent?: boolean // For checkRulesDirectoryResult items?: MarketplaceItem[] userInfo?: CloudUserInfo organizationAllowList?: OrganizationAllowList @@ -148,6 +156,9 @@ export interface ExtensionMessage { marketplaceItems?: MarketplaceItem[] marketplaceInstalledMetadata?: MarketplaceInstalledMetadata visibility?: ShareVisibility + rulesFolderPath?: string + settings?: any + fixedCode?: string | null // For mermaidFixResponse } export type ExtensionState = Pick< @@ -172,6 +183,7 @@ export type ExtensionState = Pick< | "alwaysAllowModeSwitch" | "alwaysAllowSubtasks" | "alwaysAllowExecute" + | "alwaysAllowUpdateTodoList" | "allowedCommands" | "allowedMaxRequests" | "browserToolEnabled" diff --git a/src/shared/WebviewMessage.ts b/src/shared/WebviewMessage.ts index 7efc97e8c77d..b74b545301a3 100644 --- a/src/shared/WebviewMessage.ts +++ b/src/shared/WebviewMessage.ts @@ -18,8 +18,13 @@ export type PromptMode = Mode | "enhance" export type AudioType = "notification" | "celebration" | "progress_loop" +export interface UpdateTodoListPayload { + todos: any[] +} + export interface WebviewMessage { type: + | "updateTodoList" | "deleteMultipleTasksWithIds" | "currentApiConfigName" | "saveApiConfiguration" @@ -37,6 +42,9 @@ export interface WebviewMessage { | "alwaysAllowWriteOutsideWorkspace" | "alwaysAllowWriteProtected" | "alwaysAllowExecute" + | "alwaysAllowFollowupQuestions" + | "alwaysAllowUpdateTodoList" + | "followupAutoApproveTimeoutMs" | "webviewDidLaunch" | "newTask" | "askResponse" @@ -71,6 +79,7 @@ export interface WebviewMessage { | "alwaysAllowModeSwitch" | "allowedMaxRequests" | "alwaysAllowSubtasks" + | "alwaysAllowUpdateTodoList" | "autoCondenseContext" | "autoCondenseContextPercent" | "condensingApiConfigId" @@ -138,6 +147,7 @@ export interface WebviewMessage { | "humanRelayResponse" | "humanRelayCancel" | "browserToolEnabled" + | "codebaseIndexEnabled" | "telemetrySetting" | "showRooIgnoredFiles" | "testBrowserConnection" @@ -160,9 +170,10 @@ export interface WebviewMessage { | "indexingStatusUpdate" | "indexCleared" | "focusPanelRequest" - | 
"codebaseIndexConfig" | "profileThresholds" | "setHistoryPreviewCollapsed" + | "fixMermaidSyntax" + | "mermaidFixResponse" | "openExternal" | "filterMarketplaceItems" | "marketplaceButtonClicked" @@ -175,6 +186,14 @@ export interface WebviewMessage { | "switchTab" | "profileThresholds" | "shareTaskSuccess" + | "exportMode" + | "exportModeResult" + | "importMode" + | "importModeResult" + | "checkRulesDirectory" + | "checkRulesDirectoryResult" + | "saveCodeIndexSettingsAtomic" + | "requestCodeIndexSecretStatus" text?: string tab?: "settings" | "history" | "mcp" | "modes" | "chat" | "marketplace" | "account" disabled?: boolean @@ -213,6 +232,26 @@ export interface WebviewMessage { mpInstallOptions?: InstallMarketplaceItemOptions config?: Record // Add config to the payload visibility?: ShareVisibility // For share visibility + hasContent?: boolean // For checkRulesDirectoryResult + checkOnly?: boolean // For deleteCustomMode check + codeIndexSettings?: { + // Global state settings + codebaseIndexEnabled: boolean + codebaseIndexQdrantUrl: string + codebaseIndexEmbedderProvider: "openai" | "ollama" | "openai-compatible" | "gemini" + codebaseIndexEmbedderBaseUrl?: string + codebaseIndexEmbedderModelId: string + codebaseIndexOpenAiCompatibleBaseUrl?: string + codebaseIndexOpenAiCompatibleModelDimension?: number + codebaseIndexSearchMaxResults?: number + codebaseIndexSearchMinScore?: number + + // Secret settings + codeIndexOpenAiKey?: string + codeIndexQdrantApiKey?: string + codebaseIndexOpenAiCompatibleApiKey?: string + codebaseIndexGeminiApiKey?: string + } } export const checkoutDiffPayloadSchema = z.object({ @@ -257,3 +296,4 @@ export type WebViewMessagePayload = | IndexingStatusPayload | IndexClearedPayload | InstallMarketplaceItemWithParametersPayload + | UpdateTodoListPayload diff --git a/src/shared/__tests__/api.spec.ts b/src/shared/__tests__/api.spec.ts index 0285c897fc70..5cc005e19de0 100644 --- a/src/shared/__tests__/api.spec.ts +++ b/src/shared/__tests__/api.spec.ts @@ -66,7 +66,7 @@ describe("getMaxTokensForModel", () => { expect(getModelMaxOutputTokens({ modelId, model, settings })).toBe(8000) }) - it("should return undefined for non-thinking models with undefined maxTokens", () => { + it("should return 20% of context window for non-thinking models with undefined maxTokens", () => { const model: ModelInfo = { contextWindow: 200_000, supportsPromptCache: true, @@ -76,7 +76,8 @@ describe("getMaxTokensForModel", () => { modelMaxTokens: 4000, } - expect(getModelMaxOutputTokens({ modelId, model, settings })).toBeUndefined() + // Should return 20% of context window when maxTokens is undefined + expect(getModelMaxOutputTokens({ modelId, model, settings })).toBe(40000) }) test("should return maxTokens from modelInfo when thinking is false", () => { diff --git a/src/shared/api.ts b/src/shared/api.ts index d1bfa2794b1f..d3d1a31cd164 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -71,7 +71,9 @@ export const getModelMaxOutputTokens = ({ return ANTHROPIC_DEFAULT_MAX_TOKENS } - return model.maxTokens ?? 
undefined + // If maxTokens is 0 or undefined, fall back to 20% of context window + // This matches the sliding window logic + return model.maxTokens || Math.ceil(model.contextWindow * 0.2) } // GetModelsOptions diff --git a/src/shared/embeddingModels.ts b/src/shared/embeddingModels.ts index cd7c1d4e6b82..4c6bc24319e3 100644 --- a/src/shared/embeddingModels.ts +++ b/src/shared/embeddingModels.ts @@ -2,10 +2,12 @@ * Defines profiles for different embedding models, including their dimensions. */ -export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" // Add other providers as needed +export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" | "gemini" // Add other providers as needed export interface EmbeddingModelProfile { dimension: number + scoreThreshold?: number // Model-specific minimum score threshold for semantic search + queryPrefix?: string // Optional prefix required by the model for queries // Add other model-specific properties if needed, e.g., context window size } @@ -18,21 +20,34 @@ export type EmbeddingModelProfiles = { // Example profiles - expand this list as needed export const EMBEDDING_MODEL_PROFILES: EmbeddingModelProfiles = { openai: { - "text-embedding-3-small": { dimension: 1536 }, - "text-embedding-3-large": { dimension: 3072 }, - "text-embedding-ada-002": { dimension: 1536 }, + "text-embedding-3-small": { dimension: 1536, scoreThreshold: 0.4 }, + "text-embedding-3-large": { dimension: 3072, scoreThreshold: 0.4 }, + "text-embedding-ada-002": { dimension: 1536, scoreThreshold: 0.4 }, }, ollama: { - "nomic-embed-text": { dimension: 768 }, - "mxbai-embed-large": { dimension: 1024 }, - "all-minilm": { dimension: 384 }, + "nomic-embed-text": { dimension: 768, scoreThreshold: 0.4 }, + "nomic-embed-code": { + dimension: 3584, + scoreThreshold: 0.15, + queryPrefix: "Represent this query for searching relevant code: ", + }, + "mxbai-embed-large": { dimension: 1024, scoreThreshold: 0.4 }, + "all-minilm": { dimension: 384, scoreThreshold: 0.4 }, // Add default Ollama model if applicable, e.g.: // 'default': { dimension: 768 } // Assuming a default dimension }, "openai-compatible": { - "text-embedding-3-small": { dimension: 1536 }, - "text-embedding-3-large": { dimension: 3072 }, - "text-embedding-ada-002": { dimension: 1536 }, + "text-embedding-3-small": { dimension: 1536, scoreThreshold: 0.4 }, + "text-embedding-3-large": { dimension: 3072, scoreThreshold: 0.4 }, + "text-embedding-ada-002": { dimension: 1536, scoreThreshold: 0.4 }, + "nomic-embed-code": { + dimension: 3584, + scoreThreshold: 0.15, + queryPrefix: "Represent this query for searching relevant code: ", + }, + }, + gemini: { + "text-embedding-004": { dimension: 768 }, }, } @@ -59,6 +74,38 @@ export function getModelDimension(provider: EmbedderProvider, modelId: string): return modelProfile.dimension } +/** + * Retrieves the score threshold for a given provider and model ID. + * @param provider The embedder provider (e.g., "openai"). + * @param modelId The specific model ID (e.g., "text-embedding-3-small"). + * @returns The score threshold or undefined if the model is not found. + */ +export function getModelScoreThreshold(provider: EmbedderProvider, modelId: string): number | undefined { + const providerProfiles = EMBEDDING_MODEL_PROFILES[provider] + if (!providerProfiles) { + return undefined + } + + const modelProfile = providerProfiles[modelId] + return modelProfile?.scoreThreshold +} + +/** + * Retrieves the query prefix for a given provider and model ID. 
+ * @param provider The embedder provider (e.g., "openai"). + * @param modelId The specific model ID (e.g., "nomic-embed-code"). + * @returns The query prefix or undefined if the model doesn't require one. + */ +export function getModelQueryPrefix(provider: EmbedderProvider, modelId: string): string | undefined { + const providerProfiles = EMBEDDING_MODEL_PROFILES[provider] + if (!providerProfiles) { + return undefined + } + + const modelProfile = providerProfiles[modelId] + return modelProfile?.queryPrefix +} + /** * Gets the default *specific* embedding model ID based on the provider. * Does not include the provider prefix. @@ -85,6 +132,10 @@ export function getDefaultModelId(provider: EmbedderProvider): string { // Return a placeholder or throw an error, depending on desired behavior return "unknown-default" // Placeholder specific model ID } + + case "gemini": + return "text-embedding-004" + default: // Fallback for unknown providers console.warn(`Unknown provider for default model ID: ${provider}. Falling back to OpenAI default.`) diff --git a/src/shared/modes.ts b/src/shared/modes.ts index 47387551ff2a..23efdc45f74a 100644 --- a/src/shared/modes.ts +++ b/src/shared/modes.ts @@ -72,7 +72,7 @@ export const modes: readonly ModeConfig[] = [ description: "Plan and design before implementation", groups: ["read", ["edit", { fileRegex: "\\.md$", description: "Markdown files only" }], "browser", "mcp"], customInstructions: - "1. Do some information gathering (for example using read_file or search_files) to get more context about the task.\n\n2. You should also ask the user clarifying questions to get a better understanding of the task.\n\n3. Once you've gained more context about the user's request, you should create a detailed plan for how to accomplish the task. Include Mermaid diagrams if they help make your plan clearer.\n\n4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it.\n\n5. Once the user confirms the plan, ask them if they'd like you to write it to a markdown file.\n\n6. Use the switch_mode tool to request that the user switch to another mode to implement the solution.", + "1. Do some information gathering (for example using read_file or search_files) to get more context about the task.\n\n2. You should also ask the user clarifying questions to get a better understanding of the task.\n\n3. Once you've gained more context about the user's request, you should create a detailed plan for how to accomplish the task. Include Mermaid diagrams if they help make your plan clearer.\n\n4. Ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it.\n\n5. Use the switch_mode tool to request that the user switch to another mode to implement the solution.\n\n**IMPORTANT: Do not provide time estimates for how long tasks will take to complete. 
Focus on creating clear, actionable plans without speculating about implementation timeframes.**", }, { slug: "code", diff --git a/src/shared/todo.ts b/src/shared/todo.ts new file mode 100644 index 000000000000..16e7d085e2f7 --- /dev/null +++ b/src/shared/todo.ts @@ -0,0 +1,23 @@ +import { ClineMessage } from "@roo-code/types" +export function getLatestTodo(clineMessages: ClineMessage[]) { + const todos = clineMessages + .filter( + (msg) => + (msg.type === "ask" && msg.ask === "tool") || (msg.type === "say" && msg.say === "user_edit_todos"), + ) + .map((msg) => { + try { + return JSON.parse(msg.text ?? "{}") + } catch { + return null + } + }) + .filter((item) => item && item.tool === "updateTodoList" && Array.isArray(item.todos)) + .map((item) => item.todos) + .pop() + if (todos) { + return todos + } else { + return [] + } +} diff --git a/src/shared/tools.ts b/src/shared/tools.ts index 0725e2e4d640..67972243fe72 100644 --- a/src/shared/tools.ts +++ b/src/shared/tools.ts @@ -64,6 +64,7 @@ export const toolParamNames = [ "end_line", "query", "args", + "todos", ] as const export type ToolParamName = (typeof toolParamNames)[number] @@ -188,6 +189,7 @@ export const TOOL_DISPLAY_NAMES: Record = { insert_content: "insert content", search_and_replace: "search and replace", codebase_search: "codebase search", + update_todo_list: "update todo list", } as const // Define available tool groups. @@ -226,6 +228,7 @@ export const ALWAYS_AVAILABLE_TOOLS: ToolName[] = [ "attempt_completion", "switch_mode", "new_task", + "update_todo_list", ] as const export type DiffResult = diff --git a/src/utils/path.ts b/src/utils/path.ts index a58d63017257..48e2ce667385 100644 --- a/src/utils/path.ts +++ b/src/utils/path.ts @@ -115,3 +115,18 @@ export const getWorkspacePath = (defaultCwdPath = "") => { } return cwdPath } + +export const getWorkspacePathForContext = (contextPath?: string): string => { + // If context path provided, find its workspace + if (contextPath) { + const workspaceFolder = vscode.workspace.getWorkspaceFolder(vscode.Uri.file(contextPath)) + if (workspaceFolder) { + return workspaceFolder.uri.fsPath + } + // Debug logging when falling back + console.debug(`[CodeIndex] No workspace found for context path: ${contextPath}, falling back to default`) + } + + // Fall back to current behavior + return getWorkspacePath() +} diff --git a/webview-ui/src/components/chat/AutoApproveMenu.tsx b/webview-ui/src/components/chat/AutoApproveMenu.tsx index ac02d6b8c44f..ae363a7b63b7 100644 --- a/webview-ui/src/components/chat/AutoApproveMenu.tsx +++ b/webview-ui/src/components/chat/AutoApproveMenu.tsx @@ -25,6 +25,8 @@ const AutoApproveMenu = ({ style }: AutoApproveMenuProps) => { alwaysAllowModeSwitch, alwaysAllowSubtasks, alwaysApproveResubmit, + alwaysAllowFollowupQuestions, + alwaysAllowUpdateTodoList, allowedMaxRequests, setAlwaysAllowReadOnly, setAlwaysAllowWrite, @@ -34,6 +36,8 @@ const AutoApproveMenu = ({ style }: AutoApproveMenuProps) => { setAlwaysAllowModeSwitch, setAlwaysAllowSubtasks, setAlwaysApproveResubmit, + setAlwaysAllowFollowupQuestions, + setAlwaysAllowUpdateTodoList, setAllowedMaxRequests, } = useExtensionState() @@ -68,6 +72,12 @@ const AutoApproveMenu = ({ style }: AutoApproveMenuProps) => { case "alwaysApproveResubmit": setAlwaysApproveResubmit(value) break + case "alwaysAllowFollowupQuestions": + setAlwaysAllowFollowupQuestions(value) + break + case "alwaysAllowUpdateTodoList": + setAlwaysAllowUpdateTodoList(value) + break } }, [ @@ -79,6 +89,8 @@ const AutoApproveMenu = ({ style }: 
AutoApproveMenuProps) => { setAlwaysAllowModeSwitch, setAlwaysAllowSubtasks, setAlwaysApproveResubmit, + setAlwaysAllowFollowupQuestions, + setAlwaysAllowUpdateTodoList, ], ) @@ -94,6 +106,8 @@ const AutoApproveMenu = ({ style }: AutoApproveMenuProps) => { alwaysAllowModeSwitch: alwaysAllowModeSwitch, alwaysAllowSubtasks: alwaysAllowSubtasks, alwaysApproveResubmit: alwaysApproveResubmit, + alwaysAllowFollowupQuestions: alwaysAllowFollowupQuestions, + alwaysAllowUpdateTodoList: alwaysAllowUpdateTodoList, }), [ alwaysAllowReadOnly, @@ -104,6 +118,8 @@ const AutoApproveMenu = ({ style }: AutoApproveMenuProps) => { alwaysAllowModeSwitch, alwaysAllowSubtasks, alwaysApproveResubmit, + alwaysAllowFollowupQuestions, + alwaysAllowUpdateTodoList, ], ) @@ -134,7 +150,7 @@ const AutoApproveMenu = ({ style }: AutoApproveMenuProps) => { display: "flex", alignItems: "center", gap: "8px", - padding: isExpanded ? "8px 0" : "8px 0 0 0", + padding: isExpanded ? "8px 0" : "2px 0 0 0", cursor: "pointer", }} onClick={toggleExpanded}> diff --git a/webview-ui/src/components/chat/ChatRow.tsx b/webview-ui/src/components/chat/ChatRow.tsx index 43824c590277..9c90da503f88 100644 --- a/webview-ui/src/components/chat/ChatRow.tsx +++ b/webview-ui/src/components/chat/ChatRow.tsx @@ -10,6 +10,7 @@ import type { ClineMessage } from "@roo-code/types" import { ClineApiReqInfo, ClineAskUseMcpServer, ClineSayTool } from "@roo/ExtensionMessage" import { COMMAND_OUTPUT_STRING } from "@roo/combineCommandSequences" import { safeJsonParse } from "@roo/safeJsonParse" +import { FollowUpData, SuggestionItem } from "@roo-code/types" import { useCopyToClipboard } from "@src/utils/clipboard" import { useExtensionState } from "@src/context/ExtensionStateContext" @@ -20,6 +21,7 @@ import { getLanguageFromPath } from "@src/utils/getLanguageFromPath" import { Button } from "@src/components/ui" import { ToolUseBlock, ToolUseBlockHeader } from "../common/ToolUseBlock" +import UpdateTodoListToolBlock from "./UpdateTodoListToolBlock" import CodeAccordian from "../common/CodeAccordian" import CodeBlock from "../common/CodeBlock" import MarkdownBlock from "../common/MarkdownBlock" @@ -48,8 +50,10 @@ interface ChatRowProps { isStreaming: boolean onToggleExpand: (ts: number) => void onHeightChange: (isTaller: boolean) => void - onSuggestionClick?: (answer: string, event?: React.MouseEvent) => void + onSuggestionClick?: (suggestion: SuggestionItem, event?: React.MouseEvent) => void onBatchFileResponse?: (response: { [key: string]: boolean }) => void + onFollowUpUnmount?: () => void + editable?: boolean } // eslint-disable-next-line @typescript-eslint/no-empty-object-type @@ -98,7 +102,9 @@ export const ChatRowContent = ({ isStreaming, onToggleExpand, onSuggestionClick, + onFollowUpUnmount, onBatchFileResponse, + editable, }: ChatRowContentProps) => { const { t } = useTranslation() const { mcpServers, alwaysAllowMcp, currentCheckpoint } = useExtensionState() @@ -279,7 +285,7 @@ export const ChatRowContent = ({ const followUpData = useMemo(() => { if (message.type === "ask" && message.ask === "followup" && !message.partial) { - return safeJsonParse(message.text) + return safeJsonParse(message.text) } return null }, [message.type, message.ask, message.partial, message.text]) @@ -428,6 +434,21 @@ export const ChatRowContent = ({
  • ) } + case "updateTodoList" as any: { + const todos = (tool as any).todos || [] + return ( + { + if (typeof vscode !== "undefined" && vscode?.postMessage) { + vscode.postMessage({ type: "updateTodoList", payload: { todos: updatedTodos } }) + } + }} + editable={editable && isLast} + /> + ) + } case "newFileCreated": return ( <> @@ -1086,6 +1107,8 @@ export const ChatRowContent = ({ const { query = "", results = [] } = parsed?.content || {} return + case "user_edit_todos": + return {}} /> default: return ( <> @@ -1215,6 +1238,7 @@ export const ChatRowContent = ({ suggestions={followUpData?.suggest} onSuggestionClick={onSuggestionClick} ts={message?.ts} + onUnmount={onFollowUpUnmount} /> ) diff --git a/webview-ui/src/components/chat/ChatTextArea.tsx b/webview-ui/src/components/chat/ChatTextArea.tsx index dc0776407703..51279062d2ba 100644 --- a/webview-ui/src/components/chat/ChatTextArea.tsx +++ b/webview-ui/src/components/chat/ChatTextArea.tsx @@ -787,10 +787,10 @@ const ChatTextArea = forwardRef( "relative", "flex", "flex-col", - "gap-2", + "gap-1", "bg-editor-background", - "m-2 mt-1", - "p-1.5", + "px-1.5", + "pb-1", "outline-none", "border", "border-none", @@ -998,7 +998,7 @@ const ChatTextArea = forwardRef( /> )} -
    +
    messages.at(0), [messages]) + const latestTodos = useMemo(() => { + return getLatestTodo(messages) + }, [messages]) + const modifiedMessages = useMemo(() => combineApiRequests(combineCommandSequences(messages.slice(1))), [messages]) // Has to be after api_req_finished are all reduced into api_req_started messages. @@ -345,9 +354,6 @@ const ChatViewComponent: React.ForwardRefRenderFunction { + // Update local state and notify extension to sync mode change + setMode(modeSlug) + + // Send the mode switch message + vscode.postMessage({ + type: "mode", + text: modeSlug, + }) + }, + [setMode], + ) + const handleSuggestionClickInRow = useCallback( - (answer: string, event?: React.MouseEvent) => { + (suggestion: SuggestionItem, event?: React.MouseEvent) => { + // Check if we need to switch modes + if (suggestion.mode) { + // Only switch modes if it's a manual click (event exists) or auto-approval is allowed + const isManualClick = !!event + if (isManualClick || alwaysAllowModeSwitch) { + // Switch mode without waiting + switchToMode(suggestion.mode) + } + } + if (event?.shiftKey) { // Always append to existing text, don't overwrite setInputValue((currentValue) => { - return currentValue !== "" ? `${currentValue} \n${answer}` : answer + return currentValue !== "" ? `${currentValue} \n${suggestion.answer}` : suggestion.answer }) } else { - handleSendMessage(answer, []) + handleSendMessage(suggestion.answer, []) } }, - [handleSendMessage, setInputValue], // setInputValue is stable, handleSendMessage depends on clineAsk + [handleSendMessage, setInputValue, switchToMode, alwaysAllowModeSwitch], ) const handleBatchFileResponse = useCallback((response: { [key: string]: boolean }) => { @@ -1208,6 +1246,15 @@ const ChatViewComponent: React.ForwardRefRenderFunction { + // Clear the auto-approve timeout to prevent race conditions + if (autoApproveTimeoutRef.current) { + clearTimeout(autoApproveTimeoutRef.current) + autoApproveTimeoutRef.current = null + } + }, []) + const itemContent = useCallback( (index: number, messageOrGroup: ClineMessage | ClineMessage[]) => { // browser session group @@ -1243,6 +1290,25 @@ const ChatViewComponent: React.ForwardRefRenderFunction { + let tool: any = {} + try { + tool = JSON.parse(messageOrGroup.text || "{}") + } catch (_) { + if (messageOrGroup.text?.includes("updateTodoList")) { + tool = { tool: "updateTodoList" } + } + } + if (tool.tool === "updateTodoList" && alwaysAllowUpdateTodoList) { + return false + } + return tool.tool === "updateTodoList" && enableButtons && !!primaryButtonText + })() + } /> ) }, @@ -1255,6 +1321,10 @@ const ChatViewComponent: React.ForwardRefRenderFunction { if (lastMessage?.ask && isAutoApproved(lastMessage)) { - if (lastMessage.ask === "tool" && isWriteToolAction(lastMessage)) { + // Special handling for follow-up questions + if (lastMessage.ask === "followup") { + // Handle invalid JSON + let followUpData: FollowUpData = {} + try { + followUpData = JSON.parse(lastMessage.text || "{}") as FollowUpData + } catch (error) { + console.error("Failed to parse follow-up data:", error) + return + } + + if (followUpData && followUpData.suggest && followUpData.suggest.length > 0) { + // Wait for the configured timeout before auto-selecting the first suggestion + await new Promise((resolve) => { + autoApproveTimeoutRef.current = setTimeout(resolve, followupAutoApproveTimeoutMs) + }) + + // Get the first suggestion + const firstSuggestion = followUpData.suggest[0] + + // Handle the suggestion click + handleSuggestionClickInRow(firstSuggestion) + 
return + } + } else if (lastMessage.ask === "tool" && isWriteToolAction(lastMessage)) { await new Promise((resolve) => { autoApproveTimeoutRef.current = setTimeout(resolve, writeDelayMs) }) } - if (autoApproveTimeoutRef.current === null || autoApproveTimeoutRef.current) { - vscode.postMessage({ type: "askResponse", askResponse: "yesButtonClicked" }) + vscode.postMessage({ type: "askResponse", askResponse: "yesButtonClicked" }) - setSendingDisabled(true) - setClineAsk(undefined) - setEnableButtons(false) - } + setSendingDisabled(true) + setClineAsk(undefined) + setEnableButtons(false) } } autoApprove() @@ -1303,6 +1395,7 @@ const ChatViewComponent: React.ForwardRefRenderFunction m.slug === mode) const nextModeIndex = (currentModeIndex + 1) % allModes.length // Update local state and notify extension to sync mode change - setMode(allModes[nextModeIndex].slug) - vscode.postMessage({ - type: "mode", - text: allModes[nextModeIndex].slug, - }) - }, [mode, setMode, customModes]) + switchToMode(allModes[nextModeIndex].slug) + }, [mode, customModes, switchToMode]) // Add keyboard event handler const handleKeyDown = useCallback( @@ -1365,6 +1456,8 @@ const ChatViewComponent: React.ForwardRefRenderFunction {(showAnnouncement || showAnnouncementModal) && ( @@ -1392,6 +1485,7 @@ const ChatViewComponent: React.ForwardRefRenderFunction {hasSystemPromptOverride && ( @@ -1466,7 +1560,7 @@ const ChatViewComponent: React.ForwardRefRenderFunction +
    )} @@ -1477,7 +1571,7 @@ const ChatViewComponent: React.ForwardRefRenderFunction
    - - {showScrollToBottom ? ( -
    - -
    { - scrollToBottomSmooth() - disableAutoScrollRef.current = false - }}> - -
    -
    -
    - ) : ( +
    + +
    + {areButtonsVisible && (
    - {primaryButtonText && !isStreaming && ( - - handlePrimaryButtonClick(inputValue, selectedImages)}> - {primaryButtonText} - - - )} - {(secondaryButtonText || isStreaming) && ( - + {showScrollToBottom ? ( + handleSecondaryButtonClick(inputValue, selectedImages)}> - {isStreaming ? t("chat:cancel.title") : secondaryButtonText} + className="flex-[2]" + onClick={() => { + scrollToBottomSmooth() + disableAutoScrollRef.current = false + }}> + + ) : ( + <> + {primaryButtonText && !isStreaming && ( + + handlePrimaryButtonClick(inputValue, selectedImages)}> + {primaryButtonText} + + + )} + {(secondaryButtonText || isStreaming) && ( + + handleSecondaryButtonClick(inputValue, selectedImages)}> + {isStreaming ? t("chat:cancel.title") : secondaryButtonText} + + + )} + )}
    )} diff --git a/webview-ui/src/components/chat/CodeIndexPopover.tsx b/webview-ui/src/components/chat/CodeIndexPopover.tsx new file mode 100644 index 000000000000..6c9ea1c2045e --- /dev/null +++ b/webview-ui/src/components/chat/CodeIndexPopover.tsx @@ -0,0 +1,757 @@ +import React, { useState, useEffect, useMemo } from "react" +import { Trans } from "react-i18next" +import { + VSCodeButton, + VSCodeTextField, + VSCodeDropdown, + VSCodeOption, + VSCodeLink, +} from "@vscode/webview-ui-toolkit/react" +import * as ProgressPrimitive from "@radix-ui/react-progress" +import { vscode } from "@src/utils/vscode" +import { useExtensionState } from "@src/context/ExtensionStateContext" +import { useAppTranslation } from "@src/i18n/TranslationContext" +import { buildDocLink } from "@src/utils/docLinks" +import { cn } from "@src/lib/utils" +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, + AlertDialogTrigger, + Popover, + PopoverContent, + PopoverTrigger, + Slider, + StandardTooltip, +} from "@src/components/ui" +import { useRooPortal } from "@src/components/ui/hooks/useRooPortal" +import type { EmbedderProvider } from "@roo/embeddingModels" +import type { IndexingStatus } from "@roo/ExtensionMessage" +import { CODEBASE_INDEX_DEFAULTS } from "@roo-code/types" + +interface CodeIndexPopoverProps { + children: React.ReactNode + indexingStatus: IndexingStatus +} + +interface LocalCodeIndexSettings { + // Global state settings + codebaseIndexEnabled: boolean + codebaseIndexQdrantUrl: string + codebaseIndexEmbedderProvider: EmbedderProvider + codebaseIndexEmbedderBaseUrl?: string + codebaseIndexEmbedderModelId: string + codebaseIndexSearchMaxResults?: number + codebaseIndexSearchMinScore?: number + + // Secret settings (start empty, will be loaded separately) + codeIndexOpenAiKey?: string + codeIndexQdrantApiKey?: string + codebaseIndexOpenAiCompatibleBaseUrl?: string + codebaseIndexOpenAiCompatibleApiKey?: string + codebaseIndexOpenAiCompatibleModelDimension?: number + codebaseIndexGeminiApiKey?: string +} + +export const CodeIndexPopover: React.FC = ({ + children, + indexingStatus: externalIndexingStatus, +}) => { + const SECRET_PLACEHOLDER = "••••••••••••••••" + const { t } = useAppTranslation() + const { codebaseIndexConfig, codebaseIndexModels } = useExtensionState() + const [open, setOpen] = useState(false) + const [isAdvancedSettingsOpen, setIsAdvancedSettingsOpen] = useState(false) + + const [indexingStatus, setIndexingStatus] = useState(externalIndexingStatus) + + const [saveStatus, setSaveStatus] = useState<"idle" | "saving" | "saved" | "error">("idle") + const [saveError, setSaveError] = useState(null) + + // Default settings template + const getDefaultSettings = (): LocalCodeIndexSettings => ({ + codebaseIndexEnabled: false, + codebaseIndexQdrantUrl: "", + codebaseIndexEmbedderProvider: "openai", + codebaseIndexEmbedderBaseUrl: "", + codebaseIndexEmbedderModelId: "", + codebaseIndexSearchMaxResults: CODEBASE_INDEX_DEFAULTS.DEFAULT_SEARCH_RESULTS, + codebaseIndexSearchMinScore: CODEBASE_INDEX_DEFAULTS.DEFAULT_SEARCH_MIN_SCORE, + codeIndexOpenAiKey: "", + codeIndexQdrantApiKey: "", + codebaseIndexOpenAiCompatibleBaseUrl: "", + codebaseIndexOpenAiCompatibleApiKey: "", + codebaseIndexOpenAiCompatibleModelDimension: undefined, + codebaseIndexGeminiApiKey: "", + }) + + // Initial settings state - stores 
the settings when popover opens + const [initialSettings, setInitialSettings] = useState(getDefaultSettings()) + + // Current settings state - tracks user changes + const [currentSettings, setCurrentSettings] = useState(getDefaultSettings()) + + // Update indexing status from parent + useEffect(() => { + setIndexingStatus(externalIndexingStatus) + }, [externalIndexingStatus]) + + // Initialize settings from global state + useEffect(() => { + if (codebaseIndexConfig) { + const settings = { + codebaseIndexEnabled: codebaseIndexConfig.codebaseIndexEnabled || false, + codebaseIndexQdrantUrl: codebaseIndexConfig.codebaseIndexQdrantUrl || "", + codebaseIndexEmbedderProvider: codebaseIndexConfig.codebaseIndexEmbedderProvider || "openai", + codebaseIndexEmbedderBaseUrl: codebaseIndexConfig.codebaseIndexEmbedderBaseUrl || "", + codebaseIndexEmbedderModelId: codebaseIndexConfig.codebaseIndexEmbedderModelId || "", + codebaseIndexSearchMaxResults: + codebaseIndexConfig.codebaseIndexSearchMaxResults ?? CODEBASE_INDEX_DEFAULTS.DEFAULT_SEARCH_RESULTS, + codebaseIndexSearchMinScore: + codebaseIndexConfig.codebaseIndexSearchMinScore ?? CODEBASE_INDEX_DEFAULTS.DEFAULT_SEARCH_MIN_SCORE, + codeIndexOpenAiKey: "", + codeIndexQdrantApiKey: "", + codebaseIndexOpenAiCompatibleBaseUrl: codebaseIndexConfig.codebaseIndexOpenAiCompatibleBaseUrl || "", + codebaseIndexOpenAiCompatibleApiKey: "", + codebaseIndexOpenAiCompatibleModelDimension: + codebaseIndexConfig.codebaseIndexOpenAiCompatibleModelDimension || undefined, + codebaseIndexGeminiApiKey: "", + } + setInitialSettings(settings) + setCurrentSettings(settings) + + // Request secret status to check if secrets exist + vscode.postMessage({ type: "requestCodeIndexSecretStatus" }) + } + }, [codebaseIndexConfig]) + + // Request initial indexing status + useEffect(() => { + if (open) { + vscode.postMessage({ type: "requestIndexingStatus" }) + vscode.postMessage({ type: "requestCodeIndexSecretStatus" }) + } + }, [open]) + + // Listen for indexing status updates and save responses + useEffect(() => { + const handleMessage = (event: MessageEvent) => { + if (event.data.type === "indexingStatusUpdate") { + setIndexingStatus({ + systemStatus: event.data.values.systemStatus, + message: event.data.values.message || "", + processedItems: event.data.values.processedItems, + totalItems: event.data.values.totalItems, + currentItemUnit: event.data.values.currentItemUnit || "items", + }) + } else if (event.data.type === "codeIndexSettingsSaved") { + if (event.data.success) { + setSaveStatus("saved") + // Don't update initial settings here - wait for the secret status response + // Request updated secret status after save + vscode.postMessage({ type: "requestCodeIndexSecretStatus" }) + // Reset status after 3 seconds + setTimeout(() => { + setSaveStatus("idle") + }, 3000) + } else { + setSaveStatus("error") + setSaveError(event.data.error || t("settings:codeIndex.saveError")) + // Clear error message after 5 seconds + setTimeout(() => { + setSaveStatus("idle") + setSaveError(null) + }, 5000) + } + } + } + + window.addEventListener("message", handleMessage) + return () => window.removeEventListener("message", handleMessage) + }, [t]) + + // Listen for secret status + useEffect(() => { + const handleMessage = (event: MessageEvent) => { + if (event.data.type === "codeIndexSecretStatus") { + // Update settings to show placeholders for existing secrets + const secretStatus = event.data.values + + // Update both current and initial settings based on what secrets exist + const 
updateWithSecrets = (prev: LocalCodeIndexSettings): LocalCodeIndexSettings => { + const updated = { ...prev } + + // Only update to placeholder if the field is currently empty or already a placeholder + // This preserves user input when they're actively editing + if (!prev.codeIndexOpenAiKey || prev.codeIndexOpenAiKey === SECRET_PLACEHOLDER) { + updated.codeIndexOpenAiKey = secretStatus.hasOpenAiKey ? SECRET_PLACEHOLDER : "" + } + if (!prev.codeIndexQdrantApiKey || prev.codeIndexQdrantApiKey === SECRET_PLACEHOLDER) { + updated.codeIndexQdrantApiKey = secretStatus.hasQdrantApiKey ? SECRET_PLACEHOLDER : "" + } + if ( + !prev.codebaseIndexOpenAiCompatibleApiKey || + prev.codebaseIndexOpenAiCompatibleApiKey === SECRET_PLACEHOLDER + ) { + updated.codebaseIndexOpenAiCompatibleApiKey = secretStatus.hasOpenAiCompatibleApiKey + ? SECRET_PLACEHOLDER + : "" + } + if (!prev.codebaseIndexGeminiApiKey || prev.codebaseIndexGeminiApiKey === SECRET_PLACEHOLDER) { + updated.codebaseIndexGeminiApiKey = secretStatus.hasGeminiApiKey ? SECRET_PLACEHOLDER : "" + } + + return updated + } + + setCurrentSettings(updateWithSecrets) + setInitialSettings(updateWithSecrets) + } + } + + window.addEventListener("message", handleMessage) + return () => window.removeEventListener("message", handleMessage) + }, []) + + // Generic comparison function that detects changes between initial and current settings + const hasUnsavedChanges = useMemo(() => { + // Get all keys from both objects to handle any field + const allKeys = [...Object.keys(initialSettings), ...Object.keys(currentSettings)] as Array< + keyof LocalCodeIndexSettings + > + + // Use a Set to ensure unique keys + const uniqueKeys = Array.from(new Set(allKeys)) + + for (const key of uniqueKeys) { + const currentValue = currentSettings[key] + const initialValue = initialSettings[key] + + // For secret fields, check if the value has been modified from placeholder + if (currentValue === SECRET_PLACEHOLDER) { + // If it's still showing placeholder, no change + continue + } + + // Compare values - handles all types including undefined + if (currentValue !== initialValue) { + return true + } + } + + return false + }, [currentSettings, initialSettings]) + + const updateSetting = (key: keyof LocalCodeIndexSettings, value: any) => { + setCurrentSettings((prev) => ({ ...prev, [key]: value })) + } + + const handleSaveSettings = () => { + setSaveStatus("saving") + setSaveError(null) + + // Prepare settings to save - include all fields except secrets with placeholder values + const settingsToSave: any = {} + + // Iterate through all current settings + for (const [key, value] of Object.entries(currentSettings)) { + // Skip secret fields that still have placeholder value + if (value === SECRET_PLACEHOLDER) { + continue + } + + // Include all other fields + settingsToSave[key] = value + } + + // Save settings to backend + vscode.postMessage({ + type: "saveCodeIndexSettingsAtomic", + codeIndexSettings: settingsToSave, + }) + } + + const progressPercentage = useMemo( + () => + indexingStatus.totalItems > 0 + ? Math.round((indexingStatus.processedItems / indexingStatus.totalItems) * 100) + : 0, + [indexingStatus.processedItems, indexingStatus.totalItems], + ) + + const transformStyleString = `translateX(-${100 - progressPercentage}%)` + + const getAvailableModels = () => { + if (!codebaseIndexModels) return [] + + const models = codebaseIndexModels[currentSettings.codebaseIndexEmbedderProvider] + return models ? 
Object.keys(models) : [] + } + + const portalContainer = useRooPortal("roo-portal") + + return ( + + {children} + +
    +

    {t("settings:codeIndex.title")}

    +

    + + + +

    +
    + +
    + {/* Status Section */} +
    +

    {t("settings:codeIndex.statusTitle")}

    +
    + + {t(`settings:codeIndex.indexingStatuses.${indexingStatus.systemStatus.toLowerCase()}`)} + {indexingStatus.message ? ` - ${indexingStatus.message}` : ""} +
    + + {indexingStatus.systemStatus === "Indexing" && ( +
    + + + +
    + )} +
    + + {/* Embedder Provider Section */} +
    + + +
    + + {/* Provider-specific settings */} + {currentSettings.codebaseIndexEmbedderProvider === "openai" && ( + <> +
    + + updateSetting("codeIndexOpenAiKey", e.target.value)} + placeholder={t("settings:codeIndex.openAiKeyPlaceholder")} + className="w-full" + /> +
    + +
    + + updateSetting("codebaseIndexEmbedderModelId", e.target.value)} + className="w-full"> + {t("settings:codeIndex.selectModel")} + {getAvailableModels().map((modelId) => { + const model = + codebaseIndexModels?.[currentSettings.codebaseIndexEmbedderProvider]?.[ + modelId + ] + return ( + + {modelId}{" "} + {model + ? t("settings:codeIndex.modelDimensions", { + dimension: model.dimension, + }) + : ""} + + ) + })} + +
    + + )} + + {currentSettings.codebaseIndexEmbedderProvider === "ollama" && ( + <> +
    + + updateSetting("codebaseIndexEmbedderBaseUrl", e.target.value)} + placeholder={t("settings:codeIndex.ollamaUrlPlaceholder")} + className="w-full" + /> +
    + +
    + + updateSetting("codebaseIndexEmbedderModelId", e.target.value)} + className="w-full"> + {t("settings:codeIndex.selectModel")} + {getAvailableModels().map((modelId) => { + const model = + codebaseIndexModels?.[currentSettings.codebaseIndexEmbedderProvider]?.[ + modelId + ] + return ( + + {modelId}{" "} + {model + ? t("settings:codeIndex.modelDimensions", { + dimension: model.dimension, + }) + : ""} + + ) + })} + +
    + + )} + + {currentSettings.codebaseIndexEmbedderProvider === "openai-compatible" && ( + <> +
    + + + updateSetting("codebaseIndexOpenAiCompatibleBaseUrl", e.target.value) + } + placeholder={t("settings:codeIndex.openAiCompatibleBaseUrlPlaceholder")} + className="w-full" + /> +
    + +
    + + + updateSetting("codebaseIndexOpenAiCompatibleApiKey", e.target.value) + } + placeholder={t("settings:codeIndex.openAiCompatibleApiKeyPlaceholder")} + className="w-full" + /> +
    + +
    + + updateSetting("codebaseIndexEmbedderModelId", e.target.value)} + placeholder={t("settings:codeIndex.modelPlaceholder")} + className="w-full" + /> +
    + +
    + + { + const value = e.target.value ? parseInt(e.target.value) : undefined + updateSetting("codebaseIndexOpenAiCompatibleModelDimension", value) + }} + placeholder={t("settings:codeIndex.modelDimensionPlaceholder")} + className="w-full" + /> +
    + + )} + + {currentSettings.codebaseIndexEmbedderProvider === "gemini" && ( + <> +
    + + updateSetting("codebaseIndexGeminiApiKey", e.target.value)} + placeholder={t("settings:codeIndex.geminiApiKeyPlaceholder")} + className="w-full" + /> +
    + +
    + + updateSetting("codebaseIndexEmbedderModelId", e.target.value)} + className="w-full"> + {t("settings:codeIndex.selectModel")} + {getAvailableModels().map((modelId) => { + const model = + codebaseIndexModels?.[currentSettings.codebaseIndexEmbedderProvider]?.[ + modelId + ] + return ( + + {modelId}{" "} + {model + ? t("settings:codeIndex.modelDimensions", { + dimension: model.dimension, + }) + : ""} + + ) + })} + +
    + + )} + + {/* Qdrant Settings */} +
    + + updateSetting("codebaseIndexQdrantUrl", e.target.value)} + placeholder={t("settings:codeIndex.qdrantUrlPlaceholder")} + className="w-full" + /> +
    + +
    + + updateSetting("codeIndexQdrantApiKey", e.target.value)} + placeholder={t("settings:codeIndex.qdrantApiKeyPlaceholder")} + className="w-full" + /> +
    + + {/* Advanced Settings Disclosure */} +
    + + + {isAdvancedSettingsOpen && ( +
    + {/* Search Score Threshold Slider */} +
    +
    + + + + +
    +
    + + updateSetting("codebaseIndexSearchMinScore", values[0]) + } + className="flex-1" + data-testid="search-min-score-slider" + /> + + {( + currentSettings.codebaseIndexSearchMinScore ?? + CODEBASE_INDEX_DEFAULTS.DEFAULT_SEARCH_MIN_SCORE + ).toFixed(2)} + + + updateSetting( + "codebaseIndexSearchMinScore", + CODEBASE_INDEX_DEFAULTS.DEFAULT_SEARCH_MIN_SCORE, + ) + }> + + +
    +
    + + {/* Maximum Search Results Slider */} +
    +
    + + + + +
    +
    + + updateSetting("codebaseIndexSearchMaxResults", values[0]) + } + className="flex-1" + data-testid="search-max-results-slider" + /> + + {currentSettings.codebaseIndexSearchMaxResults ?? + CODEBASE_INDEX_DEFAULTS.DEFAULT_SEARCH_RESULTS} + + + updateSetting( + "codebaseIndexSearchMaxResults", + CODEBASE_INDEX_DEFAULTS.DEFAULT_SEARCH_RESULTS, + ) + }> + + +
    +
    +
    + )} +
    + + {/* Action Buttons */} +
    +
    + {(indexingStatus.systemStatus === "Error" || indexingStatus.systemStatus === "Standby") && ( + vscode.postMessage({ type: "startIndexing" })} + disabled={saveStatus === "saving" || hasUnsavedChanges}> + {t("settings:codeIndex.startIndexingButton")} + + )} + + {(indexingStatus.systemStatus === "Indexed" || indexingStatus.systemStatus === "Error") && ( + + + + {t("settings:codeIndex.clearIndexDataButton")} + + + + + + {t("settings:codeIndex.clearDataDialog.title")} + + + {t("settings:codeIndex.clearDataDialog.description")} + + + + + {t("settings:codeIndex.clearDataDialog.cancelButton")} + + vscode.postMessage({ type: "clearIndexData" })}> + {t("settings:codeIndex.clearDataDialog.confirmButton")} + + + + + )} +
    + + + {saveStatus === "saving" + ? t("settings:codeIndex.saving") + : t("settings:codeIndex.saveSettings")} + +
    + + {/* Save Status Messages */} + {saveStatus === "error" && ( +
    + + {saveError || t("settings:codeIndex.saveError")} + +
    + )} +
    +
    +
    + ) +} diff --git a/webview-ui/src/components/chat/CodebaseSearchResult.tsx b/webview-ui/src/components/chat/CodebaseSearchResult.tsx index 4a2ced617889..4d48749ecdd9 100644 --- a/webview-ui/src/components/chat/CodebaseSearchResult.tsx +++ b/webview-ui/src/components/chat/CodebaseSearchResult.tsx @@ -1,4 +1,5 @@ import React from "react" +import { useTranslation } from "react-i18next" import { vscode } from "@src/utils/vscode" import { StandardTooltip } from "@/components/ui" @@ -12,6 +13,8 @@ interface CodebaseSearchResultProps { } const CodebaseSearchResult: React.FC = ({ filePath, score, startLine, endLine }) => { + const { t } = useTranslation("chat") + const handleClick = () => { console.log(filePath) vscode.postMessage({ @@ -24,7 +27,7 @@ const CodebaseSearchResult: React.FC = ({ filePath, s } return ( - +
    @@ -35,6 +38,9 @@ const CodebaseSearchResult: React.FC = ({ filePath, s {filePath.split("/").slice(0, -1).join("/")} + + {score.toFixed(3)} +
    diff --git a/webview-ui/src/components/chat/FollowUpSuggest.tsx b/webview-ui/src/components/chat/FollowUpSuggest.tsx index 6e1ec58814ca..5649da744a0e 100644 --- a/webview-ui/src/components/chat/FollowUpSuggest.tsx +++ b/webview-ui/src/components/chat/FollowUpSuggest.tsx @@ -1,40 +1,85 @@ -import { useCallback } from "react" +import { useCallback, useEffect, useState } from "react" import { Edit } from "lucide-react" import { Button, StandardTooltip } from "@/components/ui" -import { vscode } from "@/utils/vscode" import { useAppTranslation } from "@src/i18n/TranslationContext" +import { useExtensionState } from "@src/context/ExtensionStateContext" +import { SuggestionItem } from "@roo-code/types" -interface SuggestionItem { - answer: string - mode?: string -} +const DEFAULT_FOLLOWUP_TIMEOUT_MS = 60000 +const COUNTDOWN_INTERVAL_MS = 1000 interface FollowUpSuggestProps { - suggestions?: (string | SuggestionItem)[] - onSuggestionClick?: (answer: string, event?: React.MouseEvent) => void + suggestions?: SuggestionItem[] + onSuggestionClick?: (suggestion: SuggestionItem, event?: React.MouseEvent) => void ts: number + onUnmount?: () => void } -export const FollowUpSuggest = ({ suggestions = [], onSuggestionClick, ts = 1 }: FollowUpSuggestProps) => { +export const FollowUpSuggest = ({ suggestions = [], onSuggestionClick, ts = 1, onUnmount }: FollowUpSuggestProps) => { + const { autoApprovalEnabled, alwaysAllowFollowupQuestions, followupAutoApproveTimeoutMs } = useExtensionState() + const [countdown, setCountdown] = useState(null) + const [suggestionSelected, setSuggestionSelected] = useState(false) const { t } = useAppTranslation() - const handleSuggestionClick = useCallback( - (suggestion: string | SuggestionItem, event: React.MouseEvent) => { - const suggestionText = typeof suggestion === "string" ? suggestion : suggestion.answer - const mode = typeof suggestion === "object" ? suggestion.mode : undefined - // If there's a mode switch and it's not a shift-click (which just copies to input), switch modes first - if (mode && !event.shiftKey) { - vscode.postMessage({ - type: "mode", - text: mode, + // Start countdown timer when auto-approval is enabled for follow-up questions + useEffect(() => { + // Only start countdown if auto-approval is enabled for follow-up questions and no suggestion has been selected + if (autoApprovalEnabled && alwaysAllowFollowupQuestions && suggestions.length > 0 && !suggestionSelected) { + // Start with the configured timeout in seconds + const timeoutMs = + typeof followupAutoApproveTimeoutMs === "number" && !isNaN(followupAutoApproveTimeoutMs) + ? 
followupAutoApproveTimeoutMs + : DEFAULT_FOLLOWUP_TIMEOUT_MS + + // Convert milliseconds to seconds for the countdown + setCountdown(Math.floor(timeoutMs / 1000)) + + // Update countdown every second + const intervalId = setInterval(() => { + setCountdown((prevCountdown) => { + if (prevCountdown === null || prevCountdown <= 1) { + clearInterval(intervalId) + return null + } + return prevCountdown - 1 }) + }, COUNTDOWN_INTERVAL_MS) + + // Clean up interval on unmount and notify parent component + return () => { + clearInterval(intervalId) + // Notify parent component that this component is unmounting + // so it can clear any related timeouts + onUnmount?.() + } + } else { + setCountdown(null) + } + }, [ + autoApprovalEnabled, + alwaysAllowFollowupQuestions, + suggestions, + followupAutoApproveTimeoutMs, + suggestionSelected, + onUnmount, + ]) + const handleSuggestionClick = useCallback( + (suggestion: SuggestionItem, event: React.MouseEvent) => { + // Mark a suggestion as selected if it's not a shift-click (which just copies to input) + if (!event.shiftKey) { + setSuggestionSelected(true) + // Also notify parent component to cancel auto-approval timeout + // This prevents race conditions between visual countdown and actual timeout + onUnmount?.() } - onSuggestionClick?.(suggestionText, event) + // Pass the suggestion object to the parent component + // The parent component will handle mode switching if needed + onSuggestionClick?.(suggestion, event) }, - [onSuggestionClick], + [onSuggestionClick, onUnmount], ) // Don't render if there are no suggestions or no click handler. @@ -44,23 +89,29 @@ export const FollowUpSuggest = ({ suggestions = [], onSuggestionClick, ts = 1 }: return (
    - {suggestions.map((suggestion) => { - const suggestionText = typeof suggestion === "string" ? suggestion : suggestion.answer - const mode = typeof suggestion === "object" ? suggestion.mode : undefined + {suggestions.map((suggestion, index) => { + const isFirstSuggestion = index === 0 return ( -
    +
    - {mode && ( + {suggestion.mode && (
    - {mode} + {suggestion.mode}
    )} @@ -69,7 +120,7 @@ export const FollowUpSuggest = ({ suggestions = [], onSuggestionClick, ts = 1 }: onClick={(e) => { e.stopPropagation() // Simulate shift-click by directly calling the handler with shiftKey=true. - onSuggestionClick?.(suggestionText, { ...e, shiftKey: true }) + onSuggestionClick?.(suggestion, { ...e, shiftKey: true }) }}> + aria-label={getTooltipText()}> + {/* Status dot */} + + + {showTooltip && (
    { const [deleteTaskId, setDeleteTaskId] = useState(null) const { t } = useTranslation() + const { copyWithFeedback, showCopyFeedback } = useCopyToClipboard() return (
    @@ -28,6 +30,14 @@ export const TaskActions = ({ item, buttonsDisabled }: TaskActionsProps) => { disabled={buttonsDisabled} onClick={() => vscode.postMessage({ type: "exportCurrentTask" })} /> + {item?.task && ( + copyWithFeedback(item.task, e)} + /> + )} {!!item?.size && item.size > 0 && ( <>
    diff --git a/webview-ui/src/components/chat/TaskHeader.tsx b/webview-ui/src/components/chat/TaskHeader.tsx index 0d75f8f25066..1896df486b33 100644 --- a/webview-ui/src/components/chat/TaskHeader.tsx +++ b/webview-ui/src/components/chat/TaskHeader.tsx @@ -20,6 +20,7 @@ import { TaskActions } from "./TaskActions" import { ShareButton } from "./ShareButton" import { ContextWindowProgress } from "./ContextWindowProgress" import { Mention } from "./Mention" +import { TodoListDisplay } from "./TodoListDisplay" export interface TaskHeaderProps { task: ClineMessage @@ -32,6 +33,7 @@ export interface TaskHeaderProps { buttonsDisabled: boolean handleCondenseContext: (taskId: string) => void onClose: () => void + todos?: any[] } const TaskHeader = ({ @@ -45,6 +47,7 @@ const TaskHeader = ({ buttonsDisabled, handleCondenseContext, onClose, + todos, }: TaskHeaderProps) => { const { t } = useTranslation() const { apiConfiguration, currentTaskItem } = useExtensionState() @@ -68,11 +71,14 @@ const TaskHeader = ({ ) + const hasTodos = todos && Array.isArray(todos) && todos.length > 0 + return (
    )}
    +
    ) } diff --git a/webview-ui/src/components/chat/TodoListDisplay.tsx b/webview-ui/src/components/chat/TodoListDisplay.tsx new file mode 100644 index 000000000000..c9a4d8f4d048 --- /dev/null +++ b/webview-ui/src/components/chat/TodoListDisplay.tsx @@ -0,0 +1,354 @@ +import { useState, useRef, useMemo, useEffect } from "react" + +export function TodoListDisplay({ todos }: { todos: any[] }) { + const [isCollapsed, setIsCollapsed] = useState(true) + const ulRef = useRef(null) + const itemRefs = useRef<(HTMLLIElement | null)[]>([]) + const scrollIndex = useMemo(() => { + const inProgressIdx = todos.findIndex((todo: any) => todo.status === "in_progress") + if (inProgressIdx !== -1) return inProgressIdx + return todos.findIndex((todo: any) => todo.status !== "completed") + }, [todos]) + + // Find the most important todo to display when collapsed + const mostImportantTodo = useMemo(() => { + const inProgress = todos.find((todo: any) => todo.status === "in_progress") + if (inProgress) return inProgress + return todos.find((todo: any) => todo.status !== "completed") + }, [todos]) + useEffect(() => { + if (isCollapsed) return + if (!ulRef.current) return + if (scrollIndex === -1) return + const target = itemRefs.current[scrollIndex] + if (target && ulRef.current) { + const ul = ulRef.current + const targetTop = target.offsetTop - ul.offsetTop + const targetHeight = target.offsetHeight + const ulHeight = ul.clientHeight + const scrollTo = targetTop - (ulHeight / 2 - targetHeight / 2) + ul.scrollTop = scrollTo + } + }, [todos, isCollapsed, scrollIndex]) + if (!Array.isArray(todos) || todos.length === 0) return null + + const totalCount = todos.length + const completedCount = todos.filter((todo: any) => todo.status === "completed").length + + const allCompleted = completedCount === totalCount && totalCount > 0 + + // Create the status icon for the most important todo + const getMostImportantTodoIcon = () => { + if (allCompleted) { + return ( + + ) + } + + if (!mostImportantTodo) { + return ( + + ) + } + + if (mostImportantTodo.status === "completed") { + return ( + + ) + } + + if (mostImportantTodo.status === "in_progress") { + return ( + + ) + } + + // Default not-started todo + return ( + + ) + } + + return ( +
    +
    setIsCollapsed((v) => !v)}> + {getMostImportantTodoIcon()} + + {allCompleted ? "All tasks completed!" : mostImportantTodo?.content || "No pending tasks"} + +
    + + + {completedCount}/{totalCount} + +
    +
    + {/* Floating panel for expanded state */} + {!isCollapsed && ( + <> + {/* Backdrop */} +
    setIsCollapsed(true)} + /> + {/* Floating panel */} +
    + {/* Panel header */} +
    +
    + + Todo List + + {completedCount}/{totalCount} + +
    + { + e.stopPropagation() + setIsCollapsed(true) + }} + onMouseEnter={(e) => { + e.currentTarget.style.opacity = "1" + e.currentTarget.style.background = "var(--vscode-toolbar-hoverBackground)" + }} + onMouseLeave={(e) => { + e.currentTarget.style.opacity = "0.7" + e.currentTarget.style.background = "transparent" + }} + /> +
    + {/* Todo list */} +
      + {todos.map((todo: any, idx: number) => { + let icon + if (todo.status === "completed") { + icon = ( + + ) + } else if (todo.status === "in_progress") { + icon = ( + + ) + } else { + icon = ( + + ) + } + return ( +
    • (itemRefs.current[idx] = el)} + style={{ + marginBottom: 8, + display: "flex", + alignItems: "flex-start", + minHeight: 20, + lineHeight: "1.4", + }}> + {icon} + + {todo.content} + +
    • + ) + })} +
    +
    + + )} +
    + ) +} diff --git a/webview-ui/src/components/chat/UpdateTodoListToolBlock.tsx b/webview-ui/src/components/chat/UpdateTodoListToolBlock.tsx new file mode 100644 index 000000000000..e85128450540 --- /dev/null +++ b/webview-ui/src/components/chat/UpdateTodoListToolBlock.tsx @@ -0,0 +1,498 @@ +import React, { useState, useEffect, useRef } from "react" +import { ToolUseBlock, ToolUseBlockHeader } from "../common/ToolUseBlock" +import MarkdownBlock from "../common/MarkdownBlock" + +interface TodoItem { + id?: string + content: string + status?: "completed" | "in_progress" | string +} + +/** + * @description + * Editable Todo List component. Each time the todo list changes (edit, add, delete, status switch), the parent component will be notified via the onChange callback. + * The parent component should synchronize the latest todos to the model in onChange. + */ +interface UpdateTodoListToolBlockProps { + todos?: TodoItem[] + content?: string + /** + * Callback when todos change, be sure to implement and notify the model with the latest todos + * @param todos Latest todo list + */ + onChange: (todos: TodoItem[]) => void + /** Whether editing is allowed (controlled externally) */ + editable?: boolean + userEdited?: boolean +} + +const STATUS_OPTIONS = [ + { value: "", label: "Not Started", color: "var(--vscode-foreground)", border: "#bbb", bg: "transparent" }, + { + value: "in_progress", + label: "In Progress", + color: "var(--vscode-charts-yellow)", + border: "var(--vscode-charts-yellow)", + bg: "rgba(255, 221, 51, 0.15)", + }, + { + value: "completed", + label: "Completed", + color: "var(--vscode-charts-green)", + border: "var(--vscode-charts-green)", + bg: "var(--vscode-charts-green)", + }, +] + +const genId = () => Math.random().toString(36).slice(2, 10) + +const UpdateTodoListToolBlock: React.FC = ({ + todos = [], + content, + onChange, + editable = true, + userEdited = false, +}) => { + const [editTodos, setEditTodos] = useState( + todos.length > 0 ? todos.map((todo) => ({ ...todo, id: todo.id || genId() })) : [], + ) + const [adding, setAdding] = useState(false) + const [newContent, setNewContent] = useState("") + const newInputRef = useRef(null) + const [deleteId, setDeleteId] = useState(null) + const [isEditing, setIsEditing] = useState(false) + + // Automatically exit edit mode when external editable becomes false + useEffect(() => { + if (!editable && isEditing) { + setIsEditing(false) + } + }, [editable, isEditing]) + + // Check if onChange is passed + useEffect(() => { + if (typeof onChange !== "function") { + console.warn( + "UpdateTodoListToolBlock: onChange callback not passed, cannot notify model after todo changes!", + ) + } + // Only check once on mount + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []) + + // Sync when external props.todos changes + useEffect(() => { + setEditTodos(todos.length > 0 ? todos.map((todo) => ({ ...todo, id: todo.id || genId() })) : []) + }, [todos]) + + // Auto focus on new item + useEffect(() => { + if (adding && newInputRef.current) { + newInputRef.current.focus() + } + }, [adding]) + + // Edit content + const handleContentChange = (id: string, value: string) => { + const newTodos = editTodos.map((todo) => (todo.id === id ? { ...todo, content: value } : todo)) + setEditTodos(newTodos) + onChange?.(newTodos) + } + + // Change status + const handleStatusChange = (id: string, status: string) => { + const newTodos = editTodos.map((todo) => (todo.id === id ? 
{ ...todo, status } : todo)) + setEditTodos(newTodos) + onChange?.(newTodos) + } + + // Delete (confirmation dialog) + const handleDelete = (id: string) => { + setDeleteId(id) + } + const confirmDelete = () => { + if (!deleteId) return + const newTodos = editTodos.filter((todo) => todo.id !== deleteId) + setEditTodos(newTodos) + onChange?.(newTodos) + setDeleteId(null) + } + const cancelDelete = () => setDeleteId(null) + + // Add + const handleAdd = () => { + if (!newContent.trim()) return + const newTodo: TodoItem = { + id: genId(), + content: newContent.trim(), + status: "", + } + const newTodos = [...editTodos, newTodo] + setEditTodos(newTodos) + onChange?.(newTodos) + setNewContent("") + setAdding(false) + } + + // Add on Enter + const handleNewInputKeyDown = (e: React.KeyboardEvent) => { + if (e.key === "Enter") { + handleAdd() + } else if (e.key === "Escape") { + setAdding(false) + setNewContent("") + } + } + + if (userEdited) { + return ( + + +
    + + + User Edit + +
    +
    + +
    + User Edits +
    + + ) + } + + return ( + <> + + +
    + + + Todo List Updated + +
    + {editable && ( + + )} +
    + +
    + {Array.isArray(editTodos) && editTodos.length > 0 ? ( +
      + {editTodos.map((todo, idx) => { + let icon + if (todo.status === "completed") { + icon = ( + + ) + } else if (todo.status === "in_progress") { + icon = ( + + ) + } else { + icon = ( + + ) + } + return ( +
    • + {icon} + {isEditing ? ( + handleContentChange(todo.id!, e.target.value)} + style={{ + flex: 1, + minWidth: 0, + fontWeight: 500, + color: "var(--vscode-input-foreground)", + background: "var(--vscode-input-background)", + border: "none", + outline: "none", + fontSize: 13, + marginRight: 6, + padding: "1px 3px", + borderBottom: "1px solid var(--vscode-input-border)", + }} + onBlur={(e) => { + if (!e.target.value.trim()) { + handleDelete(todo.id!) + } + }} + /> + ) : ( + + {todo.content} + + )} + {isEditing && ( + + )} + {isEditing && ( + + )} +
    • + ) + })} + {adding ? ( +
    • + + setNewContent(e.target.value)} + onKeyDown={handleNewInputKeyDown} + style={{ + flex: 1, + minWidth: 0, + fontWeight: 500, + color: "var(--vscode-foreground)", + background: "transparent", + border: "none", + outline: "none", + fontSize: 13, + marginRight: 6, + padding: "1px 3px", + borderBottom: "1px solid #eee", + }} + /> + + +
    • + ) : ( +
    • + {isEditing && ( + + )} +
    • + )} +
    + ) : ( + + )} +
    + {/* Delete confirmation dialog */} + {deleteId && ( +
    +
    e.stopPropagation()}> +
    + Are you sure you want to delete this todo item? +
    +
    + + +
    +
    +
    + )} + + + ) +} + +export default UpdateTodoListToolBlock diff --git a/webview-ui/src/components/chat/__tests__/ChatView.spec.tsx b/webview-ui/src/components/chat/__tests__/ChatView.spec.tsx index 0343cb6c9637..ba758fbe3b34 100644 --- a/webview-ui/src/components/chat/__tests__/ChatView.spec.tsx +++ b/webview-ui/src/components/chat/__tests__/ChatView.spec.tsx @@ -1020,6 +1020,38 @@ describe("ChatView - Sound Playing Tests", () => { expect(mockPlayFunction).toHaveBeenCalled() }) }) + + it("does not play sound when resuming a task from history", async () => { + renderChatView() + mockPlayFunction.mockClear() + + // Send resume_task message + mockPostMessage({ + clineMessages: [ + { type: "say", say: "task", ts: Date.now() - 2000, text: "Initial task" }, + { type: "ask", ask: "resume_task", ts: Date.now(), text: "Resume task", partial: false }, + ], + }) + + await new Promise((resolve) => setTimeout(resolve, 100)) + expect(mockPlayFunction).not.toHaveBeenCalled() + }) + + it("does not play sound when resuming a completed task from history", async () => { + renderChatView() + mockPlayFunction.mockClear() + + // Send resume_completed_task message + mockPostMessage({ + clineMessages: [ + { type: "say", say: "task", ts: Date.now() - 2000, text: "Initial task" }, + { type: "ask", ask: "resume_completed_task", ts: Date.now(), text: "Resume completed", partial: false }, + ], + }) + + await new Promise((resolve) => setTimeout(resolve, 100)) + expect(mockPlayFunction).not.toHaveBeenCalled() + }) }) describe("ChatView - Focus Grabbing Tests", () => { diff --git a/webview-ui/src/components/chat/__tests__/IndexingStatusBadge.spec.tsx b/webview-ui/src/components/chat/__tests__/IndexingStatusBadge.spec.tsx index 4ef3764841b0..8a44082f585d 100644 --- a/webview-ui/src/components/chat/__tests__/IndexingStatusBadge.spec.tsx +++ b/webview-ui/src/components/chat/__tests__/IndexingStatusBadge.spec.tsx @@ -45,6 +45,7 @@ vi.mock("react-i18next", () => ({ type: "3rdParty", init: vi.fn(), }, + Trans: ({ children }: { children: React.ReactNode }) => <>{children}, })) // Mock vscode API @@ -124,7 +125,7 @@ describe("IndexingStatusDot", () => { expect(button).toHaveAttribute("aria-label", "Index ready") }) - it("posts settingsButtonClicked message when clicked", () => { + it("opens popover when clicked", () => { // Mock window.postMessage const postMessageSpy = vi.spyOn(window, "postMessage") @@ -133,14 +134,10 @@ describe("IndexingStatusDot", () => { const button = screen.getByRole("button") fireEvent.click(button) - expect(postMessageSpy).toHaveBeenCalledWith( - { - type: "action", - action: "settingsButtonClicked", - values: { section: "experimental" }, - }, - "*", - ) + // The button should be clickable and not post a message anymore + // Since the popover is rendered conditionally, we just verify the button is clickable + // and doesn't throw any errors. The actual popover rendering is tested in integration tests. 
+ expect(postMessageSpy).not.toHaveBeenCalled() postMessageSpy.mockRestore() }) diff --git a/webview-ui/src/components/common/MermaidBlock.tsx b/webview-ui/src/components/common/MermaidBlock.tsx index 229b9577659a..982e0bc25f52 100644 --- a/webview-ui/src/components/common/MermaidBlock.tsx +++ b/webview-ui/src/components/common/MermaidBlock.tsx @@ -5,8 +5,10 @@ import { useDebounceEffect } from "@src/utils/useDebounceEffect" import { vscode } from "@src/utils/vscode" import { useAppTranslation } from "@src/i18n/TranslationContext" import { useCopyToClipboard } from "@src/utils/clipboard" +import { MermaidSyntaxFixer } from "@src/services/mermaidSyntaxFixer" import CodeBlock from "./CodeBlock" import { MermaidButton } from "@/components/common/MermaidButton" +import { MermaidFixButton } from "@/components/common/MermaidFixButton" // Removed previous attempts at static imports for individual diagram types // as the paths were incorrect for Mermaid v11.4.1 and caused errors. @@ -46,6 +48,7 @@ mermaid.initialize({ startOnLoad: false, securityLevel: "loose", theme: "dark", + suppressErrorRendering: true, themeVariables: { ...MERMAID_THEME, fontSize: "16px", @@ -91,6 +94,9 @@ export default function MermaidBlock({ code }: MermaidBlockProps) { const [isLoading, setIsLoading] = useState(false) const [error, setError] = useState(null) const [isErrorExpanded, setIsErrorExpanded] = useState(false) + const [svgContent, setSvgContent] = useState("") + const [isFixing, setIsFixing] = useState(false) + const [currentCode, setCurrentCode] = useState("") const { showCopyFeedback, copyWithFeedback } = useCopyToClipboard() const { t } = useAppTranslation() @@ -98,36 +104,57 @@ export default function MermaidBlock({ code }: MermaidBlockProps) { useEffect(() => { setIsLoading(true) setError(null) + setCurrentCode(code) + setIsFixing(false) }, [code]) + const handleSyntaxFix = async () => { + if (isFixing) return + + setIsLoading(true) + setIsFixing(true) + const result = await MermaidSyntaxFixer.autoFixSyntax(currentCode) + if (result.fixedCode) { + // Use the improved code even if not completely successful + setCurrentCode(result.fixedCode) + } + + if (!result.success) { + setError(result.error || t("common:mermaid.errors.fix_failed")) + } + + setIsFixing(false) + setIsLoading(false) + } + // 2) Debounce the actual parse/render + // the LLM is still 'typing', and we do not want to start rendering and/or autofixing before it is fully done. useDebounceEffect( () => { - if (containerRef.current) { - containerRef.current.innerHTML = "" - } + if (isFixing) return + setIsLoading(true) mermaid - .parse(code) + .parse(currentCode) .then(() => { const id = `mermaid-${Math.random().toString(36).substring(2)}` - return mermaid.render(id, code) + return mermaid.render(id, currentCode) }) .then(({ svg }) => { - if (containerRef.current) { - containerRef.current.innerHTML = svg - } + setError(null) + setSvgContent(svg) }) .catch((err) => { console.warn("Mermaid parse/render failed:", err) - setError(err.message || "Failed to render Mermaid diagram") + const errorMessage = err instanceof Error ? err.message : t("common:mermaid.render_error") + setError(errorMessage) }) .finally(() => { setIsLoading(false) }) }, 500, // Delay 500ms - [code], // Dependencies for scheduling + [currentCode, isFixing, code, t], // Dependencies for scheduling ) /** @@ -154,7 +181,11 @@ export default function MermaidBlock({ code }: MermaidBlockProps) { return ( - {isLoading && {t("common:mermaid.loading")}} + {isLoading && ( + + {isFixing ? 
t("common:mermaid.fixing_syntax") : t("common:mermaid.loading")} + + )} {error ? (
    @@ -188,10 +219,21 @@ export default function MermaidBlock({ code }: MermaidBlockProps) { {t("common:mermaid.render_error")}
    + {!!error && ( + { + e.stopPropagation() + handleSyntaxFix() + }} + disabled={isFixing} + title={t("common:mermaid.fix_syntax_button")}> + + + )} { e.stopPropagation() - const combinedContent = `Error: ${error}\n\n\`\`\`mermaid\n${code}\n\`\`\`` + const combinedContent = `Error: ${error}\n\n\`\`\`mermaid\n${currentCode}\n\`\`\`` copyWithFeedback(combinedContent, e) }}> @@ -209,13 +251,26 @@ export default function MermaidBlock({ code }: MermaidBlockProps) {
    {error}
    - + + {currentCode !== code && ( +
    +
    + {t("common:mermaid.original_code")} +
    + +
    + )}
    )}
    ) : ( - - + + )} diff --git a/webview-ui/src/components/common/MermaidButton.tsx b/webview-ui/src/components/common/MermaidButton.tsx index ede4d0f35e6e..8d77502c6d83 100644 --- a/webview-ui/src/components/common/MermaidButton.tsx +++ b/webview-ui/src/components/common/MermaidButton.tsx @@ -127,7 +127,7 @@ export function MermaidButton({ containerRef, code, isLoading, svgToPng, childre
    {children} {!isLoading && isHovering && ( -
    +
    )} -
    +
    {/* Header with metadata */} diff --git a/webview-ui/src/components/modes/DeleteModeDialog.tsx b/webview-ui/src/components/modes/DeleteModeDialog.tsx new file mode 100644 index 000000000000..d801b3149b41 --- /dev/null +++ b/webview-ui/src/components/modes/DeleteModeDialog.tsx @@ -0,0 +1,56 @@ +import React from "react" +import { useAppTranslation } from "@src/i18n/TranslationContext" +import { + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, +} from "@src/components/ui" + +interface DeleteModeDialogProps { + open: boolean + onOpenChange: (open: boolean) => void + modeToDelete: { + slug: string + name: string + source?: string + rulesFolderPath?: string + } | null + onConfirm: () => void +} + +export const DeleteModeDialog: React.FC = ({ open, onOpenChange, modeToDelete, onConfirm }) => { + const { t } = useAppTranslation() + + return ( + + + + {t("prompts:deleteMode.title")} + + {modeToDelete && ( + <> + {t("prompts:deleteMode.message", { modeName: modeToDelete.name })} + {modeToDelete.rulesFolderPath && ( +
    + {t("prompts:deleteMode.rulesFolder", { + folderPath: modeToDelete.rulesFolderPath, + })} +
    + )} + + )} +
    +
    + + {t("prompts:deleteMode.cancel")} + {t("prompts:deleteMode.confirm")} + +
    +
    + ) +} diff --git a/webview-ui/src/components/modes/ModesView.tsx b/webview-ui/src/components/modes/ModesView.tsx index 2dd6c6dc76b0..37d435a9945f 100644 --- a/webview-ui/src/components/modes/ModesView.tsx +++ b/webview-ui/src/components/modes/ModesView.tsx @@ -8,7 +8,7 @@ import { VSCodeTextField, } from "@vscode/webview-ui-toolkit/react" import { Trans } from "react-i18next" -import { ChevronDown, X } from "lucide-react" +import { ChevronDown, X, Upload, Download } from "lucide-react" import { ModeConfig, GroupEntry, PromptComponent, ToolGroup, modeConfigSchema } from "@roo-code/types" @@ -47,6 +47,7 @@ import { Input, StandardTooltip, } from "@src/components/ui" +import { DeleteModeDialog } from "@src/components/modes/DeleteModeDialog" // Get all available groups that should show in prompts view const availableGroups = (Object.keys(TOOL_GROUPS) as ToolGroup[]).filter((group) => !TOOL_GROUPS[group].alwaysAvailable) @@ -92,6 +93,17 @@ const ModesView = ({ onDone }: ModesViewProps) => { const [showConfigMenu, setShowConfigMenu] = useState(false) const [isCreateModeDialogOpen, setIsCreateModeDialogOpen] = useState(false) const [isSystemPromptDisclosureOpen, setIsSystemPromptDisclosureOpen] = useState(false) + const [isExporting, setIsExporting] = useState(false) + const [isImporting, setIsImporting] = useState(false) + const [showImportDialog, setShowImportDialog] = useState(false) + const [hasRulesToExport, setHasRulesToExport] = useState>({}) + const [showDeleteConfirm, setShowDeleteConfirm] = useState(false) + const [modeToDelete, setModeToDelete] = useState<{ + slug: string + name: string + source?: string + rulesFolderPath?: string + } | null>(null) // State for mode selection popover and search const [open, setOpen] = useState(false) @@ -190,6 +202,22 @@ const ModesView = ({ onDone }: ModesViewProps) => { return customModes?.find(findMode) || modes.find(findMode) }, [visualMode, customModes, modes]) + // Check if the current mode has rules to export + const checkRulesDirectory = useCallback((slug: string) => { + vscode.postMessage({ + type: "checkRulesDirectory", + slug: slug, + }) + }, []) + + // Check rules directory when mode changes + useEffect(() => { + const currentMode = getCurrentMode() + if (currentMode?.slug && hasRulesToExport[currentMode.slug] === undefined) { + checkRulesDirectory(currentMode.slug) + } + }, [getCurrentMode, checkRulesDirectory, hasRulesToExport]) + // Helper function to safely access mode properties const getModeProperty = ( mode: ModeConfig | undefined, @@ -388,6 +416,14 @@ const ModesView = ({ onDone }: ModesViewProps) => { return () => document.removeEventListener("click", handleClickOutside) }, [showConfigMenu]) + // Use a ref to store the current modeToDelete value + const modeToDeleteRef = useRef(modeToDelete) + + // Update the ref whenever modeToDelete changes + useEffect(() => { + modeToDeleteRef.current = modeToDelete + }, [modeToDelete]) + useEffect(() => { const handler = (event: MessageEvent) => { const message = event.data @@ -397,12 +433,45 @@ const ModesView = ({ onDone }: ModesViewProps) => { setSelectedPromptTitle(`System Prompt (${message.mode} mode)`) setIsDialogOpen(true) } + } else if (message.type === "exportModeResult") { + setIsExporting(false) + + if (!message.success) { + // Show error message + console.error("Failed to export mode:", message.error) + } + } else if (message.type === "importModeResult") { + setIsImporting(false) + setShowImportDialog(false) + + if (!message.success) { + // Only log error if it's not a 
cancellation + if (message.error !== "cancelled") { + console.error("Failed to import mode:", message.error) + } + } + } else if (message.type === "checkRulesDirectoryResult") { + setHasRulesToExport((prev) => ({ + ...prev, + [message.slug]: message.hasContent, + })) + } else if (message.type === "deleteCustomModeCheck") { + // Handle the check response + // Use the ref to get the current modeToDelete value + const currentModeToDelete = modeToDeleteRef.current + if (message.slug && currentModeToDelete && currentModeToDelete.slug === message.slug) { + setModeToDelete({ + ...currentModeToDelete, + rulesFolderPath: message.rulesFolderPath, + }) + setShowDeleteConfirm(true) + } } } window.addEventListener("message", handler) return () => window.removeEventListener("message", handler) - }, []) + }, []) // Empty dependency array - only register once const handleAgentReset = ( modeSlug: string, @@ -657,10 +726,20 @@ const ModesView = ({ onDone }: ModesViewProps) => { variant="ghost" size="icon" onClick={() => { - vscode.postMessage({ - type: "deleteCustomMode", - slug: visualMode, - }) + const customMode = findModeBySlug(visualMode, customModes) + if (customMode) { + setModeToDelete({ + slug: customMode.slug, + name: customMode.name, + source: customMode.source || "global", + }) + // First check if rules folder exists + vscode.postMessage({ + type: "deleteCustomMode", + slug: customMode.slug, + checkOnly: true, + }) + } }}> @@ -1033,7 +1112,7 @@ const ModesView = ({ onDone }: ModesViewProps) => {
    -
    +
    - {/* Custom System Prompt Disclosure */} + {/* Export/Import Mode Buttons */} +
    + {/* Export button - visible when any mode is selected */} + {getCurrentMode() && ( + + )} + {/* Import button - always visible */} + +
    + + {/* Advanced Features Disclosure */}
    {isSystemPromptDisclosureOpen && ( -
    - { - const currentMode = getCurrentMode() - if (!currentMode) return - - vscode.postMessage({ - type: "openFile", - text: `./.roo/system-prompt-${currentMode.slug}`, - values: { - create: true, - content: "", - }, - }) - }} - /> - ), - "1": ( - - ), - "2": , - }} - /> +
    + {/* Override System Prompt Section */} +
    +

    + Override System Prompt +

    +
    + { + const currentMode = getCurrentMode() + if (!currentMode) return + + vscode.postMessage({ + type: "openFile", + text: `./.roo/system-prompt-${currentMode.slug}`, + values: { + create: true, + content: "", + }, + }) + }} + /> + ), + "1": ( + + ), + "2": , + }} + /> +
    +
    )}
    @@ -1394,6 +1516,85 @@ const ModesView = ({ onDone }: ModesViewProps) => {
    )} + + {/* Import Mode Dialog */} + {showImportDialog && ( +
    +
    +

    {t("prompts:modes.importMode")}

    +

    + {t("prompts:importMode.selectLevel")} +

    +
    + + +
    +
    + + +
    +
    +
    + )} + + {/* Delete Mode Confirmation Dialog */} + { + if (modeToDelete) { + vscode.postMessage({ + type: "deleteCustomMode", + slug: modeToDelete.slug, + }) + setShowDeleteConfirm(false) + setModeToDelete(null) + } + }} + /> ) } diff --git a/webview-ui/src/components/settings/AutoApproveSettings.tsx b/webview-ui/src/components/settings/AutoApproveSettings.tsx index e825ab8d7c98..71283a833bc2 100644 --- a/webview-ui/src/components/settings/AutoApproveSettings.tsx +++ b/webview-ui/src/components/settings/AutoApproveSettings.tsx @@ -25,6 +25,8 @@ type AutoApproveSettingsProps = HTMLAttributes & { alwaysAllowModeSwitch?: boolean alwaysAllowSubtasks?: boolean alwaysAllowExecute?: boolean + alwaysAllowFollowupQuestions?: boolean + followupAutoApproveTimeoutMs?: number allowedCommands?: string[] setCachedStateField: SetCachedStateField< | "alwaysAllowReadOnly" @@ -40,7 +42,10 @@ type AutoApproveSettingsProps = HTMLAttributes & { | "alwaysAllowModeSwitch" | "alwaysAllowSubtasks" | "alwaysAllowExecute" + | "alwaysAllowFollowupQuestions" + | "followupAutoApproveTimeoutMs" | "allowedCommands" + | "alwaysAllowUpdateTodoList" > } @@ -58,6 +63,8 @@ export const AutoApproveSettings = ({ alwaysAllowModeSwitch, alwaysAllowSubtasks, alwaysAllowExecute, + alwaysAllowFollowupQuestions, + followupAutoApproveTimeoutMs = 60000, allowedCommands, setCachedStateField, ...props @@ -95,6 +102,7 @@ export const AutoApproveSettings = ({ alwaysAllowModeSwitch={alwaysAllowModeSwitch} alwaysAllowSubtasks={alwaysAllowSubtasks} alwaysAllowExecute={alwaysAllowExecute} + alwaysAllowFollowupQuestions={alwaysAllowFollowupQuestions} onToggle={(key, value) => setCachedStateField(key, value)} /> @@ -202,6 +210,33 @@ export const AutoApproveSettings = ({
    )} + {alwaysAllowFollowupQuestions && ( +
    +
    + +
    {t("settings:autoApprove.followupQuestions.label")}
    +
    +
    +
    + + setCachedStateField("followupAutoApproveTimeoutMs", value) + } + data-testid="followup-timeout-slider" + /> + {followupAutoApproveTimeoutMs / 1000}s +
    +
    + {t("settings:autoApprove.followupQuestions.timeoutLabel")} +
    +
    +
    + )} + {alwaysAllowExecute && (
    diff --git a/webview-ui/src/components/settings/AutoApproveToggle.tsx b/webview-ui/src/components/settings/AutoApproveToggle.tsx index d2b6694f75f3..e8b51b01ef67 100644 --- a/webview-ui/src/components/settings/AutoApproveToggle.tsx +++ b/webview-ui/src/components/settings/AutoApproveToggle.tsx @@ -14,6 +14,8 @@ type AutoApproveToggles = Pick< | "alwaysAllowModeSwitch" | "alwaysAllowSubtasks" | "alwaysAllowExecute" + | "alwaysAllowFollowupQuestions" + | "alwaysAllowUpdateTodoList" > export type AutoApproveSetting = keyof AutoApproveToggles @@ -83,6 +85,20 @@ export const autoApproveSettingsConfig: Record {Object.values(autoApproveSettingsConfig).map(({ key, descriptionKey, labelKey, icon, testId }) => ( diff --git a/webview-ui/src/components/settings/CodeIndexSettings.tsx b/webview-ui/src/components/settings/CodeIndexSettings.tsx deleted file mode 100644 index f385ae2bc62d..000000000000 --- a/webview-ui/src/components/settings/CodeIndexSettings.tsx +++ /dev/null @@ -1,513 +0,0 @@ -import React, { useState, useEffect } from "react" -import { z } from "zod" -import * as ProgressPrimitive from "@radix-ui/react-progress" -import { VSCodeCheckbox, VSCodeTextField, VSCodeButton, VSCodeLink } from "@vscode/webview-ui-toolkit/react" -import { Trans } from "react-i18next" - -import { CodebaseIndexConfig, CodebaseIndexModels, ProviderSettings } from "@roo-code/types" - -import { EmbedderProvider } from "@roo/embeddingModels" - -import { vscode } from "@src/utils/vscode" -import { useAppTranslation } from "@src/i18n/TranslationContext" -import { buildDocLink } from "@src/utils/docLinks" - -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, - AlertDialog, - AlertDialogAction, - AlertDialogCancel, - AlertDialogContent, - AlertDialogDescription, - AlertDialogFooter, - AlertDialogHeader, - AlertDialogTitle, - AlertDialogTrigger, -} from "@src/components/ui" - -import { SetCachedStateField } from "./types" - -interface CodeIndexSettingsProps { - codebaseIndexModels: CodebaseIndexModels | undefined - codebaseIndexConfig: CodebaseIndexConfig | undefined - apiConfiguration: ProviderSettings - setCachedStateField: SetCachedStateField<"codebaseIndexConfig"> - setApiConfigurationField: (field: K, value: ProviderSettings[K]) => void - areSettingsCommitted: boolean -} - -import type { IndexingStatusUpdateMessage } from "@roo/ExtensionMessage" - -export const CodeIndexSettings: React.FC = ({ - codebaseIndexModels, - codebaseIndexConfig, - apiConfiguration, - setCachedStateField, - setApiConfigurationField, - areSettingsCommitted, -}) => { - const { t } = useAppTranslation() - const DEFAULT_QDRANT_URL = "http://localhost:6333" - const [indexingStatus, setIndexingStatus] = useState({ - systemStatus: "Standby", - message: "", - processedItems: 0, - totalItems: 0, - currentItemUnit: "items", - }) - - // Safely calculate available models for current provider - const currentProvider = codebaseIndexConfig?.codebaseIndexEmbedderProvider - const modelsForProvider = - currentProvider === "openai" || currentProvider === "ollama" || currentProvider === "openai-compatible" - ? 
codebaseIndexModels?.[currentProvider] || codebaseIndexModels?.openai - : codebaseIndexModels?.openai - const availableModelIds = Object.keys(modelsForProvider || {}) - - useEffect(() => { - // Request initial indexing status from extension host - vscode.postMessage({ type: "requestIndexingStatus" }) - - // Set up interval for periodic status updates - - // Set up message listener for status updates - const handleMessage = (event: MessageEvent) => { - if (event.data.type === "indexingStatusUpdate") { - setIndexingStatus({ - systemStatus: event.data.values.systemStatus, - message: event.data.values.message || "", - processedItems: event.data.values.processedItems, - totalItems: event.data.values.totalItems, - currentItemUnit: event.data.values.currentItemUnit || "items", - }) - } - } - - window.addEventListener("message", handleMessage) - - // Cleanup function - return () => { - window.removeEventListener("message", handleMessage) - } - }, [codebaseIndexConfig, codebaseIndexModels]) - - /** - * Determines the appropriate model ID when changing providers - */ - function getModelIdForProvider( - newProvider: EmbedderProvider, - currentProvider: EmbedderProvider | undefined, - currentModelId: string | undefined, - availableModels: CodebaseIndexModels | undefined, - ): string { - if (newProvider === currentProvider && currentModelId) { - return currentModelId - } - - const models = availableModels?.[newProvider] - const modelIds = models ? Object.keys(models) : [] - - if (currentModelId && modelIds.includes(currentModelId)) { - return currentModelId - } - - const selectedModel = modelIds.length > 0 ? modelIds[0] : "" - return selectedModel - } - - function validateIndexingConfig(config: CodebaseIndexConfig | undefined, apiConfig: ProviderSettings): boolean { - if (!config) return false - - const baseSchema = z.object({ - codebaseIndexQdrantUrl: z.string().url("Qdrant URL must be a valid URL"), - codebaseIndexEmbedderModelId: z.string().min(1, "Model ID is required"), - }) - - const providerSchemas = { - openai: baseSchema.extend({ - codebaseIndexEmbedderProvider: z.literal("openai"), - codeIndexOpenAiKey: z.string().min(1, "OpenAI key is required"), - }), - ollama: baseSchema.extend({ - codebaseIndexEmbedderProvider: z.literal("ollama"), - codebaseIndexEmbedderBaseUrl: z.string().url("Ollama URL must be a valid URL"), - }), - "openai-compatible": baseSchema.extend({ - codebaseIndexEmbedderProvider: z.literal("openai-compatible"), - codebaseIndexOpenAiCompatibleBaseUrl: z.string().url("Base URL must be a valid URL"), - codebaseIndexOpenAiCompatibleApiKey: z.string().min(1, "API key is required"), - codebaseIndexOpenAiCompatibleModelDimension: z - .number() - .int("Dimension must be an integer") - .positive("Dimension must be a positive number") - .optional(), - }), - } - - try { - const schema = - config.codebaseIndexEmbedderProvider === "openai" - ? providerSchemas.openai - : config.codebaseIndexEmbedderProvider === "ollama" - ? providerSchemas.ollama - : providerSchemas["openai-compatible"] - - schema.parse({ - ...config, - codeIndexOpenAiKey: apiConfig.codeIndexOpenAiKey, - codebaseIndexOpenAiCompatibleBaseUrl: apiConfig.codebaseIndexOpenAiCompatibleBaseUrl, - codebaseIndexOpenAiCompatibleApiKey: apiConfig.codebaseIndexOpenAiCompatibleApiKey, - codebaseIndexOpenAiCompatibleModelDimension: apiConfig.codebaseIndexOpenAiCompatibleModelDimension, - }) - return true - } catch { - return false - } - } - - const progressPercentage = - indexingStatus.totalItems > 0 - ? 
(indexingStatus.processedItems / indexingStatus.totalItems) * 100 - : indexingStatus.totalItems === 0 && indexingStatus.processedItems === 0 - ? 100 - : 0 - - const transformValue = 100 - progressPercentage - const transformStyleString = `translateX(-${transformValue}%)` - - return ( - <> -
    -
    - - setCachedStateField("codebaseIndexConfig", { - ...codebaseIndexConfig, - codebaseIndexEnabled: e.target.checked, - }) - }> - {t("settings:codeIndex.enableLabel")} - -
    -

    - - - -

    -
    - - {codebaseIndexConfig?.codebaseIndexEnabled && ( -
    -
    - - {indexingStatus.systemStatus} - {indexingStatus.message ? ` - ${indexingStatus.message}` : ""} -
    - - {indexingStatus.systemStatus === "Indexing" && ( -
    - - - -
    - )} - -
    -
    {t("settings:codeIndex.providerLabel")}
    -
    -
    -
    - -
    -
    - - {codebaseIndexConfig?.codebaseIndexEmbedderProvider === "openai" && ( -
    -
    -
    {t("settings:codeIndex.openaiKeyLabel")}
    -
    -
    - setApiConfigurationField("codeIndexOpenAiKey", e.target.value)} - style={{ width: "100%" }}> -
    -
    - )} - - {codebaseIndexConfig?.codebaseIndexEmbedderProvider === "openai-compatible" && ( -
    -
    -
    {t("settings:codeIndex.openaiCompatibleBaseUrlLabel")}
    -
    -
    - - setApiConfigurationField("codebaseIndexOpenAiCompatibleBaseUrl", e.target.value) - } - style={{ width: "100%" }}> -
    -
    -
    {t("settings:codeIndex.openaiCompatibleApiKeyLabel")}
    -
    -
    - - setApiConfigurationField("codebaseIndexOpenAiCompatibleApiKey", e.target.value) - } - style={{ width: "100%" }}> -
    -
    - )} - -
    -
    {t("settings:codeIndex.modelLabel")}
    -
    -
    -
    - {codebaseIndexConfig?.codebaseIndexEmbedderProvider === "openai-compatible" ? ( - - setCachedStateField("codebaseIndexConfig", { - ...codebaseIndexConfig, - codebaseIndexEmbedderModelId: e.target.value, - }) - } - placeholder="Enter custom model ID" - style={{ width: "100%" }}> - ) : ( - - )} -
    -
    - - {codebaseIndexConfig?.codebaseIndexEmbedderProvider === "openai-compatible" && ( -
    -
    -
    {t("settings:codeIndex.openaiCompatibleModelDimensionLabel")}
    -
    -
    - { - const value = e.target.value - if (value === "") { - setApiConfigurationField( - "codebaseIndexOpenAiCompatibleModelDimension", - undefined, - ) - } else { - const parsedValue = parseInt(value, 10) - if (!isNaN(parsedValue)) { - setApiConfigurationField( - "codebaseIndexOpenAiCompatibleModelDimension", - parsedValue, - ) - } - } - }} - placeholder={t("settings:codeIndex.openaiCompatibleModelDimensionPlaceholder")} - style={{ width: "100%" }}> -

    - {t("settings:codeIndex.openaiCompatibleModelDimensionDescription")} -

    -
    -
    - )} - - {codebaseIndexConfig?.codebaseIndexEmbedderProvider === "ollama" && ( -
    -
    -
    {t("settings:codeIndex.ollamaUrlLabel")}
    -
    -
    - - setCachedStateField("codebaseIndexConfig", { - ...codebaseIndexConfig, - codebaseIndexEmbedderBaseUrl: e.target.value, - }) - } - style={{ width: "100%" }}> -
    -
    - )} - -
    -
    -
    {t("settings:codeIndex.qdrantUrlLabel")}
    -
    -
    - - setCachedStateField("codebaseIndexConfig", { - ...codebaseIndexConfig, - codebaseIndexQdrantUrl: e.target.value, - }) - } - onBlur={(e: any) => { - // Set default value if field is empty on blur - if (!e.target.value) { - setCachedStateField("codebaseIndexConfig", { - ...codebaseIndexConfig, - codebaseIndexQdrantUrl: DEFAULT_QDRANT_URL, - }) - } - }} - style={{ width: "100%" }}> -
    -
    - -
    -
    -
    {t("settings:codeIndex.qdrantKeyLabel")}
    -
    -
    - setApiConfigurationField("codeIndexQdrantApiKey", e.target.value)} - style={{ width: "100%" }}> -
    -
    - - {(!areSettingsCommitted || !validateIndexingConfig(codebaseIndexConfig, apiConfiguration)) && ( -

    - {t("settings:codeIndex.unsavedSettingsMessage")} -

    - )} - -
    - {(indexingStatus.systemStatus === "Error" || indexingStatus.systemStatus === "Standby") && ( - vscode.postMessage({ type: "startIndexing" })} - disabled={ - !areSettingsCommitted || - !validateIndexingConfig(codebaseIndexConfig, apiConfiguration) - }> - {t("settings:codeIndex.startIndexingButton")} - - )} - {(indexingStatus.systemStatus === "Indexed" || indexingStatus.systemStatus === "Error") && ( - - - - {t("settings:codeIndex.clearIndexDataButton")} - - - - - - {t("settings:codeIndex.clearDataDialog.title")} - - - {t("settings:codeIndex.clearDataDialog.description")} - - - - - {t("settings:codeIndex.clearDataDialog.cancelButton")} - - vscode.postMessage({ type: "clearIndexData" })}> - {t("settings:codeIndex.clearDataDialog.confirmButton")} - - - - - )} -
    -
    - )} - - ) -} diff --git a/webview-ui/src/components/settings/ExperimentalSettings.tsx b/webview-ui/src/components/settings/ExperimentalSettings.tsx index 79d8afefb22b..958c7742fd23 100644 --- a/webview-ui/src/components/settings/ExperimentalSettings.tsx +++ b/webview-ui/src/components/settings/ExperimentalSettings.tsx @@ -1,41 +1,40 @@ import { HTMLAttributes } from "react" import { FlaskConical } from "lucide-react" +import { VSCodeCheckbox, VSCodeLink } from "@vscode/webview-ui-toolkit/react" +import { Trans } from "react-i18next" -import type { Experiments, CodebaseIndexConfig, CodebaseIndexModels, ProviderSettings } from "@roo-code/types" +import type { Experiments, CodebaseIndexConfig, CodebaseIndexModels } from "@roo-code/types" import { EXPERIMENT_IDS, experimentConfigsMap } from "@roo/experiments" -import { ExtensionStateContextType } from "@src/context/ExtensionStateContext" import { useAppTranslation } from "@src/i18n/TranslationContext" import { cn } from "@src/lib/utils" +import { buildDocLink } from "@src/utils/docLinks" -import { SetCachedStateField, SetExperimentEnabled } from "./types" +import { SetExperimentEnabled } from "./types" import { SectionHeader } from "./SectionHeader" import { Section } from "./Section" import { ExperimentalFeature } from "./ExperimentalFeature" -import { CodeIndexSettings } from "./CodeIndexSettings" +import { SetCachedStateField } from "./types" type ExperimentalSettingsProps = HTMLAttributes & { experiments: Experiments setExperimentEnabled: SetExperimentEnabled - setCachedStateField: SetCachedStateField<"codebaseIndexConfig"> // CodeIndexSettings props codebaseIndexModels: CodebaseIndexModels | undefined codebaseIndexConfig: CodebaseIndexConfig | undefined - apiConfiguration: ProviderSettings - setApiConfigurationField: (field: K, value: ProviderSettings[K]) => void - areSettingsCommitted: boolean + // For codebase index enabled toggle + codebaseIndexEnabled?: boolean + setCachedStateField?: SetCachedStateField } export const ExperimentalSettings = ({ experiments, setExperimentEnabled, - setCachedStateField, codebaseIndexModels, codebaseIndexConfig, - apiConfiguration, - setApiConfigurationField, - areSettingsCommitted, + codebaseIndexEnabled, + setCachedStateField, className, ...props }: ExperimentalSettingsProps) => { @@ -81,14 +80,31 @@ export const ExperimentalSettings = ({ ) })} - } - setApiConfigurationField={setApiConfigurationField} - areSettingsCommitted={areSettingsCommitted} - /> + {/* Codebase Indexing Enable/Disable Toggle */} +
    +
    + { + const newEnabledState = e.target.checked + if (setCachedStateField && codebaseIndexConfig) { + setCachedStateField("codebaseIndexConfig", { + ...codebaseIndexConfig, + codebaseIndexEnabled: newEnabledState, + }) + } + }}> + {t("settings:codeIndex.enableLabel")} + +
    +

    + + + +

    +
    ) diff --git a/webview-ui/src/components/settings/ModelPicker.tsx b/webview-ui/src/components/settings/ModelPicker.tsx index 13dbba8a63fb..5882d4f3f3a2 100644 --- a/webview-ui/src/components/settings/ModelPicker.tsx +++ b/webview-ui/src/components/settings/ModelPicker.tsx @@ -70,7 +70,7 @@ export const ModelPicker = ({ const { id: selectedModelId, info: selectedModelInfo } = useSelectedModel(apiConfiguration) - const [searchValue, setSearchValue] = useState(selectedModelId || "") + const [searchValue, setSearchValue] = useState("") const onSelect = useCallback( (modelId: string) => { @@ -87,28 +87,25 @@ export const ModelPicker = ({ } // Delay to ensure the popover is closed before setting the search value. - selectTimeoutRef.current = setTimeout(() => setSearchValue(modelId), 100) + selectTimeoutRef.current = setTimeout(() => setSearchValue(""), 100) }, [modelIdKey, setApiConfigurationField], ) - const onOpenChange = useCallback( - (open: boolean) => { - setOpen(open) + const onOpenChange = useCallback((open: boolean) => { + setOpen(open) - // Abandon the current search if the popover is closed. - if (!open) { - // Clear any existing timeout - if (closeTimeoutRef.current) { - clearTimeout(closeTimeoutRef.current) - } - - // Delay to ensure the popover is closed before setting the search value. - closeTimeoutRef.current = setTimeout(() => setSearchValue(selectedModelId), 100) + // Abandon the current search if the popover is closed. + if (!open) { + // Clear any existing timeout + if (closeTimeoutRef.current) { + clearTimeout(closeTimeoutRef.current) } - }, - [selectedModelId], - ) + + // Clear the search value when closing instead of prefilling it + closeTimeoutRef.current = setTimeout(() => setSearchValue(""), 100) + } + }, []) const onClearSearch = useCallback(() => { setSearchValue("") diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index 8712b81cf204..3ece8146afbd 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -174,6 +174,8 @@ const SettingsView = forwardRef(({ onDone, t codebaseIndexModels, customSupportPrompts, profileThresholds, + alwaysAllowFollowupQuestions, + followupAutoApproveTimeoutMs, } = cachedState const apiConfiguration = useMemo(() => cachedState.apiConfiguration ?? 
{}, [cachedState.apiConfiguration]) @@ -311,12 +313,16 @@ const SettingsView = forwardRef(({ onDone, t vscode.postMessage({ type: "updateExperimental", values: experiments }) vscode.postMessage({ type: "alwaysAllowModeSwitch", bool: alwaysAllowModeSwitch }) vscode.postMessage({ type: "alwaysAllowSubtasks", bool: alwaysAllowSubtasks }) + vscode.postMessage({ type: "alwaysAllowFollowupQuestions", bool: alwaysAllowFollowupQuestions }) + vscode.postMessage({ type: "followupAutoApproveTimeoutMs", value: followupAutoApproveTimeoutMs }) vscode.postMessage({ type: "condensingApiConfigId", text: condensingApiConfigId || "" }) vscode.postMessage({ type: "updateCondensingPrompt", text: customCondensingPrompt || "" }) vscode.postMessage({ type: "updateSupportPrompt", values: customSupportPrompts || {} }) vscode.postMessage({ type: "upsertApiConfiguration", text: currentApiConfigName, apiConfiguration }) vscode.postMessage({ type: "telemetrySetting", text: telemetrySetting }) - vscode.postMessage({ type: "codebaseIndexConfig", values: codebaseIndexConfig }) + if (codebaseIndexConfig) { + vscode.postMessage({ type: "codebaseIndexEnabled", bool: codebaseIndexConfig.codebaseIndexEnabled }) + } vscode.postMessage({ type: "profileThresholds", values: profileThresholds }) setChangeDetected(false) } @@ -599,6 +605,8 @@ const SettingsView = forwardRef(({ onDone, t alwaysAllowModeSwitch={alwaysAllowModeSwitch} alwaysAllowSubtasks={alwaysAllowSubtasks} alwaysAllowExecute={alwaysAllowExecute} + alwaysAllowFollowupQuestions={alwaysAllowFollowupQuestions} + followupAutoApproveTimeoutMs={followupAutoApproveTimeoutMs} allowedCommands={allowedCommands} setCachedStateField={setCachedStateField} /> @@ -683,12 +691,10 @@ const SettingsView = forwardRef(({ onDone, t )} diff --git a/webview-ui/src/components/settings/__tests__/AutoApproveToggle.spec.tsx b/webview-ui/src/components/settings/__tests__/AutoApproveToggle.spec.tsx index 97e5dcc96bb5..270ed305eab5 100644 --- a/webview-ui/src/components/settings/__tests__/AutoApproveToggle.spec.tsx +++ b/webview-ui/src/components/settings/__tests__/AutoApproveToggle.spec.tsx @@ -25,6 +25,8 @@ describe("AutoApproveToggle", () => { alwaysAllowModeSwitch: true, alwaysAllowSubtasks: false, alwaysAllowExecute: true, + alwaysAllowFollowupQuestions: false, + alwaysAllowUpdateTodoList: true, onToggle: mockOnToggle, } diff --git a/webview-ui/src/components/settings/__tests__/CodeIndexSettings.spec.tsx b/webview-ui/src/components/settings/__tests__/CodeIndexSettings.spec.tsx deleted file mode 100644 index 3c3dae7956c7..000000000000 --- a/webview-ui/src/components/settings/__tests__/CodeIndexSettings.spec.tsx +++ /dev/null @@ -1,844 +0,0 @@ -// npx vitest src/components/settings/__tests__/CodeIndexSettings.spec.tsx - -import { render, screen, fireEvent } from "@/utils/test-utils" -import userEvent from "@testing-library/user-event" - -import { CodeIndexSettings } from "../CodeIndexSettings" - -import { vscode } from "@src/utils/vscode" - -vi.mock("@src/utils/vscode", () => ({ - vscode: { - postMessage: vi.fn(), - }, -})) - -vi.mock("@src/i18n/TranslationContext", () => ({ - useAppTranslation: () => ({ - t: (key: string) => { - const translations: Record = { - "settings:codeIndex.providerLabel": "Provider", - "settings:codeIndex.selectProviderPlaceholder": "Select provider", - "settings:codeIndex.openaiProvider": "OpenAI", - "settings:codeIndex.ollamaProvider": "Ollama", - "settings:codeIndex.openaiCompatibleProvider": "OpenAI Compatible", - "settings:codeIndex.openaiKeyLabel": "OpenAI API 
Key", - "settings:codeIndex.openaiCompatibleBaseUrlLabel": "Base URL", - "settings:codeIndex.openaiCompatibleApiKeyLabel": "API Key", - "settings:codeIndex.openaiCompatibleModelDimensionLabel": "Embedding Dimension", - "settings:codeIndex.openaiCompatibleModelDimensionPlaceholder": "Enter dimension (e.g., 1536)", - "settings:codeIndex.openaiCompatibleModelDimensionDescription": "The dimension of the embedding model", - "settings:codeIndex.modelLabel": "Model", - "settings:codeIndex.selectModelPlaceholder": "Select model", - "settings:codeIndex.qdrantUrlLabel": "Qdrant URL", - "settings:codeIndex.qdrantApiKeyLabel": "Qdrant API Key", - "settings:codeIndex.ollamaUrlLabel": "Ollama URL", - "settings:codeIndex.qdrantKeyLabel": "Qdrant API Key", - "settings:codeIndex.enableLabel": "Enable Code Index", - "settings:codeIndex.enableDescription": "Enable semantic search across your codebase", - "settings:codeIndex.unsavedSettingsMessage": "Please save settings before indexing", - "settings:codeIndex.startIndexingButton": "Start Indexing", - "settings:codeIndex.clearIndexDataButton": "Clear Index Data", - "settings:codeIndex.clearDataDialog.title": "Clear Index Data", - "settings:codeIndex.clearDataDialog.description": "This will remove all indexed data", - "settings:codeIndex.clearDataDialog.cancelButton": "Cancel", - "settings:codeIndex.clearDataDialog.confirmButton": "Confirm", - } - return translations[key] || key - }, - }), -})) - -vi.mock("react-i18next", () => ({ - Trans: ({ children }: any) =>
    {children}
    , -})) - -vi.mock("@src/utils/docLinks", () => ({ - buildDocLink: vi.fn(() => "https://docs.example.com"), -})) - -vi.mock("@src/components/ui", () => ({ - Select: ({ children, value, onValueChange }: any) => ( -
    - - {children} -
    - ), - SelectContent: ({ children }: any) =>
    {children}
    , - SelectItem: ({ children, value }: any) => ( -
    - {children} -
    - ), - SelectTrigger: ({ children }: any) =>
    {children}
    , - SelectValue: ({ placeholder }: any) =>
    {placeholder}
    , - AlertDialog: ({ children }: any) =>
    {children}
    , - AlertDialogAction: ({ children, onClick }: any) => ( - - ), - AlertDialogCancel: ({ children }: any) => , - AlertDialogContent: ({ children }: any) =>
    {children}
    , - AlertDialogDescription: ({ children }: any) =>
    {children}
    , - AlertDialogFooter: ({ children }: any) =>
    {children}
    , - AlertDialogHeader: ({ children }: any) =>
    {children}
    , - AlertDialogTitle: ({ children }: any) =>
    {children}
    , - AlertDialogTrigger: ({ children }: any) =>
    {children}
    , -})) - -vi.mock("@vscode/webview-ui-toolkit/react", () => ({ - VSCodeCheckbox: ({ checked, onChange, children }: any) => ( - - ), - VSCodeTextField: ({ value, onInput, type, style, ...props }: any) => { - const handleChange = (e: any) => { - if (onInput) { - onInput({ target: { value: e.target.value } }) - } - } - return ( - - ) - }, - VSCodeButton: ({ children, onClick, appearance }: any) => ( - - ), - VSCodeLink: ({ children, href }: any) => ( - - {children} - - ), -})) - -vi.mock("@radix-ui/react-progress", () => ({ - Root: ({ children, value }: any) => ( -
    - {children} -
    - ), - Indicator: ({ style }: any) =>
    , -})) - -describe("CodeIndexSettings", () => { - const mockSetCachedStateField = vi.fn() - const mockSetApiConfigurationField = vi.fn() - - const defaultProps = { - codebaseIndexModels: { - openai: { - "text-embedding-3-small": { dimension: 1536 }, - "text-embedding-3-large": { dimension: 3072 }, - }, - "openai-compatible": { - "text-embedding-3-small": { dimension: 1536 }, - "custom-model": { dimension: 768 }, - }, - }, - codebaseIndexConfig: { - codebaseIndexEnabled: true, - codebaseIndexEmbedderProvider: "openai" as const, - codebaseIndexEmbedderModelId: "text-embedding-3-small", - codebaseIndexQdrantUrl: "http://localhost:6333", - }, - apiConfiguration: { - codeIndexOpenAiKey: "", - codebaseIndexOpenAiCompatibleBaseUrl: "", - codebaseIndexOpenAiCompatibleApiKey: "", - codeIndexQdrantApiKey: "", - }, - setCachedStateField: mockSetCachedStateField, - setApiConfigurationField: mockSetApiConfigurationField, - areSettingsCommitted: true, - } - - beforeEach(() => { - vi.clearAllMocks() - // Mock window.addEventListener for message handling - Object.defineProperty(window, "addEventListener", { - value: vi.fn(), - writable: true, - }) - Object.defineProperty(window, "removeEventListener", { - value: vi.fn(), - writable: true, - }) - }) - - describe("Provider Selection", () => { - it("should render OpenAI Compatible provider option", () => { - render() - - expect(screen.getByTestId("select-item-openai-compatible")).toBeInTheDocument() - expect(screen.getByText("OpenAI Compatible")).toBeInTheDocument() - }) - - it("should show OpenAI Compatible configuration fields when provider is selected", () => { - const propsWithOpenAICompatible = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai-compatible" as const, - }, - } - - render() - - expect(screen.getByText("Base URL")).toBeInTheDocument() - expect(screen.getByText("API Key")).toBeInTheDocument() - expect(screen.getAllByTestId("vscode-textfield")).toHaveLength(6) // Base URL, API Key, Embedding Dimension, Model ID, Qdrant URL, Qdrant Key - }) - - it("should hide OpenAI Compatible fields when different provider is selected", () => { - render() - - expect(screen.queryByText("Base URL")).not.toBeInTheDocument() - expect(screen.getByText("OpenAI API Key")).toBeInTheDocument() - }) - - /** - * Test provider switching functionality - */ - // Provider selection functionality is tested through integration tests - // Removed complex provider switching test that was difficult to mock properly - }) - - describe("OpenAI Compatible Configuration", () => { - const openAICompatibleProps = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai-compatible" as const, - }, - } - - it("should render base URL input field", () => { - render() - - const textFields = screen.getAllByTestId("vscode-textfield") - const baseUrlField = textFields.find( - (field) => - field.getAttribute("value") === - openAICompatibleProps.apiConfiguration.codebaseIndexOpenAiCompatibleBaseUrl, - ) - expect(baseUrlField).toBeInTheDocument() - }) - - it("should render API key input field with password type", () => { - render() - - const passwordFields = screen - .getAllByTestId("vscode-textfield") - .filter((field) => field.getAttribute("type") === "password") - expect(passwordFields.length).toBeGreaterThan(0) - }) - - it("should call setApiConfigurationField when base URL changes", async () => { - render() - - // Find the Base URL field - it 
should be the first text field (not password) in the OpenAI Compatible section - const textFields = screen.getAllByTestId("vscode-textfield") - // Filter for text type fields (not password) and find the one with empty value (base URL field) - const textTypeFields = textFields.filter((field) => field.getAttribute("type") === "text") - const baseUrlField = textTypeFields[0] // First text field should be base URL - - expect(baseUrlField).toBeDefined() - - // Use fireEvent to trigger the change - fireEvent.change(baseUrlField!, { target: { value: "test" } }) - - // Check that setApiConfigurationField was called with the right parameter name (accepts any value) - expect(mockSetApiConfigurationField).toHaveBeenCalledWith("codebaseIndexOpenAiCompatibleBaseUrl", "test") - }) - - it("should call setApiConfigurationField when API key changes", async () => { - render() - - // Find the API Key field by looking for the text and then finding the password input - screen.getByText("API Key") - const passwordFields = screen - .getAllByTestId("vscode-textfield") - .filter((field) => field.getAttribute("type") === "password") - const apiKeyField = passwordFields[0] // First password field in the OpenAI Compatible section - expect(apiKeyField).toBeDefined() - - // Use fireEvent to trigger the change - fireEvent.change(apiKeyField!, { target: { value: "test" } }) - - // Check that setApiConfigurationField was called with the right parameter name (accepts any value) - expect(mockSetApiConfigurationField).toHaveBeenCalledWith("codebaseIndexOpenAiCompatibleApiKey", "test") - }) - - it("should display current base URL value", () => { - const propsWithValues = { - ...openAICompatibleProps, - apiConfiguration: { - ...openAICompatibleProps.apiConfiguration, - codebaseIndexOpenAiCompatibleBaseUrl: "https://existing-api.example.com/v1", - }, - } - - render() - - const textField = screen.getByDisplayValue("https://existing-api.example.com/v1") - expect(textField).toBeInTheDocument() - }) - - it("should display current API key value", () => { - const propsWithValues = { - ...openAICompatibleProps, - apiConfiguration: { - ...openAICompatibleProps.apiConfiguration, - codebaseIndexOpenAiCompatibleApiKey: "existing-api-key", - }, - } - - render() - - const textField = screen.getByDisplayValue("existing-api-key") - expect(textField).toBeInTheDocument() - }) - - it("should display embedding dimension input field for OpenAI Compatible provider", () => { - const propsWithOpenAICompatible = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai-compatible" as const, - }, - } - - render() - - // Look for the embedding dimension label - expect(screen.getByText("Embedding Dimension")).toBeInTheDocument() - }) - - it("should hide embedding dimension input field for non-OpenAI Compatible providers", () => { - render() - - // Should not show embedding dimension for OpenAI provider - expect(screen.queryByText("Embedding Dimension")).not.toBeInTheDocument() - }) - - it("should call setApiConfigurationField when embedding dimension changes", async () => { - const propsWithOpenAICompatible = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai-compatible" as const, - }, - } - - render() - - // Find the embedding dimension input field by placeholder - const dimensionField = screen.getByPlaceholderText("Enter dimension (e.g., 1536)") - expect(dimensionField).toBeDefined() - - // Use fireEvent to 
trigger the change - fireEvent.change(dimensionField!, { target: { value: "1024" } }) - - // Check that setApiConfigurationField was called with the right parameter name - expect(mockSetApiConfigurationField).toHaveBeenCalledWith( - "codebaseIndexOpenAiCompatibleModelDimension", - 1024, - ) - }) - - it("should display current embedding dimension value", () => { - const propsWithDimension = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai-compatible" as const, - }, - apiConfiguration: { - ...defaultProps.apiConfiguration, - codebaseIndexOpenAiCompatibleModelDimension: 2048, - }, - } - - render() - - const textField = screen.getByDisplayValue("2048") - expect(textField).toBeInTheDocument() - }) - - it("should handle empty embedding dimension value", () => { - const propsWithEmptyDimension = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai-compatible" as const, - }, - apiConfiguration: { - ...defaultProps.apiConfiguration, - codebaseIndexOpenAiCompatibleModelDimension: undefined, - }, - } - - render() - - const dimensionField = screen.getByPlaceholderText("Enter dimension (e.g., 1536)") - expect(dimensionField).toHaveValue("") - }) - - it("should validate embedding dimension input accepts only positive numbers", async () => { - const propsWithOpenAICompatible = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai-compatible" as const, - }, - } - - render() - - const dimensionField = screen.getByPlaceholderText("Enter dimension (e.g., 1536)") - expect(dimensionField).toBeDefined() - - // Test that the field is a text input (implementation uses text with validation logic) - expect(dimensionField).toHaveAttribute("type", "text") - - // Test that invalid input (non-numeric) doesn't trigger setApiConfigurationField - fireEvent.change(dimensionField!, { target: { value: "invalid" } }) - - // The implementation only accepts valid numbers - // Verify that setApiConfigurationField was not called with invalid string values - expect(mockSetApiConfigurationField).not.toHaveBeenCalledWith( - "codebaseIndexOpenAiCompatibleModelDimension", - "invalid", - ) - - // Test that numeric values (including negative) are accepted by the current implementation - fireEvent.change(dimensionField!, { target: { value: "-5" } }) - expect(mockSetApiConfigurationField).toHaveBeenCalledWith("codebaseIndexOpenAiCompatibleModelDimension", -5) - }) - }) - - describe("Model Selection", () => { - /** - * Test conditional rendering of Model ID input based on provider type - */ - describe("Conditional Model Input Rendering", () => { - it("should render VSCodeTextField for Model ID when provider is openai-compatible", () => { - const propsWithOpenAICompatible = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai-compatible" as const, - codebaseIndexEmbedderModelId: "custom-model-id", - }, - } - - render() - - // Should render VSCodeTextField for Model ID - const modelTextFields = screen.getAllByTestId("vscode-textfield") - const modelIdField = modelTextFields.find( - (field) => field.getAttribute("placeholder") === "Enter custom model ID", - ) - expect(modelIdField).toBeInTheDocument() - expect(modelIdField).toHaveValue("custom-model-id") - - // Should NOT render Select dropdown for models (only provider select should exist) - const 
selectElements = screen.getAllByTestId("select") - expect(selectElements).toHaveLength(1) // Only provider select, no model select - }) - - it("should render Select dropdown for models when provider is openai", () => { - const propsWithOpenAI = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai" as const, - codebaseIndexEmbedderModelId: "text-embedding-3-small", - }, - } - - render() - - // Should render Select dropdown for models (second select element) - const selectElements = screen.getAllByTestId("select") - expect(selectElements).toHaveLength(2) // Provider and model selects - const modelSelect = selectElements[1] // Model select is second - expect(modelSelect).toHaveAttribute("data-value", "text-embedding-3-small") - - // Should NOT render VSCodeTextField for Model ID (only other text fields) - const modelTextFields = screen.getAllByTestId("vscode-textfield") - const modelIdField = modelTextFields.find( - (field) => field.getAttribute("placeholder") === "Enter custom model ID", - ) - expect(modelIdField).toBeUndefined() - }) - - it("should render Select dropdown for models when provider is ollama", () => { - const propsWithOllama = { - ...defaultProps, - codebaseIndexModels: { - ...defaultProps.codebaseIndexModels, - ollama: { - llama2: { dimension: 4096 }, - codellama: { dimension: 4096 }, - }, - }, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "ollama" as const, - codebaseIndexEmbedderModelId: "llama2", - }, - } - - render() - - // Should render Select dropdown for models (second select element) - const selectElements = screen.getAllByTestId("select") - expect(selectElements).toHaveLength(2) // Provider and model selects - const modelSelect = selectElements[1] // Model select is second - expect(modelSelect).toHaveAttribute("data-value", "llama2") - - // Should NOT render VSCodeTextField for Model ID - const modelTextFields = screen.getAllByTestId("vscode-textfield") - const modelIdField = modelTextFields.find( - (field) => field.getAttribute("placeholder") === "Enter custom model ID", - ) - expect(modelIdField).toBeUndefined() - }) - }) - - /** - * Test VSCodeTextField interactions for OpenAI-Compatible provider - */ - describe("VSCodeTextField for OpenAI-Compatible Model ID", () => { - const openAICompatibleProps = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai-compatible" as const, - codebaseIndexEmbedderModelId: "existing-model", - }, - } - - it("should display current Model ID value in VSCodeTextField", () => { - render() - - const modelIdField = screen.getByPlaceholderText("Enter custom model ID") - expect(modelIdField).toHaveValue("existing-model") - }) - - it("should call setCachedStateField when Model ID changes", async () => { - render() - - const modelIdField = screen.getByPlaceholderText("Enter custom model ID") - - // Use fireEvent to trigger the change - fireEvent.change(modelIdField, { target: { value: "new-model" } }) - - // Check that setCachedStateField was called with codebaseIndexConfig - expect(mockSetCachedStateField).toHaveBeenCalledWith( - "codebaseIndexConfig", - expect.objectContaining({ - codebaseIndexEmbedderProvider: "openai-compatible", - codebaseIndexEnabled: true, - codebaseIndexQdrantUrl: "http://localhost:6333", - codebaseIndexEmbedderModelId: "new-model", - }), - ) - }) - - it("should handle empty Model ID value", () => { - const 
propsWithEmptyModelId = { - ...openAICompatibleProps, - codebaseIndexConfig: { - ...openAICompatibleProps.codebaseIndexConfig, - codebaseIndexEmbedderModelId: "", - }, - } - - render() - - const modelIdField = screen.getByPlaceholderText("Enter custom model ID") - expect(modelIdField).toHaveValue("") - }) - - it("should show placeholder text for Model ID input", () => { - render() - - const modelIdField = screen.getByPlaceholderText("Enter custom model ID") - expect(modelIdField).toBeInTheDocument() - expect(modelIdField).toHaveAttribute("placeholder", "Enter custom model ID") - }) - }) - - /** - * Test Select dropdown interactions for other providers - */ - describe("Select Dropdown for Other Providers", () => { - it("should show available models for OpenAI provider in dropdown", () => { - const propsWithOpenAI = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai" as const, - }, - } - - render() - - expect(screen.getByTestId("select-item-text-embedding-3-small")).toBeInTheDocument() - expect(screen.getByTestId("select-item-text-embedding-3-large")).toBeInTheDocument() - }) - - it("should show available models for Ollama provider in dropdown", () => { - const propsWithOllama = { - ...defaultProps, - codebaseIndexModels: { - ...defaultProps.codebaseIndexModels, - ollama: { - llama2: { dimension: 4096 }, - codellama: { dimension: 4096 }, - }, - }, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "ollama" as const, - }, - } - - render() - - expect(screen.getByTestId("select-item-llama2")).toBeInTheDocument() - expect(screen.getByTestId("select-item-codellama")).toBeInTheDocument() - }) - - it("should call setCachedStateField when model is selected from dropdown", async () => { - const user = userEvent.setup() - const propsWithOpenAI = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai" as const, - }, - } - - render() - - // Get all select elements and find the model select (second one) - const selectElements = screen.getAllByTestId("select") - const modelSelect = selectElements[1] // Provider is first, Model is second - const selectButton = modelSelect.querySelector("button") - expect(selectButton).toBeInTheDocument() - await user.click(selectButton!) 
- - expect(mockSetCachedStateField).toHaveBeenCalledWith("codebaseIndexConfig", { - ...propsWithOpenAI.codebaseIndexConfig, - codebaseIndexEmbedderModelId: "test-change", - }) - }) - - it("should display current model selection in dropdown", () => { - const propsWithSelectedModel = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai" as const, - codebaseIndexEmbedderModelId: "text-embedding-3-large", - }, - } - - render() - - // Get all select elements and find the model select (second one) - const selectElements = screen.getAllByTestId("select") - const modelSelect = selectElements[1] // Provider is first, Model is second - expect(modelSelect).toHaveAttribute("data-value", "text-embedding-3-large") - }) - }) - - /** - * Test fallback behavior for OpenAI-Compatible provider - */ - describe("OpenAI-Compatible Provider Model Fallback", () => { - it("should show available models for OpenAI Compatible provider", () => { - const propsWithOpenAICompatible = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai-compatible" as const, - }, - } - - render() - - // Note: For openai-compatible, we render VSCodeTextField, not Select dropdown - // But the component still uses availableModelIds for other purposes - const modelIdField = screen.getByPlaceholderText("Enter custom model ID") - expect(modelIdField).toBeInTheDocument() - }) - - it("should fall back to OpenAI models when OpenAI Compatible models are not available", () => { - const propsWithoutCompatibleModels = { - ...defaultProps, - codebaseIndexModels: { - openai: { - "text-embedding-3-small": { dimension: 1536 }, - "text-embedding-3-large": { dimension: 3072 }, - }, - }, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai-compatible" as const, - }, - } - - render() - - // Should still render VSCodeTextField for openai-compatible provider - const modelIdField = screen.getByPlaceholderText("Enter custom model ID") - expect(modelIdField).toBeInTheDocument() - }) - }) - }) - - describe("Form Validation", () => { - it("should handle empty configuration gracefully", () => { - const emptyProps = { - ...defaultProps, - codebaseIndexConfig: undefined, - apiConfiguration: {}, - } - - expect(() => render()).not.toThrow() - }) - - it("should handle missing model configuration", () => { - const propsWithoutModels = { - ...defaultProps, - codebaseIndexModels: undefined, - } - - expect(() => render()).not.toThrow() - }) - - it("should handle empty API configuration fields", () => { - const propsWithEmptyConfig = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "openai-compatible" as const, - }, - apiConfiguration: { - codebaseIndexOpenAiCompatibleBaseUrl: "", - codebaseIndexOpenAiCompatibleApiKey: "", - }, - } - - render() - - const textFields = screen.getAllByTestId("vscode-textfield") - expect(textFields[0]).toHaveValue("") - expect(textFields[1]).toHaveValue("") - }) - }) - - describe("Integration", () => { - it("should request indexing status on mount", () => { - render() - - expect(vscode.postMessage).toHaveBeenCalledWith({ - type: "requestIndexingStatus", - }) - }) - - it("should set up message listener for status updates", () => { - render() - - expect(window.addEventListener).toHaveBeenCalledWith("message", expect.any(Function)) - }) - - it("should clean up message listener on 
unmount", () => { - const { unmount } = render() - - unmount() - - expect(window.removeEventListener).toHaveBeenCalledWith("message", expect.any(Function)) - }) - - /** - * Test indexing status updates - */ - it("should update indexing status when receiving status update message", () => { - render() - - // Get the message handler that was registered - const messageHandler = (window.addEventListener as any).mock.calls.find( - (call: any) => call[0] === "message", - )?.[1] - - expect(messageHandler).toBeDefined() - - // Simulate receiving a status update message - const mockEvent = { - data: { - type: "indexingStatusUpdate", - values: { - systemStatus: "Indexing", - message: "Processing files...", - processedItems: 50, - totalItems: 100, - currentItemUnit: "files", - }, - }, - } - - messageHandler(mockEvent) - - // Check that the status indicator shows "Indexing" - expect(screen.getByText(/Indexing/)).toBeInTheDocument() - }) - }) - - describe("Error Handling", () => { - it("should handle invalid provider gracefully", () => { - const propsWithInvalidProvider = { - ...defaultProps, - codebaseIndexConfig: { - ...defaultProps.codebaseIndexConfig, - codebaseIndexEmbedderProvider: "invalid-provider" as any, - }, - } - - expect(() => render()).not.toThrow() - }) - - it("should handle missing translation keys gracefully", () => { - // Mock translation function to return undefined for some keys - vi.doMock("@src/i18n/TranslationContext", () => ({ - useAppTranslation: () => ({ - t: (key: string) => (key.includes("missing") ? undefined : key), - }), - })) - - expect(() => render()).not.toThrow() - }) - }) -}) diff --git a/webview-ui/src/context/ExtensionStateContext.tsx b/webview-ui/src/context/ExtensionStateContext.tsx index c87ccdb6e921..df7cee562794 100644 --- a/webview-ui/src/context/ExtensionStateContext.tsx +++ b/webview-ui/src/context/ExtensionStateContext.tsx @@ -40,6 +40,10 @@ export interface ExtensionStateContextType extends ExtensionState { mdmCompliant?: boolean hasOpenedModeSelector: boolean // New property to track if user has opened mode selector setHasOpenedModeSelector: (value: boolean) => void // Setter for the new property + alwaysAllowFollowupQuestions: boolean // New property for follow-up questions auto-approve + setAlwaysAllowFollowupQuestions: (value: boolean) => void // Setter for the new property + followupAutoApproveTimeoutMs: number | undefined // Timeout in ms for auto-approving follow-up questions + setFollowupAutoApproveTimeoutMs: (value: number) => void // Setter for the timeout condensingApiConfigId?: string setCondensingApiConfigId: (value: string) => void customCondensingPrompt?: string @@ -125,6 +129,8 @@ export interface ExtensionStateContextType extends ExtensionState { autoCondenseContextPercent: number setAutoCondenseContextPercent: (value: number) => void routerModels?: RouterModels + alwaysAllowUpdateTodoList?: boolean + setAlwaysAllowUpdateTodoList: (value: boolean) => void } export const ExtensionStateContext = createContext(undefined) @@ -213,8 +219,11 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode codebaseIndexEmbedderProvider: "openai", codebaseIndexEmbedderBaseUrl: "", codebaseIndexEmbedderModelId: "", + codebaseIndexSearchMaxResults: undefined, + codebaseIndexSearchMinScore: undefined, }, codebaseIndexModels: { ollama: {}, openai: {} }, + alwaysAllowUpdateTodoList: true, }) const [didHydrateState, setDidHydrateState] = useState(false) @@ -226,6 +235,8 @@ export const ExtensionStateContextProvider: React.FC<{ 
children: React.ReactNode const [currentCheckpoint, setCurrentCheckpoint] = useState() const [extensionRouterModels, setExtensionRouterModels] = useState(undefined) const [marketplaceItems, setMarketplaceItems] = useState([]) + const [alwaysAllowFollowupQuestions, setAlwaysAllowFollowupQuestions] = useState(false) // Add state for follow-up questions auto-approve + const [followupAutoApproveTimeoutMs, setFollowupAutoApproveTimeoutMs] = useState(undefined) // Will be set from global settings const [marketplaceInstalledMetadata, setMarketplaceInstalledMetadata] = useState({ project: {}, global: {}, @@ -255,6 +266,14 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode setState((prevState) => mergeExtensionState(prevState, newState)) setShowWelcome(!checkExistKey(newState.apiConfiguration)) setDidHydrateState(true) + // Update alwaysAllowFollowupQuestions if present in state message + if ((newState as any).alwaysAllowFollowupQuestions !== undefined) { + setAlwaysAllowFollowupQuestions((newState as any).alwaysAllowFollowupQuestions) + } + // Update followupAutoApproveTimeoutMs if present in state message + if ((newState as any).followupAutoApproveTimeoutMs !== undefined) { + setFollowupAutoApproveTimeoutMs((newState as any).followupAutoApproveTimeoutMs) + } // Handle marketplace data if present in state message if (newState.marketplaceItems !== undefined) { setMarketplaceItems(newState.marketplaceItems) @@ -352,6 +371,8 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode marketplaceItems, marketplaceInstalledMetadata, profileThresholds: state.profileThresholds ?? {}, + alwaysAllowFollowupQuestions, + followupAutoApproveTimeoutMs, setExperimentEnabled: (id, enabled) => setState((prevState) => ({ ...prevState, experiments: { ...prevState.experiments, [id]: enabled } })), setApiConfiguration, @@ -367,6 +388,9 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode setAlwaysAllowMcp: (value) => setState((prevState) => ({ ...prevState, alwaysAllowMcp: value })), setAlwaysAllowModeSwitch: (value) => setState((prevState) => ({ ...prevState, alwaysAllowModeSwitch: value })), setAlwaysAllowSubtasks: (value) => setState((prevState) => ({ ...prevState, alwaysAllowSubtasks: value })), + setAlwaysAllowFollowupQuestions, + setFollowupAutoApproveTimeoutMs: (value) => + setState((prevState) => ({ ...prevState, followupAutoApproveTimeoutMs: value })), setShowAnnouncement: (value) => setState((prevState) => ({ ...prevState, shouldShowAnnouncement: value })), setAllowedCommands: (value) => setState((prevState) => ({ ...prevState, allowedCommands: value })), setAllowedMaxRequests: (value) => setState((prevState) => ({ ...prevState, allowedMaxRequests: value })), @@ -438,6 +462,10 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode setCustomCondensingPrompt: (value) => setState((prevState) => ({ ...prevState, customCondensingPrompt: value })), setProfileThresholds: (value) => setState((prevState) => ({ ...prevState, profileThresholds: value })), + alwaysAllowUpdateTodoList: state.alwaysAllowUpdateTodoList, + setAlwaysAllowUpdateTodoList: (value) => { + setState((prevState) => ({ ...prevState, alwaysAllowUpdateTodoList: value })) + }, } return {children} diff --git a/webview-ui/src/i18n/locales/ca/chat.json b/webview-ui/src/i18n/locales/ca/chat.json index 07f1e25b5365..1dd892a39faf 100644 --- a/webview-ui/src/i18n/locales/ca/chat.json +++ b/webview-ui/src/i18n/locales/ca/chat.json @@ -234,17 
+234,19 @@ "tokens": "tokens" }, "followUpSuggest": { - "copyToInput": "Copiar a l'entrada (o Shift + clic)" + "copyToInput": "Copiar a l'entrada (o Shift + clic)", + "autoSelectCountdown": "Selecció automàtica en {{count}}s", + "countdownDisplay": "{{count}}s" }, "announcement": { - "title": "🎉 Roo Code {{version}} publicat", - "description": "Roo Code {{version}} porta noves funcionalitats potents i millores basades en els teus comentaris.", + "title": "🎉 Roo Code {{version}} Llançat", + "description": "Roo Code {{version}} porta noves funcions potents i millores significatives per millorar el vostre flux de treball de desenvolupament.", "whatsNew": "Novetats", - "feature1": "Llançament del Marketplace Roo - El marketplace ja està en funcionament! Descobreix i instal·la modes i MCP més fàcilment que mai.", - "feature2": "Models Gemini 2.5 - S'ha afegit suport per als nous models Gemini 2.5 Pro, Flash i Flash Lite.", - "feature3": "Suport per a fitxers Excel i més - S'ha afegit suport per a fitxers Excel (.xlsx) i nombroses correccions d'errors i millores!", - "hideButton": "Amagar anunci", - "detailsDiscussLinks": "Obtingues més detalls i participa a Discord i Reddit 🚀" + "feature1": "Compartició de Tasques amb 1 Clic: Comparteix instantàniament les vostres tasques amb companys i la comunitat amb un sol clic.", + "feature2": "Suport per a Directori Global .roo: Carrega regles i configuracions des d'un directori global .roo per a configuracions consistents entre projectes.", + "feature3": "Transicions Millorades d'Arquitecte a Codi: Transferències fluides de la planificació en mode Arquitecte a la implementació en mode Codi.", + "hideButton": "Amaga l'anunci", + "detailsDiscussLinks": "Obtén més detalls i uneix-te a les discussions a Discord i Reddit 🚀" }, "browser": { "rooWantsToUse": "Roo vol utilitzar el navegador:", @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo vol cercar a la base de codi {{query}}:", "wantsToSearchWithPath": "Roo vol cercar a la base de codi {{query}} a {{path}}:", - "didSearch": "S'han trobat {{count}} resultat(s) per a {{query}}:" + "didSearch": "S'han trobat {{count}} resultat(s) per a {{query}}:", + "resultTooltip": "Puntuació de similitud: {{score}} (fes clic per obrir el fitxer)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/ca/common.json b/webview-ui/src/i18n/locales/ca/common.json index 267e0a62d702..e82ee6ab2165 100644 --- a/webview-ui/src/i18n/locales/ca/common.json +++ b/webview-ui/src/i18n/locales/ca/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Generant diagrama mermaid...", "render_error": "No es pot renderitzar el diagrama", + "fixing_syntax": "Corregint la sintaxi de Mermaid...", + "fix_syntax_button": "Corregir sintaxi amb IA", + "original_code": "Codi original:", + "errors": { + "unknown_syntax": "Error de sintaxi desconegut", + "fix_timeout": "La sol·licitud de correcció de l'IA ha esgotat el temps", + "fix_failed": "La correcció de l'IA ha fallat", + "fix_attempts": "No s'ha pogut corregir la sintaxi després de {{attempts}} intents. 
Últim error: {{error}}", + "no_fix_provided": "L'IA no ha pogut proporcionar una correcció", + "fix_request_failed": "La sol·licitud de correcció ha fallat" + }, "buttons": { "zoom": "Zoom", "zoomIn": "Ampliar", diff --git a/webview-ui/src/i18n/locales/ca/prompts.json b/webview-ui/src/i18n/locales/ca/prompts.json index 04359f996a5e..394e8e5c8cf6 100644 --- a/webview-ui/src/i18n/locales/ca/prompts.json +++ b/webview-ui/src/i18n/locales/ca/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "Modes", "createNewMode": "Crear nou mode", + "importMode": "Importar mode", + "noMatchFound": "No s'han trobat modes", "editModesConfig": "Editar configuració de modes", "editGlobalModes": "Editar modes globals", "editProjectModes": "Editar modes de projecte (.roomodes)", @@ -50,6 +52,28 @@ "description": "Afegiu directrius de comportament específiques per al mode {{modeName}}.", "loadFromFile": "Les instruccions personalitzades específiques per al mode {{mode}} també es poden carregar des de la carpeta .roo/rules-{{slug}}/ al vostre espai de treball (.roorules-{{slug}} i .clinerules-{{slug}} estan obsolets i deixaran de funcionar aviat)." }, + "exportMode": { + "title": "Exportar mode", + "description": "Exporta aquest mode a un fitxer YAML amb totes les regles incloses per compartir fàcilment amb altres.", + "export": "Exportar mode", + "exporting": "Exportant..." + }, + "importMode": { + "selectLevel": "Tria on importar aquest mode:", + "import": "Importar", + "importing": "Important...", + "global": { + "label": "Nivell global", + "description": "Disponible a tots els projectes. Les regles es fusionaran amb les instruccions personalitzades." + }, + "project": { + "label": "Nivell de projecte", + "description": "Només disponible en aquest espai de treball. Si el mode exportat contenia fitxers de regles, es tornaran a crear a la carpeta .roo/rules-{slug}/." + } + }, + "advanced": { + "title": "Avançat" + }, "globalCustomInstructions": { "title": "Instruccions personalitzades per a tots els modes", "description": "Aquestes instruccions s'apliquen a tots els modes. Proporcionen un conjunt bàsic de comportaments que es poden millorar amb instruccions específiques de cada mode a continuació. <0>Més informació", @@ -164,5 +188,13 @@ }, "deleteMode": "Eliminar mode" }, - "allFiles": "tots els fitxers" + "allFiles": "tots els fitxers", + "deleteMode": { + "title": "Suprimeix el mode", + "message": "Estàs segur que vols suprimir el mode \"{{modeName}}\"?", + "rulesFolder": "Aquest mode té una carpeta de regles a {{folderPath}} que també se suprimirà.", + "descriptionNoRules": "Esteu segur que voleu suprimir aquest mode personalitzat?", + "confirm": "Suprimeix", + "cancel": "Cancel·la" + } } diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index ea4f9741494b..2e2197f1231e 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -44,27 +44,66 @@ "selectProviderPlaceholder": "Seleccionar proveïdor", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "Clau API:", + "geminiApiKeyPlaceholder": "Introduïu la vostra clau d'API de Gemini", "openaiCompatibleProvider": "Compatible amb OpenAI", - "openaiCompatibleBaseUrlLabel": "URL base:", - "openaiCompatibleApiKeyLabel": "Clau API:", - "openaiCompatibleModelDimensionLabel": "Dimensió d'Embedding:", - "openaiCompatibleModelDimensionPlaceholder": "p. 
ex., 1536", - "openaiCompatibleModelDimensionDescription": "La dimensió d'embedding (mida de sortida) per al teu model. Consulta la documentació del teu proveïdor per a aquest valor. Valors comuns: 384, 768, 1536, 3072.", - "openaiKeyLabel": "Clau OpenAI:", + "openAiKeyLabel": "Clau API OpenAI", + "openAiKeyPlaceholder": "Introduïu la vostra clau API OpenAI", + "openAiCompatibleBaseUrlLabel": "URL base", + "openAiCompatibleApiKeyLabel": "Clau API", + "openAiCompatibleApiKeyPlaceholder": "Introduïu la vostra clau API", + "openAiCompatibleModelDimensionLabel": "Dimensió d'Embedding:", + "modelDimensionLabel": "Dimensió del model", + "openAiCompatibleModelDimensionPlaceholder": "p. ex., 1536", + "openAiCompatibleModelDimensionDescription": "La dimensió d'embedding (mida de sortida) per al teu model. Consulta la documentació del teu proveïdor per a aquest valor. Valors comuns: 384, 768, 1536, 3072.", "modelLabel": "Model", "selectModelPlaceholder": "Seleccionar model", "ollamaUrlLabel": "URL d'Ollama:", "qdrantUrlLabel": "URL de Qdrant", "qdrantKeyLabel": "Clau de Qdrant:", - "startIndexingButton": "Iniciar indexació", - "clearIndexDataButton": "Esborrar dades d'índex", + "startIndexingButton": "Iniciar", + "clearIndexDataButton": "Esborrar índex", "unsavedSettingsMessage": "Si us plau, deseu la configuració abans d'iniciar el procés d'indexació.", "clearDataDialog": { "title": "Esteu segur?", "description": "Aquesta acció no es pot desfer. Eliminarà permanentment les dades d'índex de la vostra base de codi.", "cancelButton": "Cancel·lar", "confirmButton": "Esborrar dades" - } + }, + "description": "Configureu la configuració d'indexació de la base de codi per habilitar la cerca semàntica del vostre projecte. <0>Més informació", + "statusTitle": "Estat", + "settingsTitle": "Configuració d'indexació", + "disabledMessage": "La indexació de la base de codi està actualment deshabilitada. Habiliteu-la a la configuració global per configurar les opcions d'indexació.", + "embedderProviderLabel": "Proveïdor d'embeddings", + "modelPlaceholder": "Introduïu el nom del model", + "selectModel": "Seleccioneu un model", + "ollamaBaseUrlLabel": "URL base d'Ollama", + "qdrantApiKeyLabel": "Clau API de Qdrant", + "qdrantApiKeyPlaceholder": "Introduïu la vostra clau API de Qdrant (opcional)", + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "Error en desar la configuració", + "modelDimensions": "({{dimension}} dimensions)", + "saveSuccess": "Configuració desada correctament", + "saving": "Desant...", + "saveSettings": "Desar", + "indexingStatuses": { + "standby": "En espera", + "indexing": "Indexant", + "indexed": "Indexat", + "error": "Error" + }, + "close": "Tancar", + "advancedConfigLabel": "Configuració avançada", + "searchMinScoreLabel": "Llindar de puntuació de cerca", + "searchMinScoreDescription": "Puntuació mínima de similitud (0.0-1.0) requerida per als resultats de la cerca. Valors més baixos retornen més resultats però poden ser menys rellevants. Valors més alts retornen menys resultats però més rellevants.", + "searchMinScoreResetTooltip": "Restablir al valor per defecte (0.4)", + "searchMaxResultsLabel": "Màxim de resultats de cerca", + "searchMaxResultsDescription": "Nombre màxim de resultats de cerca a retornar quan es consulta l'índex de la base de codi. 
Els valors més alts proporcionen més context però poden incloure resultats menys rellevants.", + "resetToDefault": "Restablir al valor per defecte" }, "autoApprove": { "description": "Permet que Roo realitzi operacions automàticament sense requerir aprovació. Activeu aquesta configuració només si confieu plenament en la IA i enteneu els riscos de seguretat associats.", @@ -110,6 +149,11 @@ "label": "Subtasques", "description": "Permetre la creació i finalització de subtasques sense requerir aprovació" }, + "followupQuestions": { + "label": "Pregunta", + "description": "Seleccionar automàticament la primera resposta suggerida per a preguntes de seguiment després del temps d'espera configurat", + "timeoutLabel": "Temps d'espera abans de seleccionar automàticament la primera resposta" + }, "execute": { "label": "Executar", "description": "Executar automàticament comandes de terminal permeses sense requerir aprovació", @@ -118,6 +162,10 @@ "commandPlaceholder": "Introduïu prefix de comanda (ex. 'git ')", "addButton": "Afegir" }, + "updateTodoList": { + "label": "Todo", + "description": "La llista de tasques s'actualitza automàticament sense aprovació" + }, "apiRequestLimit": { "title": "Màximes Sol·licituds", "description": "Fes aquesta quantitat de sol·licituds API automàticament abans de demanar aprovació per continuar amb la tasca.", diff --git a/webview-ui/src/i18n/locales/de/chat.json b/webview-ui/src/i18n/locales/de/chat.json index b5c2336e4661..c62fe9d3bbd1 100644 --- a/webview-ui/src/i18n/locales/de/chat.json +++ b/webview-ui/src/i18n/locales/de/chat.json @@ -234,15 +234,17 @@ "tokens": "Tokens" }, "followUpSuggest": { - "copyToInput": "In Eingabefeld kopieren (oder Shift + Klick)" + "copyToInput": "In Eingabefeld kopieren (oder Shift + Klick)", + "autoSelectCountdown": "Automatische Auswahl in {{count}}s", + "countdownDisplay": "{{count}}s" }, "announcement": { "title": "🎉 Roo Code {{version}} veröffentlicht", - "description": "Roo Code {{version}} bringt wichtige neue Funktionen und Verbesserungen basierend auf deinem Feedback.", + "description": "Roo Code {{version}} bringt mächtige neue Funktionen und bedeutende Verbesserungen, um deinen Entwicklungsworkflow zu verbessern.", "whatsNew": "Was ist neu", - "feature1": "Roo Marketplace Launch: Der Marketplace ist jetzt live! 
Entdecke und installiere Modi und MCPs einfacher denn je.", - "feature2": "Gemini 2.5 Modelle: Unterstützung für neue Gemini 2.5 Pro, Flash und Flash Lite Modelle hinzugefügt.", - "feature3": "Excel-Datei-Unterstützung & mehr: Excel (.xlsx) Datei-Unterstützung hinzugefügt sowie zahlreiche Fehlerbehebungen und Verbesserungen!", + "feature1": "1-Klick-Aufgaben-Teilen: Teile deine Aufgaben sofort mit Kollegen und der Community mit einem einzigen Klick.", + "feature2": "Globale .roo-Verzeichnis-Unterstützung: Lade Regeln und Konfigurationen aus einem globalen .roo-Verzeichnis für konsistente Einstellungen über Projekte hinweg.", + "feature3": "Verbesserte Architect-zu-Code-Übergänge: Nahtlose Übergaben von der Planung im Architect-Modus zur Implementierung im Code-Modus.", "hideButton": "Ankündigung ausblenden", "detailsDiscussLinks": "Erhalte mehr Details und diskutiere auf Discord und Reddit 🚀" }, @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo möchte den Codebase nach {{query}} durchsuchen:", "wantsToSearchWithPath": "Roo möchte den Codebase nach {{query}} in {{path}} durchsuchen:", - "didSearch": "{{count}} Ergebnis(se) für {{query}} gefunden:" + "didSearch": "{{count}} Ergebnis(se) für {{query}} gefunden:", + "resultTooltip": "Ähnlichkeitswert: {{score}} (klicken zum Öffnen der Datei)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/de/common.json b/webview-ui/src/i18n/locales/de/common.json index 76b9064bc357..c10630557079 100644 --- a/webview-ui/src/i18n/locales/de/common.json +++ b/webview-ui/src/i18n/locales/de/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Mermaid-Diagramm wird generiert...", "render_error": "Diagramm kann nicht gerendert werden", + "fixing_syntax": "Mermaid-Syntax wird korrigiert...", + "fix_syntax_button": "Syntax mit KI korrigieren", + "original_code": "Ursprünglicher Code:", + "errors": { + "unknown_syntax": "Unbekannter Syntaxfehler", + "fix_timeout": "KI-Korrekturanfrage hat das Zeitlimit überschritten", + "fix_failed": "KI-Korrektur fehlgeschlagen", + "fix_attempts": "Korrektur nach {{attempts}} Versuchen fehlgeschlagen. Letzter Fehler: {{error}}", + "no_fix_provided": "KI konnte keine Korrektur bereitstellen", + "fix_request_failed": "Korrekturanfrage fehlgeschlagen" + }, "buttons": { "zoom": "Zoom", "zoomIn": "Vergrößern", diff --git a/webview-ui/src/i18n/locales/de/prompts.json b/webview-ui/src/i18n/locales/de/prompts.json index 6e9fd0f47b1d..ea94f9228ddd 100644 --- a/webview-ui/src/i18n/locales/de/prompts.json +++ b/webview-ui/src/i18n/locales/de/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "Modi", "createNewMode": "Neuen Modus erstellen", + "importMode": "Modus importieren", + "noMatchFound": "Keine Modi gefunden", "editModesConfig": "Moduskonfiguration bearbeiten", "editGlobalModes": "Globale Modi bearbeiten", "editProjectModes": "Projektmodi bearbeiten (.roomodes)", @@ -50,6 +52,28 @@ "description": "Fügen Sie verhaltensspezifische Richtlinien für den Modus {{modeName}} hinzu.", "loadFromFile": "Benutzerdefinierte Anweisungen für den Modus {{mode}} können auch aus dem Ordner .roo/rules-{{slug}}/ in deinem Arbeitsbereich geladen werden (.roorules-{{slug}} und .clinerules-{{slug}} sind veraltet und werden bald nicht mehr funktionieren)." }, + "exportMode": { + "title": "Modus exportieren", + "description": "Exportiert diesen Modus in eine YAML-Datei mit allen enthaltenen Regeln zum einfachen Teilen mit anderen.", + "export": "Modus exportieren", + "exporting": "Exportieren..." 
+ }, + "importMode": { + "selectLevel": "Wähle, wo dieser Modus importiert werden soll:", + "import": "Importieren", + "importing": "Importiere...", + "global": { + "label": "Globale Ebene", + "description": "Verfügbar in allen Projekten. Wenn der exportierte Modus Regeldateien enthielt, werden diese im globalen Ordner .roo/rules-{slug}/ neu erstellt." + }, + "project": { + "label": "Projektebene", + "description": "Nur in diesem Arbeitsbereich verfügbar. Wenn der exportierte Modus Regeldateien enthielt, werden diese im Ordner .roo/rules-{slug}/ neu erstellt." + } + }, + "advanced": { + "title": "Erweitert" + }, "globalCustomInstructions": { "title": "Benutzerdefinierte Anweisungen für alle Modi", "description": "Diese Anweisungen gelten für alle Modi. Sie bieten einen grundlegenden Satz von Verhaltensweisen, die durch modusspezifische Anweisungen unten erweitert werden können. <0>Mehr erfahren", @@ -164,5 +188,13 @@ }, "deleteMode": "Modus löschen" }, - "allFiles": "alle Dateien" + "allFiles": "alle Dateien", + "deleteMode": { + "title": "Modus löschen", + "message": "Möchten Sie den Modus \"{{modeName}}\" wirklich löschen?", + "rulesFolder": "Dieser Modus hat einen Regelordner unter {{folderPath}}, der ebenfalls gelöscht wird.", + "descriptionNoRules": "Möchten Sie diesen benutzerdefinierten Modus wirklich löschen?", + "confirm": "Löschen", + "cancel": "Abbrechen" + } } diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index c5e161bafe6c..14af077b234c 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -38,33 +38,72 @@ }, "codeIndex": { "title": "Codebase-Indexierung", + "description": "Konfiguriere Codebase-Indexierungseinstellungen, um semantische Suche in deinem Projekt zu aktivieren. <0>Mehr erfahren", + "statusTitle": "Status", "enableLabel": "Codebase-Indexierung aktivieren", "enableDescription": "<0>Codebase-Indexierung ist eine experimentelle Funktion, die einen semantischen Suchindex deines Projekts mit KI-Embeddings erstellt. Dies ermöglicht es Roo Code, große Codebasen besser zu verstehen und zu navigieren, indem relevanter Code basierend auf Bedeutung statt nur Schlüsselwörtern gefunden wird.", + "settingsTitle": "Indexierungseinstellungen", + "disabledMessage": "Codebase-Indexierung ist derzeit deaktiviert. Aktiviere sie in den globalen Einstellungen, um Indexierungsoptionen zu konfigurieren.", "providerLabel": "Embeddings-Anbieter", + "embedderProviderLabel": "Embedder-Anbieter", "selectProviderPlaceholder": "Anbieter auswählen", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "API-Schlüssel:", + "geminiApiKeyPlaceholder": "Geben Sie Ihren Gemini-API-Schlüssel ein", "openaiCompatibleProvider": "OpenAI-kompatibel", - "openaiCompatibleBaseUrlLabel": "Basis-URL:", - "openaiCompatibleApiKeyLabel": "API-Schlüssel:", - "openaiCompatibleModelDimensionLabel": "Embedding-Dimension:", - "openaiCompatibleModelDimensionPlaceholder": "z.B. 1536", - "openaiCompatibleModelDimensionDescription": "Die Embedding-Dimension (Ausgabegröße) für Ihr Modell. Überprüfen Sie die Dokumentation Ihres Anbieters für diesen Wert. 
Übliche Werte: 384, 768, 1536, 3072.", - "openaiKeyLabel": "OpenAI-Schlüssel:", + "openAiKeyLabel": "OpenAI API-Schlüssel", + "openAiKeyPlaceholder": "Gib deinen OpenAI API-Schlüssel ein", + "openAiCompatibleBaseUrlLabel": "Basis-URL", + "openAiCompatibleApiKeyLabel": "API-Schlüssel", + "openAiCompatibleApiKeyPlaceholder": "Gib deinen API-Schlüssel ein", + "openAiCompatibleModelDimensionLabel": "Embedding-Dimension:", + "modelDimensionLabel": "Modell-Dimension", + "openAiCompatibleModelDimensionPlaceholder": "z.B. 1536", + "openAiCompatibleModelDimensionDescription": "Die Embedding-Dimension (Ausgabegröße) für Ihr Modell. Überprüfen Sie die Dokumentation Ihres Anbieters für diesen Wert. Übliche Werte: 384, 768, 1536, 3072.", "modelLabel": "Modell", + "modelPlaceholder": "Modellname eingeben", + "selectModel": "Modell auswählen", "selectModelPlaceholder": "Modell auswählen", "ollamaUrlLabel": "Ollama-URL:", + "ollamaBaseUrlLabel": "Ollama Basis-URL", "qdrantUrlLabel": "Qdrant-URL", "qdrantKeyLabel": "Qdrant-Schlüssel:", - "startIndexingButton": "Indexierung starten", - "clearIndexDataButton": "Indexdaten löschen", + "qdrantApiKeyLabel": "Qdrant API-Schlüssel", + "qdrantApiKeyPlaceholder": "Gib deinen Qdrant API-Schlüssel ein (optional)", + "startIndexingButton": "Start", + "clearIndexDataButton": "Index löschen", "unsavedSettingsMessage": "Bitte speichere deine Einstellungen, bevor du den Indexierungsprozess startest.", "clearDataDialog": { "title": "Sind Sie sicher?", "description": "Diese Aktion kann nicht rückgängig gemacht werden. Dies wird Ihre Codebase-Indexdaten dauerhaft löschen.", "cancelButton": "Abbrechen", "confirmButton": "Daten löschen" - } + }, + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "Fehler beim Speichern der Einstellungen", + "modelDimensions": "({{dimension}} Dimensionen)", + "saveSuccess": "Einstellungen erfolgreich gespeichert", + "saving": "Speichern...", + "saveSettings": "Speichern", + "indexingStatuses": { + "standby": "Bereitschaft", + "indexing": "Indexierung", + "indexed": "Indexiert", + "error": "Fehler" + }, + "close": "Schließen", + "advancedConfigLabel": "Erweiterte Konfiguration", + "searchMinScoreLabel": "Suchergebnis-Schwellenwert", + "searchMinScoreDescription": "Mindestähnlichkeitswert (0.0-1.0), der für Suchergebnisse erforderlich ist. Niedrigere Werte liefern mehr Ergebnisse, die jedoch möglicherweise weniger relevant sind. Höhere Werte liefern weniger, aber relevantere Ergebnisse.", + "searchMinScoreResetTooltip": "Auf Standardwert zurücksetzen (0.4)", + "searchMaxResultsLabel": "Maximale Suchergebnisse", + "searchMaxResultsDescription": "Maximale Anzahl von Suchergebnissen, die bei der Abfrage des Codebase-Index zurückgegeben werden. Höhere Werte bieten mehr Kontext, können aber weniger relevante Ergebnisse enthalten.", + "resetToDefault": "Auf Standard zurücksetzen" }, "autoApprove": { "description": "Erlaubt Roo, Operationen automatisch ohne Genehmigung durchzuführen. 
Aktiviere diese Einstellungen nur, wenn du der KI vollständig vertraust und die damit verbundenen Sicherheitsrisiken verstehst.", @@ -110,6 +149,11 @@ "label": "Teilaufgaben", "description": "Erstellung und Abschluss von Unteraufgaben ohne Genehmigung erlauben" }, + "followupQuestions": { + "label": "Frage", + "description": "Automatisch die erste vorgeschlagene Antwort für Folgefragen nach der konfigurierten Zeitüberschreitung auswählen", + "timeoutLabel": "Wartezeit vor der automatischen Auswahl der ersten Antwort" + }, "execute": { "label": "Ausführen", "description": "Erlaubte Terminal-Befehle automatisch ohne Genehmigung ausführen", @@ -118,6 +162,10 @@ "commandPlaceholder": "Befehlspräfix eingeben (z.B. 'git ')", "addButton": "Hinzufügen" }, + "updateTodoList": { + "label": "Todo", + "description": "To-Do-Liste wird automatisch aktualisiert, ohne dass du zustimmen musst" + }, "apiRequestLimit": { "title": "Maximale Anfragen", "description": "Automatisch so viele API-Anfragen stellen, bevor du um die Erlaubnis gebeten wirst, mit der Aufgabe fortzufahren.", @@ -516,7 +564,6 @@ "name": "Marketplace aktivieren", "description": "Wenn aktiviert, kannst du MCP und benutzerdefinierte Modi aus dem Marketplace installieren und verwalten." }, - "MULTI_FILE_APPLY_DIFF": { "name": "Gleichzeitige Dateibearbeitungen aktivieren", "description": "Wenn aktiviert, kann Roo mehrere Dateien in einer einzigen Anfrage bearbeiten. Wenn deaktiviert, muss Roo Dateien einzeln bearbeiten. Das Deaktivieren kann helfen, wenn du mit weniger fähigen Modellen arbeitest oder mehr Kontrolle über Dateiänderungen haben möchtest." diff --git a/webview-ui/src/i18n/locales/en/chat.json b/webview-ui/src/i18n/locales/en/chat.json index 3e5c2bfc6062..ea4f8920f5d2 100644 --- a/webview-ui/src/i18n/locales/en/chat.json +++ b/webview-ui/src/i18n/locales/en/chat.json @@ -201,7 +201,8 @@ "codebaseSearch": { "wantsToSearch": "Roo wants to search the codebase for {{query}}:", "wantsToSearchWithPath": "Roo wants to search the codebase for {{query}} in {{path}}:", - "didSearch": "Found {{count}} result(s) for {{query}}:" + "didSearch": "Found {{count}} result(s) for {{query}}:", + "resultTooltip": "Similarity score: {{score}} (click to open file)" }, "commandOutput": "Command Output", "response": "Response", @@ -244,11 +245,11 @@ }, "announcement": { "title": "🎉 Roo Code {{version}} Released", - "description": "Roo Code {{version}} brings major new features and improvements based on your feedback.", + "description": "Roo Code {{version}} brings powerful new features and significant improvements to enhance your development workflow.", "whatsNew": "What's New", - "feature1": "Roo Marketplace Launch: The marketplace is now live! 
Discover and install modes and MCPs easier than ever before.", - "feature2": "Gemini 2.5 Models: Added support for new Gemini 2.5 Pro, Flash, and Flash Lite models.", - "feature3": "Excel File Support & More: Added Excel (.xlsx) file support and numerous bug fixes and improvements!", + "feature1": "1-Click Task Sharing: Share your tasks instantly with colleagues and the community with a single click.", + "feature2": "Global .roo Directory Support: Load rules and configurations from a global .roo directory for consistent settings across projects.", + "feature3": "Improved Architect to Code Transitions: Seamless handoffs from planning in Architect mode to implementation in Code mode.", "hideButton": "Hide announcement", "detailsDiscussLinks": "Get more details and discuss in Discord and Reddit 🚀" }, @@ -257,7 +258,9 @@ "seconds": "{{count}}s" }, "followUpSuggest": { - "copyToInput": "Copy to input (same as shift + click)" + "copyToInput": "Copy to input (same as shift + click)", + "autoSelectCountdown": "Auto-selecting in {{count}}s", + "countdownDisplay": "{{count}}s" }, "browser": { "rooWantsToUse": "Roo wants to use the browser:", diff --git a/webview-ui/src/i18n/locales/en/common.json b/webview-ui/src/i18n/locales/en/common.json index 1488f00f4683..e8c419cc8b1d 100644 --- a/webview-ui/src/i18n/locales/en/common.json +++ b/webview-ui/src/i18n/locales/en/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Generating mermaid diagram...", "render_error": "Unable to Render Diagram", + "fixing_syntax": "Fixing Mermaid syntax...", + "fix_syntax_button": "Fix syntax with AI", + "original_code": "Original code:", + "errors": { + "unknown_syntax": "Unknown syntax error", + "fix_timeout": "LLM fix request timed out", + "fix_failed": "LLM fix failed", + "fix_attempts": "Failed to fix syntax after {{attempts}} attempts. Last error: {{error}}", + "no_fix_provided": "LLM failed to provide a fix", + "fix_request_failed": "Fix request failed" + }, "buttons": { "zoom": "Zoom", "zoomIn": "Zoom In", diff --git a/webview-ui/src/i18n/locales/en/prompts.json b/webview-ui/src/i18n/locales/en/prompts.json index 3614d79872aa..6b83b612684a 100644 --- a/webview-ui/src/i18n/locales/en/prompts.json +++ b/webview-ui/src/i18n/locales/en/prompts.json @@ -4,11 +4,13 @@ "modes": { "title": "Modes", "createNewMode": "Create new mode", + "importMode": "Import Mode", "editModesConfig": "Edit modes configuration", "editGlobalModes": "Edit Global Modes", "editProjectModes": "Edit Project Modes (.roomodes)", "createModeHelpText": "Modes are specialized personas that tailor Roo's behavior. <0>Learn about Using Modes or <1>Customizing Modes.", - "selectMode": "Search modes" + "selectMode": "Search modes", + "noMatchFound": "No modes found" }, "apiConfiguration": { "title": "API Configuration", @@ -50,6 +52,27 @@ "description": "Add behavioral guidelines specific to {{modeName}} mode.", "loadFromFile": "Custom instructions specific to {{mode}} mode can also be loaded from the .roo/rules-{{slug}}/ folder in your workspace (.roorules-{{slug}} and .clinerules-{{slug}} are deprecated and will stop working soon)." }, + "exportMode": { + "title": "Export Mode", + "description": "Export this mode with rules from the .roo/rules-{{slug}}/ folder combined into a shareable YAML file. The original files remain unchanged.", + "exporting": "Exporting..." 
+ }, + "importMode": { + "selectLevel": "Choose where to import this mode:", + "import": "Import", + "importing": "Importing...", + "global": { + "label": "Global Level", + "description": "Available across all projects. If the exported mode contained rules files, they will be recreated in the global .roo/rules-{slug}/ folder." + }, + "project": { + "label": "Project Level", + "description": "Only available in this workspace. If the exported mode contained rules files, they will be recreated in .roo/rules-{slug}/ folder." + } + }, + "advanced": { + "title": "Advanced: Override System Prompt" + }, "globalCustomInstructions": { "title": "Custom Instructions for All Modes", "description": "These instructions apply to all modes. They provide a base set of behaviors that can be enhanced by mode-specific instructions below. <0>Learn more", @@ -164,5 +187,13 @@ }, "deleteMode": "Delete mode" }, - "allFiles": "all files" + "allFiles": "all files", + "deleteMode": { + "title": "Delete Mode", + "message": "Are you sure you want to delete the mode \"{{modeName}}\"?", + "rulesFolder": "This mode has a rules folder at {{folderPath}} that will also be deleted.", + "descriptionNoRules": "Are you sure you want to delete this custom mode?", + "confirm": "Delete", + "cancel": "Cancel" + } } diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 14981ea4990b..ea7191f2241c 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -38,24 +38,47 @@ }, "codeIndex": { "title": "Codebase Indexing", + "description": "Configure codebase indexing settings to enable semantic search of your project. <0>Learn more", + "statusTitle": "Status", "enableLabel": "Enable Codebase Indexing", "enableDescription": "<0>Codebase Indexing is an experimental feature that creates a semantic search index of your project using AI embeddings. This enables Roo Code to better understand and navigate large codebases by finding relevant code based on meaning rather than just keywords.", + "settingsTitle": "Indexing Settings", + "disabledMessage": "Codebase indexing is currently disabled. Enable it in the global settings to configure indexing options.", "providerLabel": "Embeddings Provider", + "embedderProviderLabel": "Embedder Provider", "selectProviderPlaceholder": "Select provider", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "API Key:", + "geminiApiKeyPlaceholder": "Enter your Gemini API key", "openaiCompatibleProvider": "OpenAI Compatible", - "openaiKeyLabel": "OpenAI Key:", - "openaiCompatibleBaseUrlLabel": "Base URL:", - "openaiCompatibleApiKeyLabel": "API Key:", - "openaiCompatibleModelDimensionLabel": "Embedding Dimension:", - "openaiCompatibleModelDimensionPlaceholder": "e.g., 1536", - "openaiCompatibleModelDimensionDescription": "The embedding dimension (output size) for your model. Check your provider's documentation for this value. 
Common values: 384, 768, 1536, 3072.", + "openAiKeyLabel": "OpenAI API Key", + "openAiKeyPlaceholder": "Enter your OpenAI API key", + "openAiCompatibleBaseUrlLabel": "Base URL", + "openAiCompatibleApiKeyLabel": "API Key", + "openAiCompatibleApiKeyPlaceholder": "Enter your API key", + "openAiCompatibleModelDimensionLabel": "Embedding Dimension:", + "modelDimensionLabel": "Model Dimension", + "openAiCompatibleModelDimensionPlaceholder": "e.g., 1536", + "openAiCompatibleModelDimensionDescription": "The embedding dimension (output size) for your model. Check your provider's documentation for this value. Common values: 384, 768, 1536, 3072.", "modelLabel": "Model", + "modelPlaceholder": "Enter model name", + "selectModel": "Select a model", "selectModelPlaceholder": "Select model", "ollamaUrlLabel": "Ollama URL:", + "ollamaBaseUrlLabel": "Ollama Base URL", "qdrantUrlLabel": "Qdrant URL", "qdrantKeyLabel": "Qdrant Key:", + "qdrantApiKeyLabel": "Qdrant API Key", + "qdrantApiKeyPlaceholder": "Enter your Qdrant API key (optional)", + "advancedConfigLabel": "Advanced Configuration", + "searchMinScoreLabel": "Search Score Threshold", + "searchMinScoreDescription": "Minimum similarity score (0.0-1.0) required for search results. Lower values return more results but may be less relevant. Higher values return fewer but more relevant results.", + "searchMinScoreResetTooltip": "Reset to default value (0.4)", + "searchMaxResultsLabel": "Maximum Search Results", + "searchMaxResultsDescription": "Maximum number of search results to return when querying the codebase index. Higher values provide more context but may include less relevant results.", + "resetToDefault": "Reset to default", "startIndexingButton": "Start Indexing", "clearIndexDataButton": "Clear Index Data", "unsavedSettingsMessage": "Please save your settings before starting the indexing process.", @@ -64,7 +87,23 @@ "description": "This action cannot be undone. This will permanently delete your codebase index data.", "cancelButton": "Cancel", "confirmButton": "Clear Data" - } + }, + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "Failed to save settings", + "modelDimensions": "({{dimension}} dimensions)", + "saveSuccess": "Settings saved successfully", + "saving": "Saving...", + "saveSettings": "Save", + "indexingStatuses": { + "standby": "Standby", + "indexing": "Indexing", + "indexed": "Indexed", + "error": "Error" + }, + "close": "Close" }, "autoApprove": { "description": "Allow Roo to automatically perform operations without requiring approval. 
Enable these settings only if you fully trust the AI and understand the associated security risks.", @@ -110,6 +149,11 @@ "label": "Subtasks", "description": "Allow creation and completion of subtasks without requiring approval" }, + "followupQuestions": { + "label": "Question", + "description": "Automatically select the first suggested answer for follow-up questions after the configured timeout", + "timeoutLabel": "Time to wait before auto-selecting the first answer" + }, "execute": { "label": "Execute", "description": "Automatically execute allowed terminal commands without requiring approval", @@ -118,6 +162,10 @@ "commandPlaceholder": "Enter command prefix (e.g., 'git ')", "addButton": "Add" }, + "updateTodoList": { + "label": "Todo", + "description": "Automatically update the to-do list without requiring approval" + }, "apiRequestLimit": { "title": "Max Requests", "description": "Automatically make this many API requests before asking for approval to continue with the task.", diff --git a/webview-ui/src/i18n/locales/es/chat.json b/webview-ui/src/i18n/locales/es/chat.json index db90a70057f8..a4349b13dcdc 100644 --- a/webview-ui/src/i18n/locales/es/chat.json +++ b/webview-ui/src/i18n/locales/es/chat.json @@ -234,15 +234,17 @@ "tokens": "tokens" }, "followUpSuggest": { - "copyToInput": "Copiar a la entrada (o Shift + clic)" + "copyToInput": "Copiar a la entrada (o Shift + clic)", + "autoSelectCountdown": "Selección automática en {{count}}s", + "countdownDisplay": "{{count}}s" }, "announcement": { "title": "🎉 Roo Code {{version}} publicado", - "description": "Roo Code {{version}} trae importantes nuevas funcionalidades y mejoras basadas en tus comentarios.", + "description": "Roo Code {{version}} trae poderosas nuevas funcionalidades y mejoras significativas para mejorar tu flujo de trabajo de desarrollo.", "whatsNew": "Novedades", - "feature1": "Lanzamiento del Marketplace de Roo: ¡El marketplace ya está disponible! 
Descubre e instala modos y MCPs más fácil que nunca.", - "feature2": "Modelos Gemini 2.5: Se agregó soporte para los nuevos modelos Gemini 2.5 Pro, Flash y Flash Lite.", - "feature3": "Soporte de archivos Excel y más: ¡Se agregó soporte para archivos Excel (.xlsx) y numerosas correcciones de errores y mejoras!", + "feature1": "Compartir tareas con 1 clic: Comparte tus tareas instantáneamente con colegas y la comunidad con un solo clic.", + "feature2": "Soporte de directorio .roo global: Carga reglas y configuraciones desde un directorio .roo global para configuraciones consistentes entre proyectos.", + "feature3": "Transiciones mejoradas de Arquitecto a Código: Transferencias fluidas desde la planificación en modo Arquitecto hasta la implementación en modo Código.", "hideButton": "Ocultar anuncio", "detailsDiscussLinks": "Obtén más detalles y participa en Discord y Reddit 🚀" }, @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo quiere buscar en la base de código {{query}}:", "wantsToSearchWithPath": "Roo quiere buscar en la base de código {{query}} en {{path}}:", - "didSearch": "Se encontraron {{count}} resultado(s) para {{query}}:" + "didSearch": "Se encontraron {{count}} resultado(s) para {{query}}:", + "resultTooltip": "Puntuación de similitud: {{score}} (haz clic para abrir el archivo)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/es/common.json b/webview-ui/src/i18n/locales/es/common.json index 5fe624372f23..e2e69ca506fd 100644 --- a/webview-ui/src/i18n/locales/es/common.json +++ b/webview-ui/src/i18n/locales/es/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Generando diagrama mermaid...", "render_error": "No se puede renderizar el diagrama", + "fixing_syntax": "Corrigiendo sintaxis de Mermaid...", + "fix_syntax_button": "Corregir sintaxis con IA", + "original_code": "Código original:", + "errors": { + "unknown_syntax": "Error de sintaxis desconocido", + "fix_timeout": "La solicitud de corrección de IA agotó el tiempo de espera", + "fix_failed": "La corrección de IA falló", + "fix_attempts": "No se pudo corregir la sintaxis después de {{attempts}} intentos. Último error: {{error}}", + "no_fix_provided": "La IA no pudo proporcionar una corrección", + "fix_request_failed": "La solicitud de corrección falló" + }, "buttons": { "zoom": "Zoom", "zoomIn": "Ampliar", diff --git a/webview-ui/src/i18n/locales/es/prompts.json b/webview-ui/src/i18n/locales/es/prompts.json index 54b5c1bd2dbc..daa03f4ffc57 100644 --- a/webview-ui/src/i18n/locales/es/prompts.json +++ b/webview-ui/src/i18n/locales/es/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "Modos", "createNewMode": "Crear nuevo modo", + "importMode": "Importar modo", + "noMatchFound": "No se encontraron modos", "editModesConfig": "Editar configuración de modos", "editGlobalModes": "Editar modos globales", "editProjectModes": "Editar modos del proyecto (.roomodes)", @@ -50,6 +52,28 @@ "description": "Agrega directrices de comportamiento específicas para el modo {{modeName}}.", "loadFromFile": "Las instrucciones personalizadas para el modo {{mode}} también se pueden cargar desde la carpeta .roo/rules-{{slug}}/ en tu espacio de trabajo (.roorules-{{slug}} y .clinerules-{{slug}} están obsoletos y dejarán de funcionar pronto)." }, + "exportMode": { + "title": "Exportar modo", + "description": "Exporta este modo a un archivo YAML con todas las reglas incluidas para compartir fácilmente con otros.", + "export": "Exportar modo", + "exporting": "Exportando..." 
+ }, + "importMode": { + "selectLevel": "Elige dónde importar este modo:", + "import": "Importar", + "importing": "Importando...", + "global": { + "label": "Nivel global", + "description": "Disponible en todos los proyectos. Si el modo exportado contenía archivos de reglas, se volverán a crear en la carpeta global .roo/rules-{slug}/." + }, + "project": { + "label": "Nivel de proyecto", + "description": "Solo disponible en este espacio de trabajo. Si el modo exportado contenía archivos de reglas, se volverán a crear en la carpeta .roo/rules-{slug}/." + } + }, + "advanced": { + "title": "Avanzado" + }, "globalCustomInstructions": { "title": "Instrucciones personalizadas para todos los modos", "description": "Estas instrucciones se aplican a todos los modos. Proporcionan un conjunto base de comportamientos que pueden ser mejorados por instrucciones específicas de cada modo a continuación. <0>Más información", @@ -164,5 +188,13 @@ }, "deleteMode": "Eliminar modo" }, - "allFiles": "todos los archivos" + "allFiles": "todos los archivos", + "deleteMode": { + "title": "Eliminar modo", + "message": "¿Estás seguro de que quieres eliminar el modo \"{{modeName}}\"?", + "rulesFolder": "Este modo tiene una carpeta de reglas en {{folderPath}} que también se eliminará.", + "descriptionNoRules": "¿Estás seguro de que quieres eliminar este modo personalizado?", + "confirm": "Eliminar", + "cancel": "Cancelar" + } } diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index 3e3d20cce1ba..d6cebea20610 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -38,33 +38,72 @@ }, "codeIndex": { "title": "Indexación de código", + "description": "Configura los ajustes de indexación de código para habilitar búsqueda semántica en tu proyecto. <0>Más información", + "statusTitle": "Estado", "enableLabel": "Habilitar indexación de código", "enableDescription": "<0>La indexación de código es una función experimental que crea un índice de búsqueda semántica de tu proyecto usando embeddings de IA. Esto permite a Roo Code entender mejor y navegar grandes bases de código encontrando código relevante basado en significado en lugar de solo palabras clave.", + "settingsTitle": "Configuración de indexación", + "disabledMessage": "La indexación de código está actualmente deshabilitada. Habilítala en la configuración global para configurar las opciones de indexación.", "providerLabel": "Proveedor de embeddings", + "embedderProviderLabel": "Proveedor de embedder", "selectProviderPlaceholder": "Seleccionar proveedor", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "Clave API:", + "geminiApiKeyPlaceholder": "Introduce tu clave de API de Gemini", "openaiCompatibleProvider": "Compatible con OpenAI", - "openaiCompatibleBaseUrlLabel": "URL base:", - "openaiCompatibleApiKeyLabel": "Clave API:", - "openaiCompatibleModelDimensionLabel": "Dimensión de Embedding:", - "openaiCompatibleModelDimensionPlaceholder": "ej., 1536", - "openaiCompatibleModelDimensionDescription": "La dimensión de embedding (tamaño de salida) para tu modelo. Consulta la documentación de tu proveedor para este valor. 
Valores comunes: 384, 768, 1536, 3072.", - "openaiKeyLabel": "Clave de OpenAI:", + "openAiKeyLabel": "Clave API de OpenAI", + "openAiKeyPlaceholder": "Introduce tu clave API de OpenAI", + "openAiCompatibleBaseUrlLabel": "URL base", + "openAiCompatibleApiKeyLabel": "Clave API", + "openAiCompatibleApiKeyPlaceholder": "Introduce tu clave API", + "openAiCompatibleModelDimensionLabel": "Dimensión de Embedding:", + "modelDimensionLabel": "Dimensión del modelo", + "openAiCompatibleModelDimensionPlaceholder": "ej., 1536", + "openAiCompatibleModelDimensionDescription": "La dimensión de embedding (tamaño de salida) para tu modelo. Consulta la documentación de tu proveedor para este valor. Valores comunes: 384, 768, 1536, 3072.", "modelLabel": "Modelo", + "modelPlaceholder": "Introduce el nombre del modelo", + "selectModel": "Seleccionar un modelo", "selectModelPlaceholder": "Seleccionar modelo", "ollamaUrlLabel": "URL de Ollama:", + "ollamaBaseUrlLabel": "URL base de Ollama", "qdrantUrlLabel": "URL de Qdrant", "qdrantKeyLabel": "Clave de Qdrant:", - "startIndexingButton": "Iniciar indexación", - "clearIndexDataButton": "Borrar datos de índice", + "qdrantApiKeyLabel": "Clave API de Qdrant", + "qdrantApiKeyPlaceholder": "Introduce tu clave API de Qdrant (opcional)", + "startIndexingButton": "Iniciar", + "clearIndexDataButton": "Borrar índice", "unsavedSettingsMessage": "Por favor guarda tus ajustes antes de iniciar el proceso de indexación.", "clearDataDialog": { "title": "¿Estás seguro?", "description": "Esta acción no se puede deshacer. Esto eliminará permanentemente los datos de índice de tu base de código.", "cancelButton": "Cancelar", "confirmButton": "Borrar datos" - } + }, + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "Error al guardar la configuración", + "modelDimensions": "({{dimension}} dimensiones)", + "saveSuccess": "Configuración guardada exitosamente", + "saving": "Guardando...", + "saveSettings": "Guardar", + "indexingStatuses": { + "standby": "En espera", + "indexing": "Indexando", + "indexed": "Indexado", + "error": "Error" + }, + "close": "Cerrar", + "advancedConfigLabel": "Configuración avanzada", + "searchMinScoreLabel": "Umbral de puntuación de búsqueda", + "searchMinScoreDescription": "Puntuación mínima de similitud (0.0-1.0) requerida para los resultados de búsqueda. Valores más bajos devuelven más resultados pero pueden ser menos relevantes. Valores más altos devuelven menos resultados pero más relevantes.", + "searchMinScoreResetTooltip": "Restablecer al valor predeterminado (0.4)", + "searchMaxResultsLabel": "Resultados máximos de búsqueda", + "searchMaxResultsDescription": "Número máximo de resultados de búsqueda a devolver al consultar el índice de código. Valores más altos proporcionan más contexto pero pueden incluir resultados menos relevantes.", + "resetToDefault": "Restablecer al valor predeterminado" }, "autoApprove": { "description": "Permitir que Roo realice operaciones automáticamente sin requerir aprobación. 
Habilite esta configuración solo si confía plenamente en la IA y comprende los riesgos de seguridad asociados.", @@ -110,6 +149,11 @@ "label": "Subtareas", "description": "Permitir la creación y finalización de subtareas sin requerir aprobación" }, + "followupQuestions": { + "label": "Pregunta", + "description": "Seleccionar automáticamente la primera respuesta sugerida para preguntas de seguimiento después del tiempo de espera configurado", + "timeoutLabel": "Tiempo de espera antes de seleccionar automáticamente la primera respuesta" + }, "execute": { "label": "Ejecutar", "description": "Ejecutar automáticamente comandos de terminal permitidos sin requerir aprobación", @@ -118,6 +162,10 @@ "commandPlaceholder": "Ingrese prefijo de comando (ej. 'git ')", "addButton": "Añadir" }, + "updateTodoList": { + "label": "Todo", + "description": "La lista de tareas se actualiza automáticamente sin aprobación" + }, "apiRequestLimit": { "title": "Solicitudes máximas", "description": "Realizar automáticamente esta cantidad de solicitudes a la API antes de pedir aprobación para continuar con la tarea.", diff --git a/webview-ui/src/i18n/locales/fr/chat.json b/webview-ui/src/i18n/locales/fr/chat.json index b0d20bc7b229..b5f06354bb88 100644 --- a/webview-ui/src/i18n/locales/fr/chat.json +++ b/webview-ui/src/i18n/locales/fr/chat.json @@ -234,15 +234,17 @@ "tokens": "tokens" }, "followUpSuggest": { - "copyToInput": "Copier vers l'entrée (ou Shift + clic)" + "copyToInput": "Copier vers l'entrée (ou Shift + clic)", + "autoSelectCountdown": "Sélection automatique dans {{count}}s", + "countdownDisplay": "{{count}}s" }, "announcement": { "title": "🎉 Roo Code {{version}} est sortie", - "description": "Roo Code {{version}} apporte de nouvelles fonctionnalités majeures et des améliorations basées sur vos retours.", + "description": "Roo Code {{version}} apporte de puissantes nouvelles fonctionnalités et des améliorations significatives pour améliorer ton flux de travail de développement.", "whatsNew": "Quoi de neuf", - "feature1": "Lancement du Marketplace Roo : Le marketplace est maintenant en ligne ! 
Découvrez et installez des modes et des MCPs plus facilement que jamais.", - "feature2": "Modèles Gemini 2.5 : Ajout du support pour les nouveaux modèles Gemini 2.5 Pro, Flash et Flash Lite.", - "feature3": "Support des fichiers Excel et plus : Ajout du support des fichiers Excel (.xlsx) et de nombreuses corrections de bugs et améliorations !", + "feature1": "Partage de tâches en 1 clic : Partage tes tâches instantanément avec tes collègues et la communauté en un seul clic.", + "feature2": "Support du répertoire .roo global : Charge les règles et configurations depuis un répertoire .roo global pour des paramètres cohérents entre les projets.", + "feature3": "Transitions Architecte vers Code améliorées : Transferts fluides de la planification en mode Architecte vers l'implémentation en mode Code.", "hideButton": "Masquer l'annonce", "detailsDiscussLinks": "Obtenez plus de détails et participez aux discussions sur Discord et Reddit 🚀" }, @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo veut rechercher dans la base de code {{query}} :", "wantsToSearchWithPath": "Roo veut rechercher dans la base de code {{query}} dans {{path}} :", - "didSearch": "{{count}} résultat(s) trouvé(s) pour {{query}} :" + "didSearch": "{{count}} résultat(s) trouvé(s) pour {{query}} :", + "resultTooltip": "Score de similarité : {{score}} (cliquer pour ouvrir le fichier)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/fr/common.json b/webview-ui/src/i18n/locales/fr/common.json index 677116ff2a84..e5e0a3bb80d3 100644 --- a/webview-ui/src/i18n/locales/fr/common.json +++ b/webview-ui/src/i18n/locales/fr/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Génération du diagramme mermaid...", "render_error": "Impossible de rendre le diagramme", + "fixing_syntax": "Correction de la syntaxe Mermaid...", + "fix_syntax_button": "Corriger la syntaxe avec l'IA", + "original_code": "Code original :", + "errors": { + "unknown_syntax": "Erreur de syntaxe inconnue", + "fix_timeout": "La demande de correction par l'IA a expiré", + "fix_failed": "La correction par l'IA a échoué", + "fix_attempts": "Échec de la correction de la syntaxe après {{attempts}} tentatives. Dernière erreur : {{error}}", + "no_fix_provided": "L'IA n'a pas pu fournir de correction", + "fix_request_failed": "La demande de correction a échoué" + }, "buttons": { "zoom": "Zoom", "zoomIn": "Agrandir", diff --git a/webview-ui/src/i18n/locales/fr/prompts.json b/webview-ui/src/i18n/locales/fr/prompts.json index 39bc67e482c2..d8c331708110 100644 --- a/webview-ui/src/i18n/locales/fr/prompts.json +++ b/webview-ui/src/i18n/locales/fr/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "Modes", "createNewMode": "Créer un nouveau mode", + "importMode": "Importer le mode", + "noMatchFound": "Aucun mode trouvé", "editModesConfig": "Modifier la configuration des modes", "editGlobalModes": "Modifier les modes globaux", "editProjectModes": "Modifier les modes du projet (.roomodes)", @@ -50,6 +52,28 @@ "description": "Ajoutez des directives comportementales spécifiques au mode {{modeName}}.", "loadFromFile": "Les instructions personnalisées spécifiques au mode {{mode}} peuvent également être chargées depuis le dossier .roo/rules-{{slug}}/ dans votre espace de travail (.roorules-{{slug}} et .clinerules-{{slug}} sont obsolètes et cesseront de fonctionner bientôt)." 
}, + "exportMode": { + "title": "Exporter le mode", + "description": "Exporte ce mode vers un fichier YAML avec toutes les règles incluses pour un partage facile avec d'autres.", + "export": "Exporter le mode", + "exporting": "Exportation..." + }, + "importMode": { + "selectLevel": "Choisissez où importer ce mode :", + "import": "Importer", + "importing": "Importation...", + "global": { + "label": "Niveau global", + "description": "Disponible dans tous les projets. Si le mode exporté contenait des fichiers de règles, ils seront recréés dans le dossier global .roo/rules-{slug}/." + }, + "project": { + "label": "Niveau projet", + "description": "Disponible uniquement dans cet espace de travail. Si le mode exporté contenait des fichiers de règles, ils seront recréés dans le dossier .roo/rules-{slug}/." + } + }, + "advanced": { + "title": "Avancé" + }, "globalCustomInstructions": { "title": "Instructions personnalisées pour tous les modes", "description": "Ces instructions s'appliquent à tous les modes. Elles fournissent un ensemble de comportements de base qui peuvent être améliorés par des instructions spécifiques au mode ci-dessous. <0>En savoir plus", @@ -164,5 +188,13 @@ }, "deleteMode": "Supprimer le mode" }, - "allFiles": "tous les fichiers" + "allFiles": "tous les fichiers", + "deleteMode": { + "title": "Supprimer le mode", + "message": "Êtes-vous sûr de vouloir supprimer le mode \"{{modeName}}\" ?", + "rulesFolder": "Ce mode a un dossier de règles à {{folderPath}} qui sera également supprimé.", + "descriptionNoRules": "Êtes-vous sûr de vouloir supprimer ce mode personnalisé ?", + "confirm": "Supprimer", + "cancel": "Annuler" + } } diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 5635251876e4..281fb705fc9f 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -38,33 +38,72 @@ }, "codeIndex": { "title": "Indexation de la base de code", + "description": "Configurez les paramètres d'indexation de la base de code pour activer la recherche sémantique dans votre projet. <0>En savoir plus", + "statusTitle": "Statut", "enableLabel": "Activer l'indexation de la base de code", "enableDescription": "<0>L'indexation de la base de code est une fonctionnalité expérimentale qui crée un index de recherche sémantique de votre projet en utilisant des embeddings IA. Cela permet à Roo Code de mieux comprendre et naviguer dans de grandes bases de code en trouvant du code pertinent basé sur le sens plutôt que seulement sur des mots-clés.", + "settingsTitle": "Paramètres d'indexation", + "disabledMessage": "L'indexation de la base de code est actuellement désactivée. Activez-la dans les paramètres globaux pour configurer les options d'indexation.", "providerLabel": "Fournisseur d'embeddings", + "embedderProviderLabel": "Fournisseur d'embedder", "selectProviderPlaceholder": "Sélectionner un fournisseur", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "Clé API :", + "geminiApiKeyPlaceholder": "Entrez votre clé API Gemini", "openaiCompatibleProvider": "Compatible OpenAI", - "openaiCompatibleBaseUrlLabel": "URL de base :", - "openaiCompatibleApiKeyLabel": "Clé API :", - "openaiCompatibleModelDimensionLabel": "Dimension d'Embedding :", - "openaiCompatibleModelDimensionPlaceholder": "ex., 1536", - "openaiCompatibleModelDimensionDescription": "La dimension d'embedding (taille de sortie) pour votre modèle. 
Consultez la documentation de votre fournisseur pour cette valeur. Valeurs courantes : 384, 768, 1536, 3072.", - "openaiKeyLabel": "Clé OpenAI :", + "openAiKeyLabel": "Clé API OpenAI", + "openAiKeyPlaceholder": "Entrez votre clé API OpenAI", + "openAiCompatibleBaseUrlLabel": "URL de base", + "openAiCompatibleApiKeyLabel": "Clé API", + "openAiCompatibleApiKeyPlaceholder": "Entrez votre clé API", + "openAiCompatibleModelDimensionLabel": "Dimension d'Embedding :", + "modelDimensionLabel": "Dimension du modèle", + "openAiCompatibleModelDimensionPlaceholder": "ex., 1536", + "openAiCompatibleModelDimensionDescription": "La dimension d'embedding (taille de sortie) pour votre modèle. Consultez la documentation de votre fournisseur pour cette valeur. Valeurs courantes : 384, 768, 1536, 3072.", "modelLabel": "Modèle", + "modelPlaceholder": "Entrez le nom du modèle", + "selectModel": "Sélectionner un modèle", "selectModelPlaceholder": "Sélectionner un modèle", "ollamaUrlLabel": "URL Ollama :", + "ollamaBaseUrlLabel": "URL de base Ollama", "qdrantUrlLabel": "URL Qdrant", "qdrantKeyLabel": "Clé Qdrant :", - "startIndexingButton": "Démarrer l'indexation", - "clearIndexDataButton": "Effacer les données d'index", + "qdrantApiKeyLabel": "Clé API Qdrant", + "qdrantApiKeyPlaceholder": "Entrez votre clé API Qdrant (optionnel)", + "startIndexingButton": "Démarrer", + "clearIndexDataButton": "Effacer l'index", "unsavedSettingsMessage": "Merci d'enregistrer tes paramètres avant de démarrer le processus d'indexation.", "clearDataDialog": { "title": "Êtes-vous sûr ?", "description": "Cette action ne peut pas être annulée. Cela supprimera définitivement les données d'index de votre base de code.", "cancelButton": "Annuler", "confirmButton": "Effacer les données" - } + }, + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "Échec de la sauvegarde des paramètres", + "modelDimensions": "({{dimension}} dimensions)", + "saveSuccess": "Paramètres sauvegardés avec succès", + "saving": "Sauvegarde...", + "saveSettings": "Sauvegarder", + "indexingStatuses": { + "standby": "En attente", + "indexing": "Indexation", + "indexed": "Indexé", + "error": "Erreur" + }, + "close": "Fermer", + "advancedConfigLabel": "Configuration avancée", + "searchMinScoreLabel": "Seuil de score de recherche", + "searchMinScoreDescription": "Score de similarité minimum (0.0-1.0) requis pour les résultats de recherche. Des valeurs plus faibles renvoient plus de résultats mais peuvent être moins pertinents. Des valeurs plus élevées renvoient moins de résultats mais plus pertinents.", + "searchMinScoreResetTooltip": "Réinitialiser à la valeur par défaut (0.4)", + "searchMaxResultsLabel": "Résultats de recherche maximum", + "searchMaxResultsDescription": "Nombre maximum de résultats de recherche à retourner lors de l'interrogation de l'index de code. Des valeurs plus élevées fournissent plus de contexte mais peuvent inclure des résultats moins pertinents.", + "resetToDefault": "Réinitialiser par défaut" }, "autoApprove": { "description": "Permettre à Roo d'effectuer automatiquement des opérations sans requérir d'approbation. 
Activez ces paramètres uniquement si vous faites entièrement confiance à l'IA et que vous comprenez les risques de sécurité associés.", @@ -110,6 +149,11 @@ "label": "Sous-tâches", "description": "Permettre la création et l'achèvement des sous-tâches sans nécessiter d'approbation" }, + "followupQuestions": { + "label": "Question", + "description": "Sélectionner automatiquement la première réponse suggérée pour les questions de suivi après le délai configuré", + "timeoutLabel": "Temps d'attente avant la sélection automatique de la première réponse" + }, "execute": { "label": "Exécuter", "description": "Exécuter automatiquement les commandes de terminal autorisées sans nécessiter d'approbation", @@ -118,6 +162,10 @@ "commandPlaceholder": "Entrez le préfixe de commande (ex. 'git ')", "addButton": "Ajouter" }, + "updateTodoList": { + "label": "Todo", + "description": "La liste de tâches est mise à jour automatiquement sans approbation" + }, "apiRequestLimit": { "title": "Requêtes maximales", "description": "Effectuer automatiquement ce nombre de requêtes API avant de demander l'approbation pour continuer la tâche.", @@ -516,7 +564,6 @@ "name": "Activer le Marketplace", "description": "Lorsque cette option est activée, tu peux installer des MCP et des modes personnalisés depuis le Marketplace." }, - "MULTI_FILE_APPLY_DIFF": { "name": "Activer les éditions de fichiers concurrentes", "description": "Lorsque cette option est activée, Roo peut éditer plusieurs fichiers en une seule requête. Lorsqu'elle est désactivée, Roo doit éditer les fichiers un par un. Désactiver cette option peut aider lorsque tu travailles avec des modèles moins capables ou lorsque tu veux plus de contrôle sur les modifications de fichiers." diff --git a/webview-ui/src/i18n/locales/hi/chat.json b/webview-ui/src/i18n/locales/hi/chat.json index 24e2af1eb7e2..9afdcbdd3849 100644 --- a/webview-ui/src/i18n/locales/hi/chat.json +++ b/webview-ui/src/i18n/locales/hi/chat.json @@ -234,17 +234,19 @@ "tokens": "टोकन" }, "followUpSuggest": { - "copyToInput": "इनपुट में कॉपी करें (या Shift + क्लिक)" + "copyToInput": "इनपुट में कॉपी करें (या Shift + क्लिक)", + "autoSelectCountdown": "{{count}}s में स्वचालित रूप से चयन हो रहा है", + "countdownDisplay": "{{count}}सेकंड" }, "announcement": { "title": "🎉 Roo Code {{version}} रिलीज़ हुआ", - "description": "Roo Code {{version}} आपके फीडबैक के आधार पर शक्तिशाली नई सुविधाएँ और सुधार लाता है।", - "whatsNew": "नई सुविधाएँ", - "feature1": "Roo Marketplace लॉन्च - Marketplace अब लाइव है! 
पहले से कहीं आसान तरीके से modes और MCP खोजें और इंस्टॉल करें।", - "feature2": "Gemini 2.5 Models - नए Gemini 2.5 Pro, Flash, और Flash Lite models के लिए समर्थन जोड़ा गया।", - "feature3": "Excel File समर्थन और अधिक - Excel (.xlsx) file समर्थन जोड़ा गया और कई bug fixes और सुधार!", - "hideButton": "घोषणा छिपाएँ", - "detailsDiscussLinks": "Discord और Reddit पर अधिक जानकारी प्राप्त करें और चर्चा में भाग लें 🚀" + "description": "Roo Code {{version}} आपके विकास वर्कफ़्लो को बेहतर बनाने के लिए शक्तिशाली नई सुविधाएं और महत्वपूर्ण सुधार लेकर आया है।", + "whatsNew": "नया क्या है", + "feature1": "1-क्लिक टास्क शेयरिंग: अपने टास्क को सहकर्मियों और समुदाय के साथ एक क्लिक में तुरंत साझा करें।", + "feature2": "ग्लोबल .roo डायरेक्टरी समर्थन: प्रोजेक्ट्स में निरंतर सेटिंग्स के लिए ग्लोबल .roo डायरेक्टरी से नियम और कॉन्फ़िगरेशन लोड करें।", + "feature3": "बेहतर आर्किटेक्ट से कोड ट्रांज़िशन: आर्किटेक्ट मोड में प्लानिंग से कोड मोड में इम्प्लीमेंटेशन तक सहज स्थानांतरण।", + "hideButton": "घोषणा छुपाएं", + "detailsDiscussLinks": "Discord और Reddit पर अधिक विवरण प्राप्त करें और चर्चाओं में शामिल हों 🚀" }, "browser": { "rooWantsToUse": "Roo ब्राउज़र का उपयोग करना चाहता है:", @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo कोडबेस में {{query}} खोजना चाहता है:", "wantsToSearchWithPath": "Roo {{path}} में कोडबेस में {{query}} खोजना चाहता है:", - "didSearch": "{{query}} के लिए {{count}} परिणाम मिले:" + "didSearch": "{{query}} के लिए {{count}} परिणाम मिले:", + "resultTooltip": "समानता स्कोर: {{score}} (फ़ाइल खोलने के लिए क्लिक करें)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/hi/common.json b/webview-ui/src/i18n/locales/hi/common.json index 77876eb2743d..d7566739e503 100644 --- a/webview-ui/src/i18n/locales/hi/common.json +++ b/webview-ui/src/i18n/locales/hi/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "मरमेड डायग्राम जनरेट हो रहा है...", "render_error": "डायग्राम रेंडर नहीं किया जा सकता", + "fixing_syntax": "मरमेड सिंटैक्स ठीक किया जा रहा है...", + "fix_syntax_button": "AI से सिंटैक्स ठीक करें", + "original_code": "मूल कोड:", + "errors": { + "unknown_syntax": "अज्ञात सिंटैक्स त्रुटि", + "fix_timeout": "LLM फिक्स अनुरोध का समय समाप्त हो गया", + "fix_failed": "LLM फिक्स विफल हुआ", + "fix_attempts": "{{attempts}} प्रयासों के बाद सिंटैक्स ठीक करने में विफल। अंतिम त्रुटि: {{error}}", + "no_fix_provided": "LLM फिक्स प्रदान करने में विफल रहा", + "fix_request_failed": "फिक्स अनुरोध विफल हुआ" + }, "buttons": { "zoom": "ज़ूम", "zoomIn": "बड़ा करें", diff --git a/webview-ui/src/i18n/locales/hi/prompts.json b/webview-ui/src/i18n/locales/hi/prompts.json index 9633b0295383..1cb21673b0f1 100644 --- a/webview-ui/src/i18n/locales/hi/prompts.json +++ b/webview-ui/src/i18n/locales/hi/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "मोड्स", "createNewMode": "नया मोड बनाएँ", + "importMode": "मोड आयात करें", + "noMatchFound": "कोई मोड नहीं मिला", "editModesConfig": "मोड कॉन्फ़िगरेशन संपादित करें", "editGlobalModes": "ग्लोबल मोड्स संपादित करें", "editProjectModes": "प्रोजेक्ट मोड्स संपादित करें (.roomodes)", @@ -50,6 +52,28 @@ "description": "{{modeName}} मोड के लिए विशिष्ट व्यवहार दिशानिर्देश जोड़ें।", "loadFromFile": "{{mode}} मोड के लिए विशिष्ट कस्टम निर्देश आपके वर्कस्पेस में .roo/rules-{{slug}}/ फ़ोल्डर से भी लोड किए जा सकते हैं (.roorules-{{slug}} और .clinerules-{{slug}} पुराने हो गए हैं और जल्द ही काम करना बंद कर देंगे)।" }, + "exportMode": { + "title": "मोड निर्यात करें", + "description": "इस मोड को सभी नियमों के साथ एक YAML फ़ाइल में निर्यात करें ताकि दूसरों के साथ आसानी से साझा 
किया जा सके।", + "export": "मोड निर्यात करें", + "exporting": "निर्यात हो रहा है..." + }, + "importMode": { + "selectLevel": "चुनें कि इस मोड को कहाँ आयात करना है:", + "import": "आयात करें", + "importing": "आयात कर रहे हैं...", + "global": { + "label": "वैश्विक स्तर", + "description": "सभी परियोजनाओं में उपलब्ध। नियम कस्टम निर्देशों में विलय कर दिए जाएंगे।" + }, + "project": { + "label": "परियोजना स्तर", + "description": "केवल इस कार्यक्षेत्र में उपलब्ध। यदि निर्यात किए गए मोड में नियम फाइलें थीं, तो उन्हें .roo/rules-{slug}/ फ़ोल्डर में फिर से बनाया जाएगा।" + } + }, + "advanced": { + "title": "उन्नत" + }, "globalCustomInstructions": { "title": "सभी मोड्स के लिए कस्टम निर्देश", "description": "ये निर्देश सभी मोड्स पर लागू होते हैं। वे व्यवहारों का एक आधार सेट प्रदान करते हैं जिन्हें नीचे दिए गए मोड-विशिष्ट निर्देशों द्वारा बढ़ाया जा सकता है। <0>और जानें", @@ -164,5 +188,13 @@ }, "deleteMode": "मोड हटाएँ" }, - "allFiles": "सभी फाइलें" + "allFiles": "सभी फाइलें", + "deleteMode": { + "title": "मोड हटाएं", + "message": "क्या आप वाकई मोड \"{{modeName}}\" को हटाना चाहते हैं?", + "rulesFolder": "इस मोड में {{folderPath}} पर एक नियम फ़ोल्डर है जिसे भी हटा दिया जाएगा।", + "descriptionNoRules": "क्या आप वाकई इस कस्टम मोड को हटाना चाहते हैं?", + "confirm": "हटाएं", + "cancel": "रद्द करें" + } } diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index 8de5bd1a19d9..4b8132768f8e 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -44,27 +44,66 @@ "selectProviderPlaceholder": "प्रदाता चुनें", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "API कुंजी:", + "geminiApiKeyPlaceholder": "अपना जेमिनी एपीआई कुंजी दर्ज करें", "openaiCompatibleProvider": "OpenAI संगत", - "openaiCompatibleBaseUrlLabel": "आधार URL:", - "openaiCompatibleApiKeyLabel": "API कुंजी:", - "openaiCompatibleModelDimensionLabel": "एम्बेडिंग आयाम:", - "openaiCompatibleModelDimensionPlaceholder": "उदा., 1536", - "openaiCompatibleModelDimensionDescription": "आपके मॉडल के लिए एम्बेडिंग आयाम (आउटपुट साइज)। इस मान के लिए अपने प्रदाता के दस्तावेज़ीकरण की जांच करें। सामान्य मान: 384, 768, 1536, 3072।", - "openaiKeyLabel": "OpenAI कुंजी:", + "openAiKeyLabel": "OpenAI API कुंजी", + "openAiKeyPlaceholder": "अपना OpenAI API कुंजी दर्ज करें", + "openAiCompatibleBaseUrlLabel": "आधार URL", + "openAiCompatibleApiKeyLabel": "API कुंजी", + "openAiCompatibleApiKeyPlaceholder": "अपना API कुंजी दर्ज करें", + "openAiCompatibleModelDimensionLabel": "एम्बेडिंग आयाम:", + "modelDimensionLabel": "मॉडल आयाम", + "openAiCompatibleModelDimensionPlaceholder": "उदा., 1536", + "openAiCompatibleModelDimensionDescription": "आपके मॉडल के लिए एम्बेडिंग आयाम (आउटपुट साइज)। इस मान के लिए अपने प्रदाता के दस्तावेज़ीकरण की जांच करें। सामान्य मान: 384, 768, 1536, 3072।", "modelLabel": "मॉडल", "selectModelPlaceholder": "मॉडल चुनें", "ollamaUrlLabel": "Ollama URL:", "qdrantUrlLabel": "Qdrant URL", "qdrantKeyLabel": "Qdrant कुंजी:", - "startIndexingButton": "इंडेक्सिंग शुरू करें", - "clearIndexDataButton": "इंडेक्स डेटा साफ़ करें", + "startIndexingButton": "शुरू करें", + "clearIndexDataButton": "इंडेक्स साफ़ करें", "unsavedSettingsMessage": "इंडेक्सिंग प्रक्रिया शुरू करने से पहले कृपया अपनी सेटिंग्स सहेजें।", "clearDataDialog": { "title": "क्या आप सुनिश्चित हैं?", "description": "यह क्रिया पूर्ववत नहीं की जा सकती। यह आपके कोडबेस इंडेक्स डेटा को स्थायी रूप से हटा देगी।", "cancelButton": "रद्द करें", "confirmButton": 
"डेटा साफ़ करें" - } + }, + "description": "अपने प्रोजेक्ट की सिमेंटिक खोज को सक्षम करने के लिए कोडबेस इंडेक्सिंग सेटिंग्स कॉन्फ़िगर करें। <0>और जानें", + "statusTitle": "स्थिति", + "settingsTitle": "इंडेक्सिंग सेटिंग्स", + "disabledMessage": "कोडबेस इंडेक्सिंग वर्तमान में अक्षम है। इंडेक्सिंग विकल्पों को कॉन्फ़िगर करने के लिए इसे ग्लोबल सेटिंग्स में सक्षम करें।", + "embedderProviderLabel": "एम्बेडर प्रदाता", + "modelPlaceholder": "मॉडल नाम दर्ज करें", + "selectModel": "एक मॉडल चुनें", + "ollamaBaseUrlLabel": "Ollama आधार URL", + "qdrantApiKeyLabel": "Qdrant API कुंजी", + "qdrantApiKeyPlaceholder": "अपनी Qdrant API कुंजी दर्ज करें (वैकल्पिक)", + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "सेटिंग्स सहेजने में विफल", + "modelDimensions": "({{dimension}} आयाम)", + "saveSuccess": "सेटिंग्स सफलतापूर्वक सहेजी गईं", + "saving": "सहेज रहे हैं...", + "saveSettings": "सहेजें", + "indexingStatuses": { + "standby": "स्टैंडबाई", + "indexing": "इंडेक्सिंग", + "indexed": "इंडेक्स किया गया", + "error": "त्रुटि" + }, + "close": "बंद करें", + "advancedConfigLabel": "उन्नत कॉन्फ़िगरेशन", + "searchMinScoreLabel": "खोज स्कोर थ्रेसहोल्ड", + "searchMinScoreDescription": "खोज परिणामों के लिए आवश्यक न्यूनतम समानता स्कोर (0.0-1.0)। कम मान अधिक परिणाम लौटाते हैं लेकिन कम प्रासंगिक हो सकते हैं। उच्च मान कम लेकिन अधिक प्रासंगिक परिणाम लौटाते हैं।", + "searchMinScoreResetTooltip": "डिफ़ॉल्ट मान पर रीसेट करें (0.4)", + "searchMaxResultsLabel": "अधिकतम खोज परिणाम", + "searchMaxResultsDescription": "कोडबेस इंडेक्स को क्वेरी करते समय वापस करने के लिए खोज परिणामों की अधिकतम संख्या। उच्च मान अधिक संदर्भ प्रदान करते हैं लेकिन कम प्रासंगिक परिणाम शामिल कर सकते हैं।", + "resetToDefault": "डिफ़ॉल्ट पर रीसेट करें" }, "autoApprove": { "description": "Roo को अनुमोदन की आवश्यकता के बिना स्वचालित रूप से ऑपरेशन करने की अनुमति दें। इन सेटिंग्स को केवल तभी सक्षम करें जब आप AI पर पूरी तरह से भरोसा करते हों और संबंधित सुरक्षा जोखिमों को समझते हों।", @@ -110,6 +149,11 @@ "label": "उप-कार्य", "description": "अनुमोदन की आवश्यकता के बिना उप-कार्यों के निर्माण और पूर्णता की अनुमति दें" }, + "followupQuestions": { + "label": "प्रश्न", + "description": "कॉन्फ़िगर किए गए टाइमआउट के बाद अनुवर्ती प्रश्नों के लिए पहले सुझाए गए उत्तर को स्वचालित रूप से चुनें", + "timeoutLabel": "पहले उत्तर को स्वचालित रूप से चुनने से पहले प्रतीक्षा करने का समय" + }, "execute": { "label": "निष्पादित करें", "description": "अनुमोदन की आवश्यकता के बिना स्वचालित रूप से अनुमत टर्मिनल कमांड निष्पादित करें", @@ -118,6 +162,10 @@ "commandPlaceholder": "कमांड प्रीफिक्स दर्ज करें (उदा. 
'git ')", "addButton": "जोड़ें" }, + "updateTodoList": { + "label": "टूडू", + "description": "अनुमोदन की आवश्यकता के बिना स्वचालित रूप से टूडू सूची अपडेट करें" + }, "apiRequestLimit": { "title": "अधिकतम अनुरोध", "description": "कार्य जारी रखने के लिए अनुमति मांगने से पहले स्वचालित रूप से इतने API अनुरोध करें।", diff --git a/webview-ui/src/i18n/locales/id/chat.json b/webview-ui/src/i18n/locales/id/chat.json index 0568b58913b5..fc0bd5056f6f 100644 --- a/webview-ui/src/i18n/locales/id/chat.json +++ b/webview-ui/src/i18n/locales/id/chat.json @@ -207,7 +207,8 @@ "codebaseSearch": { "wantsToSearch": "Roo ingin mencari codebase untuk {{query}}:", "wantsToSearchWithPath": "Roo ingin mencari codebase untuk {{query}} di {{path}}:", - "didSearch": "Ditemukan {{count}} hasil untuk {{query}}:" + "didSearch": "Ditemukan {{count}} hasil untuk {{query}}:", + "resultTooltip": "Skor kemiripan: {{score}} (klik untuk membuka file)" }, "commandOutput": "Output Perintah", "response": "Respons", @@ -250,20 +251,22 @@ }, "announcement": { "title": "🎉 Roo Code {{version}} Dirilis", - "description": "Roo Code {{version}} menghadirkan fitur baru yang powerful dan perbaikan berdasarkan feedback kamu.", - "whatsNew": "Apa yang Baru", - "feature1": "Peluncuran Roo Marketplace - Marketplace sekarang sudah live! Temukan dan install mode serta MCP lebih mudah dari sebelumnya.", - "feature2": "Model Gemini 2.5 - Menambahkan dukungan untuk model Gemini 2.5 Pro, Flash, dan Flash Lite yang baru.", - "feature3": "Dukungan File Excel & Lainnya - Menambahkan dukungan file Excel (.xlsx) dan banyak perbaikan bug serta peningkatan!", + "description": "Roo Code {{version}} menghadirkan fitur-fitur baru yang kuat dan peningkatan signifikan untuk meningkatkan alur kerja pengembangan Anda.", + "whatsNew": "Yang Baru", + "feature1": "Berbagi Tugas 1-Klik: Bagikan tugas Anda secara instan dengan rekan kerja dan komunitas hanya dengan satu klik.", + "feature2": "Dukungan Direktori Global .roo: Muat aturan dan konfigurasi dari direktori global .roo untuk pengaturan yang konsisten di seluruh proyek.", + "feature3": "Transisi Arsitektur ke Kode yang Ditingkatkan: Transfer yang mulus dari perencanaan di mode Arsitektur ke implementasi di mode Kode.", "hideButton": "Sembunyikan pengumuman", - "detailsDiscussLinks": "Dapatkan detail lebih lanjut dan diskusi di Discord dan Reddit 🚀" + "detailsDiscussLinks": "Dapatkan detail lebih lanjut dan bergabung dalam diskusi di Discord dan Reddit 🚀" }, "reasoning": { "thinking": "Berpikir", "seconds": "{{count}}d" }, "followUpSuggest": { - "copyToInput": "Salin ke input (sama dengan shift + klik)" + "copyToInput": "Salin ke input (sama dengan shift + klik)", + "autoSelectCountdown": "Pemilihan otomatis dalam {{count}}dtk", + "countdownDisplay": "{{count}}dtk" }, "browser": { "rooWantsToUse": "Roo ingin menggunakan browser:", diff --git a/webview-ui/src/i18n/locales/id/common.json b/webview-ui/src/i18n/locales/id/common.json index d50246ced276..7c2e45a7bd3a 100644 --- a/webview-ui/src/i18n/locales/id/common.json +++ b/webview-ui/src/i18n/locales/id/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Membuat diagram mermaid...", "render_error": "Tidak Dapat Merender Diagram", + "fixing_syntax": "Memperbaiki sintaks Mermaid...", + "fix_syntax_button": "Perbaiki sintaks dengan AI", + "original_code": "Kode asli:", + "errors": { + "unknown_syntax": "Error sintaks tidak dikenal", + "fix_timeout": "Permintaan perbaikan LLM habis waktu", + "fix_failed": "Perbaikan LLM gagal", + "fix_attempts": "Gagal 
memperbaiki sintaks setelah {{attempts}} percobaan. Error terakhir: {{error}}", + "no_fix_provided": "LLM gagal memberikan perbaikan", + "fix_request_failed": "Permintaan perbaikan gagal" + }, "buttons": { "zoom": "Zoom", "zoomIn": "Perbesar", diff --git a/webview-ui/src/i18n/locales/id/prompts.json b/webview-ui/src/i18n/locales/id/prompts.json index a77a6e5376eb..4990911f3e4a 100644 --- a/webview-ui/src/i18n/locales/id/prompts.json +++ b/webview-ui/src/i18n/locales/id/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "Mode", "createNewMode": "Buat mode baru", + "importMode": "Impor mode", + "noMatchFound": "Tidak ada mode yang ditemukan", "editModesConfig": "Edit konfigurasi mode", "editGlobalModes": "Edit Mode Global", "editProjectModes": "Edit Mode Proyek (.roomodes)", @@ -50,6 +52,28 @@ "description": "Tambahkan panduan perilaku khusus untuk mode {{modeName}}.", "loadFromFile": "Instruksi kustom khusus untuk mode {{mode}} juga dapat dimuat dari folder .roo/rules-{{slug}}/ di workspace Anda (.roomodes-{{slug}} dan .clinerules-{{slug}} sudah deprecated dan akan segera berhenti bekerja)." }, + "exportMode": { + "title": "Ekspor Mode", + "description": "Ekspor mode ini ke file YAML dengan semua aturan yang disertakan untuk berbagi dengan mudah dengan orang lain.", + "export": "Ekspor Mode", + "exporting": "Mengekspor..." + }, + "importMode": { + "selectLevel": "Pilih di mana akan mengimpor mode ini:", + "import": "Impor", + "importing": "Mengimpor...", + "global": { + "label": "Tingkat Global", + "description": "Tersedia di semua proyek. Aturan akan digabungkan ke dalam instruksi kustom." + }, + "project": { + "label": "Tingkat Proyek", + "description": "Hanya tersedia di ruang kerja ini. Jika mode yang diekspor berisi file aturan, file tersebut akan dibuat ulang di folder .roo/rules-{slug}/." + } + }, + "advanced": { + "title": "Lanjutan" + }, "globalCustomInstructions": { "title": "Instruksi Kustom untuk Semua Mode", "description": "Instruksi ini berlaku untuk semua mode. Mereka menyediakan set dasar perilaku yang dapat ditingkatkan oleh instruksi khusus mode di bawah. <0>Pelajari lebih lanjut", @@ -164,5 +188,13 @@ }, "deleteMode": "Hapus mode" }, - "allFiles": "semua file" + "allFiles": "semua file", + "deleteMode": { + "title": "Hapus Mode", + "message": "Anda yakin ingin menghapus mode \"{{modeName}}\"?", + "rulesFolder": "Mode ini memiliki folder aturan di {{folderPath}} yang juga akan dihapus.", + "descriptionNoRules": "Apakah Anda yakin ingin menghapus mode kustom ini?", + "confirm": "Hapus", + "cancel": "Batal" + } } diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index 8def303c05df..ac1454ab7b95 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -44,27 +44,66 @@ "selectProviderPlaceholder": "Pilih provider", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "API Key:", + "geminiApiKeyPlaceholder": "Masukkan kunci API Gemini Anda", "openaiCompatibleProvider": "OpenAI Compatible", - "openaiKeyLabel": "OpenAI Key:", - "openaiCompatibleBaseUrlLabel": "Base URL:", - "openaiCompatibleApiKeyLabel": "API Key:", - "openaiCompatibleModelDimensionLabel": "Dimensi Embedding:", - "openaiCompatibleModelDimensionPlaceholder": "misalnya, 1536", - "openaiCompatibleModelDimensionDescription": "Dimensi embedding (ukuran output) untuk model kamu. Periksa dokumentasi provider kamu untuk nilai ini. 
Nilai umum: 384, 768, 1536, 3072.", + "openAiKeyLabel": "OpenAI API Key", + "openAiKeyPlaceholder": "Masukkan kunci API OpenAI kamu", + "openAiCompatibleBaseUrlLabel": "Base URL", + "openAiCompatibleApiKeyLabel": "API Key", + "openAiCompatibleApiKeyPlaceholder": "Masukkan kunci API kamu", + "openAiCompatibleModelDimensionLabel": "Dimensi Embedding:", + "modelDimensionLabel": "Dimensi Model", + "openAiCompatibleModelDimensionPlaceholder": "misalnya, 1536", + "openAiCompatibleModelDimensionDescription": "Dimensi embedding (ukuran output) untuk model kamu. Periksa dokumentasi provider kamu untuk nilai ini. Nilai umum: 384, 768, 1536, 3072.", "modelLabel": "Model", "selectModelPlaceholder": "Pilih model", "ollamaUrlLabel": "Ollama URL:", "qdrantUrlLabel": "Qdrant URL", "qdrantKeyLabel": "Qdrant Key:", - "startIndexingButton": "Mulai Pengindeksan", - "clearIndexDataButton": "Hapus Data Indeks", + "startIndexingButton": "Mulai", + "clearIndexDataButton": "Hapus Indeks", "unsavedSettingsMessage": "Silakan simpan pengaturan kamu sebelum memulai proses pengindeksan.", "clearDataDialog": { "title": "Apakah kamu yakin?", "description": "Tindakan ini tidak dapat dibatalkan. Ini akan menghapus data indeks codebase kamu secara permanen.", "cancelButton": "Batal", "confirmButton": "Hapus Data" - } + }, + "description": "Konfigurasi pengaturan pengindeksan codebase untuk mengaktifkan pencarian semantik proyek kamu. <0>Pelajari lebih lanjut", + "statusTitle": "Status", + "settingsTitle": "Pengaturan Pengindeksan", + "disabledMessage": "Pengindeksan codebase saat ini dinonaktifkan. Aktifkan di pengaturan global untuk mengkonfigurasi opsi pengindeksan.", + "embedderProviderLabel": "Provider Embedder", + "modelPlaceholder": "Masukkan nama model", + "selectModel": "Pilih model", + "ollamaBaseUrlLabel": "URL Dasar Ollama", + "qdrantApiKeyLabel": "Kunci API Qdrant", + "qdrantApiKeyPlaceholder": "Masukkan kunci API Qdrant kamu (opsional)", + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "Gagal menyimpan pengaturan", + "modelDimensions": "({{dimension}} dimensi)", + "saveSuccess": "Pengaturan berhasil disimpan", + "saving": "Menyimpan...", + "saveSettings": "Simpan", + "indexingStatuses": { + "standby": "Siaga", + "indexing": "Mengindeks", + "indexed": "Terindeks", + "error": "Error" + }, + "close": "Tutup", + "advancedConfigLabel": "Konfigurasi Lanjutan", + "searchMinScoreLabel": "Ambang Batas Skor Pencarian", + "searchMinScoreDescription": "Skor kesamaan minimum (0.0-1.0) yang diperlukan untuk hasil pencarian. Nilai yang lebih rendah mengembalikan lebih banyak hasil tetapi mungkin kurang relevan. Nilai yang lebih tinggi mengembalikan lebih sedikit hasil tetapi lebih relevan.", + "searchMinScoreResetTooltip": "Reset ke nilai default (0.4)", + "searchMaxResultsLabel": "Hasil Pencarian Maksimum", + "searchMaxResultsDescription": "Jumlah maksimum hasil pencarian yang dikembalikan saat melakukan query indeks basis kode. Nilai yang lebih tinggi memberikan lebih banyak konteks tetapi mungkin menyertakan hasil yang kurang relevan.", + "resetToDefault": "Reset ke default" }, "autoApprove": { "description": "Izinkan Roo untuk secara otomatis melakukan operasi tanpa memerlukan persetujuan. 
Aktifkan pengaturan ini hanya jika kamu sepenuhnya mempercayai AI dan memahami risiko keamanan yang terkait.", @@ -110,6 +149,11 @@ "label": "Subtugas", "description": "Izinkan pembuatan dan penyelesaian subtugas tanpa memerlukan persetujuan" }, + "followupQuestions": { + "label": "Pertanyaan", + "description": "Secara otomatis memilih jawaban pertama yang disarankan untuk pertanyaan lanjutan setelah batas waktu yang dikonfigurasi", + "timeoutLabel": "Waktu tunggu sebelum otomatis memilih jawaban pertama" + }, "execute": { "label": "Eksekusi", "description": "Secara otomatis mengeksekusi perintah terminal yang diizinkan tanpa memerlukan persetujuan", @@ -122,6 +166,10 @@ "label": "Tampilkan menu auto-approve di tampilan chat", "description": "Ketika diaktifkan, menu auto-approve akan ditampilkan di bagian bawah tampilan chat, memungkinkan akses cepat ke pengaturan auto-approve" }, + "updateTodoList": { + "label": "Todo", + "description": "Daftar tugas diperbarui secara otomatis tanpa persetujuan" + }, "apiRequestLimit": { "title": "Permintaan Maks", "description": "Secara otomatis membuat sejumlah permintaan API ini sebelum meminta persetujuan untuk melanjutkan tugas.", diff --git a/webview-ui/src/i18n/locales/it/chat.json b/webview-ui/src/i18n/locales/it/chat.json index 1f7d6b463fa6..f49a25dfa602 100644 --- a/webview-ui/src/i18n/locales/it/chat.json +++ b/webview-ui/src/i18n/locales/it/chat.json @@ -234,15 +234,17 @@ "tokens": "token" }, "followUpSuggest": { - "copyToInput": "Copia nell'input (o Shift + clic)" + "copyToInput": "Copia nell'input (o Shift + clic)", + "autoSelectCountdown": "Selezione automatica in {{count}}s", + "countdownDisplay": "{{count}}s" }, "announcement": { "title": "🎉 Rilasciato Roo Code {{version}}", - "description": "Roo Code {{version}} introduce importanti nuove funzionalità e miglioramenti basati sui tuoi feedback.", + "description": "Roo Code {{version}} porta nuove potenti funzionalità e miglioramenti significativi per potenziare il tuo flusso di lavoro di sviluppo.", "whatsNew": "Novità", - "feature1": "Lancio del Marketplace Roo: Il marketplace è ora attivo! 
Scopri e installa modalità e MCP più facilmente che mai.", - "feature2": "Modelli Gemini 2.5: Aggiunto supporto per i nuovi modelli Gemini 2.5 Pro, Flash e Flash Lite.", - "feature3": "Supporto File Excel e altro: Aggiunto supporto per file Excel (.xlsx) e numerose correzioni di bug e miglioramenti!", + "feature1": "Condivisione Task con 1 Clic: Condividi istantaneamente i tuoi task con colleghi e la community con un solo clic.", + "feature2": "Supporto Directory Globale .roo: Carica regole e configurazioni da una directory globale .roo per impostazioni coerenti tra progetti.", + "feature3": "Transizioni Migliorate da Architetto a Codice: Trasferimenti fluidi dalla pianificazione in modalità Architetto all'implementazione in modalità Codice.", "hideButton": "Nascondi annuncio", "detailsDiscussLinks": "Ottieni maggiori dettagli e partecipa alle discussioni su Discord e Reddit 🚀" }, @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo vuole cercare nella base di codice {{query}}:", "wantsToSearchWithPath": "Roo vuole cercare nella base di codice {{query}} in {{path}}:", - "didSearch": "Trovato {{count}} risultato/i per {{query}}:" + "didSearch": "Trovato {{count}} risultato/i per {{query}}:", + "resultTooltip": "Punteggio di somiglianza: {{score}} (clicca per aprire il file)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/it/common.json b/webview-ui/src/i18n/locales/it/common.json index 9d5426aa0eb6..8e8a8a9e72e3 100644 --- a/webview-ui/src/i18n/locales/it/common.json +++ b/webview-ui/src/i18n/locales/it/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Generazione del diagramma mermaid...", "render_error": "Impossibile renderizzare il diagramma", + "fixing_syntax": "Correzione della sintassi Mermaid...", + "fix_syntax_button": "Correggi sintassi con AI", + "original_code": "Codice originale:", + "errors": { + "unknown_syntax": "Errore di sintassi sconosciuto", + "fix_timeout": "Richiesta di correzione LLM scaduta", + "fix_failed": "Correzione LLM fallita", + "fix_attempts": "Impossibile correggere la sintassi dopo {{attempts}} tentativi. Ultimo errore: {{error}}", + "no_fix_provided": "LLM non è riuscito a fornire una correzione", + "fix_request_failed": "Richiesta di correzione fallita" + }, "buttons": { "zoom": "Zoom", "zoomIn": "Ingrandisci", diff --git a/webview-ui/src/i18n/locales/it/prompts.json b/webview-ui/src/i18n/locales/it/prompts.json index c556a18aacd3..0cb6236ff520 100644 --- a/webview-ui/src/i18n/locales/it/prompts.json +++ b/webview-ui/src/i18n/locales/it/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "Modalità", "createNewMode": "Crea nuova modalità", + "importMode": "Importa modalità", + "noMatchFound": "Nessuna modalità trovata", "editModesConfig": "Modifica configurazione modalità", "editGlobalModes": "Modifica modalità globali", "editProjectModes": "Modifica modalità di progetto (.roomodes)", @@ -50,6 +52,28 @@ "description": "Aggiungi linee guida comportamentali specifiche per la modalità {{modeName}}.", "loadFromFile": "Le istruzioni personalizzate specifiche per la modalità {{mode}} possono essere caricate anche dalla cartella .roo/rules-{{slug}}/ nel tuo spazio di lavoro (.roorules-{{slug}} e .clinerules-{{slug}} sono obsoleti e smetteranno di funzionare presto)." }, + "exportMode": { + "title": "Esporta modalità", + "description": "Esporta questa modalità in un file YAML con tutte le regole incluse per una facile condivisione con altri.", + "export": "Esporta modalità", + "exporting": "Esportazione..." 
+ }, + "importMode": { + "selectLevel": "Scegli dove importare questa modalità:", + "import": "Importa", + "importing": "Importazione...", + "global": { + "label": "Livello globale", + "description": "Disponibile in tutti i progetti. Le regole verranno unite nelle istruzioni personalizzate." + }, + "project": { + "label": "Livello di progetto", + "description": "Disponibile solo in questo spazio di lavoro. Se la modalità esportata conteneva file di regole, verranno ricreati nella cartella .roo/rules-{slug}/." + } + }, + "advanced": { + "title": "Avanzato" + }, "globalCustomInstructions": { "title": "Istruzioni personalizzate per tutte le modalità", "description": "Queste istruzioni si applicano a tutte le modalità. Forniscono un insieme base di comportamenti che possono essere migliorati dalle istruzioni specifiche per modalità qui sotto. <0>Scopri di più", @@ -164,5 +188,13 @@ }, "deleteMode": "Elimina modalità" }, - "allFiles": "tutti i file" + "allFiles": "tutti i file", + "deleteMode": { + "title": "Elimina modalità", + "message": "Sei sicuro di voler eliminare la modalità \"{{modeName}}\"?", + "rulesFolder": "Questa modalità ha una cartella di regole in {{folderPath}} che verrà eliminata.", + "descriptionNoRules": "Sei sicuro di voler eliminare questa modalità personalizzata?", + "confirm": "Elimina", + "cancel": "Annulla" + } } diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index 572f99cbb010..7627486863aa 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -44,27 +44,66 @@ "selectProviderPlaceholder": "Seleziona fornitore", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "Chiave API:", + "geminiApiKeyPlaceholder": "Inserisci la tua chiave API Gemini", "openaiCompatibleProvider": "Compatibile con OpenAI", - "openaiCompatibleBaseUrlLabel": "URL di base:", - "openaiCompatibleApiKeyLabel": "Chiave API:", - "openaiCompatibleModelDimensionLabel": "Dimensione Embedding:", - "openaiCompatibleModelDimensionPlaceholder": "es., 1536", - "openaiCompatibleModelDimensionDescription": "La dimensione dell'embedding (dimensione di output) per il tuo modello. Controlla la documentazione del tuo provider per questo valore. Valori comuni: 384, 768, 1536, 3072.", - "openaiKeyLabel": "Chiave OpenAI:", + "openAiKeyLabel": "Chiave API OpenAI", + "openAiKeyPlaceholder": "Inserisci la tua chiave API OpenAI", + "openAiCompatibleBaseUrlLabel": "URL di base", + "openAiCompatibleApiKeyLabel": "Chiave API", + "openAiCompatibleApiKeyPlaceholder": "Inserisci la tua chiave API", + "openAiCompatibleModelDimensionLabel": "Dimensione Embedding:", + "modelDimensionLabel": "Dimensione del modello", + "openAiCompatibleModelDimensionPlaceholder": "es., 1536", + "openAiCompatibleModelDimensionDescription": "La dimensione dell'embedding (dimensione di output) per il tuo modello. Controlla la documentazione del tuo provider per questo valore. 
Valori comuni: 384, 768, 1536, 3072.", "modelLabel": "Modello", "selectModelPlaceholder": "Seleziona modello", "ollamaUrlLabel": "URL Ollama:", "qdrantUrlLabel": "URL Qdrant", "qdrantKeyLabel": "Chiave Qdrant:", - "startIndexingButton": "Avvia indicizzazione", - "clearIndexDataButton": "Cancella dati indice", + "startIndexingButton": "Avvia", + "clearIndexDataButton": "Cancella indice", "unsavedSettingsMessage": "Per favore salva le tue impostazioni prima di avviare il processo di indicizzazione.", "clearDataDialog": { "title": "Sei sicuro?", "description": "Questa azione non può essere annullata. Eliminerà permanentemente i dati di indice del tuo codice.", "cancelButton": "Annulla", "confirmButton": "Cancella dati" - } + }, + "description": "Configura le impostazioni di indicizzazione del codebase per abilitare la ricerca semantica del tuo progetto. <0>Scopri di più", + "statusTitle": "Stato", + "settingsTitle": "Impostazioni di indicizzazione", + "disabledMessage": "L'indicizzazione del codebase è attualmente disabilitata. Abilitala nelle impostazioni globali per configurare le opzioni di indicizzazione.", + "embedderProviderLabel": "Provider Embedder", + "modelPlaceholder": "Inserisci il nome del modello", + "selectModel": "Seleziona un modello", + "ollamaBaseUrlLabel": "URL base Ollama", + "qdrantApiKeyLabel": "Chiave API Qdrant", + "qdrantApiKeyPlaceholder": "Inserisci la tua chiave API Qdrant (opzionale)", + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "Impossibile salvare le impostazioni", + "modelDimensions": "({{dimension}} dimensioni)", + "saveSuccess": "Impostazioni salvate con successo", + "saving": "Salvataggio...", + "saveSettings": "Salva", + "indexingStatuses": { + "standby": "In attesa", + "indexing": "Indicizzazione", + "indexed": "Indicizzato", + "error": "Errore" + }, + "close": "Chiudi", + "advancedConfigLabel": "Configurazione avanzata", + "searchMinScoreLabel": "Soglia punteggio di ricerca", + "searchMinScoreDescription": "Punteggio minimo di somiglianza (0.0-1.0) richiesto per i risultati della ricerca. Valori più bassi restituiscono più risultati ma potrebbero essere meno pertinenti. Valori più alti restituiscono meno risultati ma più pertinenti.", + "searchMinScoreResetTooltip": "Ripristina al valore predefinito (0.4)", + "searchMaxResultsLabel": "Risultati di ricerca massimi", + "searchMaxResultsDescription": "Numero massimo di risultati di ricerca da restituire quando si interroga l'indice del codice. Valori più alti forniscono più contesto ma possono includere risultati meno pertinenti.", + "resetToDefault": "Ripristina al valore predefinito" }, "autoApprove": { "description": "Permetti a Roo di eseguire automaticamente operazioni senza richiedere approvazione. 
Abilita queste impostazioni solo se ti fidi completamente dell'IA e comprendi i rischi di sicurezza associati.", @@ -110,6 +149,11 @@ "label": "Sottoattività", "description": "Consenti la creazione e il completamento di attività secondarie senza richiedere approvazione" }, + "followupQuestions": { + "label": "Domanda", + "description": "Seleziona automaticamente la prima risposta suggerita per le domande di follow-up dopo il timeout configurato", + "timeoutLabel": "Tempo di attesa prima di selezionare automaticamente la prima risposta" + }, "execute": { "label": "Esegui", "description": "Esegui automaticamente i comandi del terminale consentiti senza richiedere approvazione", @@ -118,6 +162,10 @@ "commandPlaceholder": "Inserisci prefisso comando (es. 'git ')", "addButton": "Aggiungi" }, + "updateTodoList": { + "label": "Todo", + "description": "La lista delle cose da fare viene aggiornata automaticamente senza approvazione" + }, "apiRequestLimit": { "title": "Richieste massime", "description": "Esegui automaticamente questo numero di richieste API prima di chiedere l'approvazione per continuare con l'attività.", diff --git a/webview-ui/src/i18n/locales/ja/chat.json b/webview-ui/src/i18n/locales/ja/chat.json index 255acb717f85..cb5ebcdafd05 100644 --- a/webview-ui/src/i18n/locales/ja/chat.json +++ b/webview-ui/src/i18n/locales/ja/chat.json @@ -234,15 +234,17 @@ "tokens": "トークン" }, "followUpSuggest": { - "copyToInput": "入力欄にコピー(またはShift + クリック)" + "copyToInput": "入力欄にコピー(またはShift + クリック)", + "autoSelectCountdown": "{{count}}秒後に自動選択します", + "countdownDisplay": "{{count}}秒" }, "announcement": { "title": "🎉 Roo Code {{version}} リリース", - "description": "Roo Code {{version}}は、あなたのフィードバックに基づく重要な新機能と改善をもたらします。", + "description": "Roo Code {{version}}は、開発ワークフローを向上させる強力な新機能と重要な改善をもたらします。", "whatsNew": "新機能", - "feature1": "Roo マーケットプレイス開始: マーケットプレイスが開始されました!これまで以上に簡単にモードとMCPを発見・インストール。", - "feature2": "Gemini 2.5 モデル: 新しいGemini 2.5 Pro、Flash、Flash Liteモデルのサポートを追加。", - "feature3": "Excelファイルサポートなど: Excel (.xlsx) ファイルサポートと多数のバグ修正・改善を追加!", + "feature1": "1クリックタスク共有: ワンクリックで同僚やコミュニティとタスクを瞬時に共有できます。", + "feature2": "グローバル.rooディレクトリサポート: グローバル.rooディレクトリからルールと設定を読み込み、プロジェクト間で一貫した設定を実現。", + "feature3": "改善されたアーキテクトからコードへの移行: アーキテクトモードでの計画からコードモードでの実装へのシームレスな引き継ぎ。", "hideButton": "通知を非表示", "detailsDiscussLinks": "詳細はDiscordRedditでご確認・ディスカッションください 🚀" }, @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Rooはコードベースで {{query}} を検索したい:", "wantsToSearchWithPath": "Rooは {{path}} 内のコードベースで {{query}} を検索したい:", - "didSearch": "{{query}} の検索結果: {{count}} 件" + "didSearch": "{{query}} の検索結果: {{count}} 件", + "resultTooltip": "類似度スコア: {{score}} (クリックしてファイルを開く)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/ja/common.json b/webview-ui/src/i18n/locales/ja/common.json index 975ea6783444..bdf5798080e1 100644 --- a/webview-ui/src/i18n/locales/ja/common.json +++ b/webview-ui/src/i18n/locales/ja/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Mermaidダイアグラムを生成中...", "render_error": "ダイアグラムをレンダリングできません", + "fixing_syntax": "Mermaid構文を修正中...", + "fix_syntax_button": "AIで構文を修正", + "original_code": "元のコード:", + "errors": { + "unknown_syntax": "不明な構文エラー", + "fix_timeout": "AI修正リクエストがタイムアウトしました", + "fix_failed": "AI修正に失敗しました", + "fix_attempts": "{{attempts}}回の試行後に構文の修正に失敗しました。最後のエラー:{{error}}", + "no_fix_provided": "AIが修正を提供できませんでした", + "fix_request_failed": "修正リクエストに失敗しました" + }, "buttons": { "zoom": "ズーム", "zoomIn": "拡大", diff --git a/webview-ui/src/i18n/locales/ja/prompts.json 
b/webview-ui/src/i18n/locales/ja/prompts.json index 8049a82d31eb..f9f9042924ba 100644 --- a/webview-ui/src/i18n/locales/ja/prompts.json +++ b/webview-ui/src/i18n/locales/ja/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "モード", "createNewMode": "新しいモードを作成", + "importMode": "モードをインポート", + "noMatchFound": "モードが見つかりません", "editModesConfig": "モード設定を編集", "editGlobalModes": "グローバルモードを編集", "editProjectModes": "プロジェクトモードを編集 (.roomodes)", @@ -50,6 +52,28 @@ "description": "{{modeName}}モードに特化した行動ガイドラインを追加します。", "loadFromFile": "{{mode}}モード固有のカスタム指示は、ワークスペースの.roo/rules-{{slug}}/フォルダからも読み込めます(.roorules-{{slug}}と.clinerules-{{slug}}は非推奨であり、まもなく機能しなくなります)。" }, + "exportMode": { + "title": "モードをエクスポート", + "description": "このモードをすべてのルールを含むYAMLファイルにエクスポートして、他のユーザーと簡単に共有できます。", + "export": "モードをエクスポート", + "exporting": "エクスポート中..." + }, + "importMode": { + "selectLevel": "このモードをインポートする場所を選択してください:", + "import": "インポート", + "importing": "インポート中...", + "global": { + "label": "グローバルレベル", + "description": "すべてのプロジェクトで利用可能です。ルールはカスタム指示にマージされます。" + }, + "project": { + "label": "プロジェクトレベル", + "description": "このワークスペースでのみ利用可能です。エクスポートされたモードにルールファイルが含まれていた場合、それらは.roo/rules-{slug}/フォルダに再作成されます。" + } + }, + "advanced": { + "title": "詳細設定" + }, "globalCustomInstructions": { "title": "すべてのモードのカスタム指示", "description": "これらの指示はすべてのモードに適用されます。モード固有の指示で強化できる基本的な動作セットを提供します。<0>詳細はこちら", @@ -164,5 +188,13 @@ }, "deleteMode": "モードを削除" }, - "allFiles": "すべてのファイル" + "allFiles": "すべてのファイル", + "deleteMode": { + "title": "モードを削除", + "message": "モード「{{modeName}}」を削除してもよろしいですか?", + "rulesFolder": "このモードには{{folderPath}}にルールフォルダがあり、それも削除されます。", + "descriptionNoRules": "このカスタムモードを削除してもよろしいですか?", + "confirm": "削除", + "cancel": "キャンセル" + } } diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index c6a681e549cc..f779749cd4a7 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -44,27 +44,66 @@ "selectProviderPlaceholder": "プロバイダーを選択", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "APIキー:", + "geminiApiKeyPlaceholder": "Gemini APIキーを入力してください", "openaiCompatibleProvider": "OpenAI互換", - "openaiCompatibleBaseUrlLabel": "ベースURL:", - "openaiCompatibleApiKeyLabel": "APIキー:", - "openaiCompatibleModelDimensionLabel": "埋め込みディメンション:", - "openaiCompatibleModelDimensionPlaceholder": "例:1536", - "openaiCompatibleModelDimensionDescription": "モデルの埋め込みディメンション(出力サイズ)。この値についてはプロバイダーのドキュメントを確認してください。一般的な値:384、768、1536、3072。", - "openaiKeyLabel": "OpenAIキー:", + "openAiKeyLabel": "OpenAI APIキー", + "openAiKeyPlaceholder": "OpenAI APIキーを入力してください", + "openAiCompatibleBaseUrlLabel": "ベースURL", + "openAiCompatibleApiKeyLabel": "APIキー", + "openAiCompatibleApiKeyPlaceholder": "APIキーを入力してください", + "openAiCompatibleModelDimensionLabel": "埋め込みディメンション:", + "modelDimensionLabel": "モデルディメンション", + "openAiCompatibleModelDimensionPlaceholder": "例:1536", + "openAiCompatibleModelDimensionDescription": "モデルの埋め込みディメンション(出力サイズ)。この値についてはプロバイダーのドキュメントを確認してください。一般的な値:384、768、1536、3072。", "modelLabel": "モデル", "selectModelPlaceholder": "モデルを選択", "ollamaUrlLabel": "Ollama URL:", "qdrantUrlLabel": "Qdrant URL", "qdrantKeyLabel": "Qdrantキー:", - "startIndexingButton": "インデックス作成を開始", - "clearIndexDataButton": "インデックスデータをクリア", + "startIndexingButton": "開始", + "clearIndexDataButton": "インデックスクリア", "unsavedSettingsMessage": "インデックス作成プロセスを開始する前に設定を保存してください。", "clearDataDialog": { "title": "本当によろしいですか?", "description": 
"この操作は元に戻せません。コードベースのインデックスデータが完全に削除されます。", "cancelButton": "キャンセル", "confirmButton": "データをクリア" - } + }, + "description": "プロジェクトのセマンティック検索を有効にするためのコードベースインデックス設定を構成します。<0>詳細はこちら", + "statusTitle": "ステータス", + "settingsTitle": "インデックス設定", + "disabledMessage": "コードベースインデックスは現在無効になっています。グローバル設定で有効にしてインデックスオプションを構成してください。", + "embedderProviderLabel": "エンベッダープロバイダー", + "modelPlaceholder": "モデル名を入力", + "selectModel": "モデルを選択", + "ollamaBaseUrlLabel": "Ollama ベースURL", + "qdrantApiKeyLabel": "Qdrant APIキー", + "qdrantApiKeyPlaceholder": "Qdrant APIキーを入力(オプション)", + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "設定の保存に失敗しました", + "modelDimensions": "({{dimension}} 次元)", + "saveSuccess": "設定が正常に保存されました", + "saving": "保存中...", + "saveSettings": "保存", + "indexingStatuses": { + "standby": "スタンバイ", + "indexing": "インデックス中", + "indexed": "インデックス済み", + "error": "エラー" + }, + "close": "閉じる", + "advancedConfigLabel": "詳細設定", + "searchMinScoreLabel": "検索スコアのしきい値", + "searchMinScoreDescription": "検索結果に必要な最小類似度スコア(0.0-1.0)。値を低くするとより多くの結果が返されますが、関連性が低くなる可能性があります。値を高くすると返される結果は少なくなりますが、より関連性が高くなります。", + "searchMinScoreResetTooltip": "デフォルト値(0.4)にリセット", + "searchMaxResultsLabel": "最大検索結果数", + "searchMaxResultsDescription": "コードベースインデックスをクエリする際に返される検索結果の最大数。値を高くするとより多くのコンテキストが提供されますが、関連性の低い結果が含まれる可能性があります。", + "resetToDefault": "デフォルトにリセット" }, "autoApprove": { "description": "Rooが承認なしで自動的に操作を実行できるようにします。AIを完全に信頼し、関連するセキュリティリスクを理解している場合にのみ、これらの設定を有効にしてください。", @@ -110,6 +149,11 @@ "label": "サブタスク", "description": "承認なしでサブタスクの作成と完了を許可" }, + "followupQuestions": { + "label": "質問", + "description": "設定された時間が経過すると、フォローアップ質問の最初の提案回答を自動的に選択します", + "timeoutLabel": "最初の回答を自動選択するまでの待機時間" + }, "execute": { "label": "実行", "description": "承認なしで自動的に許可されたターミナルコマンドを実行", @@ -118,6 +162,10 @@ "commandPlaceholder": "コマンドプレフィックスを入力(例:'git ')", "addButton": "追加" }, + "updateTodoList": { + "label": "Todo", + "description": "承認なしで自動的にToDoリストを更新" + }, "apiRequestLimit": { "title": "最大リクエスト数", "description": "タスクを続行するための承認を求める前に、自動的にこの数のAPIリクエストを行います。", diff --git a/webview-ui/src/i18n/locales/ko/chat.json b/webview-ui/src/i18n/locales/ko/chat.json index b83c40057434..1f86dc8cf4bf 100644 --- a/webview-ui/src/i18n/locales/ko/chat.json +++ b/webview-ui/src/i18n/locales/ko/chat.json @@ -234,17 +234,19 @@ "tokens": "토큰" }, "followUpSuggest": { - "copyToInput": "입력창에 복사 (또는 Shift + 클릭)" + "copyToInput": "입력창에 복사 (또는 Shift + 클릭)", + "autoSelectCountdown": "{{count}}초 후 자동 선택", + "countdownDisplay": "{{count}}초" }, "announcement": { "title": "🎉 Roo Code {{version}} 출시", - "description": "Roo Code {{version}}은 사용자 피드백을 기반으로 중요한 새로운 기능과 개선사항을 제공합니다.", + "description": "Roo Code {{version}}은 개발 워크플로우를 향상시키는 강력한 새 기능과 중요한 개선사항을 제공합니다.", "whatsNew": "새로운 기능", - "feature1": "Roo 마켓플레이스 출시: 마켓플레이스가 이제 라이브입니다! 
그 어느 때보다 쉽게 모드와 MCP를 발견하고 설치하세요.", - "feature2": "Gemini 2.5 모델: 새로운 Gemini 2.5 Pro, Flash, Flash Lite 모델 지원을 추가했습니다.", - "feature3": "Excel 파일 지원 및 기타: Excel (.xlsx) 파일 지원과 수많은 버그 수정 및 개선사항 추가!", + "feature1": "원클릭 작업 공유: 한 번의 클릭으로 동료 및 커뮤니티와 작업을 즉시 공유하세요.", + "feature2": "글로벌 .roo 디렉토리 지원: 글로벌 .roo 디렉토리에서 규칙과 구성을 로드하여 프로젝트 간 일관된 설정을 유지하세요.", + "feature3": "개선된 아키텍트에서 코드로의 전환: 아키텍트 모드에서의 계획부터 코드 모드에서의 구현까지 원활한 인수인계.", "hideButton": "공지 숨기기", - "detailsDiscussLinks": "DiscordReddit에서 더 자세한 정보를 확인하고 논의하세요 🚀" + "detailsDiscussLinks": "DiscordReddit에서 자세한 내용을 확인하고 토론에 참여하세요 🚀" }, "browser": { "rooWantsToUse": "Roo가 브라우저를 사용하고 싶어합니다:", @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo가 코드베이스에서 {{query}}을(를) 검색하고 싶어합니다:", "wantsToSearchWithPath": "Roo가 {{path}}에서 {{query}}을(를) 검색하고 싶어합니다:", - "didSearch": "{{query}}에 대한 검색 결과 {{count}}개 찾음:" + "didSearch": "{{query}}에 대한 검색 결과 {{count}}개 찾음:", + "resultTooltip": "유사도 점수: {{score}} (클릭하여 파일 열기)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/ko/common.json b/webview-ui/src/i18n/locales/ko/common.json index 276f2cb20b5c..4581982b2f26 100644 --- a/webview-ui/src/i18n/locales/ko/common.json +++ b/webview-ui/src/i18n/locales/ko/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "머메이드 다이어그램 생성 중...", "render_error": "다이어그램을 렌더링할 수 없음", + "fixing_syntax": "머메이드 구문 수정 중...", + "fix_syntax_button": "AI로 구문 수정하기", + "original_code": "원본 코드:", + "errors": { + "unknown_syntax": "알 수 없는 구문 오류", + "fix_timeout": "LLM 수정 요청 시간 초과", + "fix_failed": "LLM 수정 실패", + "fix_attempts": "{{attempts}}번의 시도 후 구문 수정 실패. 마지막 오류: {{error}}", + "no_fix_provided": "LLM이 수정을 제공하지 못함", + "fix_request_failed": "수정 요청 실패" + }, "buttons": { "zoom": "줌", "zoomIn": "확대", diff --git a/webview-ui/src/i18n/locales/ko/prompts.json b/webview-ui/src/i18n/locales/ko/prompts.json index 990ee67f03b1..2b7d25d620c1 100644 --- a/webview-ui/src/i18n/locales/ko/prompts.json +++ b/webview-ui/src/i18n/locales/ko/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "모드", "createNewMode": "새 모드 만들기", + "importMode": "모드 가져오기", + "noMatchFound": "모드를 찾을 수 없습니다", "editModesConfig": "모드 구성 편집", "editGlobalModes": "전역 모드 편집", "editProjectModes": "프로젝트 모드 편집 (.roomodes)", @@ -50,6 +52,28 @@ "description": "{{modeName}} 모드에 대한 특정 행동 지침을 추가하세요.", "loadFromFile": "{{mode}} 모드에 대한 사용자 지정 지침은 작업 공간의 .roo/rules-{{slug}}/ 폴더에서도 로드할 수 있습니다(.roorules-{{slug}}와 .clinerules-{{slug}}는 더 이상 사용되지 않으며 곧 작동을 중단합니다)." }, + "exportMode": { + "title": "모드 내보내기", + "description": "이 모드를 모든 규칙이 포함된 YAML 파일로 내보내어 다른 사람들과 쉽게 공유할 수 있습니다.", + "export": "모드 내보내기", + "exporting": "내보내는 중..." + }, + "importMode": { + "selectLevel": "이 모드를 가져올 위치를 선택하세요:", + "import": "가져오기", + "importing": "가져오는 중...", + "global": { + "label": "전역 수준", + "description": "모든 프로젝트에서 사용 가능합니다. 규칙은 사용자 지정 지침에 병합됩니다." + }, + "project": { + "label": "프로젝트 수준", + "description": "이 작업 공간에서만 사용할 수 있습니다. 내보낸 모드에 규칙 파일이 포함된 경우 .roo/rules-{slug}/ 폴더에 다시 생성됩니다." + } + }, + "advanced": { + "title": "고급" + }, "globalCustomInstructions": { "title": "모든 모드에 대한 사용자 지정 지침", "description": "이 지침은 모든 모드에 적용됩니다. 아래의 모드별 지침으로 향상될 수 있는 기본 동작 세트를 제공합니다. 
<0>더 알아보기", @@ -164,5 +188,13 @@ }, "deleteMode": "모드 삭제" }, - "allFiles": "모든 파일" + "allFiles": "모든 파일", + "deleteMode": { + "title": "모드 삭제", + "message": "\"{{modeName}}\" 모드를 삭제하시겠습니까?", + "rulesFolder": "이 모드에는 {{folderPath}}에 규칙 폴더가 있으며 함께 삭제됩니다.", + "descriptionNoRules": "이 사용자 정의 모드를 삭제하시겠습니까?", + "confirm": "삭제", + "cancel": "취소" + } } diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 6ae68428bddf..2ea16f783438 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -44,27 +44,66 @@ "selectProviderPlaceholder": "제공자 선택", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "API 키:", + "geminiApiKeyPlaceholder": "Gemini API 키를 입력하세요", "openaiCompatibleProvider": "OpenAI 호환", - "openaiCompatibleBaseUrlLabel": "기본 URL:", - "openaiCompatibleApiKeyLabel": "API 키:", - "openaiCompatibleModelDimensionLabel": "임베딩 차원:", - "openaiCompatibleModelDimensionPlaceholder": "예: 1536", - "openaiCompatibleModelDimensionDescription": "모델의 임베딩 차원(출력 크기)입니다. 이 값에 대해서는 제공업체의 문서를 확인하세요. 일반적인 값: 384, 768, 1536, 3072.", - "openaiKeyLabel": "OpenAI 키:", + "openAiKeyLabel": "OpenAI API 키", + "openAiKeyPlaceholder": "OpenAI API 키를 입력하세요", + "openAiCompatibleBaseUrlLabel": "기본 URL", + "openAiCompatibleApiKeyLabel": "API 키", + "openAiCompatibleApiKeyPlaceholder": "API 키를 입력하세요", + "openAiCompatibleModelDimensionLabel": "임베딩 차원:", + "modelDimensionLabel": "모델 차원", + "openAiCompatibleModelDimensionPlaceholder": "예: 1536", + "openAiCompatibleModelDimensionDescription": "모델의 임베딩 차원(출력 크기)입니다. 이 값에 대해서는 제공업체의 문서를 확인하세요. 일반적인 값: 384, 768, 1536, 3072.", "modelLabel": "모델", "selectModelPlaceholder": "모델 선택", "ollamaUrlLabel": "Ollama URL:", "qdrantUrlLabel": "Qdrant URL", "qdrantKeyLabel": "Qdrant 키:", - "startIndexingButton": "인덱싱 시작", - "clearIndexDataButton": "인덱스 데이터 지우기", + "startIndexingButton": "시작", + "clearIndexDataButton": "인덱스 지우기", "unsavedSettingsMessage": "인덱싱 프로세스를 시작하기 전에 설정을 저장해 주세요.", "clearDataDialog": { "title": "확실합니까?", "description": "이 작업은 취소할 수 없습니다. 코드베이스 인덱스 데이터가 영구적으로 삭제됩니다.", "cancelButton": "취소", "confirmButton": "데이터 지우기" - } + }, + "description": "프로젝트의 시맨틱 검색을 활성화하기 위한 코드베이스 인덱싱 설정을 구성합니다. <0>자세히 알아보기", + "statusTitle": "상태", + "settingsTitle": "인덱싱 설정", + "disabledMessage": "코드베이스 인덱싱이 현재 비활성화되어 있습니다. 인덱싱 옵션을 구성하려면 전역 설정에서 활성화하세요.", + "embedderProviderLabel": "임베더 제공자", + "modelPlaceholder": "모델 이름을 입력하세요", + "selectModel": "모델 선택", + "ollamaBaseUrlLabel": "Ollama 기본 URL", + "qdrantApiKeyLabel": "Qdrant API 키", + "qdrantApiKeyPlaceholder": "Qdrant API 키를 입력하세요 (선택사항)", + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "설정을 저장하지 못했습니다", + "modelDimensions": "({{dimension}} 차원)", + "saveSuccess": "설정이 성공적으로 저장되었습니다", + "saving": "저장 중...", + "saveSettings": "저장", + "indexingStatuses": { + "standby": "대기", + "indexing": "인덱싱 중", + "indexed": "인덱싱됨", + "error": "오류" + }, + "close": "닫기", + "advancedConfigLabel": "고급 구성", + "searchMinScoreLabel": "검색 점수 임계값", + "searchMinScoreDescription": "검색 결과에 필요한 최소 유사도 점수(0.0-1.0). 값이 낮을수록 더 많은 결과가 반환되지만 관련성이 떨어질 수 있습니다. 
값이 높을수록 결과는 적지만 관련성이 높은 결과가 반환됩니다.", + "searchMinScoreResetTooltip": "기본값(0.4)으로 재설정", + "searchMaxResultsLabel": "최대 검색 결과", + "searchMaxResultsDescription": "코드베이스 인덱스를 쿼리할 때 반환할 최대 검색 결과 수입니다. 값이 높을수록 더 많은 컨텍스트를 제공하지만 관련성이 낮은 결과가 포함될 수 있습니다.", + "resetToDefault": "기본값으로 재설정" }, "autoApprove": { "description": "Roo가 승인 없이 자동으로 작업을 수행할 수 있도록 허용합니다. AI를 완전히 신뢰하고 관련 보안 위험을 이해하는 경우에만 이러한 설정을 활성화하세요.", @@ -110,6 +149,11 @@ "label": "하위 작업", "description": "승인 없이 하위 작업 생성 및 완료 허용" }, + "followupQuestions": { + "label": "질문", + "description": "설정된 시간이 지나면 후속 질문에 대한 첫 번째 제안 답변을 자동으로 선택합니다", + "timeoutLabel": "첫 번째 답변을 자동 선택하기 전 대기 시간" + }, "execute": { "label": "실행", "description": "승인 없이 자동으로 허용된 터미널 명령 실행", @@ -118,6 +162,10 @@ "commandPlaceholder": "명령 접두사 입력(예: 'git ')", "addButton": "추가" }, + "updateTodoList": { + "label": "Todo", + "description": "승인 없이 자동으로 할 일 목록이 업데이트됩니다" + }, "apiRequestLimit": { "title": "최대 요청 수", "description": "작업을 계속하기 위한 승인을 요청하기 전에 자동으로 이 수의 API 요청을 수행합니다.", diff --git a/webview-ui/src/i18n/locales/nl/chat.json b/webview-ui/src/i18n/locales/nl/chat.json index 4b6877389d68..d228d7b0c287 100644 --- a/webview-ui/src/i18n/locales/nl/chat.json +++ b/webview-ui/src/i18n/locales/nl/chat.json @@ -225,13 +225,13 @@ }, "announcement": { "title": "🎉 Roo Code {{version}} uitgebracht", - "description": "Roo Code {{version}} brengt krachtige nieuwe functies en verbeteringen op basis van jouw feedback.", + "description": "Roo Code {{version}} brengt krachtige nieuwe functies en significante verbeteringen om je ontwikkelingsworkflow te verbeteren.", "whatsNew": "Wat is er nieuw", - "feature1": "Roo Marketplace Launch - De marketplace is nu live! Ontdek en installeer modi en MCP's makkelijker dan ooit tevoren.", - "feature2": "Gemini 2.5 Modellen - Ondersteuning toegevoegd voor nieuwe Gemini 2.5 Pro, Flash en Flash Lite modellen.", - "feature3": "Excel Bestandsondersteuning & Meer - Excel (.xlsx) bestandsondersteuning toegevoegd en talloze bugfixes en verbeteringen!", + "feature1": "1-Klik Taak Delen: Deel je taken direct met collega's en de community met slechts één klik.", + "feature2": "Globale .roo Directory Ondersteuning: Laad regels en configuraties vanuit een globale .roo directory voor consistente instellingen tussen projecten.", + "feature3": "Verbeterde Architect naar Code Overgangen: Soepele overdrachten van planning in Architect modus naar implementatie in Code modus.", "hideButton": "Aankondiging verbergen", - "detailsDiscussLinks": "Meer details en discussie in Discord en Reddit 🚀" + "detailsDiscussLinks": "Krijg meer details en doe mee aan discussies op Discord en Reddit 🚀" }, "reasoning": { "thinking": "Denkt na", @@ -244,7 +244,9 @@ "tokens": "tokens" }, "followUpSuggest": { - "copyToInput": "Kopiëren naar invoer (zelfde als shift + klik)" + "copyToInput": "Kopiëren naar invoer (zelfde als shift + klik)", + "autoSelectCountdown": "Automatische selectie in {{count}}s", + "countdownDisplay": "{{count}}s" }, "browser": { "rooWantsToUse": "Roo wil de browser gebruiken:", @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo wil de codebase doorzoeken op {{query}}:", "wantsToSearchWithPath": "Roo wil de codebase doorzoeken op {{query}} in {{path}}:", - "didSearch": "{{count}} resultaat/resultaten gevonden voor {{query}}:" + "didSearch": "{{count}} resultaat/resultaten gevonden voor {{query}}:", + "resultTooltip": "Gelijkenisscore: {{score}} (klik om bestand te openen)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/nl/common.json 
b/webview-ui/src/i18n/locales/nl/common.json index 012808e51aa1..b3fd4383819f 100644 --- a/webview-ui/src/i18n/locales/nl/common.json +++ b/webview-ui/src/i18n/locales/nl/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Mermaid-diagram genereren...", "render_error": "Kan diagram niet weergeven", + "fixing_syntax": "Mermaid-syntax corrigeren...", + "fix_syntax_button": "Syntax corrigeren met AI", + "original_code": "Originele code:", + "errors": { + "unknown_syntax": "Onbekende syntaxfout", + "fix_timeout": "LLM-correctieverzoek is verlopen", + "fix_failed": "LLM-correctie mislukt", + "fix_attempts": "Syntax kon niet worden gecorrigeerd na {{attempts}} pogingen. Laatste fout: {{error}}", + "no_fix_provided": "LLM kon geen correctie leveren", + "fix_request_failed": "Correctieverzoek mislukt" + }, "buttons": { "zoom": "Zoom", "zoomIn": "Inzoomen", diff --git a/webview-ui/src/i18n/locales/nl/prompts.json b/webview-ui/src/i18n/locales/nl/prompts.json index 2aa09a5a1560..04c8eb070334 100644 --- a/webview-ui/src/i18n/locales/nl/prompts.json +++ b/webview-ui/src/i18n/locales/nl/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "Modi", "createNewMode": "Nieuwe modus aanmaken", + "importMode": "Modus importeren", + "noMatchFound": "Geen modi gevonden", "editModesConfig": "Modusconfiguratie bewerken", "editGlobalModes": "Globale modi bewerken", "editProjectModes": "Projectmodi bewerken (.roomodes)", @@ -50,6 +52,28 @@ "description": "Voeg gedragsrichtlijnen toe die specifiek zijn voor de modus {{modeName}}.", "loadFromFile": "Modusspecifieke instructies voor {{mode}} kunnen ook worden geladen uit de map .roo/rules-{{slug}}/ in je werkruimte (.roorules-{{slug}} en .clinerules-{{slug}} zijn verouderd en werken binnenkort niet meer)." }, + "exportMode": { + "title": "Modus exporteren", + "description": "Exporteer deze modus naar een YAML-bestand met alle regels inbegrepen voor eenvoudig delen met anderen.", + "export": "Modus exporteren", + "exporting": "Exporteren..." + }, + "importMode": { + "selectLevel": "Kies waar je deze modus wilt importeren:", + "import": "Importeren", + "importing": "Importeren...", + "global": { + "label": "Globaal niveau", + "description": "Beschikbaar in alle projecten. Regels worden samengevoegd in aangepaste instructies." + }, + "project": { + "label": "Projectniveau", + "description": "Alleen beschikbaar in deze werkruimte. Als de geëxporteerde modus regelbestanden bevatte, worden deze opnieuw gemaakt in de map .roo/rules-{slug}/." + } + }, + "advanced": { + "title": "Geavanceerd" + }, "globalCustomInstructions": { "title": "Aangepaste instructies voor alle modi", "description": "Deze instructies gelden voor alle modi. Ze bieden een basisset aan gedragingen die kunnen worden uitgebreid met modusspecifieke instructies hieronder. 
<0>Meer informatie", @@ -164,5 +188,13 @@ }, "deleteMode": "Modus verwijderen" }, - "allFiles": "alle bestanden" + "allFiles": "alle bestanden", + "deleteMode": { + "title": "Modus verwijderen", + "message": "Weet je zeker dat je de modus \"{{modeName}}\" wilt verwijderen?", + "rulesFolder": "Deze modus heeft een regelmap op {{folderPath}} die ook wordt verwijderd.", + "descriptionNoRules": "Weet je zeker dat je deze aangepaste modus wilt verwijderen?", + "confirm": "Verwijderen", + "cancel": "Annuleren" + } } diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index c6d1bb7992c7..92b00cf3ff78 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -44,27 +44,66 @@ "selectProviderPlaceholder": "Selecteer provider", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "API-sleutel:", + "geminiApiKeyPlaceholder": "Voer uw Gemini API-sleutel in", "openaiCompatibleProvider": "OpenAI-compatibel", - "openaiCompatibleBaseUrlLabel": "Basis-URL:", - "openaiCompatibleApiKeyLabel": "API-sleutel:", - "openaiCompatibleModelDimensionLabel": "Embedding Dimensie:", - "openaiCompatibleModelDimensionPlaceholder": "bijv., 1536", - "openaiCompatibleModelDimensionDescription": "De embedding dimensie (uitvoergrootte) voor uw model. Controleer de documentatie van uw provider voor deze waarde. Veelvoorkomende waarden: 384, 768, 1536, 3072.", - "openaiKeyLabel": "OpenAI-sleutel:", + "openAiKeyLabel": "OpenAI API-sleutel", + "openAiKeyPlaceholder": "Voer uw OpenAI API-sleutel in", + "openAiCompatibleBaseUrlLabel": "Basis-URL", + "openAiCompatibleApiKeyLabel": "API-sleutel", + "openAiCompatibleApiKeyPlaceholder": "Voer uw API-sleutel in", + "openAiCompatibleModelDimensionLabel": "Embedding Dimensie:", + "modelDimensionLabel": "Model Dimensie", + "openAiCompatibleModelDimensionPlaceholder": "bijv., 1536", + "openAiCompatibleModelDimensionDescription": "De embedding dimensie (uitvoergrootte) voor uw model. Controleer de documentatie van uw provider voor deze waarde. Veelvoorkomende waarden: 384, 768, 1536, 3072.", "modelLabel": "Model", "selectModelPlaceholder": "Selecteer model", "ollamaUrlLabel": "Ollama URL:", "qdrantUrlLabel": "Qdrant URL", "qdrantKeyLabel": "Qdrant-sleutel:", - "startIndexingButton": "Indexering starten", - "clearIndexDataButton": "Indexgegevens wissen", + "startIndexingButton": "Start", + "clearIndexDataButton": "Index wissen", "unsavedSettingsMessage": "Sla je instellingen op voordat je het indexeringsproces start.", "clearDataDialog": { "title": "Weet je het zeker?", "description": "Deze actie kan niet ongedaan worden gemaakt. Dit zal je codebase-indexgegevens permanent verwijderen.", "cancelButton": "Annuleren", "confirmButton": "Gegevens wissen" - } + }, + "description": "Configureer codebase-indexeringsinstellingen om semantisch zoeken voor je project in te schakelen. <0>Meer informatie", + "statusTitle": "Status", + "settingsTitle": "Indexeringsinstellingen", + "disabledMessage": "Codebase-indexering is momenteel uitgeschakeld. 
Schakel het in de algemene instellingen in om indexeringsopties te configureren.", + "embedderProviderLabel": "Embedder Provider", + "modelPlaceholder": "Voer modelnaam in", + "selectModel": "Selecteer een model", + "ollamaBaseUrlLabel": "Ollama Basis-URL", + "qdrantApiKeyLabel": "Qdrant API-sleutel", + "qdrantApiKeyPlaceholder": "Voer je Qdrant API-sleutel in (optioneel)", + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "Kan instellingen niet opslaan", + "modelDimensions": "({{dimension}} dimensies)", + "saveSuccess": "Instellingen succesvol opgeslagen", + "saving": "Opslaan...", + "saveSettings": "Opslaan", + "indexingStatuses": { + "standby": "Stand-by", + "indexing": "Indexeren", + "indexed": "Geïndexeerd", + "error": "Fout" + }, + "close": "Sluiten", + "advancedConfigLabel": "Geavanceerde configuratie", + "searchMinScoreLabel": "Zoekscore drempel", + "searchMinScoreDescription": "Minimale overeenkomstscore (0.0-1.0) vereist voor zoekresultaten. Lagere waarden leveren meer resultaten op, maar zijn mogelijk minder relevant. Hogere waarden leveren minder, maar relevantere resultaten op.", + "searchMinScoreResetTooltip": "Reset naar standaardwaarde (0.4)", + "searchMaxResultsLabel": "Maximum Zoekresultaten", + "searchMaxResultsDescription": "Maximum aantal zoekresultaten dat wordt geretourneerd bij het doorzoeken van de codebase-index. Hogere waarden bieden meer context maar kunnen minder relevante resultaten bevatten.", + "resetToDefault": "Reset naar standaard" }, "autoApprove": { "description": "Sta Roo toe om automatisch handelingen uit te voeren zonder goedkeuring. Schakel deze instellingen alleen in als je de AI volledig vertrouwt en de bijbehorende beveiligingsrisico's begrijpt.", @@ -110,6 +149,11 @@ "label": "Subtaken", "description": "Subtaken aanmaken en afronden zonder goedkeuring" }, + "followupQuestions": { + "label": "Vraag", + "description": "Selecteer automatisch het eerste voorgestelde antwoord voor vervolgvragen na de geconfigureerde time-out", + "timeoutLabel": "Wachttijd voordat het eerste antwoord automatisch wordt geselecteerd" + }, "execute": { "label": "Uitvoeren", "description": "Automatisch toegestane terminalcommando's uitvoeren zonder goedkeuring", @@ -118,6 +162,10 @@ "commandPlaceholder": "Voer commando-prefix in (bijv. 
'git ')", "addButton": "Toevoegen" }, + "updateTodoList": { + "label": "Todo", + "description": "De takenlijst wordt automatisch bijgewerkt zonder goedkeuring" + }, "apiRequestLimit": { "title": "Maximale verzoeken", "description": "Voer automatisch dit aantal API-verzoeken uit voordat om goedkeuring wordt gevraagd om door te gaan met de taak.", diff --git a/webview-ui/src/i18n/locales/pl/chat.json b/webview-ui/src/i18n/locales/pl/chat.json index fe1a47156a51..fdb39b085125 100644 --- a/webview-ui/src/i18n/locales/pl/chat.json +++ b/webview-ui/src/i18n/locales/pl/chat.json @@ -234,15 +234,17 @@ "tokens": "tokeny" }, "followUpSuggest": { - "copyToInput": "Kopiuj do pola wprowadzania (lub Shift + kliknięcie)" + "copyToInput": "Kopiuj do pola wprowadzania (lub Shift + kliknięcie)", + "autoSelectCountdown": "Automatyczny wybór za {{count}}s", + "countdownDisplay": "{{count}}s" }, "announcement": { "title": "🎉 Roo Code {{version}} wydany", - "description": "Roo Code {{version}} przynosi potężne nowe funkcje i ulepszenia na podstawie Twoich opinii.", + "description": "Roo Code {{version}} wprowadza potężne nowe funkcje i znaczące ulepszenia, aby ulepszyć Twój przepływ pracy programistycznej.", "whatsNew": "Co nowego", - "feature1": "Uruchomienie Roo Marketplace - Marketplace jest już dostępny! Odkrywaj i instaluj tryby oraz MCP łatwiej niż kiedykolwiek wcześniej.", - "feature2": "Modele Gemini 2.5 - Dodano wsparcie dla nowych modeli Gemini 2.5 Pro, Flash i Flash Lite.", - "feature3": "Wsparcie dla plików Excel i więcej - Dodano wsparcie dla plików Excel (.xlsx) oraz liczne poprawki błędów i ulepszenia!", + "feature1": "Udostępnianie zadań jednym kliknięciem: Natychmiast udostępniaj swoje zadania współpracownikom i społeczności jednym kliknięciem.", + "feature2": "Wsparcie globalnego katalogu .roo: Ładuj reguły i konfiguracje z globalnego katalogu .roo dla spójnych ustawień między projektami.", + "feature3": "Ulepszone przejścia z Architekta do Kodu: Płynne transfery z planowania w trybie Architekta do implementacji w trybie Kodu.", "hideButton": "Ukryj ogłoszenie", "detailsDiscussLinks": "Uzyskaj więcej szczegółów i dołącz do dyskusji na Discord i Reddit 🚀" }, @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo chce przeszukać bazę kodu w poszukiwaniu {{query}}:", "wantsToSearchWithPath": "Roo chce przeszukać bazę kodu w poszukiwaniu {{query}} w {{path}}:", - "didSearch": "Znaleziono {{count}} wynik(ów) dla {{query}}:" + "didSearch": "Znaleziono {{count}} wynik(ów) dla {{query}}:", + "resultTooltip": "Wynik podobieństwa: {{score}} (kliknij, aby otworzyć plik)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/pl/common.json b/webview-ui/src/i18n/locales/pl/common.json index c72b046c427f..864b9eca1de2 100644 --- a/webview-ui/src/i18n/locales/pl/common.json +++ b/webview-ui/src/i18n/locales/pl/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Generowanie diagramu mermaid...", "render_error": "Nie można renderować diagramu", + "fixing_syntax": "Naprawianie składni Mermaid...", + "fix_syntax_button": "Napraw składnię za pomocą AI", + "original_code": "Oryginalny kod:", + "errors": { + "unknown_syntax": "Nieznany błąd składni", + "fix_timeout": "Upłynął limit czasu żądania naprawy LLM", + "fix_failed": "Naprawa LLM nie powiodła się", + "fix_attempts": "Nie udało się naprawić składni po {{attempts}} próbach. 
Ostatni błąd: {{error}}", + "no_fix_provided": "LLM nie dostarczył naprawy", + "fix_request_failed": "Żądanie naprawy nie powiodło się" + }, "buttons": { "zoom": "Powiększenie", "zoomIn": "Powiększ", diff --git a/webview-ui/src/i18n/locales/pl/prompts.json b/webview-ui/src/i18n/locales/pl/prompts.json index b4a1bdcc50e4..e769750bc3ca 100644 --- a/webview-ui/src/i18n/locales/pl/prompts.json +++ b/webview-ui/src/i18n/locales/pl/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "Tryby", "createNewMode": "Utwórz nowy tryb", + "importMode": "Importuj tryb", + "noMatchFound": "Nie znaleziono trybów", "editModesConfig": "Edytuj konfigurację trybów", "editGlobalModes": "Edytuj tryby globalne", "editProjectModes": "Edytuj tryby projektu (.roomodes)", @@ -50,6 +52,28 @@ "description": "Dodaj wytyczne dotyczące zachowania specyficzne dla trybu {{modeName}}.", "loadFromFile": "Niestandardowe instrukcje dla trybu {{mode}} mogą być również ładowane z folderu .roo/rules-{{slug}}/ w Twoim obszarze roboczym (.roorules-{{slug}} i .clinerules-{{slug}} są przestarzałe i wkrótce przestaną działać)." }, + "exportMode": { + "title": "Eksportuj tryb", + "description": "Eksportuj ten tryb do pliku YAML ze wszystkimi regułami w celu łatwego udostępniania innym.", + "export": "Eksportuj tryb", + "exporting": "Eksportowanie..." + }, + "importMode": { + "selectLevel": "Wybierz, gdzie zaimportować ten tryb:", + "import": "Importuj", + "importing": "Importowanie...", + "global": { + "label": "Poziom globalny", + "description": "Dostępne we wszystkich projektach. Reguły zostaną scalone z niestandardowymi instrukcjami." + }, + "project": { + "label": "Poziom projektu", + "description": "Dostępne tylko w tym obszarze roboczym. Jeśli wyeksportowany tryb zawierał pliki reguł, zostaną one odtworzone w folderze .roo/rules-{slug}/." + } + }, + "advanced": { + "title": "Zaawansowane" + }, "globalCustomInstructions": { "title": "Niestandardowe instrukcje dla wszystkich trybów", "description": "Te instrukcje dotyczą wszystkich trybów. Zapewniają podstawowy zestaw zachowań, które mogą być rozszerzone przez instrukcje specyficzne dla trybów poniżej. <0>Dowiedz się więcej", @@ -164,5 +188,13 @@ }, "deleteMode": "Usuń tryb" }, - "allFiles": "wszystkie pliki" + "allFiles": "wszystkie pliki", + "deleteMode": { + "title": "Usuń tryb", + "message": "Czy na pewno chcesz usunąć tryb \"{{modeName}}\"?", + "rulesFolder": "Ten tryb ma folder z regułami w {{folderPath}}, który również zostanie usunięty.", + "descriptionNoRules": "Czy na pewno chcesz usunąć ten niestandardowy tryb?", + "confirm": "Usuń", + "cancel": "Anuluj" + } } diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index b702d7b5a58f..34e0ac52ef73 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -44,27 +44,66 @@ "selectProviderPlaceholder": "Wybierz dostawcę", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "Klucz API:", + "geminiApiKeyPlaceholder": "Wprowadź swój klucz API Gemini", "openaiCompatibleProvider": "Kompatybilny z OpenAI", - "openaiCompatibleBaseUrlLabel": "Bazowy URL:", - "openaiCompatibleApiKeyLabel": "Klucz API:", - "openaiCompatibleModelDimensionLabel": "Wymiar Embeddingu:", - "openaiCompatibleModelDimensionPlaceholder": "np., 1536", - "openaiCompatibleModelDimensionDescription": "Wymiar embeddingu (rozmiar wyjściowy) dla twojego modelu. 
Sprawdź dokumentację swojego dostawcy, aby uzyskać tę wartość. Typowe wartości: 384, 768, 1536, 3072.", - "openaiKeyLabel": "Klucz OpenAI:", + "openAiKeyLabel": "Klucz API OpenAI", + "openAiKeyPlaceholder": "Wprowadź swój klucz API OpenAI", + "openAiCompatibleBaseUrlLabel": "Bazowy URL", + "openAiCompatibleApiKeyLabel": "Klucz API", + "openAiCompatibleApiKeyPlaceholder": "Wprowadź swój klucz API", + "openAiCompatibleModelDimensionLabel": "Wymiar Embeddingu:", + "modelDimensionLabel": "Wymiar modelu", + "openAiCompatibleModelDimensionPlaceholder": "np., 1536", + "openAiCompatibleModelDimensionDescription": "Wymiar embeddingu (rozmiar wyjściowy) dla twojego modelu. Sprawdź dokumentację swojego dostawcy, aby uzyskać tę wartość. Typowe wartości: 384, 768, 1536, 3072.", "modelLabel": "Model", "selectModelPlaceholder": "Wybierz model", "ollamaUrlLabel": "URL Ollama:", "qdrantUrlLabel": "URL Qdrant", "qdrantKeyLabel": "Klucz Qdrant:", - "startIndexingButton": "Rozpocznij indeksowanie", - "clearIndexDataButton": "Wyczyść dane indeksu", + "startIndexingButton": "Rozpocznij", + "clearIndexDataButton": "Wyczyść indeks", "unsavedSettingsMessage": "Zapisz swoje ustawienia przed rozpoczęciem procesu indeksowania.", "clearDataDialog": { "title": "Czy jesteś pewien?", "description": "Tej akcji nie można cofnąć. Spowoduje to trwałe usunięcie danych indeksu Twojego kodu.", "cancelButton": "Anuluj", "confirmButton": "Wyczyść dane" - } + }, + "description": "Skonfiguruj ustawienia indeksowania bazy kodu, aby włączyć wyszukiwanie semantyczne w swoim projekcie. <0>Dowiedz się więcej", + "statusTitle": "Status", + "settingsTitle": "Ustawienia indeksowania", + "disabledMessage": "Indeksowanie bazy kodu jest obecnie wyłączone. Włącz je w ustawieniach globalnych, aby skonfigurować opcje indeksowania.", + "embedderProviderLabel": "Dostawca Embeddera", + "modelPlaceholder": "Wprowadź nazwę modelu", + "selectModel": "Wybierz model", + "ollamaBaseUrlLabel": "Bazowy URL Ollama", + "qdrantApiKeyLabel": "Klucz API Qdrant", + "qdrantApiKeyPlaceholder": "Wprowadź swój klucz API Qdrant (opcjonalnie)", + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "Nie udało się zapisać ustawień", + "modelDimensions": "({{dimension}} wymiarów)", + "saveSuccess": "Ustawienia zapisane pomyślnie", + "saving": "Zapisywanie...", + "saveSettings": "Zapisz", + "indexingStatuses": { + "standby": "Gotowość", + "indexing": "Indeksowanie", + "indexed": "Zaindeksowane", + "error": "Błąd" + }, + "close": "Zamknij", + "advancedConfigLabel": "Konfiguracja zaawansowana", + "searchMinScoreLabel": "Próg wyniku wyszukiwania", + "searchMinScoreDescription": "Minimalny wynik podobieństwa (0.0-1.0) wymagany dla wyników wyszukiwania. Niższe wartości zwracają więcej wyników, ale mogą być mniej trafne. Wyższe wartości zwracają mniej wyników, ale bardziej trafnych.", + "searchMinScoreResetTooltip": "Zresetuj do wartości domyślnej (0.4)", + "searchMaxResultsLabel": "Maksymalna liczba wyników wyszukiwania", + "searchMaxResultsDescription": "Maksymalna liczba wyników wyszukiwania zwracanych podczas zapytania do indeksu bazy kodu. Wyższe wartości zapewniają więcej kontekstu, ale mogą zawierać mniej istotne wyniki.", + "resetToDefault": "Przywróć domyślne" }, "autoApprove": { "description": "Pozwól Roo na automatyczne wykonywanie operacji bez wymagania zatwierdzenia. 
Włącz te ustawienia tylko jeśli w pełni ufasz AI i rozumiesz związane z tym zagrożenia bezpieczeństwa.", @@ -110,6 +149,11 @@ "label": "Podzadania", "description": "Zezwalaj na tworzenie i ukończenie podzadań bez konieczności zatwierdzania" }, + "followupQuestions": { + "label": "Pytanie", + "description": "Automatycznie wybierz pierwszą sugerowaną odpowiedź na pytania uzupełniające po skonfigurowanym limicie czasu", + "timeoutLabel": "Czas oczekiwania przed automatycznym wybraniem pierwszej odpowiedzi" + }, "execute": { "label": "Wykonaj", "description": "Automatycznie wykonuj dozwolone polecenia terminala bez konieczności zatwierdzania", @@ -118,6 +162,10 @@ "commandPlaceholder": "Wprowadź prefiks polecenia (np. 'git ')", "addButton": "Dodaj" }, + "updateTodoList": { + "label": "Todo", + "description": "Lista zadań jest automatycznie aktualizowana bez zatwierdzenia" + }, "apiRequestLimit": { "title": "Maksymalna liczba żądań", "description": "Automatycznie wykonaj tyle żądań API przed poproszeniem o zgodę na kontynuowanie zadania.", diff --git a/webview-ui/src/i18n/locales/pt-BR/chat.json b/webview-ui/src/i18n/locales/pt-BR/chat.json index f8dd3542c0a6..b37879f2f088 100644 --- a/webview-ui/src/i18n/locales/pt-BR/chat.json +++ b/webview-ui/src/i18n/locales/pt-BR/chat.json @@ -234,15 +234,17 @@ "tokens": "tokens" }, "followUpSuggest": { - "copyToInput": "Copiar para entrada (ou Shift + clique)" + "copyToInput": "Copiar para entrada (ou Shift + clique)", + "autoSelectCountdown": "Seleção automática em {{count}}s", + "countdownDisplay": "{{count}}s" }, "announcement": { "title": "🎉 Roo Code {{version}} Lançado", - "description": "Roo Code {{version}} traz importantes novos recursos e melhorias baseados no seu feedback.", + "description": "Roo Code {{version}} traz novos recursos poderosos e melhorias significativas para aprimorar seu fluxo de trabalho de desenvolvimento.", "whatsNew": "O que há de novo", - "feature1": "Lançamento do Marketplace Roo: O marketplace está agora no ar! 
Descubra e instale modos e MCPs mais facilmente do que nunca.", - "feature2": "Modelos Gemini 2.5: Adicionado suporte para novos modelos Gemini 2.5 Pro, Flash e Flash Lite.", - "feature3": "Suporte a Arquivos Excel e Mais: Adicionado suporte a arquivos Excel (.xlsx) e numerosas correções de bugs e melhorias!", + "feature1": "Compartilhamento de Tarefas com 1 Clique: Compartilhe instantaneamente suas tarefas com colegas e a comunidade com apenas um clique.", + "feature2": "Suporte a Diretório Global .roo: Carregue regras e configurações de um diretório global .roo para configurações consistentes entre projetos.", + "feature3": "Transições Aprimoradas de Arquiteto para Código: Transferências suaves do planejamento no modo Arquiteto para implementação no modo Código.", "hideButton": "Ocultar anúncio", "detailsDiscussLinks": "Obtenha mais detalhes e participe da discussão no Discord e Reddit 🚀" }, @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo quer pesquisar na base de código por {{query}}:", "wantsToSearchWithPath": "Roo quer pesquisar na base de código por {{query}} em {{path}}:", - "didSearch": "Encontrado {{count}} resultado(s) para {{query}}:" + "didSearch": "Encontrado {{count}} resultado(s) para {{query}}:", + "resultTooltip": "Pontuação de similaridade: {{score}} (clique para abrir o arquivo)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/pt-BR/common.json b/webview-ui/src/i18n/locales/pt-BR/common.json index a911b2366f75..86ac454e8a88 100644 --- a/webview-ui/src/i18n/locales/pt-BR/common.json +++ b/webview-ui/src/i18n/locales/pt-BR/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Gerando diagrama mermaid...", "render_error": "Não foi possível renderizar o diagrama", + "fixing_syntax": "Corrigindo sintaxe do Mermaid...", + "fix_syntax_button": "Corrigir sintaxe com IA", + "original_code": "Código original:", + "errors": { + "unknown_syntax": "Erro de sintaxe desconhecido", + "fix_timeout": "Tempo limite da solicitação de correção LLM esgotado", + "fix_failed": "Falha na correção LLM", + "fix_attempts": "Falha ao corrigir sintaxe após {{attempts}} tentativas. Último erro: {{error}}", + "no_fix_provided": "LLM falhou em fornecer uma correção", + "fix_request_failed": "Falha na solicitação de correção" + }, "buttons": { "zoom": "Zoom", "zoomIn": "Ampliar", diff --git a/webview-ui/src/i18n/locales/pt-BR/prompts.json b/webview-ui/src/i18n/locales/pt-BR/prompts.json index c2a88d4eaaa0..cce3e7a23a75 100644 --- a/webview-ui/src/i18n/locales/pt-BR/prompts.json +++ b/webview-ui/src/i18n/locales/pt-BR/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "Modos", "createNewMode": "Criar novo modo", + "importMode": "Importar modo", + "noMatchFound": "Nenhum modo encontrado", "editModesConfig": "Editar configuração de modos", "editGlobalModes": "Editar modos globais", "editProjectModes": "Editar modos do projeto (.roomodes)", @@ -50,6 +52,28 @@ "description": "Adicione diretrizes comportamentais específicas para o modo {{modeName}}.", "loadFromFile": "Instruções personalizadas específicas para o modo {{mode}} também podem ser carregadas da pasta .roo/rules-{{slug}}/ no seu espaço de trabalho (.roorules-{{slug}} e .clinerules-{{slug}} estão obsoletos e deixarão de funcionar em breve)." }, + "exportMode": { + "title": "Exportar modo", + "description": "Exporte este modo para um arquivo YAML com todas as regras incluídas para compartilhar facilmente com outros.", + "export": "Exportar modo", + "exporting": "Exportando..." 
+ }, + "importMode": { + "selectLevel": "Escolha onde importar este modo:", + "import": "Importar", + "importing": "Importando...", + "global": { + "label": "Nível global", + "description": "Disponível em todos os projetos. As regras serão mescladas nas instruções personalizadas." + }, + "project": { + "label": "Nível do projeto", + "description": "Disponível apenas neste espaço de trabalho. Se o modo exportado continha arquivos de regras, eles serão recriados na pasta .roo/rules-{slug}/." + } + }, + "advanced": { + "title": "Avançado" + }, "globalCustomInstructions": { "title": "Instruções personalizadas para todos os modos", "description": "Estas instruções se aplicam a todos os modos. Elas fornecem um conjunto base de comportamentos que podem ser aprimorados por instruções específicas do modo abaixo. <0>Saiba mais", @@ -164,5 +188,13 @@ }, "deleteMode": "Excluir modo" }, - "allFiles": "todos os arquivos" + "allFiles": "todos os arquivos", + "deleteMode": { + "title": "Excluir Modo", + "message": "Tem certeza de que deseja excluir o modo \"{{modeName}}\"?", + "rulesFolder": "Este modo tem uma pasta de regras em {{folderPath}} que também será excluída.", + "descriptionNoRules": "Tem certeza de que deseja excluir este modo personalizado?", + "confirm": "Excluir", + "cancel": "Cancelar" + } } diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index be7bfe2d0d1f..e696ab0f77bf 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -44,27 +44,66 @@ "selectProviderPlaceholder": "Selecionar provedor", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "Chave de API:", + "geminiApiKeyPlaceholder": "Digite sua chave de API do Gemini", "openaiCompatibleProvider": "Compatível com OpenAI", - "openaiCompatibleBaseUrlLabel": "URL Base:", - "openaiCompatibleApiKeyLabel": "Chave de API:", - "openaiCompatibleModelDimensionLabel": "Dimensão de Embedding:", - "openaiCompatibleModelDimensionPlaceholder": "ex., 1536", - "openaiCompatibleModelDimensionDescription": "A dimensão de embedding (tamanho de saída) para seu modelo. Verifique a documentação do seu provedor para este valor. Valores comuns: 384, 768, 1536, 3072.", - "openaiKeyLabel": "Chave OpenAI:", + "openAiKeyLabel": "Chave de API OpenAI", + "openAiKeyPlaceholder": "Digite sua chave de API OpenAI", + "openAiCompatibleBaseUrlLabel": "URL Base", + "openAiCompatibleApiKeyLabel": "Chave de API", + "openAiCompatibleApiKeyPlaceholder": "Digite sua chave de API", + "openAiCompatibleModelDimensionLabel": "Dimensão de Embedding:", + "modelDimensionLabel": "Dimensão do Modelo", + "openAiCompatibleModelDimensionPlaceholder": "ex., 1536", + "openAiCompatibleModelDimensionDescription": "A dimensão de embedding (tamanho de saída) para seu modelo. Verifique a documentação do seu provedor para este valor. Valores comuns: 384, 768, 1536, 3072.", "modelLabel": "Modelo", "selectModelPlaceholder": "Selecionar modelo", "ollamaUrlLabel": "URL Ollama:", "qdrantUrlLabel": "URL Qdrant", "qdrantKeyLabel": "Chave Qdrant:", - "startIndexingButton": "Iniciar Indexação", - "clearIndexDataButton": "Limpar Dados de Índice", + "startIndexingButton": "Iniciar", + "clearIndexDataButton": "Limpar Índice", "unsavedSettingsMessage": "Por favor, salve suas configurações antes de iniciar o processo de indexação.", "clearDataDialog": { "title": "Tem certeza?", "description": "Esta ação não pode ser desfeita. 
Isso excluirá permanentemente os dados de índice da sua base de código.", "cancelButton": "Cancelar", "confirmButton": "Limpar Dados" - } + }, + "description": "Configure as configurações de indexação da base de código para habilitar a pesquisa semântica do seu projeto. <0>Saiba mais", + "statusTitle": "Status", + "settingsTitle": "Configurações de Indexação", + "disabledMessage": "A indexação da base de código está atualmente desativada. Ative-a nas configurações globais para configurar as opções de indexação.", + "embedderProviderLabel": "Provedor de Embedder", + "modelPlaceholder": "Insira o nome do modelo", + "selectModel": "Selecione um modelo", + "ollamaBaseUrlLabel": "URL Base do Ollama", + "qdrantApiKeyLabel": "Chave da API Qdrant", + "qdrantApiKeyPlaceholder": "Insira sua chave da API Qdrant (opcional)", + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "Falha ao salvar configurações", + "modelDimensions": "({{dimension}} dimensões)", + "saveSuccess": "Configurações salvas com sucesso", + "saving": "Salvando...", + "saveSettings": "Salvar", + "indexingStatuses": { + "standby": "Em espera", + "indexing": "Indexando", + "indexed": "Indexado", + "error": "Erro" + }, + "close": "Fechar", + "advancedConfigLabel": "Configuração Avançada", + "searchMinScoreLabel": "Limite de pontuação de busca", + "searchMinScoreDescription": "Pontuação mínima de similaridade (0.0-1.0) necessária para os resultados da busca. Valores mais baixos retornam mais resultados, mas podem ser menos relevantes. Valores mais altos retornam menos resultados, mas mais relevantes.", + "searchMinScoreResetTooltip": "Redefinir para o valor padrão (0.4)", + "searchMaxResultsLabel": "Resultados máximos de busca", + "searchMaxResultsDescription": "Número máximo de resultados de busca a retornar ao consultar o índice de código. Valores mais altos fornecem mais contexto, mas podem incluir resultados menos relevantes.", + "resetToDefault": "Redefinir para o padrão" }, "autoApprove": { "description": "Permitir que o Roo realize operações automaticamente sem exigir aprovação. Ative essas configurações apenas se confiar totalmente na IA e compreender os riscos de segurança associados.", @@ -110,6 +149,11 @@ "label": "Subtarefas", "description": "Permitir a criação e conclusão de subtarefas sem exigir aprovação" }, + "followupQuestions": { + "label": "Pergunta", + "description": "Selecionar automaticamente a primeira resposta sugerida para perguntas de acompanhamento após o tempo limite configurado", + "timeoutLabel": "Tempo de espera antes de selecionar automaticamente a primeira resposta" + }, "execute": { "label": "Executar", "description": "Executar automaticamente comandos de terminal permitidos sem exigir aprovação", @@ -118,6 +162,10 @@ "commandPlaceholder": "Digite o prefixo do comando (ex. 
'git ')", "addButton": "Adicionar" }, + "updateTodoList": { + "label": "Todo", + "description": "A lista de tarefas é atualizada automaticamente sem aprovação" + }, "apiRequestLimit": { "title": "Máximo de Solicitações", "description": "Fazer automaticamente este número de requisições à API antes de pedir aprovação para continuar com a tarefa.", diff --git a/webview-ui/src/i18n/locales/ru/chat.json b/webview-ui/src/i18n/locales/ru/chat.json index b07d8d49f780..5865e41a539f 100644 --- a/webview-ui/src/i18n/locales/ru/chat.json +++ b/webview-ui/src/i18n/locales/ru/chat.json @@ -225,11 +225,11 @@ }, "announcement": { "title": "🎉 Выпущен Roo Code {{version}}", - "description": "Roo Code {{version}} приносит важные новые функции и улучшения на основе ваших отзывов.", + "description": "Roo Code {{version}} приносит мощные новые функции и значительные улучшения для совершенствования вашего рабочего процесса разработки.", "whatsNew": "Что нового", - "feature1": "Запуск Roo Marketplace: Маркетплейс теперь в сети! Открывайте и устанавливайте режимы и MCP проще, чем когда-либо.", - "feature2": "Модели Gemini 2.5: Добавлена поддержка новых моделей Gemini 2.5 Pro, Flash и Flash Lite.", - "feature3": "Поддержка файлов Excel и многое другое: Добавлена поддержка файлов Excel (.xlsx) и множество исправлений ошибок и улучшений!", + "feature1": "Обмен задачами в 1 клик: Мгновенно делитесь своими задачами с коллегами и сообществом одним кликом.", + "feature2": "Поддержка глобального каталога .roo: Загружайте правила и конфигурации из глобального каталога .roo для согласованных настроек между проектами.", + "feature3": "Улучшенные переходы от Архитектора к Коду: Плавные переходы от планирования в режиме Архитектора к реализации в режиме Кода.", "hideButton": "Скрыть объявление", "detailsDiscussLinks": "Подробнее и обсуждение в Discord и Reddit 🚀" }, @@ -244,7 +244,9 @@ "tokens": "токены" }, "followUpSuggest": { - "copyToInput": "Скопировать во ввод (то же, что shift + клик)" + "copyToInput": "Скопировать во ввод (то же, что shift + клик)", + "autoSelectCountdown": "Автовыбор через {{count}}с", + "countdownDisplay": "{{count}}с" }, "browser": { "rooWantsToUse": "Roo хочет использовать браузер:", @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo хочет выполнить поиск в кодовой базе по {{query}}:", "wantsToSearchWithPath": "Roo хочет выполнить поиск в кодовой базе по {{query}} в {{path}}:", - "didSearch": "Найдено {{count}} результат(ов) для {{query}}:" + "didSearch": "Найдено {{count}} результат(ов) для {{query}}:", + "resultTooltip": "Оценка схожести: {{score}} (нажмите, чтобы открыть файл)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/ru/common.json b/webview-ui/src/i18n/locales/ru/common.json index e68899a2dbc2..78678c5c3c8a 100644 --- a/webview-ui/src/i18n/locales/ru/common.json +++ b/webview-ui/src/i18n/locales/ru/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Создание диаграммы mermaid...", "render_error": "Не удалось отобразить диаграмму", + "fixing_syntax": "Исправление синтаксиса Mermaid...", + "fix_syntax_button": "Исправить синтаксис с помощью ИИ", + "original_code": "Исходный код:", + "errors": { + "unknown_syntax": "Неизвестная ошибка синтаксиса", + "fix_timeout": "Истекло время ожидания запроса исправления LLM", + "fix_failed": "Не удалось исправить с помощью LLM", + "fix_attempts": "Не удалось исправить синтаксис после {{attempts}} попыток. 
Последняя ошибка: {{error}}", + "no_fix_provided": "LLM не смог предоставить исправление", + "fix_request_failed": "Запрос на исправление не выполнен" + }, "buttons": { "zoom": "Масштаб", "zoomIn": "Увеличить", diff --git a/webview-ui/src/i18n/locales/ru/prompts.json b/webview-ui/src/i18n/locales/ru/prompts.json index 07e9f91db821..29be2a2fe057 100644 --- a/webview-ui/src/i18n/locales/ru/prompts.json +++ b/webview-ui/src/i18n/locales/ru/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "Режимы", "createNewMode": "Создать новый режим", + "importMode": "Импортировать режим", + "noMatchFound": "Режимы не найдены", "editModesConfig": "Редактировать конфигурацию режимов", "editGlobalModes": "Редактировать глобальные режимы", "editProjectModes": "Редактировать режимы проекта (.roomodes)", @@ -50,11 +52,30 @@ "description": "Добавьте рекомендации по поведению, специфичные для режима {{modeName}}.", "loadFromFile": "Пользовательские инструкции для режима {{mode}} также можно загрузить из папки .roo/rules-{{slug}}/ в вашем рабочем пространстве (.roorules-{{slug}} и .clinerules-{{slug}} устарели и скоро перестанут работать)." }, + "exportMode": { + "title": "Экспортировать режим", + "description": "Экспортировать этот режим в файл YAML со всеми включенными правилами для удобного обмена с другими.", + "export": "Экспортировать режим", + "exporting": "Экспорт..." + }, "globalCustomInstructions": { "title": "Пользовательские инструкции для всех режимов", "description": "Эти инструкции применяются ко всем режимам. Они задают базовое поведение, которое можно расширить с помощью инструкций ниже. <0>Узнать больше", "loadFromFile": "Инструкции также можно загрузить из папки .roo/rules/ в вашем рабочем пространстве (.roorules и .clinerules устарели и скоро перестанут работать)." }, + "importMode": { + "selectLevel": "Выберите, куда импортировать этот режим:", + "import": "Импорт", + "importing": "Импортирование...", + "global": { + "label": "Глобальный уровень", + "description": "Доступно во всех проектах. Правила будут объединены с пользовательскими инструкциями." + }, + "project": { + "label": "Уровень проекта", + "description": "Доступно только в этом рабочем пространстве. Если экспортированный режим содержал файлы правил, они будут воссозданы в папке .roo/rules-{slug}/." 
+ } + }, "systemPrompt": { "preview": "Предпросмотр системного промпта", "copy": "Скопировать системный промпт в буфер обмена", @@ -164,5 +185,16 @@ }, "deleteMode": "Удалить режим" }, - "allFiles": "все файлы" + "allFiles": "все файлы", + "advanced": { + "title": "Дополнительно" + }, + "deleteMode": { + "title": "Удалить режим", + "message": "Вы уверены, что хотите удалить режим \"{{modeName}}\"?", + "rulesFolder": "У этого режима есть папка правил по адресу {{folderPath}}, которая также будет удалена.", + "descriptionNoRules": "Вы уверены, что хотите удалить этот пользовательский режим?", + "confirm": "Удалить", + "cancel": "Отмена" + } } diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index b0693f453255..3e6a7a9d769b 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -44,27 +44,66 @@ "selectProviderPlaceholder": "Выберите провайдера", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "Ключ API:", + "geminiApiKeyPlaceholder": "Введите свой API-ключ Gemini", "openaiCompatibleProvider": "OpenAI-совместимый", - "openaiCompatibleBaseUrlLabel": "Базовый URL:", - "openaiCompatibleApiKeyLabel": "Ключ API:", - "openaiCompatibleModelDimensionLabel": "Размерность эмбеддинга:", - "openaiCompatibleModelDimensionPlaceholder": "напр., 1536", - "openaiCompatibleModelDimensionDescription": "Размерность эмбеддинга (размер выходных данных) для вашей модели. Проверьте документацию вашего провайдера для этого значения. Распространенные значения: 384, 768, 1536, 3072.", - "openaiKeyLabel": "Ключ OpenAI:", + "openAiKeyLabel": "Ключ API OpenAI", + "openAiKeyPlaceholder": "Введите ваш ключ API OpenAI", + "openAiCompatibleBaseUrlLabel": "Базовый URL", + "openAiCompatibleApiKeyLabel": "Ключ API", + "openAiCompatibleApiKeyPlaceholder": "Введите ваш ключ API", + "openAiCompatibleModelDimensionLabel": "Размерность эмбеддинга:", + "modelDimensionLabel": "Размерность модели", + "openAiCompatibleModelDimensionPlaceholder": "напр., 1536", + "openAiCompatibleModelDimensionDescription": "Размерность эмбеддинга (размер выходных данных) для вашей модели. Проверьте документацию вашего провайдера для этого значения. Распространенные значения: 384, 768, 1536, 3072.", "modelLabel": "Модель", "selectModelPlaceholder": "Выберите модель", "ollamaUrlLabel": "URL Ollama:", "qdrantUrlLabel": "URL Qdrant", "qdrantKeyLabel": "Ключ Qdrant:", - "startIndexingButton": "Начать индексацию", - "clearIndexDataButton": "Очистить данные индекса", + "startIndexingButton": "Начать", + "clearIndexDataButton": "Очистить индекс", "unsavedSettingsMessage": "Пожалуйста, сохрани настройки перед запуском процесса индексации.", "clearDataDialog": { "title": "Вы уверены?", "description": "Это действие нельзя отменить. Оно навсегда удалит данные индекса вашей кодовой базы.", "cancelButton": "Отмена", "confirmButton": "Очистить данные" - } + }, + "description": "Настройте параметры индексации кодовой базы для включения семантического поиска в вашем проекте. <0>Узнать больше", + "statusTitle": "Статус", + "settingsTitle": "Настройки индексации", + "disabledMessage": "Индексация кодовой базы в настоящее время отключена. 
Включите ее в глобальных настройках для настройки параметров индексации.", + "embedderProviderLabel": "Провайдер эмбеддера", + "modelPlaceholder": "Введите название модели", + "selectModel": "Выберите модель", + "ollamaBaseUrlLabel": "Базовый URL Ollama", + "qdrantApiKeyLabel": "API-ключ Qdrant", + "qdrantApiKeyPlaceholder": "Введите ваш API-ключ Qdrant (необязательно)", + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "Не удалось сохранить настройки", + "modelDimensions": "({{dimension}} измерений)", + "saveSuccess": "Настройки успешно сохранены", + "saving": "Сохранение...", + "saveSettings": "Сохранить", + "indexingStatuses": { + "standby": "Ожидание", + "indexing": "Индексация", + "indexed": "Проиндексировано", + "error": "Ошибка" + }, + "close": "Закрыть", + "advancedConfigLabel": "Расширенная конфигурация", + "searchMinScoreLabel": "Порог оценки поиска", + "searchMinScoreDescription": "Минимальный балл сходства (0.0-1.0), необходимый для результатов поиска. Более низкие значения возвращают больше результатов, но они могут быть менее релевантными. Более высокие значения возвращают меньше результатов, но более релевантных.", + "searchMinScoreResetTooltip": "Сбросить к значению по умолчанию (0.4)", + "searchMaxResultsLabel": "Максимальное количество результатов поиска", + "searchMaxResultsDescription": "Максимальное количество результатов поиска, возвращаемых при запросе индекса кодовой базы. Более высокие значения предоставляют больше контекста, но могут включать менее релевантные результаты.", + "resetToDefault": "Сбросить к значению по умолчанию" }, "autoApprove": { "description": "Разрешить Roo автоматически выполнять операции без необходимости одобрения. 
Включайте эти параметры только если полностью доверяете ИИ и понимаете связанные с этим риски безопасности.", @@ -110,6 +149,11 @@ "label": "Подзадачи", "description": "Разрешить создание и выполнение подзадач без необходимости одобрения" }, + "followupQuestions": { + "label": "Вопрос", + "description": "Автоматически выбирать первый предложенный ответ на дополнительные вопросы после настроенного тайм-аута", + "timeoutLabel": "Время ожидания перед автоматическим выбором первого ответа" + }, "execute": { "label": "Выполнение", "description": "Автоматически выполнять разрешённые команды терминала без необходимости одобрения", @@ -118,6 +162,10 @@ "commandPlaceholder": "Введите префикс команды (например, 'git ')", "addButton": "Добавить" }, + "updateTodoList": { + "label": "Todo", + "description": "Список дел обновляется автоматически без подтверждения" + }, "apiRequestLimit": { "title": "Максимум запросов", "description": "Автоматически выполнять это количество API-запросов перед запросом разрешения на продолжение задачи.", diff --git a/webview-ui/src/i18n/locales/tr/chat.json b/webview-ui/src/i18n/locales/tr/chat.json index f5625842b1b5..6c5bd2035378 100644 --- a/webview-ui/src/i18n/locales/tr/chat.json +++ b/webview-ui/src/i18n/locales/tr/chat.json @@ -234,17 +234,19 @@ "tokens": "token" }, "followUpSuggest": { - "copyToInput": "Giriş alanına kopyala (veya Shift + tıklama)" + "copyToInput": "Giriş alanına kopyala (veya Shift + tıklama)", + "autoSelectCountdown": "{{count}}s içinde otomatik seçilecek", + "countdownDisplay": "{{count}}sn" }, "announcement": { "title": "🎉 Roo Code {{version}} Yayınlandı", - "description": "Roo Code {{version}} geri bildirimlerinize dayalı güçlü yeni özellikler ve iyileştirmeler getiriyor.", + "description": "Roo Code {{version}}, geliştirme iş akışınızı geliştirmek için güçlü yeni özellikler ve önemli iyileştirmeler getiriyor.", "whatsNew": "Yenilikler", - "feature1": "Roo Marketplace Lansmanı - Marketplace artık canlı! 
Modları ve MCP'leri her zamankinden daha kolay keşfedin ve kurun.", - "feature2": "Gemini 2.5 Modelleri - Yeni Gemini 2.5 Pro, Flash ve Flash Lite modelleri için destek eklendi.", - "feature3": "Excel Dosya Desteği ve Daha Fazlası - Excel (.xlsx) dosya desteği eklendi ve sayısız hata düzeltmesi ve iyileştirme!", + "feature1": "Tek Tıkla Görev Paylaşımı: Görevlerinizi meslektaşlarınız ve toplulukla tek tıkla anında paylaşın.", + "feature2": "Global .roo Dizin Desteği: Projeler arası tutarlı ayarlar için global .roo dizininden kurallar ve yapılandırmalar yükleyin.", + "feature3": "Geliştirilmiş Mimar'dan Kod'a Geçişler: Mimar modunda planlamadan Kod modunda uygulamaya sorunsuz aktarımlar.", "hideButton": "Duyuruyu gizle", - "detailsDiscussLinks": "Discord ve Reddit üzerinde daha fazla ayrıntı edinin ve tartışmalara katılın 🚀" + "detailsDiscussLinks": "Discord ve Reddit'te daha fazla ayrıntı alın ve tartışmalara katılın 🚀" }, "browser": { "rooWantsToUse": "Roo tarayıcıyı kullanmak istiyor:", @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo kod tabanında {{query}} aramak istiyor:", "wantsToSearchWithPath": "Roo {{path}} içinde kod tabanında {{query}} aramak istiyor:", - "didSearch": "{{query}} için {{count}} sonuç bulundu:" + "didSearch": "{{query}} için {{count}} sonuç bulundu:", + "resultTooltip": "Benzerlik puanı: {{score}} (dosyayı açmak için tıklayın)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/tr/common.json b/webview-ui/src/i18n/locales/tr/common.json index 23344ca9662c..6ce2794d925c 100644 --- a/webview-ui/src/i18n/locales/tr/common.json +++ b/webview-ui/src/i18n/locales/tr/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Mermaid diyagramı oluşturuluyor...", "render_error": "Diyagram render edilemiyor", + "fixing_syntax": "Mermaid sözdizimi düzeltiliyor...", + "fix_syntax_button": "Sözdizimini AI ile düzelt", + "original_code": "Orijinal kod:", + "errors": { + "unknown_syntax": "Bilinmeyen sözdizimi hatası", + "fix_timeout": "LLM düzeltme isteği zaman aşımına uğradı", + "fix_failed": "LLM düzeltme başarısız oldu", + "fix_attempts": "{{attempts}} denemeden sonra sözdizimi düzeltilemedi. Son hata: {{error}}", + "no_fix_provided": "LLM düzeltme sağlayamadı", + "fix_request_failed": "Düzeltme isteği başarısız oldu" + }, "buttons": { "zoom": "Yakınlaştır", "zoomIn": "Büyüt", diff --git a/webview-ui/src/i18n/locales/tr/prompts.json b/webview-ui/src/i18n/locales/tr/prompts.json index d091456e4372..a8d23cfccee6 100644 --- a/webview-ui/src/i18n/locales/tr/prompts.json +++ b/webview-ui/src/i18n/locales/tr/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "Modlar", "createNewMode": "Yeni mod oluştur", + "importMode": "Modu içe aktar", + "noMatchFound": "Mod bulunamadı", "editModesConfig": "Mod yapılandırmasını düzenle", "editGlobalModes": "Global modları düzenle", "editProjectModes": "Proje modlarını düzenle (.roomodes)", @@ -50,11 +52,30 @@ "description": "{{modeName}} modu için özel davranış yönergeleri ekleyin.", "loadFromFile": "{{mode}} moduna özgü özel talimatlar ayrıca çalışma alanınızdaki .roo/rules-{{slug}}/ klasöründen yüklenebilir (.roorules-{{slug}} ve .clinerules-{{slug}} kullanımdan kaldırılmıştır ve yakında çalışmayı durduracaklardır)." }, + "exportMode": { + "title": "Modu Dışa Aktar", + "description": "Bu modu tüm kurallar dahil olarak bir YAML dosyasına dışa aktararak başkalarıyla kolayca paylaşın.", + "export": "Modu Dışa Aktar", + "exporting": "Dışa aktarılıyor..." 
+ }, "globalCustomInstructions": { "title": "Tüm Modlar için Özel Talimatlar", "description": "Bu talimatlar tüm modlara uygulanır. Aşağıdaki moda özgü talimatlarla geliştirilebilen temel davranış seti sağlarlar. <0>Daha fazla bilgi edinin", "loadFromFile": "Talimatlar ayrıca çalışma alanınızdaki .roo/rules/ klasöründen de yüklenebilir (.roorules ve .clinerules kullanımdan kaldırılmıştır ve yakında çalışmayı durduracaklardır)." }, + "importMode": { + "selectLevel": "Bu modu nereye içe aktaracağınızı seçin:", + "import": "İçe Aktar", + "importing": "İçe aktarılıyor...", + "global": { + "label": "Genel Seviye", + "description": "Tüm projelerde kullanılabilir. Kurallar özel talimatlarla birleştirilecektir." + }, + "project": { + "label": "Proje Seviyesi", + "description": "Yalnızca bu çalışma alanında kullanılabilir. Dışa aktarılan mod kural dosyaları içeriyorsa, bunlar .roo/rules-{slug}/ klasöründe yeniden oluşturulur." + } + }, "systemPrompt": { "preview": "Sistem promptunu önizle", "copy": "Sistem promptunu panoya kopyala", @@ -164,5 +185,16 @@ }, "deleteMode": "Modu sil" }, - "allFiles": "tüm dosyalar" + "allFiles": "tüm dosyalar", + "advanced": { + "title": "Gelişmiş" + }, + "deleteMode": { + "title": "Modu Sil", + "message": "\"{{modeName}}\" modunu silmek istediğinizden emin misiniz?", + "rulesFolder": "Bu modun {{folderPath}} konumunda bir kurallar klasörü var ve bu da silinecek.", + "descriptionNoRules": "Bu özel modu silmek istediğinizden emin misiniz?", + "confirm": "Sil", + "cancel": "İptal" + } } diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 06bd626406d5..c0a3b8f501bb 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -44,27 +44,66 @@ "selectProviderPlaceholder": "Sağlayıcı seç", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "API Anahtarı:", + "geminiApiKeyPlaceholder": "Gemini API anahtarınızı girin", "openaiCompatibleProvider": "OpenAI Uyumlu", - "openaiCompatibleBaseUrlLabel": "Temel URL:", - "openaiCompatibleApiKeyLabel": "API Anahtarı:", - "openaiCompatibleModelDimensionLabel": "Gömme Boyutu:", - "openaiCompatibleModelDimensionPlaceholder": "örn., 1536", - "openaiCompatibleModelDimensionDescription": "Modeliniz için gömme boyutu (çıktı boyutu). Bu değer için sağlayıcınızın belgelerine bakın. Yaygın değerler: 384, 768, 1536, 3072.", - "openaiKeyLabel": "OpenAI Anahtarı:", + "openAiKeyLabel": "OpenAI API Anahtarı", + "openAiKeyPlaceholder": "OpenAI API anahtarınızı girin", + "openAiCompatibleBaseUrlLabel": "Temel URL", + "openAiCompatibleApiKeyLabel": "API Anahtarı", + "openAiCompatibleApiKeyPlaceholder": "API anahtarınızı girin", + "openAiCompatibleModelDimensionLabel": "Gömme Boyutu:", + "modelDimensionLabel": "Model Boyutu", + "openAiCompatibleModelDimensionPlaceholder": "örn., 1536", + "openAiCompatibleModelDimensionDescription": "Modeliniz için gömme boyutu (çıktı boyutu). Bu değer için sağlayıcınızın belgelerine bakın. 
Yaygın değerler: 384, 768, 1536, 3072.", "modelLabel": "Model", "selectModelPlaceholder": "Model seç", "ollamaUrlLabel": "Ollama URL:", "qdrantUrlLabel": "Qdrant URL", "qdrantKeyLabel": "Qdrant Anahtarı:", - "startIndexingButton": "İndekslemeyi Başlat", - "clearIndexDataButton": "İndeks Verilerini Temizle", + "startIndexingButton": "Başlat", + "clearIndexDataButton": "İndeks Temizle", "unsavedSettingsMessage": "İndeksleme işlemini başlatmadan önce lütfen ayarlarını kaydet.", "clearDataDialog": { "title": "Emin misiniz?", "description": "Bu işlem geri alınamaz. Bu, kod tabanı indeks verilerinizi kalıcı olarak silecektir.", "cancelButton": "İptal", "confirmButton": "Verileri Temizle" - } + }, + "description": "Projenizin anlamsal aramasını etkinleştirmek için kod tabanı indeksleme ayarlarını yapılandırın. <0>Daha fazla bilgi", + "statusTitle": "Durum", + "settingsTitle": "İndeksleme Ayarları", + "disabledMessage": "Kod tabanı indeksleme şu anda devre dışı. İndeksleme seçeneklerini yapılandırmak için genel ayarlarda etkinleştirin.", + "embedderProviderLabel": "Gömücü Sağlayıcı", + "modelPlaceholder": "Model adını girin", + "selectModel": "Bir model seçin", + "ollamaBaseUrlLabel": "Ollama Temel URL", + "qdrantApiKeyLabel": "Qdrant API Anahtarı", + "qdrantApiKeyPlaceholder": "Qdrant API anahtarınızı girin (isteğe bağlı)", + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "Ayarlar kaydedilemedi", + "modelDimensions": "({{dimension}} boyut)", + "saveSuccess": "Ayarlar başarıyla kaydedildi", + "saving": "Kaydediliyor...", + "saveSettings": "Kaydet", + "indexingStatuses": { + "standby": "Bekleme", + "indexing": "İndeksleniyor", + "indexed": "İndekslendi", + "error": "Hata" + }, + "close": "Kapat", + "advancedConfigLabel": "Gelişmiş Yapılandırma", + "searchMinScoreLabel": "Arama Skoru Eşiği", + "searchMinScoreDescription": "Arama sonuçları için gereken minimum benzerlik puanı (0.0-1.0). Düşük değerler daha fazla sonuç döndürür ancak daha az alakalı olabilir. Yüksek değerler daha az ancak daha alakalı sonuçlar döndürür.", + "searchMinScoreResetTooltip": "Varsayılan değere sıfırla (0.4)", + "searchMaxResultsLabel": "Maksimum Arama Sonuçları", + "searchMaxResultsDescription": "Kod tabanı dizinini sorgularken döndürülecek maksimum arama sonucu sayısı. Daha yüksek değerler daha fazla bağlam sağlar ancak daha az alakalı sonuçlar içerebilir.", + "resetToDefault": "Varsayılana sıfırla" }, "autoApprove": { "description": "Roo'nun onay gerektirmeden otomatik olarak işlemler gerçekleştirmesine izin verin. Bu ayarları yalnızca yapay zekaya tamamen güveniyorsanız ve ilgili güvenlik risklerini anlıyorsanız etkinleştirin.", @@ -110,6 +149,11 @@ "label": "Alt Görevler", "description": "Onay gerektirmeden alt görevlerin oluşturulmasına ve tamamlanmasına izin ver" }, + "followupQuestions": { + "label": "Soru", + "description": "Yapılandırılan zaman aşımından sonra takip sorularına ilişkin ilk önerilen yanıtı otomatik olarak seç", + "timeoutLabel": "İlk yanıtı otomatik olarak seçmeden önce beklenecek süre" + }, "execute": { "label": "Yürüt", "description": "Onay gerektirmeden otomatik olarak izin verilen terminal komutlarını yürüt", @@ -118,6 +162,10 @@ "commandPlaceholder": "Komut öneki girin (örn. 
'git ')", "addButton": "Ekle" }, + "updateTodoList": { + "label": "Todo", + "description": "Yapılacaklar listesi onay gerektirmeden otomatik olarak güncellenir" + }, "apiRequestLimit": { "title": "Maksimum İstek", "description": "Göreve devam etmek için onay istemeden önce bu sayıda API isteği otomatik olarak yap.", diff --git a/webview-ui/src/i18n/locales/vi/chat.json b/webview-ui/src/i18n/locales/vi/chat.json index 05f9ed8a3d42..eb7cdc2306bd 100644 --- a/webview-ui/src/i18n/locales/vi/chat.json +++ b/webview-ui/src/i18n/locales/vi/chat.json @@ -234,15 +234,17 @@ "tokens": "token" }, "followUpSuggest": { - "copyToInput": "Sao chép vào ô nhập liệu (hoặc Shift + nhấp chuột)" + "copyToInput": "Sao chép vào ô nhập liệu (hoặc Shift + nhấp chuột)", + "autoSelectCountdown": "Tự động chọn sau {{count}}s", + "countdownDisplay": "{{count}}s" }, "announcement": { "title": "🎉 Roo Code {{version}} Đã phát hành", - "description": "Roo Code {{version}} mang đến các tính năng mạnh mẽ và cải tiến mới dựa trên phản hồi của bạn.", + "description": "Roo Code {{version}} mang đến các tính năng mạnh mẽ mới và cải tiến đáng kể để nâng cao quy trình phát triển của bạn.", "whatsNew": "Có gì mới", - "feature1": "Ra mắt Roo Marketplace - Marketplace hiện đã hoạt động! Khám phá và cài đặt các chế độ và MCP dễ dàng hơn bao giờ hết.", - "feature2": "Các mô hình Gemini 2.5 - Đã thêm hỗ trợ cho các mô hình Gemini 2.5 Pro, Flash và Flash Lite mới.", - "feature3": "Hỗ trợ tệp Excel & Nhiều hơn nữa - Đã thêm hỗ trợ tệp Excel (.xlsx) và vô số sửa lỗi cùng cải tiến!", + "feature1": "Chia sẻ Nhiệm vụ 1-Click: Chia sẻ nhiệm vụ của bạn với đồng nghiệp và cộng đồng ngay lập tức chỉ với một cú nhấp chuột.", + "feature2": "Hỗ trợ Thư mục .roo Toàn cục: Tải quy tắc và cấu hình từ thư mục .roo toàn cục để có cài đặt nhất quán giữa các dự án.", + "feature3": "Cải thiện Chuyển đổi từ Architect sang Code: Chuyển đổi mượt mà từ lập kế hoạch trong chế độ Architect sang triển khai trong chế độ Code.", "hideButton": "Ẩn thông báo", "detailsDiscussLinks": "Nhận thêm chi tiết và thảo luận tại DiscordReddit 🚀" }, @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo muốn tìm kiếm trong cơ sở mã cho {{query}}:", "wantsToSearchWithPath": "Roo muốn tìm kiếm trong cơ sở mã cho {{query}} trong {{path}}:", - "didSearch": "Đã tìm thấy {{count}} kết quả cho {{query}}:" + "didSearch": "Đã tìm thấy {{count}} kết quả cho {{query}}:", + "resultTooltip": "Điểm tương tự: {{score}} (nhấp để mở tệp)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/vi/common.json b/webview-ui/src/i18n/locales/vi/common.json index 16952117ef5d..26e11654707e 100644 --- a/webview-ui/src/i18n/locales/vi/common.json +++ b/webview-ui/src/i18n/locales/vi/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "Đang tạo biểu đồ mermaid...", "render_error": "Không thể hiển thị biểu đồ", + "fixing_syntax": "Đang sửa cú pháp Mermaid...", + "fix_syntax_button": "Sửa cú pháp bằng AI", + "original_code": "Mã gốc:", + "errors": { + "unknown_syntax": "Lỗi cú pháp không xác định", + "fix_timeout": "Yêu cầu sửa LLM đã hết thời gian", + "fix_failed": "Sửa LLM thất bại", + "fix_attempts": "Không thể sửa cú pháp sau {{attempts}} lần thử. 
Lỗi cuối cùng: {{error}}", + "no_fix_provided": "LLM không cung cấp được bản sửa", + "fix_request_failed": "Yêu cầu sửa thất bại" + }, "buttons": { "zoom": "Thu phóng", "zoomIn": "Phóng to", diff --git a/webview-ui/src/i18n/locales/vi/prompts.json b/webview-ui/src/i18n/locales/vi/prompts.json index 7a0b311a020a..89eb107a5f83 100644 --- a/webview-ui/src/i18n/locales/vi/prompts.json +++ b/webview-ui/src/i18n/locales/vi/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "Chế độ", "createNewMode": "Tạo chế độ mới", + "importMode": "Nhập chế độ", + "noMatchFound": "Không tìm thấy chế độ nào", "editModesConfig": "Chỉnh sửa cấu hình chế độ", "editGlobalModes": "Chỉnh sửa chế độ toàn cục", "editProjectModes": "Chỉnh sửa chế độ dự án (.roomodes)", @@ -50,11 +52,30 @@ "description": "Thêm hướng dẫn hành vi dành riêng cho chế độ {{modeName}}.", "loadFromFile": "Hướng dẫn tùy chỉnh dành riêng cho chế độ {{mode}} cũng có thể được tải từ thư mục .roo/rules-{{slug}}/ trong không gian làm việc của bạn (.roorules-{{slug}} và .clinerules-{{slug}} đã lỗi thời và sẽ sớm ngừng hoạt động)." }, + "exportMode": { + "title": "Xuất chế độ", + "description": "Xuất chế độ này sang tệp YAML với tất cả các quy tắc được bao gồm để dễ dàng chia sẻ với người khác.", + "export": "Xuất chế độ", + "exporting": "Đang xuất..." + }, "globalCustomInstructions": { "title": "Hướng dẫn tùy chỉnh cho tất cả các chế độ", "description": "Những hướng dẫn này áp dụng cho tất cả các chế độ. Chúng cung cấp một bộ hành vi cơ bản có thể được nâng cao bởi hướng dẫn dành riêng cho chế độ bên dưới. <0>Tìm hiểu thêm", "loadFromFile": "Hướng dẫn cũng có thể được tải từ thư mục .roo/rules/ trong không gian làm việc của bạn (.roorules và .clinerules đã lỗi thời và sẽ sớm ngừng hoạt động)." }, + "importMode": { + "selectLevel": "Chọn nơi để nhập chế độ này:", + "import": "Nhập", + "importing": "Đang nhập...", + "global": { + "label": "Cấp độ toàn cục", + "description": "Có sẵn trong tất cả các dự án. Các quy tắc sẽ được hợp nhất vào hướng dẫn tùy chỉnh." + }, + "project": { + "label": "Cấp độ dự án", + "description": "Chỉ có sẵn trong không gian làm việc này. Nếu chế độ đã xuất có chứa tệp quy tắc, chúng sẽ được tạo lại trong thư mục .roo/rules-{slug}/." 
+ } + }, "systemPrompt": { "preview": "Xem trước lời nhắc hệ thống", "copy": "Sao chép lời nhắc hệ thống vào bộ nhớ tạm", @@ -164,5 +185,16 @@ }, "deleteMode": "Xóa chế độ" }, - "allFiles": "tất cả các tệp" + "allFiles": "tất cả các tệp", + "advanced": { + "title": "Nâng cao" + }, + "deleteMode": { + "title": "Xóa chế độ", + "message": "Bạn có chắc chắn muốn xóa chế độ \"{{modeName}}\" không?", + "rulesFolder": "Chế độ này có một thư mục quy tắc tại {{folderPath}} cũng sẽ bị xóa.", + "descriptionNoRules": "Bạn có chắc chắn muốn xóa chế độ tùy chỉnh này không?", + "confirm": "Xóa", + "cancel": "Hủy" + } } diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index c434276b3bdc..e553f5d0c2d7 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -44,27 +44,66 @@ "selectProviderPlaceholder": "Chọn nhà cung cấp", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "Khóa API:", + "geminiApiKeyPlaceholder": "Nhập khóa API Gemini của bạn", "openaiCompatibleProvider": "Tương thích OpenAI", - "openaiCompatibleBaseUrlLabel": "URL cơ sở:", - "openaiCompatibleApiKeyLabel": "Khóa API:", - "openaiCompatibleModelDimensionLabel": "Kích thước Embedding:", - "openaiCompatibleModelDimensionPlaceholder": "vd., 1536", - "openaiCompatibleModelDimensionDescription": "Kích thước embedding (kích thước đầu ra) cho mô hình của bạn. Kiểm tra tài liệu của nhà cung cấp để biết giá trị này. Giá trị phổ biến: 384, 768, 1536, 3072.", - "openaiKeyLabel": "Khóa OpenAI:", + "openAiKeyLabel": "Khóa API OpenAI", + "openAiKeyPlaceholder": "Nhập khóa API OpenAI của bạn", + "openAiCompatibleBaseUrlLabel": "URL cơ sở", + "openAiCompatibleApiKeyLabel": "Khóa API", + "openAiCompatibleApiKeyPlaceholder": "Nhập khóa API của bạn", + "openAiCompatibleModelDimensionLabel": "Kích thước Embedding:", + "modelDimensionLabel": "Kích thước mô hình", + "openAiCompatibleModelDimensionPlaceholder": "vd., 1536", + "openAiCompatibleModelDimensionDescription": "Kích thước embedding (kích thước đầu ra) cho mô hình của bạn. Kiểm tra tài liệu của nhà cung cấp để biết giá trị này. Giá trị phổ biến: 384, 768, 1536, 3072.", "modelLabel": "Mô hình", "selectModelPlaceholder": "Chọn mô hình", "ollamaUrlLabel": "URL Ollama:", "qdrantUrlLabel": "URL Qdrant", "qdrantKeyLabel": "Khóa Qdrant:", - "startIndexingButton": "Bắt đầu lập chỉ mục", - "clearIndexDataButton": "Xóa dữ liệu chỉ mục", + "startIndexingButton": "Bắt đầu", + "clearIndexDataButton": "Xóa chỉ mục", "unsavedSettingsMessage": "Vui lòng lưu cài đặt của bạn trước khi bắt đầu quá trình lập chỉ mục.", "clearDataDialog": { "title": "Bạn có chắc không?", "description": "Hành động này không thể hoàn tác. Điều này sẽ xóa vĩnh viễn dữ liệu chỉ mục mã nguồn của bạn.", "cancelButton": "Hủy", "confirmButton": "Xóa dữ liệu" - } + }, + "description": "Cấu hình cài đặt lập chỉ mục mã nguồn để kích hoạt tìm kiếm ngữ nghĩa cho dự án của bạn. <0>Tìm hiểu thêm", + "statusTitle": "Trạng thái", + "settingsTitle": "Cài đặt lập chỉ mục", + "disabledMessage": "Lập chỉ mục mã nguồn hiện đang bị tắt. 
Bật nó trong cài đặt chung để cấu hình các tùy chọn lập chỉ mục.", + "embedderProviderLabel": "Nhà cung cấp Embedder", + "modelPlaceholder": "Nhập tên mô hình", + "selectModel": "Chọn một mô hình", + "ollamaBaseUrlLabel": "URL cơ sở Ollama", + "qdrantApiKeyLabel": "Khóa API Qdrant", + "qdrantApiKeyPlaceholder": "Nhập khóa API Qdrant của bạn (tùy chọn)", + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "Không thể lưu cài đặt", + "modelDimensions": "({{dimension}} chiều)", + "saveSuccess": "Cài đặt đã được lưu thành công", + "saving": "Đang lưu...", + "saveSettings": "Lưu", + "indexingStatuses": { + "standby": "Chờ", + "indexing": "Đang lập chỉ mục", + "indexed": "Đã lập chỉ mục", + "error": "Lỗi" + }, + "close": "Đóng", + "advancedConfigLabel": "Cấu hình nâng cao", + "searchMinScoreLabel": "Ngưỡng điểm tìm kiếm", + "searchMinScoreDescription": "Điểm tương đồng tối thiểu (0.0-1.0) cần thiết cho kết quả tìm kiếm. Giá trị thấp hơn trả về nhiều kết quả hơn nhưng có thể kém liên quan hơn. Giá trị cao hơn trả về ít kết quả hơn nhưng có liên quan hơn.", + "searchMinScoreResetTooltip": "Đặt lại về giá trị mặc định (0.4)", + "searchMaxResultsLabel": "Số Kết Quả Tìm Kiếm Tối Đa", + "searchMaxResultsDescription": "Số lượng kết quả tìm kiếm tối đa được trả về khi truy vấn chỉ mục cơ sở mã. Giá trị cao hơn cung cấp nhiều ngữ cảnh hơn nhưng có thể bao gồm các kết quả ít liên quan hơn.", + "resetToDefault": "Đặt lại về mặc định" }, "autoApprove": { "description": "Cho phép Roo tự động thực hiện các hoạt động mà không cần phê duyệt. Chỉ bật những cài đặt này nếu bạn hoàn toàn tin tưởng AI và hiểu rõ các rủi ro bảo mật liên quan.", @@ -110,6 +149,11 @@ "label": "Công việc phụ", "description": "Cho phép tạo và hoàn thành các công việc phụ mà không cần phê duyệt" }, + "followupQuestions": { + "label": "Câu hỏi", + "description": "Tự động chọn câu trả lời đầu tiên được đề xuất cho các câu hỏi tiếp theo sau thời gian chờ đã cấu hình", + "timeoutLabel": "Thời gian chờ trước khi tự động chọn câu trả lời đầu tiên" + }, "execute": { "label": "Thực thi", "description": "Tự động thực thi các lệnh terminal được phép mà không cần phê duyệt", @@ -118,6 +162,10 @@ "commandPlaceholder": "Nhập tiền tố lệnh (ví dụ: 'git ')", "addButton": "Thêm" }, + "updateTodoList": { + "label": "Todo", + "description": "Danh sách việc cần làm được cập nhật tự động mà không cần phê duyệt" + }, "apiRequestLimit": { "title": "Số lượng yêu cầu tối đa", "description": "Tự động thực hiện số lượng API request này trước khi yêu cầu phê duyệt để tiếp tục với nhiệm vụ.", diff --git a/webview-ui/src/i18n/locales/zh-CN/chat.json b/webview-ui/src/i18n/locales/zh-CN/chat.json index d85c633699ca..93494e6f50fe 100644 --- a/webview-ui/src/i18n/locales/zh-CN/chat.json +++ b/webview-ui/src/i18n/locales/zh-CN/chat.json @@ -234,15 +234,17 @@ "tokens": "tokens" }, "followUpSuggest": { - "copyToInput": "复制到输入框(或按住Shift点击)" + "copyToInput": "复制到输入框(或按住Shift点击)", + "autoSelectCountdown": "{{count}}秒后自动选择", + "countdownDisplay": "{{count}}秒" }, "announcement": { "title": "🎉 Roo Code {{version}} 已发布", - "description": "Roo Code {{version}} 带来基于您反馈的重要新功能和改进。", + "description": "Roo Code {{version}} 带来强大的新功能和重大改进,提升您的开发工作流程。", "whatsNew": "新特性", - "feature1": "Roo 市场正式上线: 市场现已上线!比以往更轻松地发现和安装模式及 MCP。", - "feature2": "Gemini 2.5 模型: 新增对新版 Gemini 2.5 Pro、Flash 和 Flash Lite 模型的支持。", - "feature3": 
"Excel 文件支持及更多: 新增 Excel (.xlsx) 文件支持以及大量错误修复和改进!", + "feature1": "一键任务分享: 一键即可与同事和社区分享您的任务。", + "feature2": "全局 .roo 目录支持: 从全局 .roo 目录加载规则和配置,确保项目间设置一致。", + "feature3": "改进的架构师到代码转换: 从架构师模式的规划到代码模式的实现,实现无缝交接。", "hideButton": "隐藏公告", "detailsDiscussLinks": "在 DiscordReddit 获取更多详情并参与讨论 🚀" }, @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo 需要搜索代码库: {{query}}", "wantsToSearchWithPath": "Roo 需要在 {{path}} 中搜索: {{query}}", - "didSearch": "找到 {{count}} 个结果: {{query}}" + "didSearch": "找到 {{count}} 个结果: {{query}}", + "resultTooltip": "相似度评分: {{score}} (点击打开文件)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/zh-CN/common.json b/webview-ui/src/i18n/locales/zh-CN/common.json index 29f11c7f2fba..067947fa769d 100644 --- a/webview-ui/src/i18n/locales/zh-CN/common.json +++ b/webview-ui/src/i18n/locales/zh-CN/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "生成 Mermaid 图表中...", "render_error": "无法渲染图表", + "fixing_syntax": "修复 Mermaid 语法中...", + "fix_syntax_button": "使用 AI 修复语法", + "original_code": "原始代码:", + "errors": { + "unknown_syntax": "未知语法错误", + "fix_timeout": "AI 修复请求超时", + "fix_failed": "AI 修复失败", + "fix_attempts": "在 {{attempts}} 次尝试后修复语法失败。最后错误:{{error}}", + "no_fix_provided": "AI 未能提供修复", + "fix_request_failed": "修复请求失败" + }, "buttons": { "zoom": "缩放", "zoomIn": "放大", diff --git a/webview-ui/src/i18n/locales/zh-CN/prompts.json b/webview-ui/src/i18n/locales/zh-CN/prompts.json index 2abf922b14f5..6406fdf63988 100644 --- a/webview-ui/src/i18n/locales/zh-CN/prompts.json +++ b/webview-ui/src/i18n/locales/zh-CN/prompts.json @@ -4,6 +4,8 @@ "modes": { "title": "模式配置", "createNewMode": "新建模式", + "importMode": "导入模式", + "noMatchFound": "未找到任何模式", "editModesConfig": "模式设置", "editGlobalModes": "修改全局模式", "editProjectModes": "编辑项目模式 (.roomodes)", @@ -50,6 +52,25 @@ "description": "{{modeName}}模式的专属规则", "loadFromFile": "支持从.roo/rules-{{slug}}/目录读取配置(.roorules-{{slug}}和.clinerules-{{slug}}已弃用并将很快停止工作)。" }, + "exportMode": { + "title": "导出模式", + "description": "将此模式导出为包含所有规则的 YAML 文件,以便与他人轻松共享。", + "export": "导出模式", + "exporting": "正在导出..." 
+ }, + "importMode": { + "selectLevel": "选择导入模式的位置:", + "import": "导入", + "importing": "导入中...", + "global": { + "label": "全局", + "description": "适用于所有项目。如果导出的模式包含规则文件,则将在全局 .roo/rules-{slug}/ 文件夹中重新创建这些文件。" + }, + "project": { + "label": "项目级", + "description": "仅在此工作区可用。如果导出的模式包含规则文件,则将在 .roo/rules-{slug}/ 文件夹中重新创建这些文件。" + } + }, "globalCustomInstructions": { "title": "所有模式的自定义指令", "description": "这些指令适用于所有模式。它们提供了一套基础行为,可以通过下面的模式特定指令进行增强。<0>了解更多", @@ -164,5 +185,16 @@ }, "deleteMode": "删除模式" }, - "allFiles": "所有文件" + "allFiles": "所有文件", + "advanced": { + "title": "高级" + }, + "deleteMode": { + "title": "删除模式", + "message": "您确定要删除\"{{modeName}}\"模式吗?", + "rulesFolder": "此模式在 {{folderPath}} 有一个规则文件夹,该文件夹也将被删除。", + "descriptionNoRules": "您确定要删除此自定义模式吗?", + "confirm": "删除", + "cancel": "取消" + } } diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 80bcab26ebf9..d8d875c85fd7 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -38,33 +38,72 @@ }, "codeIndex": { "title": "代码库索引", + "description": "配置代码库索引设置以启用项目的语义搜索。<0>了解更多", + "statusTitle": "状态", "enableLabel": "启用代码库索引", "enableDescription": "<0>代码库索引是一个实验性功能,使用 AI 嵌入为您的项目创建语义搜索索引。这使 Roo Code 能够通过基于含义而非仅仅关键词来查找相关代码,从而更好地理解和导航大型代码库。", + "settingsTitle": "索引设置", + "disabledMessage": "代码库索引当前已禁用。在全局设置中启用它以配置索引选项。", "providerLabel": "嵌入提供商", + "embedderProviderLabel": "嵌入器提供商", "selectProviderPlaceholder": "选择提供商", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "API 密钥:", + "geminiApiKeyPlaceholder": "输入您的Gemini API密钥", "openaiCompatibleProvider": "OpenAI 兼容", - "openaiCompatibleBaseUrlLabel": "基础 URL:", - "openaiCompatibleApiKeyLabel": "API 密钥:", - "openaiCompatibleModelDimensionLabel": "嵌入维度:", - "openaiCompatibleModelDimensionPlaceholder": "例如,1536", - "openaiCompatibleModelDimensionDescription": "模型的嵌入维度(输出大小)。请查阅您的提供商文档获取此值。常见值:384、768、1536、3072。", - "openaiKeyLabel": "OpenAI 密钥:", + "openAiKeyLabel": "OpenAI API 密钥", + "openAiKeyPlaceholder": "输入你的 OpenAI API 密钥", + "openAiCompatibleBaseUrlLabel": "基础 URL", + "openAiCompatibleApiKeyLabel": "API 密钥", + "openAiCompatibleApiKeyPlaceholder": "输入你的 API 密钥", + "openAiCompatibleModelDimensionLabel": "嵌入维度:", + "modelDimensionLabel": "模型维度", + "openAiCompatibleModelDimensionPlaceholder": "例如,1536", + "openAiCompatibleModelDimensionDescription": "模型的嵌入维度(输出大小)。请查阅您的提供商文档获取此值。常见值:384、768、1536、3072。", "modelLabel": "模型", + "modelPlaceholder": "输入模型名称", + "selectModel": "选择模型", "selectModelPlaceholder": "选择模型", "ollamaUrlLabel": "Ollama URL:", + "ollamaBaseUrlLabel": "Ollama 基础 URL", "qdrantUrlLabel": "Qdrant URL", "qdrantKeyLabel": "Qdrant 密钥:", - "startIndexingButton": "开始索引", - "clearIndexDataButton": "清除索引数据", + "qdrantApiKeyLabel": "Qdrant API 密钥", + "qdrantApiKeyPlaceholder": "输入你的 Qdrant API 密钥(可选)", + "startIndexingButton": "开始", + "clearIndexDataButton": "清除索引", "unsavedSettingsMessage": "请先保存设置再开始索引过程。", "clearDataDialog": { "title": "确定要继续吗?", "description": "此操作无法撤消。这将永久删除您的代码库索引数据。", "cancelButton": "取消", "confirmButton": "清除数据" - } + }, + "ollamaUrlPlaceholder": "http://localhost:11434", + "openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "保存设置失败", + "modelDimensions": "({{dimension}} 维度)", + "saveSuccess": "设置保存成功", + "saving": "保存中...", + "saveSettings": "保存", + 
"indexingStatuses": { + "standby": "待机", + "indexing": "索引中", + "indexed": "已索引", + "error": "错误" + }, + "close": "关闭", + "advancedConfigLabel": "高级配置", + "searchMinScoreLabel": "搜索分数阈值", + "searchMinScoreDescription": "搜索结果所需的最低相似度分数(0.0-1.0)。较低的值返回更多结果,但可能不太相关。较高的值返回较少但更相关的结果。", + "searchMinScoreResetTooltip": "恢复默认值 (0.4)", + "searchMaxResultsLabel": "最大搜索结果数", + "searchMaxResultsDescription": "查询代码库索引时返回的最大搜索结果数。较高的值提供更多上下文,但可能包含相关性较低的结果。", + "resetToDefault": "恢复默认值" }, "autoApprove": { "description": "允许 Roo 自动执行操作而无需批准。只有在您完全信任 AI 并了解相关安全风险的情况下才启用这些设置。", @@ -110,6 +149,11 @@ "label": "子任务", "description": "允许创建和完成子任务而无需批准" }, + "followupQuestions": { + "label": "问题", + "description": "在配置的超时时间后自动选择后续问题的第一个建议答案", + "timeoutLabel": "自动选择第一个答案前的等待时间" + }, "execute": { "label": "执行", "description": "自动执行白名单中的命令而无需批准", @@ -118,6 +162,10 @@ "commandPlaceholder": "输入命令前缀(例如 'git ')", "addButton": "添加" }, + "updateTodoList": { + "label": "待办", + "description": "无需批准即可自动更新待办清单" + }, "apiRequestLimit": { "title": "最大请求数", "description": "在请求批准以继续执行任务之前,自动发出此数量的 API 请求。", diff --git a/webview-ui/src/i18n/locales/zh-TW/chat.json b/webview-ui/src/i18n/locales/zh-TW/chat.json index b3659f686934..e7a476cb375c 100644 --- a/webview-ui/src/i18n/locales/zh-TW/chat.json +++ b/webview-ui/src/i18n/locales/zh-TW/chat.json @@ -234,15 +234,17 @@ "tokens": "tokens" }, "followUpSuggest": { - "copyToInput": "複製到輸入框(或按住 Shift 並點選)" + "copyToInput": "複製到輸入框(或按住 Shift 並點選)", + "autoSelectCountdown": "{{count}}秒後自動選擇", + "countdownDisplay": "{{count}}秒" }, "announcement": { "title": "🎉 Roo Code {{version}} 已發布", - "description": "Roo Code {{version}} 帶來基於您意見回饋的重要新功能與改進。", + "description": "Roo Code {{version}} 帶來強大的新功能和重大改進,提升您的開發工作流程。", "whatsNew": "新功能", - "feature1": "Roo 市場正式上線: 市場現已上線!比以往更輕鬆地探索並安裝模式和 MCP。", - "feature2": "Gemini 2.5 模型: 新增對新版 Gemini 2.5 Pro、Flash 和 Flash Lite 模型的支援。", - "feature3": "Excel 檔案支援及更多: 新增 Excel (.xlsx) 檔案支援以及大量錯誤修復和改進!", + "feature1": "一鍵分享工作:只需一鍵即可立即與同事和社群分享您的工作。", + "feature2": "全域 .roo 目錄支援:從全域 .roo 目錄載入規則和設定,確保專案間設定一致。", + "feature3": "改進的 Architect 到 Code 轉換:從 Architect 模式的規劃到 Code 模式的實作,轉換更加順暢。", "hideButton": "隱藏公告", "detailsDiscussLinks": "在 DiscordReddit 取得更多詳細資訊並參與討論 🚀" }, @@ -294,7 +296,8 @@ "codebaseSearch": { "wantsToSearch": "Roo 想要搜尋程式碼庫:{{query}}", "wantsToSearchWithPath": "Roo 想要在 {{path}} 中搜尋:{{query}}", - "didSearch": "找到 {{count}} 個結果:{{query}}" + "didSearch": "找到 {{count}} 個結果:{{query}}", + "resultTooltip": "相似度評分:{{score}} (點擊開啟檔案)" }, "read-batch": { "approve": { diff --git a/webview-ui/src/i18n/locales/zh-TW/common.json b/webview-ui/src/i18n/locales/zh-TW/common.json index b8ec7f998eb9..7c1c2862b44e 100644 --- a/webview-ui/src/i18n/locales/zh-TW/common.json +++ b/webview-ui/src/i18n/locales/zh-TW/common.json @@ -17,6 +17,17 @@ "mermaid": { "loading": "產生 Mermaid 圖表中...", "render_error": "無法渲染圖表", + "fixing_syntax": "修復 Mermaid 語法中...", + "fix_syntax_button": "使用 AI 修復語法", + "original_code": "原始程式碼:", + "errors": { + "unknown_syntax": "未知的語法錯誤", + "fix_timeout": "LLM 修復請求超時", + "fix_failed": "LLM 修復失敗", + "fix_attempts": "嘗試 {{attempts}} 次後無法修復語法。最後錯誤:{{error}}", + "no_fix_provided": "LLM 未能提供修復方案", + "fix_request_failed": "修復請求失敗" + }, "buttons": { "zoom": "縮放", "zoomIn": "放大", diff --git a/webview-ui/src/i18n/locales/zh-TW/prompts.json b/webview-ui/src/i18n/locales/zh-TW/prompts.json index e853a5d91d57..143efd0dedf0 100644 --- a/webview-ui/src/i18n/locales/zh-TW/prompts.json +++ b/webview-ui/src/i18n/locales/zh-TW/prompts.json @@ -4,6 +4,8 @@ "modes": { 
"title": "模式", "createNewMode": "建立新模式", + "importMode": "匯入模式", + "noMatchFound": "找不到任何模式", "editModesConfig": "編輯模式設定", "editGlobalModes": "編輯全域模式", "editProjectModes": "編輯專案模式 (.roomodes)", @@ -50,6 +52,25 @@ "description": "為 {{modeName}} 模式新增專屬的行為指南。", "loadFromFile": "{{mode}} 模式的自訂指令也可以從工作區的 .roo/rules-{{slug}}/ 資料夾載入(.roorules-{{slug}} 和 .clinerules-{{slug}} 已棄用並將很快停止運作)。" }, + "exportMode": { + "title": "匯出模式", + "description": "將此模式匯出為包含所有規則的 YAML 檔案,以便與他人輕鬆分享。", + "export": "匯出模式", + "exporting": "正在匯出..." + }, + "importMode": { + "selectLevel": "選擇匯入模式的位置:", + "import": "匯入", + "importing": "匯入中...", + "global": { + "label": "全域", + "description": "適用於所有專案。規則將合併到自訂指令中。" + }, + "project": { + "label": "專案級", + "description": "僅在此工作區可用。如果匯出的模式包含規則檔案,則將在 .roo/rules-{slug}/ 資料夾中重新建立這些檔案。" + } + }, "globalCustomInstructions": { "title": "所有模式的自訂指令", "description": "這些指令適用於所有模式。它們提供了一組基本行為,可以透過下方的模式專屬自訂指令來強化。<0>了解更多", @@ -164,5 +185,16 @@ }, "deleteMode": "刪除模式" }, - "allFiles": "所有檔案" + "allFiles": "所有檔案", + "advanced": { + "title": "進階" + }, + "deleteMode": { + "title": "刪除模式", + "message": "您確定要刪除「{{modeName}}」模式嗎?", + "rulesFolder": "此模式在 {{folderPath}} 有一個規則資料夾,該資料夾也將被刪除。", + "descriptionNoRules": "您確定要刪除此自訂模式嗎?", + "confirm": "刪除", + "cancel": "取消" + } } diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 033230ebb49a..f08cb0dbba73 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -44,27 +44,66 @@ "selectProviderPlaceholder": "選擇提供者", "openaiProvider": "OpenAI", "ollamaProvider": "Ollama", + "geminiProvider": "Gemini", + "geminiApiKeyLabel": "API 金鑰:", + "geminiApiKeyPlaceholder": "輸入您的Gemini API金鑰", "openaiCompatibleProvider": "OpenAI 相容", - "openaiCompatibleBaseUrlLabel": "基礎 URL:", - "openaiCompatibleApiKeyLabel": "API 金鑰:", - "openaiCompatibleModelDimensionLabel": "嵌入維度:", - "openaiCompatibleModelDimensionPlaceholder": "例如,1536", - "openaiCompatibleModelDimensionDescription": "模型的嵌入維度(輸出大小)。請查閱您的提供商文件獲取此值。常見值:384、768、1536、3072。", - "openaiKeyLabel": "OpenAI 金鑰:", + "openAiKeyLabel": "OpenAI API 金鑰", + "openAiKeyPlaceholder": "輸入您的 OpenAI API 金鑰", + "openAiCompatibleBaseUrlLabel": "基礎 URL", + "openAiCompatibleApiKeyLabel": "API 金鑰", + "openAiCompatibleApiKeyPlaceholder": "輸入您的 API 金鑰", + "openAiCompatibleModelDimensionLabel": "嵌入維度:", + "modelDimensionLabel": "模型維度", + "openAiCompatibleModelDimensionPlaceholder": "例如,1536", + "openAiCompatibleModelDimensionDescription": "模型的嵌入維度(輸出大小)。請查閱您的提供商文件獲取此值。常見值:384、768、1536、3072。", "modelLabel": "模型", "selectModelPlaceholder": "選擇模型", "ollamaUrlLabel": "Ollama URL:", "qdrantUrlLabel": "Qdrant URL", "qdrantKeyLabel": "Qdrant 金鑰:", - "startIndexingButton": "開始索引", - "clearIndexDataButton": "清除索引資料", + "startIndexingButton": "開始", + "clearIndexDataButton": "清除索引", "unsavedSettingsMessage": "請先儲存設定再開始索引程序。", "clearDataDialog": { "title": "確定要繼續嗎?", "description": "此操作無法復原。這將永久刪除您的程式碼庫索引資料。", "cancelButton": "取消", "confirmButton": "清除資料" - } + }, + "description": "設定程式碼庫索引設定以啟用專案的語意搜尋。<0>了解更多", + "statusTitle": "狀態", + "settingsTitle": "索引設定", + "disabledMessage": "程式碼庫索引目前已停用。請在全域設定中啟用以設定索引選項。", + "embedderProviderLabel": "嵌入器提供者", + "modelPlaceholder": "輸入模型名稱", + "selectModel": "選擇模型", + "ollamaBaseUrlLabel": "Ollama 基礎 URL", + "qdrantApiKeyLabel": "Qdrant API 金鑰", + "qdrantApiKeyPlaceholder": "輸入您的 Qdrant API 金鑰(選用)", + "ollamaUrlPlaceholder": "http://localhost:11434", + 
"openAiCompatibleBaseUrlPlaceholder": "https://api.example.com", + "modelDimensionPlaceholder": "1536", + "qdrantUrlPlaceholder": "http://localhost:6333", + "saveError": "無法儲存設定", + "modelDimensions": "({{dimension}} 維度)", + "saveSuccess": "設定已成功儲存", + "saving": "儲存中...", + "saveSettings": "儲存", + "indexingStatuses": { + "standby": "待命", + "indexing": "索引中", + "indexed": "已索引", + "error": "錯誤" + }, + "close": "關閉", + "advancedConfigLabel": "進階設定", + "searchMinScoreLabel": "搜尋分數閾值", + "searchMinScoreDescription": "搜尋結果所需的最低相似度分數(0.0-1.0)。較低的值會傳回更多結果,但可能較不相關。較高的值會傳回較少但更相關的結果。", + "searchMinScoreResetTooltip": "重設為預設值 (0.4)", + "searchMaxResultsLabel": "最大搜尋結果數", + "searchMaxResultsDescription": "查詢程式碼庫索引時傳回的最大搜尋結果數。較高的值提供更多上下文,但可能包含相關性較低的結果。", + "resetToDefault": "重設為預設值" }, "autoApprove": { "description": "允許 Roo 無需核准即執行操作。僅在您完全信任 AI 並了解相關安全風險時啟用這些設定。", @@ -110,6 +149,11 @@ "label": "子工作", "description": "允許建立和完成子工作而無需核准" }, + "followupQuestions": { + "label": "問題", + "description": "在設定的逾時時間後自動選擇後續問題的第一個建議答案", + "timeoutLabel": "自動選擇第一個答案前的等待時間" + }, "execute": { "label": "執行", "description": "自動執行允許的終端機命令而無需核准", @@ -118,6 +162,10 @@ "commandPlaceholder": "輸入命令前綴(例如 'git ')", "addButton": "新增" }, + "updateTodoList": { + "label": "待辦", + "description": "自動更新待辦清單無需批准" + }, "apiRequestLimit": { "title": "最大請求數", "description": "在請求批准以繼續執行工作之前,自動發出此數量的 API 請求。", diff --git a/webview-ui/src/services/__tests__/mermaidSyntaxFixer.spec.ts b/webview-ui/src/services/__tests__/mermaidSyntaxFixer.spec.ts new file mode 100644 index 000000000000..1299f4e134d5 --- /dev/null +++ b/webview-ui/src/services/__tests__/mermaidSyntaxFixer.spec.ts @@ -0,0 +1,228 @@ +import { MermaidSyntaxFixer } from "../mermaidSyntaxFixer" +import { vi, beforeEach, afterEach } from "vitest" + +// Mock the mermaid library +vi.mock("mermaid", () => ({ + default: { + parse: vi.fn(), + }, +})) + +// Mock i18next +vi.mock("i18next", () => ({ + default: { + t: (key: string, options?: any) => { + // Return a simple translation based on the key + if (key === "common:mermaid.errors.unknown_syntax") return "Unknown syntax error" + if (key === "common:mermaid.errors.fix_timeout") return "Fix timeout" + if (key === "common:mermaid.errors.fix_request_failed") return "Fix request failed" + if (key === "common:mermaid.errors.fix_attempts") + return `Failed to fix syntax after ${options?.attempts} attempts: ${options?.error}` + if (key === "common:mermaid.errors.no_fix_provided") return "LLM failed to provide a fix" + return key + }, + }, +})) + +describe("MermaidSyntaxFixer", () => { + describe("applyDeterministicFixes", () => { + it("should replace --> with -->", () => { + const input = "A --> B" + const expected = "A --> B" + const result = MermaidSyntaxFixer.applyDeterministicFixes(input) + expect(result).toBe(expected) + }) + + it("should replace multiple instances of --> with -->", () => { + const input = "A --> B\nB --> C\nC --> D" + const expected = "A --> B\nB --> C\nC --> D" + const result = MermaidSyntaxFixer.applyDeterministicFixes(input) + expect(result).toBe(expected) + }) + + it("should handle complex mermaid diagrams with --> errors", () => { + const input = `graph TD + A[Start] --> B{Decision} + B --> C[Option 1] + B --> D[Option 2] + C --> E[End] + D --> E` + const expected = `graph TD + A[Start] --> B{Decision} + B --> C[Option 1] + B --> D[Option 2] + C --> E[End] + D --> E` + const result = MermaidSyntaxFixer.applyDeterministicFixes(input) + expect(result).toBe(expected) + }) + + it("should not modify code that does not 
contain -->", () => { + const input = "A --> B\nB --> C" + const result = MermaidSyntaxFixer.applyDeterministicFixes(input) + expect(result).toBe(input) + }) + + it("should handle empty string", () => { + const input = "" + const result = MermaidSyntaxFixer.applyDeterministicFixes(input) + expect(result).toBe("") + }) + + it("should handle string with only -->", () => { + const input = "-->" + const expected = "-->" + const result = MermaidSyntaxFixer.applyDeterministicFixes(input) + expect(result).toBe(expected) + }) + + it("should preserve other HTML entities that are not -->", () => { + const input = "A --> B & C < D" + const expected = "A --> B & C < D" + const result = MermaidSyntaxFixer.applyDeterministicFixes(input) + expect(result).toBe(expected) + }) + + it("should handle mixed content with --> in different contexts", () => { + const input = `flowchart LR + A[User Input] --> B[Process] + B --> C{Valid?} + C -->|Yes| D[Success] + C -->|No| E[Error]` + const expected = `flowchart LR + A[User Input] --> B[Process] + B --> C{Valid?} + C -->|Yes| D[Success] + C -->|No| E[Error]` + const result = MermaidSyntaxFixer.applyDeterministicFixes(input) + expect(result).toBe(expected) + }) + + it("should handle --> at the beginning and end of lines", () => { + const input = "--> start\nmiddle --> middle\nend -->" + const expected = "--> start\nmiddle --> middle\nend -->" + const result = MermaidSyntaxFixer.applyDeterministicFixes(input) + expect(result).toBe(expected) + }) + + it("should handle --> with surrounding whitespace", () => { + const input = "A --> B" + const expected = "A --> B" + const result = MermaidSyntaxFixer.applyDeterministicFixes(input) + expect(result).toBe(expected) + }) + + describe("autoFixSyntax", () => { + let validateSyntaxSpy: any + let requestLLMFixSpy: any + beforeEach(() => { + validateSyntaxSpy = vi.spyOn(MermaidSyntaxFixer, "validateSyntax") + requestLLMFixSpy = vi.spyOn(MermaidSyntaxFixer as any, "requestLLMFix") + }) + + afterEach(() => { + vi.restoreAllMocks() + }) + + it("should return success when deterministic fixes are sufficient", async () => { + // Mock successful validation after deterministic fixes + validateSyntaxSpy.mockResolvedValue({ isValid: true }) + + const result = await MermaidSyntaxFixer.autoFixSyntax("A --> B") + + expect(result.success).toBe(true) + expect(result.fixedCode).toBe("A --> B") + expect(result.attempts).toBe(0) + // requestLLMFix should NOT be called when validation passes after deterministic fixes + expect(requestLLMFixSpy).not.toHaveBeenCalled() + }) + + it("should return success and fixed code when LLM validation succeeds", async () => { + const applyDeterministicFixesSpy = vi.spyOn(MermaidSyntaxFixer, "applyDeterministicFixes") + applyDeterministicFixesSpy.mockReturnValueOnce("original code") // First call + applyDeterministicFixesSpy.mockReturnValueOnce("deterministically fixed code") // Second call after LLM fix + + validateSyntaxSpy.mockResolvedValueOnce({ isValid: false, error: "error" }) // First validation fails + validateSyntaxSpy.mockResolvedValueOnce({ isValid: true }) // Second validation succeeds + requestLLMFixSpy.mockResolvedValue({ fixedCode: "fixed code" }) + + const result = await MermaidSyntaxFixer.autoFixSyntax("original code") + + expect(result.success).toBe(true) + expect(result.fixedCode).toBe("deterministically fixed code") + expect(result.attempts).toBe(1) + expect(applyDeterministicFixesSpy).toHaveBeenCalledWith("fixed code") + }) + + it("should return the best attempt even when fix is not 
successful", async () => { + // Mock failed validation for initial and both LLM attempts + validateSyntaxSpy.mockResolvedValueOnce({ isValid: false, error: "initial error" }) + validateSyntaxSpy.mockResolvedValueOnce({ isValid: false, error: "error 1" }) + validateSyntaxSpy.mockResolvedValueOnce({ isValid: false, error: "error 2" }) + + // Mock LLM fix attempts + requestLLMFixSpy.mockResolvedValueOnce({ fixedCode: "first attempt" }) + requestLLMFixSpy.mockResolvedValueOnce({ fixedCode: "second attempt" }) + + // Mock applyDeterministicFixes + const applyDeterministicFixesSpy = vi.spyOn(MermaidSyntaxFixer, "applyDeterministicFixes") + applyDeterministicFixesSpy.mockReturnValueOnce("original code") // Initial deterministic fix + applyDeterministicFixesSpy.mockReturnValueOnce("deterministically fixed first attempt") + applyDeterministicFixesSpy.mockReturnValueOnce("deterministically fixed second attempt") + + const result = await MermaidSyntaxFixer.autoFixSyntax("original code") + + expect(result.success).toBe(false) + expect(result.fixedCode).toBe("deterministically fixed second attempt") // Should return the deterministically fixed last attempt + expect(result.attempts).toBe(2) + expect(result.error).toContain("Failed to fix syntax after 2 attempts") + + expect(applyDeterministicFixesSpy).toHaveBeenCalledTimes(3) // Initial + 2 LLM attempts + expect(applyDeterministicFixesSpy).toHaveBeenNthCalledWith(2, "first attempt") + expect(applyDeterministicFixesSpy).toHaveBeenNthCalledWith(3, "second attempt") + }) + + it("should return the best attempt when LLM request fails", async () => { + // Mock failed initial validation + validateSyntaxSpy.mockResolvedValueOnce({ isValid: false, error: "initial error" }) + + // Mock successful first attempt but failed second attempt + requestLLMFixSpy.mockResolvedValueOnce({ fixedCode: "first attempt" }) + requestLLMFixSpy.mockResolvedValueOnce({ requestError: "LLM request failed" }) + + // Mock failed validation for first attempt + validateSyntaxSpy.mockResolvedValueOnce({ isValid: false, error: "still invalid" }) + + // Mock applyDeterministicFixes + const applyDeterministicFixesSpy = vi.spyOn(MermaidSyntaxFixer, "applyDeterministicFixes") + applyDeterministicFixesSpy.mockReturnValueOnce("original code") // Initial + applyDeterministicFixesSpy.mockReturnValueOnce("deterministically fixed first attempt") + + const result = await MermaidSyntaxFixer.autoFixSyntax("original code") + + expect(result.success).toBe(false) + expect(result.fixedCode).toBe("deterministically fixed first attempt") // Should return the deterministically fixed best attempt + expect(result.error).toContain("LLM request failed") + expect(applyDeterministicFixesSpy).toHaveBeenNthCalledWith(2, "first attempt") + }) + + it("should return the original code when LLM fails to provide a fix", async () => { + // Mock failed initial validation + validateSyntaxSpy.mockResolvedValueOnce({ isValid: false, error: "error" }) + + // Mock LLM returning null (no fix provided) + requestLLMFixSpy.mockResolvedValueOnce({ fixedCode: "" }) + + // Mock applyDeterministicFixes to return the original code + const applyDeterministicFixesSpy = vi.spyOn(MermaidSyntaxFixer, "applyDeterministicFixes") + applyDeterministicFixesSpy.mockReturnValue("original code") + + const result = await MermaidSyntaxFixer.autoFixSyntax("original code") + + expect(result.success).toBe(false) + expect(result.fixedCode).toBe("original code") // Should return the original code after deterministic fixes + expect(result.error).toBe("LLM 
failed to provide a fix") + }) + }) + }) +}) diff --git a/webview-ui/src/services/mermaidSyntaxFixer.ts b/webview-ui/src/services/mermaidSyntaxFixer.ts new file mode 100644 index 000000000000..528d4fe2b836 --- /dev/null +++ b/webview-ui/src/services/mermaidSyntaxFixer.ts @@ -0,0 +1,156 @@ +import { vscode } from "@src/utils/vscode" +import i18next from "i18next" + +export interface MermaidFixResult { + success: boolean + fixedCode?: string + error?: string + attempts?: number +} + +export interface MermaidValidationResult { + isValid: boolean + error?: string +} + +/** + * Service for validating and fixing Mermaid syntax using LLM assistance + */ +export class MermaidSyntaxFixer { + private static readonly MAX_FIX_ATTEMPTS = 2 + private static readonly FIX_TIMEOUT = 30000 // 30 seconds + + /** + * Applies deterministic fixes for common LLM errors before validation + */ + static applyDeterministicFixes(code: string): string { + let cleanedCode = code // use variable to be able to add comments to each step + + // Fix HTML entity encoding: --> should be -->; + // surprisingly, this does most of the heavy lifting in fixing the Mermaid syntax + cleanedCode = cleanedCode.replace(/-->/g, "-->") + + // sometimes the llm wraps the respond in a code block, starting with ```mermaid and ending with ``` + // we remove those to get the actual code + cleanedCode = cleanedCode.replace(/^```mermaid/, "").replace(/```$/, "") + + return cleanedCode + } + + /** + * Validates Mermaid syntax using the mermaid library + */ + static async validateSyntax(code: string): Promise { + try { + const mermaid = (await import("mermaid")).default + await mermaid.parse(code) + return { isValid: true } + } catch (error) { + return { + isValid: false, + error: error instanceof Error ? 
error.message : i18next.t("common:mermaid.errors.unknown_syntax"), + } + } + } + + /** + * Requests the LLM to fix the Mermaid syntax via the extension + */ + private static requestLLMFix( + code: string, + error: string, + ): Promise<{ fixedCode: string } | { requestError: string }> { + return new Promise((resolve, _reject) => { + const requestId = `mermaid-fix-${Date.now()}-${Math.random().toString(36).substring(2, 11)}` + + const timeout = setTimeout(() => { + cleanup() + resolve({ requestError: i18next.t("common:mermaid.errors.fix_timeout") }) + }, this.FIX_TIMEOUT) + + const messageListener = (event: MessageEvent) => { + const message = event.data + if (message.type === "mermaidFixResponse" && message.requestId === requestId) { + cleanup() + + if (message.success) { + resolve({ fixedCode: message.fixedCode }) + } else { + resolve({ + requestError: message.error || i18next.t("common:mermaid.errors.fix_request_failed"), + }) + } + } + } + + const cleanup = () => { + clearTimeout(timeout) + window.removeEventListener("message", messageListener) + } + + window.addEventListener("message", messageListener) + + vscode.postMessage({ + type: "fixMermaidSyntax", + requestId, + text: code, + values: { error }, + }) + }) + } + + /** + * Attempts to fix Mermaid syntax with automatic retry and fallback + * Always returns the best attempt at fixing the code, even if not completely successful + */ + static async autoFixSyntax(code: string): Promise { + let currentCode = code + let llmAttempts = 0 + let finalError: string | undefined + + while (true) { + currentCode = this.applyDeterministicFixes(currentCode) + + // Validate the current code + const validation = await this.validateSyntax(currentCode) + if (validation.isValid) { + return { + success: true, + fixedCode: currentCode, + attempts: llmAttempts, + } + } + + const lastError = validation.error || i18next.t("common:mermaid.errors.unknown_syntax") + + // break in the middle so we start and finish the loop with a deterministic fix + if (llmAttempts >= this.MAX_FIX_ATTEMPTS) { + finalError = i18next.t("common:mermaid.errors.fix_attempts", { + attempts: this.MAX_FIX_ATTEMPTS, + error: lastError, + }) + break + } + + llmAttempts++ + const result = await this.requestLLMFix(currentCode, lastError) + + if ("requestError" in result) { + finalError = result.requestError + break + } else if (!result.fixedCode) { + finalError = i18next.t("common:mermaid.errors.no_fix_provided") + break + } else { + currentCode = result.fixedCode + } + } + + return { + success: false, + fixedCode: currentCode, + error: finalError, + attempts: llmAttempts, + } + } +} diff --git a/webview-ui/src/utils/__tests__/context-mentions.test.ts b/webview-ui/src/utils/__tests__/context-mentions.spec.ts similarity index 100% rename from webview-ui/src/utils/__tests__/context-mentions.test.ts rename to webview-ui/src/utils/__tests__/context-mentions.spec.ts diff --git a/webview-ui/src/utils/context-mentions.ts b/webview-ui/src/utils/context-mentions.ts index 3299bc4e08fe..889dca9dbea6 100644 --- a/webview-ui/src/utils/context-mentions.ts +++ b/webview-ui/src/utils/context-mentions.ts @@ -6,6 +6,14 @@ import { mentionRegex } from "@roo/context-mentions" import { escapeSpaces } from "./path-mentions" +/** + * Gets the description for a mode, prioritizing description > whenToUse > roleDefinition + * and taking only the first line + */ +function getModeDescription(mode: ModeConfig): string { + return (mode.description || mode.whenToUse || mode.roleDefinition).split("\n")[0] +} + export 
interface SearchResult { path: string type: "file" | "folder" @@ -137,13 +145,13 @@ export function getContextMenuOptions( type: ContextMenuOptionType.Mode, value: result.item.original.slug, label: result.item.original.name, - description: (result.item.original.whenToUse || result.item.original.roleDefinition).split("\n")[0], + description: getModeDescription(result.item.original), })) : modes.map((mode) => ({ type: ContextMenuOptionType.Mode, value: mode.slug, label: mode.name, - description: (mode.description || mode.whenToUse || mode.roleDefinition).split("\n")[0], + description: getModeDescription(mode), })) return matchingModes.length > 0 ? matchingModes : [{ type: ContextMenuOptionType.NoResults }]