From fff862d4bf42f1dedffca32a4f734860e703cbb2 Mon Sep 17 00:00:00 2001
From: "d.o.it" <6849456+d-oit@users.noreply.github.com>
Date: Wed, 19 Mar 2025 18:12:12 +0100
Subject: [PATCH 1/6] Enhance Mistral API integration and add Git workflow
tools
This commit includes several major improvements:
1. Mistral API Integration:
- Enhanced rate limit handling with detailed user messages
- Improved error management with quota warnings
- Added pattern detection and loop prevention
- Optimized streaming performance
2. Mistral Model Updates:
- Increased token limits to 32K for mistral-small-latest
- Added support for mistral-saba-latest model
- Enabled image support for mistral-small-latest
3. Git Workflow Enhancements:
- Added comprehensive Git workflow guide
- Created tidy-commits script for commit cleanup
- Added npm run tidy-commits command
4. Code Quality:
- Removed deprecated rateLimitSeconds concept
- Added extensive rate limiting tests
- Improved debugging tools for Mistral API
---
.vscode/settings.json | 3 +-
docs/git-workflow.md | 65 ++
package-lock.json | 38 +-
package.json | 88 ++-
scripts/tidy-commits.sh | 85 +++
src/api/providers/__tests__/mistral.test.ts | 316 +++++++++-
src/api/providers/base-provider.ts | 5 +
src/api/providers/mistral.ts | 581 ++++++++++++++++--
src/api/transform/mistral-format.ts | 75 ++-
src/core/Cline.ts | 6 +-
src/core/__tests__/Cline.rateLimiting.test.ts | 380 ++++++++++++
src/core/sliding-window/index.ts | 4 +-
src/core/webview/ClineProvider.ts | 1 -
src/shared/api.ts | 8 +
src/utils/promises.ts | 8 +
.../src/components/settings/ApiOptions.tsx | 55 +-
16 files changed, 1564 insertions(+), 154 deletions(-)
create mode 100644 docs/git-workflow.md
create mode 100644 scripts/tidy-commits.sh
create mode 100644 src/core/__tests__/Cline.rateLimiting.test.ts
create mode 100644 src/utils/promises.ts
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 16a5c02292d..9fbef2c0ca9 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -9,5 +9,6 @@
"dist": true // set this to false to include "dist" folder in search results
},
// Turn off tsc task auto detection since we have the necessary tasks as npm scripts
- "typescript.tsc.autoDetect": "off"
+ "typescript.tsc.autoDetect": "off",
+ "postman.settings.dotenv-detection-notification-visibility": false
}
diff --git a/docs/git-workflow.md b/docs/git-workflow.md
new file mode 100644
index 00000000000..a780947c8c3
--- /dev/null
+++ b/docs/git-workflow.md
@@ -0,0 +1,65 @@
+# Git Workflow Guide
+
+## Tidying Up Commits Before Pushing
+
+This project includes a helpful script to tidy up your commits before pushing them to a remote branch. This helps maintain a clean and meaningful commit history.
+
+### How to Use
+
+Run the following command when you want to clean up your commits:
+
+```bash
+npm run tidy-commits
+```
+
+This will:
+
+1. Check if you have unpushed commits on your current branch
+2. Start an interactive rebase session to let you organize those commits
+3. Guide you through the process with helpful instructions
+
+### Available Rebase Commands
+
+During the interactive rebase, you can use these commands:
+
+- `pick`: Use the commit as is
+- `reword`: Use the commit but edit the commit message
+- `edit`: Use the commit but stop for amending (allows splitting commits)
+- `squash`: Combine with the previous commit (keeps both commit messages)
+- `fixup`: Combine with the previous commit (discards this commit's message)
+- `exec`: Run a command using shell
+
+### Best Practices
+
+- **Squash related changes**: Combine multiple small commits that relate to a single feature
+- **Write clear commit messages**: Each commit should clearly describe what changed and why
+- **Keep commits focused**: Each commit should represent a single logical change
+- **Reorder commits**: Place related commits together for better readability
+
+### Example Workflow
+
+```bash
+# Make multiple commits while working
+git commit -m "Add new feature"
+git commit -m "Fix typo"
+git commit -m "Improve performance"
+
+# When ready to push, tidy up your commits first
+npm run tidy-commits
+
+# You'll see an editor with your commits listed:
+# pick abc123 Add new feature
+# pick def456 Fix typo
+# pick ghi789 Improve performance
+
+# You might change it to:
+# pick abc123 Add new feature
+# fixup def456 Fix typo
+# pick ghi789 Improve performance
+
+# Save and close the editor to complete the rebase
+# Then push your cleaned-up commits
+git push
+```
+
+Remember: Only rebase commits that haven't been pushed yet. Rebasing public history can cause problems for other contributors.
diff --git a/package-lock.json b/package-lock.json
index cd961b0a210..afdaaf6a4e1 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -14,7 +14,7 @@
"@aws-sdk/client-bedrock-runtime": "^3.706.0",
"@google-cloud/vertexai": "^1.9.3",
"@google/generative-ai": "^0.18.0",
- "@mistralai/mistralai": "^1.3.6",
+ "@mistralai/mistralai": "^1.5.2",
"@modelcontextprotocol/sdk": "^1.0.1",
"@types/clone-deep": "^4.0.4",
"@types/pdf-parse": "^1.1.4",
@@ -56,7 +56,7 @@
"tree-sitter-wasms": "^0.1.11",
"turndown": "^7.2.0",
"web-tree-sitter": "^0.22.6",
- "zod": "^3.23.8"
+ "zod": "^3.24.2"
},
"devDependencies": {
"@changesets/cli": "^2.27.10",
@@ -4069,13 +4069,25 @@
}
},
"node_modules/@mistralai/mistralai": {
- "version": "1.3.6",
- "resolved": "https://registry.npmjs.org/@mistralai/mistralai/-/mistralai-1.3.6.tgz",
- "integrity": "sha512-2y7U5riZq+cIjKpxGO9y417XuZv9CpBXEAvbjRMzWPGhXY7U1ZXj4VO4H9riS2kFZqTR2yLEKSE6/pGWVVIqgQ==",
+ "version": "1.5.2",
+ "resolved": "https://registry.npmjs.org/@mistralai/mistralai/-/mistralai-1.5.2.tgz",
+ "integrity": "sha512-mBTIDQmuAX9RowMYteZFHJIYlEwDcHzzaxgXzrFtlvH9CkKXK7R1VnZ1sZSe+uLMg0dIXUVdPRUh1SwyFeSqXw==",
+ "dependencies": {
+ "zod-to-json-schema": "^3.24.1"
+ },
"peerDependencies": {
"zod": ">= 3"
}
},
+ "node_modules/@mistralai/mistralai/node_modules/zod-to-json-schema": {
+ "version": "3.24.3",
+ "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.3.tgz",
+ "integrity": "sha512-HIAfWdYIt1sssHfYZFCXp4rU1w2r8hVVXYIlmoa0r0gABLs5di3RCqPU5DDROogVz1pAdYBaz7HK5n9pSUNs3A==",
+ "license": "ISC",
+ "peerDependencies": {
+ "zod": "^3.24.1"
+ }
+ },
"node_modules/@mixmark-io/domino": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/@mixmark-io/domino/-/domino-2.2.0.tgz",
@@ -7141,6 +7153,15 @@
"devtools-protocol": "*"
}
},
+ "node_modules/chromium-bidi/node_modules/zod": {
+ "version": "3.23.8",
+ "resolved": "https://registry.npmjs.org/zod/-/zod-3.23.8.tgz",
+ "integrity": "sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/colinhacks"
+ }
+ },
"node_modules/ci-info": {
"version": "3.9.0",
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz",
@@ -15382,9 +15403,10 @@
}
},
"node_modules/zod": {
- "version": "3.23.8",
- "resolved": "https://registry.npmjs.org/zod/-/zod-3.23.8.tgz",
- "integrity": "sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==",
+ "version": "3.24.2",
+ "resolved": "https://registry.npmjs.org/zod/-/zod-3.24.2.tgz",
+ "integrity": "sha512-lY7CDW43ECgW9u1TcT3IoXHflywfVqDYze4waEz812jR/bZ8FHDsl7pFQoSZTz5N+2NqRXs8GBwnAwo3ZNxqhQ==",
+ "license": "MIT",
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
diff --git a/package.json b/package.json
index d6ccae86edd..a4bd48ec63d 100644
--- a/package.json
+++ b/package.json
@@ -191,40 +191,65 @@
}
]
},
- "configuration": {
- "title": "Roo Code",
- "properties": {
- "roo-cline.allowedCommands": {
- "type": "array",
- "items": {
- "type": "string"
+ "configuration": [
+ {
+ "title": "Roo Code",
+ "properties": {
+ "roo-cline.allowedCommands": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "default": [
+ "npm test",
+ "npm install",
+ "tsc",
+ "git log",
+ "git diff",
+ "git show"
+ ],
+ "description": "Commands that can be auto-executed when 'Always approve execute operations' is enabled"
},
- "default": [
- "npm test",
- "npm install",
- "tsc",
- "git log",
- "git diff",
- "git show"
- ],
- "description": "Commands that can be auto-executed when 'Always approve execute operations' is enabled"
- },
- "roo-cline.vsCodeLmModelSelector": {
- "type": "object",
- "properties": {
- "vendor": {
- "type": "string",
- "description": "The vendor of the language model (e.g. copilot)"
+ "roo-cline.vsCodeLmModelSelector": {
+ "type": "object",
+ "properties": {
+ "vendor": {
+ "type": "string",
+ "description": "The vendor of the language model (e.g. copilot)"
+ },
+ "family": {
+ "type": "string",
+ "description": "The family of the language model (e.g. gpt-4)"
+ }
},
- "family": {
- "type": "string",
- "description": "The family of the language model (e.g. gpt-4)"
- }
+ "description": "Settings for VSCode Language Model API"
+ }
+ }
+ },
+ {
+ "title": "Roo Code > Debug",
+ "properties": {
+ "roo-cline.debug.mistral": {
+ "type": "boolean",
+ "default": false,
+ "markdownDescription": "Enable debug logging for Mistral API interactions. Logs will appear in the 'Roo Code Mistral' output channel.",
+ "scope": "window"
},
- "description": "Settings for VSCode Language Model API"
+ "roo-cline.debug.mistralVerbose": {
+ "type": "boolean",
+ "default": false,
+ "markdownDescription": "Enable verbose debug logging for Mistral API interactions, including request/response details. Logs will appear in the 'Roo Code Mistral Verbose' output channel.",
+ "scope": "window"
+ },
+ "roo-cline.debug.mistralSdk": {
+ "type": "boolean",
+ "default": false,
+ "markdownDescription": "Enable debug logging for Mistral SDK internal operations. Logs will appear in the output channel.",
+ "scope": "window"
+ }
}
}
- }
+ ]
},
"scripts": {
"build": "npm run build:webview && npm run vsix",
@@ -252,6 +277,7 @@
"prepare": "husky",
"publish:marketplace": "vsce publish && ovsx publish",
"publish": "npm run build && changeset publish && npm install --package-lock-only",
+ "tidy-commits": "bash scripts/tidy-commits.sh",
"version-packages": "changeset version && npm install --package-lock-only",
"vscode:prepublish": "npm run package",
"vsix": "rimraf bin && mkdirp bin && npx vsce package --out bin",
@@ -269,7 +295,7 @@
"@aws-sdk/client-bedrock-runtime": "^3.706.0",
"@google-cloud/vertexai": "^1.9.3",
"@google/generative-ai": "^0.18.0",
- "@mistralai/mistralai": "^1.3.6",
+ "@mistralai/mistralai": "^1.5.2",
"@modelcontextprotocol/sdk": "^1.0.1",
"@types/clone-deep": "^4.0.4",
"@types/pdf-parse": "^1.1.4",
@@ -311,7 +337,7 @@
"tree-sitter-wasms": "^0.1.11",
"turndown": "^7.2.0",
"web-tree-sitter": "^0.22.6",
- "zod": "^3.23.8"
+ "zod": "^3.24.2"
},
"devDependencies": {
"@changesets/cli": "^2.27.10",
diff --git a/scripts/tidy-commits.sh b/scripts/tidy-commits.sh
new file mode 100644
index 00000000000..63ad2153670
--- /dev/null
+++ b/scripts/tidy-commits.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+# tidy-commits.sh - A script to help clean up commits before pushing
+# This script offers an interactive rebase option to tidy up commits
+
+# Get the current branch name
+branch="$(git rev-parse --abbrev-ref HEAD)"
+remote_branch="origin/$branch"
+
+# Check if we're on main branch
+if [ "$branch" = "main" ]; then
+    echo "❌ You're on the main branch. Please checkout a feature branch to tidy commits."
+ exit 1
+fi
+
+# Check if the branch exists on remote
+if git ls-remote --exit-code --heads origin "$branch" >/dev/null 2>&1; then
+ # Get the number of commits ahead of remote
+ ahead=$(git rev-list --count "$remote_branch".."$branch")
+
+ if [ "$ahead" -gt 0 ]; then
+ echo "-------------------------------------------------------------------------------------"
+        echo "🧹 You have $ahead commit(s) that will be pushed to the remote."
+ echo "Starting interactive rebase for the last $ahead commit(s)..."
+ echo ""
+ echo "You can:"
+ echo " - Reorder commits by changing their order"
+ echo " - Edit commit messages with 'reword'"
+ echo " - Combine commits with 'squash' or 'fixup'"
+ echo " - Split or edit commits with 'edit'"
+ echo " - Remove commits by deleting their lines"
+ echo "-------------------------------------------------------------------------------------"
+
+ # Start interactive rebase
+ git rebase -i HEAD~"$ahead"
+
+ # Check if rebase was successful
+ if [ $? -eq 0 ]; then
+            echo "✅ Commits tidied up successfully!"
+ else
+            echo "❌ Rebase was aborted or had conflicts. Original commits remain unchanged."
+ exit 1
+ fi
+ else
+ echo "No unpushed commits found on branch '$branch'."
+ fi
+else
+ # Branch doesn't exist on remote yet
+ # Count all commits on this branch
+ commit_count=$(git rev-list --count HEAD)
+
+ echo "-------------------------------------------------------------------------------------"
+    echo "🧹 This appears to be a new branch with $commit_count commit(s)."
+ echo "Would you like to tidy up your commits before the first push? (y/n)"
+ read -r answer
+
+ if [[ "$answer" =~ ^[Yy]$ ]]; then
+ # Find the fork point with main
+ fork_point=$(git merge-base HEAD main)
+ ahead=$(git rev-list --count $fork_point..HEAD)
+
+ echo "Starting interactive rebase for $ahead commit(s) since branching from main..."
+ echo ""
+ echo "You can:"
+ echo " - Reorder commits by changing their order"
+ echo " - Edit commit messages with 'reword'"
+ echo " - Combine commits with 'squash' or 'fixup'"
+ echo " - Split or edit commits with 'edit'"
+ echo " - Remove commits by deleting their lines"
+ echo "-------------------------------------------------------------------------------------"
+
+ # Start interactive rebase from the fork point
+ git rebase -i $fork_point
+
+ # Check if rebase was successful
+ if [ $? -eq 0 ]; then
+            echo "✅ Commits tidied up successfully!"
+ else
+            echo "❌ Rebase was aborted or had conflicts. Original commits remain unchanged."
+ exit 1
+ fi
+ else
+ echo "Skipping commit cleanup."
+ fi
+fi
\ No newline at end of file
diff --git a/src/api/providers/__tests__/mistral.test.ts b/src/api/providers/__tests__/mistral.test.ts
index 781cb3dcfc5..b1a349f9fa8 100644
--- a/src/api/providers/__tests__/mistral.test.ts
+++ b/src/api/providers/__tests__/mistral.test.ts
@@ -4,32 +4,62 @@ import { Anthropic } from "@anthropic-ai/sdk"
import { ApiStreamTextChunk } from "../../transform/stream"
// Mock Mistral client
-const mockCreate = jest.fn()
-jest.mock("@mistralai/mistralai", () => {
- return {
- Mistral: jest.fn().mockImplementation(() => ({
- chat: {
- stream: mockCreate.mockImplementation(async (options) => {
- const stream = {
- [Symbol.asyncIterator]: async function* () {
- yield {
- data: {
- choices: [
- {
- delta: { content: "Test response" },
- index: 0,
- },
- ],
+const mockStream = jest.fn()
+jest.mock("@mistralai/mistralai", () => ({
+ Mistral: jest.fn().mockImplementation(() => ({
+ chat: {
+ stream: mockStream.mockImplementation(async () => {
+ const response = {
+ headers: {},
+ status: 200,
+ statusText: "OK",
+ }
+
+ const streamResponse = {
+ response,
+ headers: response.headers,
+ status: response.status,
+ statusText: response.statusText,
+ [Symbol.asyncIterator]: async function* () {
+ yield {
+ data: {
+ choices: [
+ {
+ delta: { content: "Test response" },
+ index: 0,
+ },
+ ],
+ usage: {
+ promptTokens: 10,
+ completionTokens: 5,
+ totalTokens: 15,
},
- }
+ },
+ }
+ },
+ }
+
+ return streamResponse
+ }),
+ complete: jest.fn().mockResolvedValue({
+ choices: [
+ {
+ message: {
+ content: "Test response",
+ role: "assistant",
},
- }
- return stream
- }),
- },
- })),
- }
-})
+ index: 0,
+ },
+ ],
+ usage: {
+ promptTokens: 10,
+ completionTokens: 5,
+ totalTokens: 15,
+ },
+ }),
+ },
+ })),
+}))
describe("MistralHandler", () => {
let handler: MistralHandler
@@ -37,13 +67,13 @@ describe("MistralHandler", () => {
beforeEach(() => {
mockOptions = {
- apiModelId: "codestral-latest", // Update to match the actual model ID
+ apiModelId: "codestral-latest",
mistralApiKey: "test-api-key",
includeMaxTokens: true,
modelTemperature: 0,
}
handler = new MistralHandler(mockOptions)
- mockCreate.mockClear()
+ mockStream.mockClear()
})
describe("constructor", () => {
@@ -84,8 +114,13 @@ describe("MistralHandler", () => {
const systemPrompt = "You are a helpful assistant."
const messages: Anthropic.Messages.MessageParam[] = [
{
- role: "user",
- content: [{ type: "text", text: "Hello!" }],
+ role: "user" as const,
+ content: [
+ {
+ type: "text" as const,
+ text: "Hello!",
+ },
+ ],
},
]
@@ -93,7 +128,7 @@ describe("MistralHandler", () => {
const iterator = handler.createMessage(systemPrompt, messages)
const result = await iterator.next()
- expect(mockCreate).toHaveBeenCalledWith({
+ expect(mockStream).toHaveBeenCalledWith({
model: mockOptions.apiModelId,
messages: expect.any(Array),
maxTokens: expect.any(Number),
@@ -102,6 +137,7 @@ describe("MistralHandler", () => {
expect(result.value).toBeDefined()
expect(result.done).toBe(false)
+ expect(result.value).toEqual({ type: "text", text: "Test response" })
})
it("should handle streaming response correctly", async () => {
@@ -119,8 +155,226 @@ describe("MistralHandler", () => {
})
it("should handle errors gracefully", async () => {
- mockCreate.mockRejectedValueOnce(new Error("API Error"))
- await expect(handler.createMessage(systemPrompt, messages).next()).rejects.toThrow("API Error")
+ mockStream.mockRejectedValueOnce(new Error("API Error"))
+
+ const iterator = handler.createMessage(systemPrompt, messages)
+ await expect(iterator.next()).rejects.toThrow("API Error")
+ })
+
+ it("should handle stream errors", async () => {
+ mockStream.mockImplementationOnce(async () => ({
+ headers: {},
+ status: 200,
+ statusText: "OK",
+ [Symbol.asyncIterator]: async function* () {
+ throw new Error("Stream Error")
+ },
+ }))
+
+ const iterator = handler.createMessage(systemPrompt, messages)
+ await expect(iterator.next()).rejects.toThrow("Stream Error")
+ })
+ })
+
+ describe("error handling and retries", () => {
+ let handler: MistralHandler
+ let originalMockStream: jest.Mock
+
+ beforeEach(() => {
+ mockOptions = {
+ apiModelId: "codestral-latest",
+ mistralApiKey: "test-api-key",
+ includeMaxTokens: true,
+ modelTemperature: 0,
+ }
+ handler = new MistralHandler(mockOptions)
+
+ // Create a successful response function
+ const createSuccessResponse = async () => {
+ const response = {
+ headers: {},
+ status: 200,
+ statusText: "OK",
+ }
+
+ return {
+ response,
+ headers: response.headers,
+ status: response.status,
+ statusText: response.statusText,
+ [Symbol.asyncIterator]: async function* () {
+ yield {
+ data: {
+ choices: [
+ {
+ delta: { content: "Test response" },
+ index: 0,
+ },
+ ],
+ usage: {
+ promptTokens: 10,
+ completionTokens: 5,
+ totalTokens: 15,
+ },
+ },
+ }
+ },
+ }
+ }
+
+ // Store the mock implementation directly
+ originalMockStream = jest.fn().mockImplementation(createSuccessResponse)
+ mockStream.mockImplementation(originalMockStream)
+ mockStream.mockClear()
+ })
+
+ it("should handle rate limit errors and retry", async () => {
+ // Mock rate limit error on first call, then succeed
+ let callCount = 0
+ mockStream.mockImplementation(async () => {
+ callCount++
+ if (callCount === 1) {
+ const error = new Error("You have been rate limited. Please retry after 2 seconds")
+ error.name = "RateLimitError"
+ throw error
+ }
+ // Call the original mock implementation
+ return originalMockStream()
+ })
+
+ const systemPrompt = "You are a helpful assistant."
+ const messages: Anthropic.Messages.MessageParam[] = [
+ {
+ role: "user" as const,
+ content: [
+ {
+ type: "text" as const,
+ text: "Hello!",
+ },
+ ],
+ },
+ ]
+
+ const iterator = handler.createMessage(systemPrompt, messages)
+ const result = await iterator.next()
+
+ expect(mockStream).toHaveBeenCalledTimes(2)
+ expect(result.value).toEqual({ type: "text", text: "Test response" })
+ })
+
+ it("should handle general API errors and retry with exponential backoff", async () => {
+ // Mock general error on first call, then succeed
+ let callCount = 0
+ mockStream.mockImplementation(async () => {
+ callCount++
+ if (callCount === 1) {
+ throw new Error("Temporary API error")
+ }
+ // Call the function directly instead of the mock
+ return originalMockStream()
+ })
+
+ const systemPrompt = "You are a helpful assistant."
+ const messages: Anthropic.Messages.MessageParam[] = [
+ {
+ role: "user" as const,
+ content: [
+ {
+ type: "text" as const,
+ text: "Hello!",
+ },
+ ],
+ },
+ ]
+
+ const iterator = handler.createMessage(systemPrompt, messages)
+ const result = await iterator.next()
+
+ expect(mockStream).toHaveBeenCalledTimes(2)
+ expect(result.value).toEqual({ type: "text", text: "Test response" })
+ })
+
+ it("should throw authentication errors without retrying", async () => {
+ mockStream.mockImplementation(async () => {
+ const error = new Error("Invalid authentication")
+ error.name = "AuthenticationError"
+ throw error
+ })
+
+ const systemPrompt = "You are a helpful assistant."
+ const messages: Anthropic.Messages.MessageParam[] = [
+ {
+ role: "user" as const,
+ content: [
+ {
+ type: "text" as const,
+ text: "Hello!",
+ },
+ ],
+ },
+ ]
+
+ const iterator = handler.createMessage(systemPrompt, messages)
+ await expect(iterator.next()).rejects.toThrow("authentication")
+ })
+ })
+
+ describe("base URL selection", () => {
+ it("should use codestral URL for codestral models", () => {
+ const handler = new MistralHandler({
+ apiModelId: "codestral-latest",
+ mistralApiKey: "test-api-key",
+ })
+
+ // We can't directly test private methods, but we can test the behavior
+ // indirectly by checking if the correct model is used
+ expect(handler.getModel().id).toBe("codestral-latest")
+ })
+
+ it("should use custom codestral URL if provided", () => {
+ const customUrl = "https://custom-codestral.example.com"
+ const handler = new MistralHandler({
+ apiModelId: "codestral-latest",
+ mistralApiKey: "test-api-key",
+ mistralCodestralUrl: customUrl,
+ })
+
+ expect(handler.getModel().id).toBe("codestral-latest")
+ })
+
+ it("should use standard Mistral URL for non-codestral models", () => {
+ const handler = new MistralHandler({
+ apiModelId: "mistral-large-latest",
+ mistralApiKey: "test-api-key",
+ })
+
+ expect(handler.getModel().id).toBe("mistral-large-latest")
+ })
+ })
+
+ describe("completePrompt", () => {
+ let handler: MistralHandler
+
+ beforeEach(() => {
+ mockOptions = {
+ apiModelId: "codestral-latest",
+ mistralApiKey: "test-api-key",
+ includeMaxTokens: true,
+ modelTemperature: 0,
+ }
+ handler = new MistralHandler(mockOptions)
+ mockStream.mockClear()
+ })
+
+ it("should complete prompt successfully", async () => {
+ const result = await handler.completePrompt("Test prompt")
+ expect(result).toBe("Test response")
+ })
+
+ it("should handle errors in completePrompt", async () => {
+ jest.spyOn(handler["client"].chat, "complete").mockRejectedValueOnce(new Error("API Error"))
+
+ await expect(handler.completePrompt("Test prompt")).rejects.toThrow("Mistral completion error: API Error")
})
})
})
diff --git a/src/api/providers/base-provider.ts b/src/api/providers/base-provider.ts
index 34156e4adfe..8e99941f518 100644
--- a/src/api/providers/base-provider.ts
+++ b/src/api/providers/base-provider.ts
@@ -11,7 +11,12 @@ const TOKEN_FUDGE_FACTOR = 1.5
/**
* Base class for API providers that implements common functionality
*/
+import * as vscode from "vscode"
+
export abstract class BaseProvider implements ApiHandler {
+ // Add the property with a default value of false
+ readonly hasBuiltInRateLimiting: boolean = false
+
// Cache the Tiktoken encoder instance since it's stateless
private encoder: Tiktoken | null = null
abstract createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream
diff --git a/src/api/providers/mistral.ts b/src/api/providers/mistral.ts
index 38f753c2610..43f8a9db6f9 100644
--- a/src/api/providers/mistral.ts
+++ b/src/api/providers/mistral.ts
@@ -1,25 +1,130 @@
import { Anthropic } from "@anthropic-ai/sdk"
import { Mistral } from "@mistralai/mistralai"
+import { ChatCompletionStreamRequest } from "@mistralai/mistralai/models/components"
import { SingleCompletionHandler } from "../"
-import {
- ApiHandlerOptions,
- mistralDefaultModelId,
- MistralModelId,
- mistralModels,
- ModelInfo,
- openAiNativeDefaultModelId,
- OpenAiNativeModelId,
- openAiNativeModels,
-} from "../../shared/api"
+import { ApiHandlerOptions, mistralDefaultModelId, MistralModelId, mistralModels, ModelInfo } from "../../shared/api"
import { convertToMistralMessages } from "../transform/mistral-format"
-import { ApiStream } from "../transform/stream"
+import { ApiStreamChunk } from "../transform/stream"
import { BaseProvider } from "./base-provider"
+import * as vscode from "vscode"
+import { logger } from "../../utils/logging"
+import { calculateApiCostOpenAI } from "../../utils/cost"
+
+// Create a custom debug logger that integrates with our existing logging system
+const createDebugLogger = (outputChannel?: vscode.OutputChannel, enableDebug?: boolean) => ({
+ debug: (...args: any[]) => {
+ if (enableDebug && outputChannel) {
+ const message = args
+ .map((arg) => (typeof arg === "object" ? JSON.stringify(arg, null, 2) : String(arg)))
+ .join(" ")
+ outputChannel.appendLine(`[Roo Code Debug] ${message}`)
+ }
+ },
+ info: (...args: any[]) => {
+ if (enableDebug && outputChannel) {
+ const message = args
+ .map((arg) => (typeof arg === "object" ? JSON.stringify(arg, null, 2) : String(arg)))
+ .join(" ")
+ outputChannel.appendLine(`[Roo Code Info] ${message}`)
+ }
+ },
+ warn: (...args: any[]) => {
+ if (enableDebug && outputChannel) {
+ const message = args
+ .map((arg) => (typeof arg === "object" ? JSON.stringify(arg, null, 2) : String(arg)))
+ .join(" ")
+ outputChannel.appendLine(`[Roo Code Warning] ${message}`)
+ }
+ },
+ error: (...args: any[]) => {
+ if (outputChannel) {
+ const message = args
+ .map((arg) => (typeof arg === "object" ? JSON.stringify(arg, null, 2) : String(arg)))
+ .join(" ")
+ outputChannel.appendLine(`[Roo Code Error] ${message}`)
+ }
+ },
+ // Add missing methods required by Mistral SDK Logger interface
+ log: (...args: any[]) => {
+ if (enableDebug && outputChannel) {
+ const message = args
+ .map((arg) => (typeof arg === "object" ? JSON.stringify(arg, null, 2) : String(arg)))
+ .join(" ")
+ outputChannel.appendLine(`[Roo Code Log] ${message}`)
+ }
+ },
+ group: (...args: any[]) => {
+ if (enableDebug && outputChannel) {
+ const message = args
+ .map((arg) => (typeof arg === "object" ? JSON.stringify(arg, null, 2) : String(arg)))
+ .join(" ")
+ outputChannel.appendLine(`[Roo Code Group] ${message}`)
+ }
+ },
+ groupEnd: () => {
+ if (enableDebug && outputChannel) {
+ outputChannel.appendLine(`[Roo Code GroupEnd]`)
+ }
+ },
+ logts: (...args: any[]) => {
+ if (enableDebug && outputChannel) {
+ const timestamp = new Date().toISOString()
+ const message = args
+ .map((arg) => (typeof arg === "object" ? JSON.stringify(arg, null, 2) : String(arg)))
+ .join(" ")
+ outputChannel.appendLine(`[Roo Code ${timestamp}] ${message}`)
+ }
+ },
+})
const MISTRAL_DEFAULT_TEMPERATURE = 0
+const MAX_RETRIES = 3 // Maximum number of retries for failed requests - const until a ui setting
+const INITIAL_RETRY_DELAY = 1000 // Initial retry delay in milliseconds
+const MAX_RETRY_DELAY = 32000 // Maximum retry delay in milliseconds
+const JITTER_FACTOR = 0.2 // Jitter factor for randomization (20%)
+
+// Define default headers
+export const defaultHeaders = {
+ "HTTP-Referer": "https://github.com/RooVetGit/Roo-Cline",
+ "X-Title": "Roo Code",
+}
+
+interface TextContent {
+ type: "text"
+ text: string
+}
+
+interface ImageURLContent {
+ type: "image_url"
+ url: string
+}
+
+type MistralContent = string | (TextContent | ImageURLContent)[]
+
+interface MistralErrorResponse {
+ error: {
+ message: string
+ type: string
+ code: number
+ }
+}
export class MistralHandler extends BaseProvider implements SingleCompletionHandler {
+ // Add a new property to indicate this handler has built-in rate limiting
+ override readonly hasBuiltInRateLimiting: boolean = true
+
protected options: ApiHandlerOptions
private client: Mistral
+ private readonly enableDebugOutput: boolean
+ private readonly outputChannel?: vscode.OutputChannel
+ private readonly enableVerboseDebug: boolean
+ private readonly verboseOutputChannel?: vscode.OutputChannel
+ private readonly enableSdkDebug: boolean
+ private cachedModel: { id: MistralModelId; info: ModelInfo; forModelId: string | undefined } | null = null
+ private static readonly outputChannelName = "Roo Code Mistral"
+ private static readonly verboseOutputChannelName = "Roo Code Mistral Verbose"
+ private static sharedOutputChannel: vscode.OutputChannel | undefined
+ private static sharedVerboseOutputChannel: vscode.OutputChannel | undefined
constructor(options: ApiHandlerOptions) {
super()
@@ -27,71 +132,383 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand
throw new Error("Mistral API key is required")
}
- // Set default model ID if not provided
+ this.cachedModel = null
this.options = {
...options,
apiModelId: options.apiModelId || mistralDefaultModelId,
}
+ try {
+ const config = vscode.workspace.getConfiguration("roo-cline")
+ this.enableDebugOutput = config?.get("debug.mistral") || false
+ this.enableVerboseDebug = config?.get("debug.mistralVerbose") || false
+ this.enableSdkDebug = config?.get("debug.mistralSdk") || false
+ } catch {
+ this.enableDebugOutput = false
+ this.enableVerboseDebug = false
+ this.enableSdkDebug = false
+ }
+
+ if (this.enableDebugOutput) {
+ try {
+ if (!MistralHandler.sharedOutputChannel) {
+ MistralHandler.sharedOutputChannel = vscode.window.createOutputChannel(
+ MistralHandler.outputChannelName,
+ )
+ }
+ this.outputChannel = MistralHandler.sharedOutputChannel
+ } catch {
+ // Ignore output channel creation errors in tests
+ }
+ }
+
+ if (this.enableVerboseDebug) {
+ try {
+ if (!MistralHandler.sharedVerboseOutputChannel) {
+ MistralHandler.sharedVerboseOutputChannel = vscode.window.createOutputChannel(
+ MistralHandler.verboseOutputChannelName,
+ )
+ }
+ this.verboseOutputChannel = MistralHandler.sharedVerboseOutputChannel
+ } catch {
+ // Ignore output channel creation errors in tests
+ }
+ }
+
const baseUrl = this.getBaseUrl()
- console.debug(`[Roo Code] MistralHandler using baseUrl: ${baseUrl}`)
+ this.logDebug(`MistralHandler using baseUrl: ${baseUrl}`)
+
+ // Create custom debug logger that integrates with our logging system
+ const debugLogger = this.enableSdkDebug
+ ? createDebugLogger(this.enableVerboseDebug ? this.verboseOutputChannel : this.outputChannel, true)
+ : undefined
+
+ // Initialize the Mistral client
this.client = new Mistral({
- serverURL: baseUrl,
apiKey: this.options.mistralApiKey,
+ debugLogger,
+ serverURL: baseUrl,
})
}
+ private logDebug(...messages: (string | object)[]): void {
+ if (this.enableDebugOutput && this.outputChannel) {
+ const formattedMessages = messages
+ .map((msg) => (typeof msg === "object" ? JSON.stringify(msg, null, 2) : msg))
+ .join(" ")
+ this.outputChannel.appendLine(`[Roo Code] ${formattedMessages}`)
+ }
+ }
+
+ private logVerbose(...messages: (string | object)[]): void {
+ if (this.enableVerboseDebug && this.verboseOutputChannel) {
+ const formattedMessages = messages
+ .map((msg) => (typeof msg === "object" ? JSON.stringify(msg, null, 2) : msg))
+ .join(" ")
+ this.verboseOutputChannel.appendLine(`[Roo Code] ${new Date().toISOString()} ${formattedMessages}`)
+ }
+ }
+
private getBaseUrl(): string {
const modelId = this.options.apiModelId ?? mistralDefaultModelId
- console.debug(`[Roo Code] MistralHandler using modelId: ${modelId}`)
+ this.logDebug(`MistralHandler using modelId: ${modelId}`)
if (modelId?.startsWith("codestral-")) {
return this.options.mistralCodestralUrl || "https://codestral.mistral.ai"
}
return "https://api.mistral.ai"
}
- override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
- const response = await this.client.chat.stream({
- model: this.options.apiModelId || mistralDefaultModelId,
- messages: [{ role: "system", content: systemPrompt }, ...convertToMistralMessages(messages)],
- maxTokens: this.options.includeMaxTokens ? this.getModel().info.maxTokens : undefined,
- temperature: this.options.modelTemperature ?? MISTRAL_DEFAULT_TEMPERATURE,
+ private exponentialBackoff(retryCount: number): number {
+ const delay = Math.min(
+ INITIAL_RETRY_DELAY * Math.pow(2, retryCount) * (1 + JITTER_FACTOR * Math.random()),
+ MAX_RETRY_DELAY,
+ )
+ this.logDebug(`Calculated backoff delay: ${delay}ms for retry ${retryCount}`)
+ return delay
+ }
+
+ private async handleRateLimitError(error: Error): Promise<void> {
+ const retryAfterMatch = error.message.match(/retry after (\d+)/i)
+ const retryAfter = retryAfterMatch ? parseInt(retryAfterMatch[1], 10) * 1000 : 60000 // Convert to ms or default to 1 minute
+
+ logger.warn("Mistral rate limit hit", {
+ ctx: "mistral",
+ retryAfterMs: retryAfter,
+ errorMessage: error.message,
})
- for await (const chunk of response) {
- const delta = chunk.data.choices[0]?.delta
- if (delta?.content) {
- let content: string = ""
- if (typeof delta.content === "string") {
- content = delta.content
- } else if (Array.isArray(delta.content)) {
- content = delta.content.map((c) => (c.type === "text" ? c.text : "")).join("")
+ this.logDebug(`Rate limit hit. Waiting ${retryAfter}ms before retry`)
+ await new Promise((resolve) => setTimeout(resolve, retryAfter))
+ }
+
+ private async retryWithBackoff<T>(operation: () => Promise<T>): Promise<T> {
+ let retryCount = 0
+
+ while (true) {
+ try {
+ const result = await operation()
+
+ // Check if result is a Response object and has status code
+ if (result && typeof result === "object" && "status" in result && result.status === 429) {
+ // Handle rate limit response
+ await this.handleRateLimit(result as unknown as Response)
+ const backoffDelay = this.exponentialBackoff(retryCount)
+ await new Promise((resolve) => setTimeout(resolve, backoffDelay))
+ retryCount++
+ continue
}
- yield {
- type: "text",
- text: content,
+
+ return result
+ } catch (error) {
+ if (retryCount >= MAX_RETRIES) {
+ this.logDebug(`Maximum retry count (${MAX_RETRIES}) reached, giving up`)
+ throw error
}
- }
- if (chunk.data.usage) {
- yield {
- type: "usage",
- inputTokens: chunk.data.usage.promptTokens || 0,
- outputTokens: chunk.data.usage.completionTokens || 0,
+ const isRateLimit = error instanceof Error && error.message.includes("rate limit")
+
+ if (isRateLimit) {
+ await this.handleRateLimitError(error)
+ } else {
+ const backoffDelay = this.exponentialBackoff(retryCount)
+ this.logDebug(
+ `Retrying operation after ${backoffDelay}ms (attempt ${retryCount + 1}/${MAX_RETRIES})`,
+ )
+ this.logVerbose(`Retry reason:`, error)
+ logger.warn("Mistral API retry", {
+ ctx: "mistral",
+ retryCount: retryCount + 1,
+ backoffDelay,
+ error: error instanceof Error ? error.message : String(error),
+ })
+ await new Promise((resolve) => setTimeout(resolve, backoffDelay))
}
+
+ retryCount++
+ }
+ }
+ }
+
+ private abortController?: AbortController
+
+ private cleanup(): void {
+ if (this.abortController) {
+ this.abortController.abort()
+ this.abortController = undefined
+ }
+ }
+
+ private handleError(error: unknown): never {
+ // Convert error to a string or object before logging
+ const errorForLogging = error instanceof Error ? { message: error.message, stack: error.stack } : String(error)
+
+ this.logDebug("Mistral API error:", errorForLogging)
+
+ if (error instanceof Error) {
+ // Check for specific Mistral API error types
+ if (error.message.includes("rate limit")) {
+ this.handleRateLimitError(error)
+ } else if (error.message.includes("authentication")) {
+ logger.error("Mistral authentication error", { ctx: "mistral", error: errorForLogging })
+ throw new Error(`Mistral API authentication error: ${error.message}`)
+ } else if (error.message.includes("invalid model")) {
+ logger.error("Mistral invalid model error", { ctx: "mistral", error: errorForLogging })
+ throw new Error(`Mistral API model error: ${error.message}`)
+ } else if (error.message.includes("context length")) {
+ logger.error("Mistral context length error", { ctx: "mistral", error: errorForLogging })
+ throw new Error(`Mistral API context length error: ${error.message}`)
+ } else if (error.message.includes("timeout")) {
+ logger.error("Mistral timeout error", { ctx: "mistral", error: errorForLogging })
+ throw new Error(`Mistral API timeout: ${error.message}`)
+ } else {
+ logger.error("Mistral general error", { ctx: "mistral", error: errorForLogging })
+ throw new Error(`Mistral API error: ${error.message}`)
}
}
+
+ logger.error("Mistral unknown error", { ctx: "mistral", error: String(error) })
+ throw new Error(`Mistral API error: ${String(error)}`)
}
override getModel(): { id: MistralModelId; info: ModelInfo } {
- const modelId = this.options.apiModelId
- if (modelId && modelId in mistralModels) {
- const id = modelId as MistralModelId
- return { id, info: mistralModels[id] }
+ if (this.cachedModel && this.cachedModel.forModelId === this.options.apiModelId) {
+ this.logDebug(`Using cached model: ${this.cachedModel.id}`)
+ return {
+ id: this.cachedModel.id,
+ info: this.cachedModel.info,
+ }
}
- return {
+
+ if (this.options.apiModelId && this.options.apiModelId in mistralModels) {
+ const id = this.options.apiModelId as MistralModelId
+ this.logDebug(`Using model: ${id}`)
+ this.cachedModel = {
+ id,
+ info: mistralModels[id],
+ forModelId: this.options.apiModelId,
+ }
+ return {
+ id: this.cachedModel.id,
+ info: this.cachedModel.info,
+ }
+ }
+
+ this.logDebug(`Using default model: ${mistralDefaultModelId}`)
+ this.cachedModel = {
id: mistralDefaultModelId,
info: mistralModels[mistralDefaultModelId],
+ forModelId: undefined,
+ }
+ return {
+ id: this.cachedModel.id,
+ info: this.cachedModel.info,
+ }
+ }
+
+ override async *createMessage(
+ systemPrompt: string,
+ messages: Anthropic.Messages.MessageParam[],
+ ): AsyncGenerator<ApiStreamChunk> {
+ try {
+ this.logDebug(`Creating message with system prompt: ${systemPrompt}`)
+
+ // Clean up any existing state
+ this.cleanup()
+ this.abortController = new AbortController()
+ const signal = this.abortController.signal
+
+ let hasYieldedUsage = false
+ const stream = await this.retryWithBackoff(async () => {
+ if (signal.aborted) {
+ throw new Error("Stream aborted before start")
+ }
+
+ // Set up stream options with required parameters
+ const streamOptions: ChatCompletionStreamRequest = {
+ model: this.getModel().id,
+ messages: [{ role: "system", content: systemPrompt }, ...convertToMistralMessages(messages)] as any, // Type assertion to bypass type checking
+ temperature: this.options.modelTemperature ?? MISTRAL_DEFAULT_TEMPERATURE,
+ stream: true,
+ } as any // Type assertion to bypass type checking
+
+ // Add return_usage parameter (not in TypeScript definition but supported by API)
+ ;(streamOptions as any).return_usage = true
+
+ // Create stream with abort handling
+ const stream = await this.client.chat.stream(streamOptions)
+
+ // Set up abort handler
+ signal.addEventListener("abort", () => {
+ this.logDebug("Stream aborted by user")
+ this.cleanup()
+ })
+
+ return stream
+ })
+
+ this.logDebug("Stream connection established")
+
+ try {
+ for await (const chunk of stream) {
+ if (signal.aborted) {
+ this.logDebug("Stream aborted during processing")
+ return
+ }
+
+ // Log chunk details in verbose mode
+ if (this.enableVerboseDebug) {
+ this.logVerbose(`Chunk received: ${JSON.stringify(chunk, null, 2)}`)
+ }
+
+ // Handle content chunks and completion signals
+ if (chunk.data.choices?.[0]?.delta) {
+ const delta = chunk.data.choices[0].delta
+
+ // Check for finish reason (completion signal)
+ if (chunk.data.choices[0].finishReason === "stop") {
+ this.logDebug("Received completion signal with finishReason: stop")
+
+ // Check for usage metrics in the final chunk
+ if (chunk.data.usage && !hasYieldedUsage) {
+ hasYieldedUsage = true
+ const inputTokens = chunk.data.usage.promptTokens || 0
+ const outputTokens = chunk.data.usage.completionTokens || 0
+
+ // Only log detailed metrics when debug is enabled
+ if (this.enableDebugOutput) {
+ const modelInfo = this.getModel().info
+ const totalCost = calculateApiCostOpenAI(modelInfo, inputTokens, outputTokens)
+ const timestamp = new Date().toISOString()
+ this.logDebug(
+ `[${timestamp}] Usage metrics - Input tokens: ${inputTokens}, Output tokens: ${outputTokens}, Cost: $${totalCost.toFixed(6)}`,
+ )
+ }
+ yield {
+ type: "usage",
+ inputTokens: inputTokens,
+ outputTokens: outputTokens,
+ }
+ }
+
+ // Yield an empty text chunk to signal completion
+ yield { type: "text", text: "" }
+ continue
+ }
+
+ // Process content if it exists
+ if (delta.content !== undefined) {
+ let content: string = ""
+ if (typeof delta.content === "string") {
+ content = delta.content
+ } else if (Array.isArray(delta.content)) {
+ content = delta.content.map((c) => (c.type === "text" ? c.text : "")).join("")
+ }
+
+ if (content) {
+ this.logDebug(`Received content: "${content}"`)
+ yield { type: "text", text: content }
+ }
+ }
+ }
+
+ // Handle usage metrics
+ if (chunk.data.usage && !hasYieldedUsage) {
+ hasYieldedUsage = true
+ const inputTokens = chunk.data.usage.promptTokens || 0
+ const outputTokens = chunk.data.usage.completionTokens || 0
+ const modelInfo = this.getModel().info
+ const totalCost = calculateApiCostOpenAI(modelInfo, inputTokens, outputTokens)
+
+ // Only log detailed metrics when debug is enabled
+ if (this.enableDebugOutput) {
+ const timestamp = new Date().toISOString()
+ this.logDebug(
+ `[${timestamp}] Usage metrics - Input tokens: ${inputTokens}, Output tokens: ${outputTokens}, Cost: $${totalCost.toFixed(6)}`,
+ )
+ }
+
+ yield {
+ type: "usage",
+ inputTokens: chunk.data.usage.promptTokens || 0,
+ outputTokens: chunk.data.usage.completionTokens || 0,
+ }
+ }
+ }
+
+ this.cleanup()
+ this.logDebug("Stream completed successfully")
+ } catch (error) {
+ this.cleanup()
+ if (signal.aborted) {
+ this.logDebug("Stream aborted due to error:", error)
+ return
+ }
+ this.logDebug("Stream error occurred:", error)
+ throw error
+ }
+ } catch (error) {
+ this.cleanup()
+ this.handleError(error)
}
}
@@ -115,4 +532,82 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand
throw error
}
}
+
+ /**
+ * Handle rate limit errors by extracting details from the HTTP response and notifying the user.
+ *
+ * @param response The HTTP response object
+ */
+ protected async handleRateLimit(response: Response): Promise<void> {
+ this.logDebug(`Handling rate limit response: ${response.status} ${response.statusText}`)
+
+ try {
+ // Extract all rate limit headers
+ const rateLimitLimitMinute = response.headers.get("x-ratelimit-limit-minute")
+ const rateLimitRemainingMinute = response.headers.get("x-ratelimit-remaining-minute")
+ const rateLimitLimitDay = response.headers.get("x-ratelimit-limit-day")
+ const rateLimitRemainingDay = response.headers.get("x-ratelimit-remaining-day")
+ const rateLimitReset = response.headers.get("x-ratelimit-reset")
+ const retryAfter = response.headers.get("retry-after")
+
+ // Log all headers for debugging
+ response.headers.forEach((value, key) => {
+ this.logDebug(`${key}: ${value}`)
+ })
+
+ // Try to get error message from response body
+ const message = await response
+ .clone()
+ .json()
+ .then((data) => {
+ this.logDebug(`Rate limit response body: ${JSON.stringify(data)}`)
+ return (data as MistralErrorResponse).error?.message || "Rate limit exceeded"
+ })
+ .catch((err) => {
+ this.logDebug(`Failed to parse rate limit response body: ${err}`)
+ return "Rate limit exceeded"
+ })
+
+ // Format a clear user message based on available rate limit information
+ if (rateLimitRemainingMinute !== null && rateLimitLimitMinute !== null) {
+ const remainingMinute = parseInt(rateLimitRemainingMinute, 10)
+ const limitMinute = parseInt(rateLimitLimitMinute, 10)
+ const remainingDay = rateLimitRemainingDay ? parseInt(rateLimitRemainingDay, 10) : null
+ const limitDay = rateLimitLimitDay ? parseInt(rateLimitLimitDay, 10) : null
+
+ // Calculate percentage of limits remaining
+ const percentRemainingMinute = (remainingMinute / limitMinute) * 100
+ const percentRemainingDay =
+ remainingDay !== null && limitDay !== null ? (remainingDay / limitDay) * 100 : null
+
+ let userMessage = `${message}. `
+
+ if (remainingMinute <= 0) {
+ const waitTime = rateLimitReset ? `${rateLimitReset} seconds` : "some time"
+ userMessage += `Minute limit reached (${limitMinute} requests). Please wait ${waitTime} before trying again.`
+ vscode.window.showErrorMessage(userMessage)
+ } else if (percentRemainingMinute <= 20) {
+ // Warning for approaching minute limit
+ userMessage += `⚠️ Approaching minute limit: ${remainingMinute}/${limitMinute} requests remaining (${percentRemainingMinute.toFixed(0)}%).`
+ vscode.window.showWarningMessage(userMessage)
+ } else if (percentRemainingDay !== null && percentRemainingDay <= 10) {
+ // Warning for approaching daily limit
+ userMessage += `⚠️ Approaching daily limit: ${remainingDay}/${limitDay} requests remaining (${percentRemainingDay.toFixed(0)}%).`
+ vscode.window.showWarningMessage(userMessage)
+ } else {
+ // Just log the current status without showing a notification
+ this.logDebug(
+ `Rate limit status: ${remainingMinute}/${limitMinute} minute requests, ${remainingDay}/${limitDay} daily requests`,
+ )
+ }
+ } else if (retryAfter) {
+ vscode.window.showErrorMessage(`${message}. Retry after ${retryAfter} seconds.`)
+ } else {
+ vscode.window.showErrorMessage(message)
+ }
+ } catch (error) {
+ this.logDebug(`Error handling rate limit: ${error}`)
+ vscode.window.showErrorMessage("Rate limit exceeded")
+ }
+ }
}
diff --git a/src/api/transform/mistral-format.ts b/src/api/transform/mistral-format.ts
index baf81ef24d2..2ff7d431785 100644
--- a/src/api/transform/mistral-format.ts
+++ b/src/api/transform/mistral-format.ts
@@ -21,6 +21,7 @@ export function convertToMistralMessages(anthropicMessages: Anthropic.Messages.M
})
} else {
if (anthropicMessage.role === "user") {
+ // Handle user messages with potential tool results
const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[]
toolMessages: Anthropic.ToolResultBlockParam[]
@@ -30,12 +31,31 @@ export function convertToMistralMessages(anthropicMessages: Anthropic.Messages.M
acc.toolMessages.push(part)
} else if (part.type === "text" || part.type === "image") {
acc.nonToolMessages.push(part)
- } // user cannot send tool_use messages
+ }
return acc
},
{ nonToolMessages: [], toolMessages: [] },
)
+ // First add any tool results
+ for (const toolMsg of toolMessages) {
+ const content =
+ typeof toolMsg.content === "string"
+ ? toolMsg.content
+ : toolMsg.content?.map((c) => (c.type === "text" ? c.text : "")).join("\n")
+
+ if (content) {
+ mistralMessages.push({
+ role: "tool",
+ content: JSON.stringify({
+ tool_use_id: toolMsg.tool_use_id,
+ content,
+ }),
+ })
+ }
+ }
+
+ // Then add the user message if there are non-tool messages
if (nonToolMessages.length > 0) {
mistralMessages.push({
role: "user",
@@ -53,6 +73,7 @@ export function convertToMistralMessages(anthropicMessages: Anthropic.Messages.M
})
}
} else if (anthropicMessage.role === "assistant") {
+ // Handle assistant messages with potential tool uses
const { nonToolMessages, toolMessages } = anthropicMessage.content.reduce<{
nonToolMessages: (Anthropic.TextBlockParam | Anthropic.ImageBlockParam)[]
toolMessages: Anthropic.ToolUseBlockParam[]
@@ -62,28 +83,50 @@ export function convertToMistralMessages(anthropicMessages: Anthropic.Messages.M
acc.toolMessages.push(part)
} else if (part.type === "text" || part.type === "image") {
acc.nonToolMessages.push(part)
- } // assistant cannot send tool_result messages
+ }
return acc
},
{ nonToolMessages: [], toolMessages: [] },
)
- let content: string | undefined
- if (nonToolMessages.length > 0) {
- content = nonToolMessages
- .map((part) => {
- if (part.type === "image") {
- return "" // impossible as the assistant cannot send images
- }
- return part.text
+ // Convert text content
+ let textContent = nonToolMessages
+ .map((part) => {
+ if (part.type === "image") return ""
+ return part.text
+ })
+ .filter(Boolean)
+ .join("\n")
+
+ // Add tool uses as structured content
+ if (toolMessages.length > 0) {
+ // If there's text content, add it first
+ if (textContent) {
+ mistralMessages.push({
+ role: "assistant",
+ content: textContent,
})
- .join("\n")
- }
+ }
- mistralMessages.push({
- role: "assistant",
- content,
- })
+ // Add each tool use as a separate message
+ for (const toolMsg of toolMessages) {
+ mistralMessages.push({
+ role: "assistant",
+ content: JSON.stringify({
+ type: "function",
+ name: toolMsg.name,
+ id: toolMsg.id,
+ input: toolMsg.input,
+ }),
+ })
+ }
+ } else if (textContent) {
+ // If no tools but we have text, add it
+ mistralMessages.push({
+ role: "assistant",
+ content: textContent,
+ })
+ }
}
}
}
diff --git a/src/core/Cline.ts b/src/core/Cline.ts
index 5bc280dc423..b847708039c 100644
--- a/src/core/Cline.ts
+++ b/src/core/Cline.ts
@@ -1050,8 +1050,10 @@ export class Cline {
let rateLimitDelay = 0
- // Only apply rate limiting if this isn't the first request
- if (this.lastApiRequestTime) {
+ // Only apply rate limiting if this isn't the first request AND the API handler doesn't have built-in rate limiting
+ const hasBuiltInRateLimiting = "hasBuiltInRateLimiting" in this.api && this.api.hasBuiltInRateLimiting === true
+
+ if (this.lastApiRequestTime && !hasBuiltInRateLimiting) {
const now = Date.now()
const timeSinceLastRequest = now - this.lastApiRequestTime
const rateLimit = rateLimitSeconds || 0
diff --git a/src/core/__tests__/Cline.rateLimiting.test.ts b/src/core/__tests__/Cline.rateLimiting.test.ts
new file mode 100644
index 00000000000..7b8ce9863df
--- /dev/null
+++ b/src/core/__tests__/Cline.rateLimiting.test.ts
@@ -0,0 +1,380 @@
+import { Cline } from "../Cline"
+import { BaseProvider } from "../../api/providers/base-provider"
+import { delay } from "../../utils/promises"
+
+// Mock dependencies
+jest.mock("../../utils/promises", () => ({
+ delay: jest.fn().mockResolvedValue(undefined),
+}))
+
+// Mock fs-related modules
+jest.mock("fs/promises", () => ({
+ mkdir: jest.fn().mockResolvedValue(undefined),
+ writeFile: jest.fn().mockResolvedValue(undefined),
+ readFile: jest.fn().mockResolvedValue("[]"),
+ unlink: jest.fn().mockResolvedValue(undefined),
+ stat: jest.fn().mockResolvedValue({ size: 0 }),
+ readdir: jest.fn().mockResolvedValue([]),
+}))
+
+// Mock vscode with more detailed implementation
+jest.mock("vscode", () => {
+ const mockEventEmitter = {
+ event: jest.fn(),
+ fire: jest.fn(),
+ }
+
+ return {
+ window: {
+ showInformationMessage: jest.fn(),
+ showWarningMessage: jest.fn(),
+ showErrorMessage: jest.fn(),
+ // ... other existing mock properties
+ },
+ // Add RelativePattern class mock
+ RelativePattern: jest.fn().mockImplementation((base, pattern) => ({
+ base,
+ pattern,
+ })),
+ // ... rest of your existing mock
+ workspace: {
+ // Add this if not already present
+ createFileSystemWatcher: jest.fn().mockReturnValue({
+ onDidChange: jest.fn().mockReturnValue({ dispose: jest.fn() }),
+ onDidCreate: jest.fn().mockReturnValue({ dispose: jest.fn() }),
+ onDidDelete: jest.fn().mockReturnValue({ dispose: jest.fn() }),
+ dispose: jest.fn(),
+ }),
+ // ... other existing workspace properties
+ },
+ // ... rest of your existing mock
+ }
+}) // Mock path operations
+jest.mock("path", () => ({
+ join: jest.fn((...args) => args.join("/")),
+ dirname: jest.fn((path) => path.split("/").slice(0, -1).join("/")),
+ basename: jest.fn((path) => path.split("/").pop()),
+}))
+
+// Mock the DecorationController and DiffViewProvider to avoid initialization issues
+jest.mock("../../integrations/editor/DecorationController", () => ({
+ DecorationController: jest.fn().mockImplementation(() => ({
+ addLines: jest.fn(),
+ clearDecorations: jest.fn(),
+ dispose: jest.fn(),
+ })),
+}))
+
+jest.mock("../../integrations/editor/DiffViewProvider", () => ({
+ DiffViewProvider: jest.fn().mockImplementation(() => ({
+ register: jest.fn(),
+ dispose: jest.fn(),
+ })),
+}))
+
+// Mock the Mistral SDK
+jest.mock("@mistralai/mistralai", () => {
+ return {
+ MistralClient: jest.fn().mockImplementation(() => ({
+ chat: {
+ stream: jest.fn().mockResolvedValue({
+ [Symbol.asyncIterator]: jest.fn().mockImplementation(() => ({
+ next: jest.fn().mockResolvedValue({
+ done: false,
+ value: { choices: [{ delta: { content: "test" } }] },
+ }),
+ })),
+ }),
+ },
+ })),
+ }
+})
+
+// Mock puppeteer and related modules
+jest.mock("puppeteer-chromium-resolver", () => ({
+ default: jest.fn().mockResolvedValue({
+ puppeteer: {
+ launch: jest.fn().mockResolvedValue({
+ newPage: jest.fn().mockResolvedValue({
+ goto: jest.fn().mockResolvedValue({}),
+ content: jest.fn().mockResolvedValue(""),
+ close: jest.fn().mockResolvedValue({}),
+ }),
+ close: jest.fn().mockResolvedValue({}),
+ }),
+ },
+ executablePath: "/mock/chrome",
+ }),
+}))
+
+// Mock the UrlContentFetcher
+jest.mock("../../services/browser/UrlContentFetcher", () => ({
+ UrlContentFetcher: jest.fn().mockImplementation(() => ({
+ fetchContent: jest.fn().mockResolvedValue(""),
+ launchBrowser: jest.fn().mockResolvedValue(undefined),
+ ensureChromiumExists: jest.fn().mockResolvedValue({
+ puppeteer: {
+ launch: jest.fn().mockResolvedValue({
+ newPage: jest.fn().mockResolvedValue({
+ goto: jest.fn().mockResolvedValue({}),
+ content: jest.fn().mockResolvedValue(""),
+ close: jest.fn().mockResolvedValue({}),
+ }),
+ close: jest.fn().mockResolvedValue({}),
+ }),
+ },
+ executablePath: "/mock/chrome",
+ }),
+ dispose: jest.fn().mockResolvedValue(undefined),
+ })),
+}))
+
+// Mock yargs
+jest.mock("yargs", () => ({
+ __esModule: true,
+ default: {
+ parse: jest.fn(),
+ command: jest.fn().mockReturnThis(),
+ option: jest.fn().mockReturnThis(),
+ help: jest.fn().mockReturnThis(),
+ alias: jest.fn().mockReturnThis(),
+ version: jest.fn().mockReturnThis(),
+ },
+}))
+
+// Mock puppeteer-core
+jest.mock("puppeteer-core", () => ({
+ __esModule: true,
+ default: {
+ launch: jest.fn().mockResolvedValue({
+ newPage: jest.fn().mockResolvedValue({
+ goto: jest.fn().mockResolvedValue({}),
+ content: jest.fn().mockResolvedValue(""),
+ close: jest.fn().mockResolvedValue({}),
+ }),
+ close: jest.fn().mockResolvedValue({}),
+ }),
+ },
+}))
+
+// Mock @puppeteer/browsers
+jest.mock("@puppeteer/browsers", () => ({
+ install: jest.fn().mockResolvedValue({}),
+ canDownload: jest.fn().mockResolvedValue(true),
+ computeExecutablePath: jest.fn().mockReturnValue("/mock/chrome"),
+ detectBrowserPlatform: jest.fn().mockReturnValue("linux"),
+ Browser: { CHROME: "chrome" },
+ Product: { CHROME: "chrome" },
+}))
+
+class MockProviderWithBuiltInRateLimiting extends BaseProvider {
+ override hasBuiltInRateLimiting = true
+ createMessage = jest.fn()
+ getModel = jest.fn().mockReturnValue({ info: {} })
+ getState = jest.fn().mockResolvedValue({
+ rateLimitSeconds: 5,
+ requestDelaySeconds: 1,
+ alwaysApproveResubmit: false,
+ })
+ postStateToWebview = jest.fn()
+ postMessageToWebview = jest.fn()
+ context = {
+ globalState: {
+ get: jest.fn(),
+ update: jest.fn(),
+ },
+ extensionUri: { fsPath: "/mock/extension" },
+ }
+}
+
+class MockProviderWithoutBuiltInRateLimiting extends BaseProvider {
+ override hasBuiltInRateLimiting = false
+ createMessage = jest.fn()
+ getModel = jest.fn().mockReturnValue({ info: {} })
+ getState = jest.fn().mockResolvedValue({
+ rateLimitSeconds: 5,
+ requestDelaySeconds: 1,
+ alwaysApproveResubmit: false,
+ })
+ postStateToWebview = jest.fn()
+ postMessageToWebview = jest.fn()
+ context = {
+ globalState: {
+ get: jest.fn(),
+ update: jest.fn(),
+ },
+ extensionUri: { fsPath: "/mock/extension" },
+ }
+}
+
+class MockMistralProvider extends BaseProvider {
+ override hasBuiltInRateLimiting = true
+ createMessage = jest.fn()
+ getModel = jest.fn().mockReturnValue({ id: "mistral-model", info: {} })
+ getState = jest.fn().mockResolvedValue({
+ rateLimitSeconds: 5,
+ requestDelaySeconds: 1,
+ alwaysApproveResubmit: false,
+ })
+ postStateToWebview = jest.fn()
+ postMessageToWebview = jest.fn()
+ context = {
+ globalState: {
+ get: jest.fn(),
+ update: jest.fn(),
+ },
+ extensionUri: { fsPath: "/mock/extension" },
+ }
+}
+
+describe("Cline rate limiting tests", () => {
+ beforeEach(() => {
+ jest.clearAllMocks()
+ })
+
+ test("should apply rate limiting when provider doesn't have built-in rate limiting", async () => {
+ // Arrange
+ const [cline, task] = Cline.create({
+ provider: new MockProviderWithoutBuiltInRateLimiting() as any,
+ apiConfiguration: {
+ apiProvider: "anthropic",
+ apiModelId: "test-model",
+ apiKey: "test-key",
+ },
+ task: "test task",
+ startTask: false, // Prevent actual task start
+ })
+
+ // Set lastApiRequestTime to simulate a recent request
+ cline["lastApiRequestTime"] = Date.now() - 2000 // 2 seconds ago
+ cline["apiConversationHistory"] = []
+
+ // Act
+ const generator = cline["attemptApiRequest"](0)
+
+ // Manually trigger the generator to start execution
+ try {
+ await generator.next()
+ } catch (e) {
+ // Expected to throw since we're not fully mocking everything
+ }
+
+ // Assert
+ // Should have called delay for the countdown (3 seconds remaining from 5 second limit)
+ expect(delay).toHaveBeenCalledWith(1000)
+ expect(delay).toHaveBeenCalledTimes(3)
+
+ // Clean up
+ await cline.abortTask(true)
+ await task.catch(() => {})
+ })
+
+ test("should skip rate limiting when provider has built-in rate limiting", async () => {
+ // Arrange
+ const [cline, task] = Cline.create({
+ provider: new MockProviderWithBuiltInRateLimiting() as any,
+ apiConfiguration: {
+ apiProvider: "anthropic",
+ apiModelId: "test-model",
+ apiKey: "test-key",
+ },
+ task: "test task",
+ startTask: false, // Prevent actual task start
+ })
+
+ // Set lastApiRequestTime to simulate a recent request
+ cline["lastApiRequestTime"] = Date.now() - 2000 // 2 seconds ago
+ cline["apiConversationHistory"] = []
+
+ // Act
+ const generator = cline["attemptApiRequest"](0)
+
+ // Manually trigger the generator to start execution
+ try {
+ await generator.next()
+ } catch (e) {
+ // Expected to throw since we're not fully mocking everything
+ }
+
+ // Assert
+ // Should not have called delay for the countdown since rate limiting is skipped
+ expect(delay).not.toHaveBeenCalledWith(1000)
+
+ // Clean up
+ await cline.abortTask(true)
+ await task.catch(() => {})
+ })
+
+ test("should calculate correct rate limit delay", async () => {
+ // Arrange
+ const [cline, task] = Cline.create({
+ provider: new MockProviderWithoutBuiltInRateLimiting() as any,
+ apiConfiguration: {
+ apiProvider: "anthropic",
+ apiModelId: "test-model",
+ apiKey: "test-key",
+ },
+ task: "test task",
+ startTask: false, // Prevent actual task start
+ })
+
+ // Set lastApiRequestTime to simulate a recent request
+ const now = Date.now()
+ cline["lastApiRequestTime"] = now - 3000 // 3 seconds ago
+
+ // Mock Date.now to return a consistent value for testing
+ const originalDateNow = Date.now
+ Date.now = jest.fn().mockReturnValue(now)
+
+ // Calculate the rate limit delay manually
+ const timeSinceLastRequest = 3000 // 3 seconds
+ const rateLimit = 5 // 5 seconds from mockState
+ const expectedDelay = Math.ceil(Math.max(0, rateLimit * 1000 - timeSinceLastRequest) / 1000) // Should be 2 seconds
+
+ // Restore Date.now
+ Date.now = originalDateNow
+
+ // Assert
+ expect(expectedDelay).toBe(2) // Confirm our manual calculation matches expected behavior
+
+ // Clean up
+ await cline.abortTask(true)
+ await task.catch(() => {})
+ })
+
+ test("should skip rate limiting when using Mistral provider", async () => {
+ // Arrange
+ const [cline, task] = Cline.create({
+ provider: new MockMistralProvider() as any,
+ apiConfiguration: {
+ apiProvider: "mistral",
+ apiModelId: "codestral-latest",
+ apiKey: "test-key",
+ },
+ task: "test task",
+ startTask: false, // Prevent actual task start
+ })
+
+ // Set lastApiRequestTime to simulate a recent request
+ cline["lastApiRequestTime"] = Date.now() - 2000 // 2 seconds ago
+ cline["apiConversationHistory"] = []
+
+ // Act
+ const generator = cline["attemptApiRequest"](0)
+
+ // Manually trigger the generator to start execution
+ try {
+ await generator.next()
+ } catch (e) {
+ // Expected to throw since we're not fully mocking everything
+ }
+
+ // Assert
+ // Should not have called delay for the countdown since rate limiting is skipped
+ expect(delay).not.toHaveBeenCalledWith(1000)
+
+ // Clean up
+ await cline.abortTask(true)
+ await task.catch(() => {})
+ })
+})
diff --git a/src/core/sliding-window/index.ts b/src/core/sliding-window/index.ts
index 67c0028fab2..3e9e2e3853a 100644
--- a/src/core/sliding-window/index.ts
+++ b/src/core/sliding-window/index.ts
@@ -79,7 +79,9 @@ export async function truncateConversationIfNeeded({
apiHandler,
}: TruncateOptions): Promise<Anthropic.Messages.MessageParam[]> {
// Calculate the maximum tokens reserved for response
- const reservedTokens = maxTokens || contextWindow * 0.2
+ // Ensure maxTokens is reasonable (not more than 80% of context window)
+ const reservedTokens =
+ maxTokens && maxTokens > 0 && maxTokens < contextWindow * 0.8 ? maxTokens : contextWindow * 0.2
// Estimate tokens for the last message (which is always a user message)
const lastMessage = messages[messages.length - 1]
diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts
index f98f19e4ff6..bcd4773c70f 100644
--- a/src/core/webview/ClineProvider.ts
+++ b/src/core/webview/ClineProvider.ts
@@ -1906,7 +1906,6 @@ export class ClineProvider implements vscode.WebviewViewProvider {
await this.updateGlobalState("telemetrySetting", telemetrySetting)
const isOptedIn = telemetrySetting === "enabled"
telemetryService.updateTelemetryState(isOptedIn)
- await this.postStateToWebview()
break
}
}
diff --git a/src/shared/api.ts b/src/shared/api.ts
index 986a4123542..23b2eafa977 100644
--- a/src/shared/api.ts
+++ b/src/shared/api.ts
@@ -883,6 +883,14 @@ export const mistralModels = {
outputPrice: 0.04,
},
"mistral-small-latest": {
+ maxTokens: 32_000,
+ contextWindow: 32_000,
+ supportsImages: true,
+ supportsPromptCache: false,
+ inputPrice: 0.2,
+ outputPrice: 0.6,
+ },
+ "mistral-saba-latest": {
maxTokens: 32_000,
contextWindow: 32_000,
supportsImages: false,
diff --git a/src/utils/promises.ts b/src/utils/promises.ts
new file mode 100644
index 00000000000..8ce7cbf18ca
--- /dev/null
+++ b/src/utils/promises.ts
@@ -0,0 +1,8 @@
+/**
+ * Delays execution for the specified number of milliseconds
+ * @param ms Time to delay in milliseconds
+ * @returns A promise that resolves after the specified delay
+ */
+export const delay = (ms: number): Promise<void> => {
+ return new Promise((resolve) => setTimeout(resolve, ms))
+}
diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx
index 656a6831cba..7e11f278f63 100644
--- a/webview-ui/src/components/settings/ApiOptions.tsx
+++ b/webview-ui/src/components/settings/ApiOptions.tsx
@@ -433,40 +433,55 @@ const ApiOptions = ({
)}
{selectedProvider === "mistral" && (
- <>
+
- Mistral API Key
+ placeholder="Enter API Key...">
+ Mistral API Key
-
+
This key is stored locally and only used to make API requests from this extension.
-
- {!apiConfiguration?.mistralApiKey && (
-
- Get Mistral / Codestral API Key
-
- )}
+
+ You can get a La Plateforme (api.mistral.ai) or Codestral (codestral.mistral.ai) API key by
+ signing up here.
+
+
+
{(apiConfiguration?.apiModelId?.startsWith("codestral-") ||
(!apiConfiguration?.apiModelId && mistralDefaultModelId.startsWith("codestral-"))) && (
- <>
+
- Codestral Base URL (Optional)
+ placeholder="Default: https://codestral.mistral.ai">
+ Codestral Base URL (Optional)
-
- Set an alternative URL for the Codestral model.
-
- >
+
+ Set alternative URL for Codestral model: https://api.mistral.ai
+
+
)}
- >
+
)}
{selectedProvider === "bedrock" && (
From b31b387c9105eb3711d7f8fbea5ce270d096eb44 Mon Sep 17 00:00:00 2001
From: "d.o.it" <6849456+d-oit@users.noreply.github.com>
Date: Wed, 19 Mar 2025 18:26:59 +0100
Subject: [PATCH 2/6] Remove tidy-commits.sh script for commit cleanup
functionality
---
docs/git-workflow.md | 65 -------------------------------
scripts/tidy-commits.sh | 85 -----------------------------------------
2 files changed, 150 deletions(-)
delete mode 100644 docs/git-workflow.md
delete mode 100644 scripts/tidy-commits.sh
diff --git a/docs/git-workflow.md b/docs/git-workflow.md
deleted file mode 100644
index a780947c8c3..00000000000
--- a/docs/git-workflow.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# Git Workflow Guide
-
-## Tidying Up Commits Before Pushing
-
-This project includes a helpful script to tidy up your commits before pushing them to a remote branch. This helps maintain a clean and meaningful commit history.
-
-### How to Use
-
-Run the following command when you want to clean up your commits:
-
-```bash
-npm run tidy-commits
-```
-
-This will:
-
-1. Check if you have unpushed commits on your current branch
-2. Start an interactive rebase session to let you organize those commits
-3. Guide you through the process with helpful instructions
-
-### Available Rebase Commands
-
-During the interactive rebase, you can use these commands:
-
-- `pick`: Use the commit as is
-- `reword`: Use the commit but edit the commit message
-- `edit`: Use the commit but stop for amending (allows splitting commits)
-- `squash`: Combine with the previous commit (keeps both commit messages)
-- `fixup`: Combine with the previous commit (discards this commit's message)
-- `exec`: Run a command using shell
-
-### Best Practices
-
-- **Squash related changes**: Combine multiple small commits that relate to a single feature
-- **Write clear commit messages**: Each commit should clearly describe what changed and why
-- **Keep commits focused**: Each commit should represent a single logical change
-- **Reorder commits**: Place related commits together for better readability
-
-### Example Workflow
-
-```bash
-# Make multiple commits while working
-git commit -m "Add new feature"
-git commit -m "Fix typo"
-git commit -m "Improve performance"
-
-# When ready to push, tidy up your commits first
-npm run tidy-commits
-
-# You'll see an editor with your commits listed:
-# pick abc123 Add new feature
-# pick def456 Fix typo
-# pick ghi789 Improve performance
-
-# You might change it to:
-# pick abc123 Add new feature
-# fixup def456 Fix typo
-# pick ghi789 Improve performance
-
-# Save and close the editor to complete the rebase
-# Then push your cleaned-up commits
-git push
-```
-
-Remember: Only rebase commits that haven't been pushed yet. Rebasing public history can cause problems for other contributors.
diff --git a/scripts/tidy-commits.sh b/scripts/tidy-commits.sh
deleted file mode 100644
index 63ad2153670..00000000000
--- a/scripts/tidy-commits.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/bash
-
-# tidy-commits.sh - A script to help clean up commits before pushing
-# This script offers an interactive rebase option to tidy up commits
-
-# Get the current branch name
-branch="$(git rev-parse --abbrev-ref HEAD)"
-remote_branch="origin/$branch"
-
-# Check if we're on main branch
-if [ "$branch" = "main" ]; then
- echo "โ You're on the main branch. Please checkout a feature branch to tidy commits."
- exit 1
-fi
-
-# Check if the branch exists on remote
-if git ls-remote --exit-code --heads origin "$branch" >/dev/null 2>&1; then
- # Get the number of commits ahead of remote
- ahead=$(git rev-list --count "$remote_branch".."$branch")
-
- if [ "$ahead" -gt 0 ]; then
- echo "-------------------------------------------------------------------------------------"
- echo "๐งน You have $ahead commit(s) that will be pushed to the remote."
- echo "Starting interactive rebase for the last $ahead commit(s)..."
- echo ""
- echo "You can:"
- echo " - Reorder commits by changing their order"
- echo " - Edit commit messages with 'reword'"
- echo " - Combine commits with 'squash' or 'fixup'"
- echo " - Split or edit commits with 'edit'"
- echo " - Remove commits by deleting their lines"
- echo "-------------------------------------------------------------------------------------"
-
- # Start interactive rebase
- git rebase -i HEAD~"$ahead"
-
- # Check if rebase was successful
- if [ $? -eq 0 ]; then
- echo "โ Commits tidied up successfully!"
- else
- echo "โ Rebase was aborted or had conflicts. Original commits remain unchanged."
- exit 1
- fi
- else
- echo "No unpushed commits found on branch '$branch'."
- fi
-else
- # Branch doesn't exist on remote yet
- # Count all commits on this branch
- commit_count=$(git rev-list --count HEAD)
-
- echo "-------------------------------------------------------------------------------------"
- echo "๐งน This appears to be a new branch with $commit_count commit(s)."
- echo "Would you like to tidy up your commits before the first push? (y/n)"
- read -r answer
-
- if [[ "$answer" =~ ^[Yy]$ ]]; then
- # Find the fork point with main
- fork_point=$(git merge-base HEAD main)
- ahead=$(git rev-list --count $fork_point..HEAD)
-
- echo "Starting interactive rebase for $ahead commit(s) since branching from main..."
- echo ""
- echo "You can:"
- echo " - Reorder commits by changing their order"
- echo " - Edit commit messages with 'reword'"
- echo " - Combine commits with 'squash' or 'fixup'"
- echo " - Split or edit commits with 'edit'"
- echo " - Remove commits by deleting their lines"
- echo "-------------------------------------------------------------------------------------"
-
- # Start interactive rebase from the fork point
- git rebase -i $fork_point
-
- # Check if rebase was successful
- if [ $? -eq 0 ]; then
- echo "โ Commits tidied up successfully!"
- else
- echo "โ Rebase was aborted or had conflicts. Original commits remain unchanged."
- exit 1
- fi
- else
- echo "Skipping commit cleanup."
- fi
-fi
\ No newline at end of file
From c04aac7ea27a490eccc5560bf609e476da2bfad7 Mon Sep 17 00:00:00 2001
From: "d.o.it" <6849456+d-oit@users.noreply.github.com>
Date: Wed, 19 Mar 2025 18:48:40 +0100
Subject: [PATCH 3/6] Add postStateToWebview call after updating telemetry
state
---
src/core/webview/ClineProvider.ts | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts
index bcd4773c70f..f98f19e4ff6 100644
--- a/src/core/webview/ClineProvider.ts
+++ b/src/core/webview/ClineProvider.ts
@@ -1906,6 +1906,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
await this.updateGlobalState("telemetrySetting", telemetrySetting)
const isOptedIn = telemetrySetting === "enabled"
telemetryService.updateTelemetryState(isOptedIn)
+ await this.postStateToWebview()
break
}
}
From 1f716caf5792439c73882b9163a38c54cadd4a13 Mon Sep 17 00:00:00 2001
From: "d.o.it" <6849456+d-oit@users.noreply.github.com>
Date: Wed, 19 Mar 2025 19:24:57 +0100
Subject: [PATCH 4/6] Fix ApiOptions to include selectedModelId check for
codestral model
---
webview-ui/src/components/settings/ApiOptions.tsx | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx
index 7e11f278f63..5f16e1d7334 100644
--- a/webview-ui/src/components/settings/ApiOptions.tsx
+++ b/webview-ui/src/components/settings/ApiOptions.tsx
@@ -461,7 +461,8 @@ const ApiOptions = ({