diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 15833e00c4..beae8d587d 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -21,6 +21,7 @@ import { qwenCodeModels, rooModels, sambaNovaModels, + tarsModels, vertexModels, vscodeLlmModels, xaiModels, @@ -66,6 +67,7 @@ export const providerNames = [ "featherless", "io-intelligence", "roo", + "tars", "vercel-ai-gateway", ] as const @@ -265,6 +267,11 @@ const requestySchema = baseProviderSettingsSchema.extend({ requestyModelId: z.string().optional(), }) +const tarsSchema = baseProviderSettingsSchema.extend({ + tarsApiKey: z.string().optional(), + tarsModelId: z.string().optional(), +}) + const humanRelaySchema = baseProviderSettingsSchema const fakeAiSchema = baseProviderSettingsSchema.extend({ @@ -359,6 +366,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv moonshotSchema.merge(z.object({ apiProvider: z.literal("moonshot") })), unboundSchema.merge(z.object({ apiProvider: z.literal("unbound") })), requestySchema.merge(z.object({ apiProvider: z.literal("requesty") })), + tarsSchema.merge(z.object({ apiProvider: z.literal("tars") })), humanRelaySchema.merge(z.object({ apiProvider: z.literal("human-relay") })), fakeAiSchema.merge(z.object({ apiProvider: z.literal("fake-ai") })), xaiSchema.merge(z.object({ apiProvider: z.literal("xai") })), @@ -399,6 +407,7 @@ export const providerSettingsSchema = z.object({ ...moonshotSchema.shape, ...unboundSchema.shape, ...requestySchema.shape, + ...tarsSchema.shape, ...humanRelaySchema.shape, ...fakeAiSchema.shape, ...xaiSchema.shape, @@ -414,6 +423,7 @@ export const providerSettingsSchema = z.object({ ...ioIntelligenceSchema.shape, ...qwenCodeSchema.shape, ...rooSchema.shape, + ...tarsSchema.shape, ...vercelAiGatewaySchema.shape, ...codebaseIndexProviderSchema.shape, }) @@ -543,6 +553,11 @@ export const MODELS_BY_PROVIDER: Record< label: "SambaNova", models: Object.keys(sambaNovaModels), }, + tars: { + id: "tars", + label: "Tars", + models: Object.keys(tarsModels), + }, vertex: { id: "vertex", label: "GCP Vertex AI", diff --git a/packages/types/src/providers/index.ts b/packages/types/src/providers/index.ts index 97fa10ca82..c651b3c2a1 100644 --- a/packages/types/src/providers/index.ts +++ b/packages/types/src/providers/index.ts @@ -23,6 +23,7 @@ export * from "./qwen-code.js" export * from "./requesty.js" export * from "./roo.js" export * from "./sambanova.js" +export * from "./tars.js" export * from "./unbound.js" export * from "./vertex.js" export * from "./vscode-llm.js" diff --git a/packages/types/src/providers/tars.ts b/packages/types/src/providers/tars.ts new file mode 100644 index 0000000000..bdfc171f39 --- /dev/null +++ b/packages/types/src/providers/tars.ts @@ -0,0 +1,21 @@ +import type { ModelInfo } from "../model.js" + +export const tarsDefaultModelId = "claude-3-5-haiku-20241022" + +export const tarsDefaultModelInfo: ModelInfo = { + maxTokens: 8192, + contextWindow: 200000, + supportsImages: true, + supportsComputerUse: false, + supportsPromptCache: true, + inputPrice: 0.8, + outputPrice: 4.0, + cacheWritesPrice: 1.0, + cacheReadsPrice: 0.08, + description: + "Claude 3.5 Haiku - Fast and cost-effective with excellent coding capabilities. 
Ideal for development tasks with 200k context window", +} + +export const tarsModels = { + [tarsDefaultModelId]: tarsDefaultModelInfo, +} as const satisfies Record diff --git a/src/api/index.ts b/src/api/index.ts index b50afbb023..37652e3442 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -22,6 +22,7 @@ import { VsCodeLmHandler, UnboundHandler, RequestyHandler, + TarsHandler, HumanRelayHandler, FakeAIHandler, XAIHandler, @@ -130,6 +131,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler { return new UnboundHandler(options) case "requesty": return new RequestyHandler(options) + case "tars": + return new TarsHandler(options) case "human-relay": return new HumanRelayHandler() case "fake-ai": diff --git a/src/api/providers/__tests__/tars.spec.ts b/src/api/providers/__tests__/tars.spec.ts new file mode 100644 index 0000000000..5f654bd042 --- /dev/null +++ b/src/api/providers/__tests__/tars.spec.ts @@ -0,0 +1,234 @@ +// npx vitest run api/providers/__tests__/tars.spec.ts + +import { Anthropic } from "@anthropic-ai/sdk" +import OpenAI from "openai" + +import { TarsHandler } from "../tars" +import { ApiHandlerOptions } from "../../../shared/api" +import { Package } from "../../../shared/package" + +const mockCreate = vitest.fn() + +vitest.mock("openai", () => { + return { + default: vitest.fn().mockImplementation(() => ({ + chat: { + completions: { + create: mockCreate, + }, + }, + })), + } +}) + +vitest.mock("delay", () => ({ default: vitest.fn(() => Promise.resolve()) })) + +vitest.mock("../fetchers/modelCache", () => ({ + getModels: vitest.fn().mockImplementation(() => { + return Promise.resolve({ + "gpt-4o": { + maxTokens: 16384, + contextWindow: 128000, + supportsImages: true, + supportsPromptCache: true, + supportsComputerUse: false, + inputPrice: 2.5, + outputPrice: 10.0, + cacheWritesPrice: 0, + cacheReadsPrice: 0, + description: + "OpenAI GPT-4o model routed through TARS for optimal performance and reliability. TARS automatically selects the best available provider.", + }, + }) + }), +})) + +describe("TarsHandler", () => { + const mockOptions: ApiHandlerOptions = { + tarsApiKey: "test-key", + tarsModelId: "gpt-4o", + } + + beforeEach(() => vitest.clearAllMocks()) + + it("initializes with correct options", () => { + const handler = new TarsHandler(mockOptions) + expect(handler).toBeInstanceOf(TarsHandler) + + expect(OpenAI).toHaveBeenCalledWith({ + baseURL: "https://api.router.tetrate.ai/v1", + apiKey: mockOptions.tarsApiKey, + defaultHeaders: { + "HTTP-Referer": "https://github.com/RooVetGit/Roo-Cline", + "X-Title": "Roo Code", + "User-Agent": `RooCode/${Package.version}`, + }, + }) + }) + + describe("fetchModel", () => { + it("returns correct model info when options are provided", async () => { + const handler = new TarsHandler(mockOptions) + const result = await handler.fetchModel() + + expect(result).toMatchObject({ + id: mockOptions.tarsModelId, + info: { + maxTokens: 16384, + contextWindow: 128000, + supportsImages: true, + supportsPromptCache: true, + supportsComputerUse: false, + inputPrice: 2.5, + outputPrice: 10.0, + cacheWritesPrice: 0, + cacheReadsPrice: 0, + description: + "OpenAI GPT-4o model routed through TARS for optimal performance and reliability. 
TARS automatically selects the best available provider.", + }, + }) + }) + + it("returns default model info when options are not provided", async () => { + const handler = new TarsHandler({}) + const result = await handler.fetchModel() + + expect(result).toMatchObject({ + id: "claude-3-5-haiku-20241022", + info: { + maxTokens: 8192, + contextWindow: 200000, + supportsImages: true, + supportsPromptCache: true, + supportsComputerUse: false, + inputPrice: 0.8, + outputPrice: 4.0, + cacheWritesPrice: 1.0, + cacheReadsPrice: 0.08, + description: + "Claude 3.5 Haiku - Fast and cost-effective with excellent coding capabilities. Ideal for development tasks with 200k context window", + }, + }) + }) + }) + + describe("createMessage", () => { + it("generates correct stream chunks", async () => { + const handler = new TarsHandler(mockOptions) + + const mockStream = { + async *[Symbol.asyncIterator]() { + yield { + id: mockOptions.tarsModelId, + choices: [{ delta: { content: "test response" } }], + } + yield { + id: "test-id", + choices: [{ delta: { reasoning_content: "test reasoning" } }], + } + yield { + id: "test-id", + choices: [{ delta: {} }], + usage: { + prompt_tokens: 10, + completion_tokens: 20, + prompt_tokens_details: { + caching_tokens: 5, + cached_tokens: 2, + }, + }, + } + }, + } + + mockCreate.mockResolvedValue(mockStream) + + const systemPrompt = "test system prompt" + const messages: Anthropic.Messages.MessageParam[] = [{ role: "user" as const, content: "test message" }] + const metadata = { taskId: "test-task-id", mode: "test-mode" } + + const generator = handler.createMessage(systemPrompt, messages, metadata) + const chunks = [] + + for await (const chunk of generator) { + chunks.push(chunk) + } + + // Verify stream chunks + expect(chunks).toHaveLength(3) // text, reasoning, and usage chunks + expect(chunks[0]).toEqual({ type: "text", text: "test response" }) + expect(chunks[1]).toEqual({ type: "reasoning", text: "test reasoning" }) + expect(chunks[2]).toEqual({ + type: "usage", + inputTokens: 10, + outputTokens: 20, + cacheWriteTokens: 5, + cacheReadTokens: 2, + totalCost: expect.any(Number), + }) + + // Verify OpenAI client was called with correct parameters + expect(mockCreate).toHaveBeenCalledWith({ + max_tokens: 16384, + messages: [ + { + role: "system", + content: "test system prompt", + }, + { + role: "user", + content: "test message", + }, + ], + model: "gpt-4o", + stream: true, + stream_options: { include_usage: true }, + temperature: 0, + }) + }) + + it("handles API errors", async () => { + const handler = new TarsHandler(mockOptions) + const mockError = new Error("API Error") + mockCreate.mockRejectedValue(mockError) + + const generator = handler.createMessage("test", []) + await expect(generator.next()).rejects.toThrow("API Error") + }) + }) + + describe("completePrompt", () => { + it("returns correct response", async () => { + const handler = new TarsHandler(mockOptions) + const mockResponse = { choices: [{ message: { content: "test completion" } }] } + + mockCreate.mockResolvedValue(mockResponse) + + const result = await handler.completePrompt("test prompt") + + expect(result).toBe("test completion") + + expect(mockCreate).toHaveBeenCalledWith({ + model: mockOptions.tarsModelId, + max_tokens: 16384, + messages: [{ role: "system", content: "test prompt" }], + temperature: 0, + }) + }) + + it("handles API errors", async () => { + const handler = new TarsHandler(mockOptions) + const mockError = new Error("API Error") + mockCreate.mockRejectedValue(mockError) + + await 
expect(handler.completePrompt("test prompt")).rejects.toThrow("API Error") + }) + + it("handles unexpected errors", async () => { + const handler = new TarsHandler(mockOptions) + mockCreate.mockRejectedValue(new Error("Unexpected error")) + + await expect(handler.completePrompt("test prompt")).rejects.toThrow("Unexpected error") + }) + }) +}) diff --git a/src/api/providers/fetchers/modelCache.ts b/src/api/providers/fetchers/modelCache.ts index 0005e8205f..b81321b66b 100644 --- a/src/api/providers/fetchers/modelCache.ts +++ b/src/api/providers/fetchers/modelCache.ts @@ -12,6 +12,7 @@ import { fileExistsAtPath } from "../../../utils/fs" import { getOpenRouterModels } from "./openrouter" import { getVercelAiGatewayModels } from "./vercel-ai-gateway" import { getRequestyModels } from "./requesty" +import { getTarsModels } from "./tars" import { getGlamaModels } from "./glama" import { getUnboundModels } from "./unbound" import { getLiteLLMModels } from "./litellm" @@ -62,6 +63,10 @@ export const getModels = async (options: GetModelsOptions): Promise // Requesty models endpoint requires an API key for per-user custom policies models = await getRequestyModels(options.baseUrl, options.apiKey) break + case "tars": + // TARS models endpoint requires an API key + models = await getTarsModels(options.apiKey) + break case "glama": models = await getGlamaModels() break diff --git a/src/api/providers/fetchers/tars.ts b/src/api/providers/fetchers/tars.ts new file mode 100644 index 0000000000..a77cb5e5e5 --- /dev/null +++ b/src/api/providers/fetchers/tars.ts @@ -0,0 +1,54 @@ +import axios from "axios" + +import type { ModelInfo } from "@roo-code/types" + +import { parseApiPrice } from "../../../shared/cost" + +export async function getTarsModels(apiKey?: string): Promise> { + const models: Record = {} + + try { + const headers: Record = {} + + if (apiKey) { + headers["Authorization"] = `Bearer ${apiKey}` + } + + const url = "https://api.router.tetrate.ai/v1/models" + const response = await axios.get(url, { headers }) + const rawModels = response.data.data + + for (const rawModel of rawModels) { + // TARS supports reasoning for Claude and Gemini models similar to Requesty + const reasoningBudget = + rawModel.supports_reasoning && + (rawModel.id.includes("claude") || + rawModel.id.includes("coding/gemini-2.5") || + rawModel.id.includes("vertex/gemini-2.5")) + const reasoningEffort = + rawModel.supports_reasoning && + (rawModel.id.includes("openai") || rawModel.id.includes("google/gemini-2.5")) + + const modelInfo: ModelInfo = { + maxTokens: rawModel.max_output_tokens || rawModel.max_tokens || 4096, + contextWindow: rawModel.context_window || 128000, + supportsPromptCache: rawModel.supports_caching || rawModel.supports_prompt_cache || false, + supportsImages: rawModel.supports_vision || rawModel.supports_images || false, + supportsComputerUse: rawModel.supports_computer_use || false, + supportsReasoningBudget: reasoningBudget, + supportsReasoningEffort: reasoningEffort, + inputPrice: parseApiPrice(rawModel.input_price) || 0, + outputPrice: parseApiPrice(rawModel.output_price) || 0, + description: rawModel.description, + cacheWritesPrice: parseApiPrice(rawModel.caching_price || rawModel.cache_write_price) || 0, + cacheReadsPrice: parseApiPrice(rawModel.cached_price || rawModel.cache_read_price) || 0, + } + + models[rawModel.id] = modelInfo + } + } catch (error) { + console.error(`Error fetching TARS models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`) + } + + return models +} diff --git 
a/src/api/providers/index.ts b/src/api/providers/index.ts index c3786c5f56..8c790ff505 100644 --- a/src/api/providers/index.ts +++ b/src/api/providers/index.ts @@ -24,6 +24,7 @@ export { OpenRouterHandler } from "./openrouter" export { QwenCodeHandler } from "./qwen-code" export { RequestyHandler } from "./requesty" export { SambaNovaHandler } from "./sambanova" +export { TarsHandler } from "./tars" export { UnboundHandler } from "./unbound" export { VertexHandler } from "./vertex" export { VsCodeLmHandler } from "./vscode-lm" diff --git a/src/api/providers/tars.ts b/src/api/providers/tars.ts new file mode 100644 index 0000000000..58f0968b5d --- /dev/null +++ b/src/api/providers/tars.ts @@ -0,0 +1,134 @@ +import { Anthropic } from "@anthropic-ai/sdk" +import OpenAI from "openai" + +import { type ModelInfo, tarsDefaultModelId, tarsDefaultModelInfo } from "@roo-code/types" + +import type { ApiHandlerOptions, ModelRecord } from "../../shared/api" +import { calculateApiCostOpenAI } from "../../shared/cost" + +import { convertToOpenAiMessages } from "../transform/openai-format" +import { ApiStream, ApiStreamUsageChunk } from "../transform/stream" +import { getModelParams } from "../transform/model-params" +import { RouterProvider } from "./router-provider" +import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index" + +// TARS usage includes an extra field for Anthropic use cases. +// Safely cast the prompt token details section to the appropriate structure. +interface TarsUsage extends OpenAI.CompletionUsage { + prompt_tokens_details?: { + caching_tokens?: number + cached_tokens?: number + } + total_cost?: number +} + +export class TarsHandler extends RouterProvider implements SingleCompletionHandler { + constructor(options: ApiHandlerOptions) { + super({ + options, + name: "tars", + baseURL: "https://api.router.tetrate.ai/v1", + apiKey: options.tarsApiKey, + modelId: options.tarsModelId, + defaultModelId: tarsDefaultModelId, + defaultModelInfo: tarsDefaultModelInfo, + }) + } + + protected processUsageMetrics(usage: OpenAI.CompletionUsage, modelInfo?: ModelInfo): ApiStreamUsageChunk { + const tarsUsage = usage as TarsUsage + const inputTokens = tarsUsage?.prompt_tokens || 0 + const outputTokens = tarsUsage?.completion_tokens || 0 + const cacheWriteTokens = tarsUsage?.prompt_tokens_details?.caching_tokens || 0 + const cacheReadTokens = tarsUsage?.prompt_tokens_details?.cached_tokens || 0 + const totalCost = modelInfo + ? 
calculateApiCostOpenAI(modelInfo, inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens) + : 0 + + return { + type: "usage", + inputTokens: inputTokens, + outputTokens: outputTokens, + cacheWriteTokens: cacheWriteTokens, + cacheReadTokens: cacheReadTokens, + totalCost: totalCost, + } + } + + override async *createMessage( + systemPrompt: string, + messages: Anthropic.Messages.MessageParam[], + metadata?: ApiHandlerCreateMessageMetadata, + ): ApiStream { + const { id: model, info } = await this.fetchModel() + + const params = getModelParams({ + format: "openai", + modelId: model, + model: info, + settings: this.options, + }) + + const { maxTokens: max_tokens, temperature } = params + + const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [ + { role: "system", content: systemPrompt }, + ...convertToOpenAiMessages(messages), + ] + + const completionParams: OpenAI.Chat.ChatCompletionCreateParams = { + messages: openAiMessages, + model, + max_tokens, + temperature, + stream: true, + stream_options: { include_usage: true }, + } + + const stream = await this.client.chat.completions.create(completionParams) + let lastUsage: OpenAI.CompletionUsage | undefined = undefined + + for await (const chunk of stream) { + const delta = chunk.choices[0]?.delta + + if (delta?.content) { + yield { type: "text", text: delta.content } + } + + if (delta && "reasoning_content" in delta && delta.reasoning_content) { + yield { type: "reasoning", text: (delta.reasoning_content as string | undefined) || "" } + } + + if (chunk.usage) { + lastUsage = chunk.usage + } + } + + if (lastUsage) { + yield this.processUsageMetrics(lastUsage, info) + } + } + + async completePrompt(prompt: string): Promise { + const { id: model, info } = await this.fetchModel() + const params = getModelParams({ + format: "openai", + modelId: model, + model: info, + settings: this.options, + }) + const { maxTokens: max_tokens, temperature } = params + + let openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [{ role: "system", content: prompt }] + + const completionParams: OpenAI.Chat.ChatCompletionCreateParams = { + model, + max_tokens, + messages: openAiMessages, + temperature: temperature, + } + + const response = await this.client.chat.completions.create(completionParams) + return response.choices[0]?.message.content || "" + } +} diff --git a/src/core/webview/__tests__/ClineProvider.spec.ts b/src/core/webview/__tests__/ClineProvider.spec.ts index e5e7e85da5..511e22fc0a 100644 --- a/src/core/webview/__tests__/ClineProvider.spec.ts +++ b/src/core/webview/__tests__/ClineProvider.spec.ts @@ -2636,6 +2636,7 @@ describe("ClineProvider - Router Models", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", + tarsApiKey: "tars-key", glamaApiKey: "glama-key", unboundApiKey: "unbound-key", litellmApiKey: "litellm-key", @@ -2666,6 +2667,7 @@ describe("ClineProvider - Router Models", () => { // Verify getModels was called for each provider with correct options expect(getModels).toHaveBeenCalledWith({ provider: "openrouter" }) expect(getModels).toHaveBeenCalledWith({ provider: "requesty", apiKey: "requesty-key" }) + expect(getModels).toHaveBeenCalledWith({ provider: "tars", apiKey: "tars-key" }) expect(getModels).toHaveBeenCalledWith({ provider: "glama" }) expect(getModels).toHaveBeenCalledWith({ provider: "unbound", apiKey: "unbound-key" }) expect(getModels).toHaveBeenCalledWith({ provider: "vercel-ai-gateway" }) @@ -2681,6 +2683,7 @@ describe("ClineProvider - Router Models", () => { 
routerModels: { openrouter: mockModels, requesty: mockModels, + tars: mockModels, glama: mockModels, unbound: mockModels, litellm: mockModels, @@ -2699,6 +2702,7 @@ describe("ClineProvider - Router Models", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", + tarsApiKey: "tars-key", glamaApiKey: "glama-key", unboundApiKey: "unbound-key", litellmApiKey: "litellm-key", @@ -2715,6 +2719,7 @@ describe("ClineProvider - Router Models", () => { vi.mocked(getModels) .mockResolvedValueOnce(mockModels) // openrouter success .mockRejectedValueOnce(new Error("Requesty API error")) // requesty fail + .mockResolvedValueOnce(mockModels) // tars success .mockResolvedValueOnce(mockModels) // glama success .mockRejectedValueOnce(new Error("Unbound API error")) // unbound fail .mockResolvedValueOnce(mockModels) // vercel-ai-gateway success @@ -2728,6 +2733,7 @@ describe("ClineProvider - Router Models", () => { routerModels: { openrouter: mockModels, requesty: {}, + tars: mockModels, glama: mockModels, unbound: {}, ollama: {}, @@ -2776,6 +2782,7 @@ describe("ClineProvider - Router Models", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", + tarsApiKey: "tars-key", glamaApiKey: "glama-key", unboundApiKey: "unbound-key", // No litellm config @@ -2839,6 +2846,7 @@ describe("ClineProvider - Router Models", () => { routerModels: { openrouter: mockModels, requesty: mockModels, + tars: mockModels, glama: mockModels, unbound: mockModels, litellm: {}, diff --git a/src/core/webview/__tests__/webviewMessageHandler.spec.ts b/src/core/webview/__tests__/webviewMessageHandler.spec.ts index 06dbc03502..15e95869e4 100644 --- a/src/core/webview/__tests__/webviewMessageHandler.spec.ts +++ b/src/core/webview/__tests__/webviewMessageHandler.spec.ts @@ -143,6 +143,7 @@ describe("webviewMessageHandler - requestRouterModels", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", + tarsApiKey: "tars-key", glamaApiKey: "glama-key", unboundApiKey: "unbound-key", litellmApiKey: "litellm-key", @@ -176,6 +177,7 @@ describe("webviewMessageHandler - requestRouterModels", () => { // Verify getModels was called for each provider expect(mockGetModels).toHaveBeenCalledWith({ provider: "openrouter" }) expect(mockGetModels).toHaveBeenCalledWith({ provider: "requesty", apiKey: "requesty-key" }) + expect(mockGetModels).toHaveBeenCalledWith({ provider: "tars", apiKey: "tars-key" }) expect(mockGetModels).toHaveBeenCalledWith({ provider: "glama" }) expect(mockGetModels).toHaveBeenCalledWith({ provider: "unbound", apiKey: "unbound-key" }) expect(mockGetModels).toHaveBeenCalledWith({ provider: "vercel-ai-gateway" }) @@ -191,6 +193,7 @@ describe("webviewMessageHandler - requestRouterModels", () => { routerModels: { openrouter: mockModels, requesty: mockModels, + tars: mockModels, glama: mockModels, unbound: mockModels, litellm: mockModels, @@ -244,6 +247,7 @@ describe("webviewMessageHandler - requestRouterModels", () => { apiConfiguration: { openRouterApiKey: "openrouter-key", requestyApiKey: "requesty-key", + tarsApiKey: "tars-key", glamaApiKey: "glama-key", unboundApiKey: "unbound-key", // Missing litellm config @@ -279,6 +283,7 @@ describe("webviewMessageHandler - requestRouterModels", () => { routerModels: { openrouter: mockModels, requesty: mockModels, + tars: mockModels, glama: mockModels, unbound: mockModels, litellm: {}, @@ -303,6 +308,7 @@ describe("webviewMessageHandler - requestRouterModels", () => { 
mockGetModels .mockResolvedValueOnce(mockModels) // openrouter .mockRejectedValueOnce(new Error("Requesty API error")) // requesty + .mockResolvedValueOnce(mockModels) // tars .mockResolvedValueOnce(mockModels) // glama .mockRejectedValueOnce(new Error("Unbound API error")) // unbound .mockResolvedValueOnce(mockModels) // vercel-ai-gateway @@ -318,6 +324,7 @@ describe("webviewMessageHandler - requestRouterModels", () => { routerModels: { openrouter: mockModels, requesty: {}, + tars: mockModels, glama: mockModels, unbound: {}, litellm: {}, @@ -355,6 +362,7 @@ describe("webviewMessageHandler - requestRouterModels", () => { mockGetModels .mockRejectedValueOnce(new Error("Structured error message")) // openrouter .mockRejectedValueOnce(new Error("Requesty API error")) // requesty + .mockRejectedValueOnce(new Error("TARS API error")) // tars .mockRejectedValueOnce(new Error("Glama API error")) // glama .mockRejectedValueOnce(new Error("Unbound API error")) // unbound .mockRejectedValueOnce(new Error("Vercel AI Gateway error")) // vercel-ai-gateway @@ -379,6 +387,13 @@ describe("webviewMessageHandler - requestRouterModels", () => { values: { provider: "requesty" }, }) + expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ + type: "singleRouterModelFetchResponse", + success: false, + error: "TARS API error", + values: { provider: "tars" }, + }) + expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "singleRouterModelFetchResponse", success: false, diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts index 6bf1320ccf..2db1897f9c 100644 --- a/src/core/webview/webviewMessageHandler.ts +++ b/src/core/webview/webviewMessageHandler.ts @@ -540,6 +540,7 @@ export const webviewMessageHandler = async ( const routerModels: Partial> = { openrouter: {}, requesty: {}, + tars: {}, glama: {}, unbound: {}, litellm: {}, @@ -561,6 +562,7 @@ export const webviewMessageHandler = async ( const modelFetchPromises: Array<{ key: RouterName; options: GetModelsOptions }> = [ { key: "openrouter", options: { provider: "openrouter" } }, + { key: "tars", options: { provider: "tars", apiKey: apiConfiguration.tarsApiKey } }, { key: "requesty", options: { diff --git a/src/shared/api.ts b/src/shared/api.ts index 30dfd7393b..25076f9fba 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -27,6 +27,7 @@ const routerNames = [ "ollama", "lmstudio", "io-intelligence", + "tars", "vercel-ai-gateway", ] as const @@ -146,6 +147,7 @@ export const getModelMaxOutputTokens = ({ export type GetModelsOptions = | { provider: "openrouter" } | { provider: "glama" } + | { provider: "tars"; apiKey?: string } | { provider: "requesty"; apiKey?: string; baseUrl?: string } | { provider: "unbound"; apiKey?: string } | { provider: "litellm"; apiKey: string; baseUrl: string } diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 80ecd75ae4..c4cec64fce 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -84,6 +84,7 @@ import { OpenRouter, QwenCode, Requesty, + Tars, SambaNova, Unbound, Vertex, @@ -466,6 +467,16 @@ const ApiOptions = ({ /> )} + {selectedProvider === "tars" && ( + + )} + {selectedProvider === "glama" && ( void + routerModels?: RouterModels + organizationAllowList: OrganizationAllowList + modelValidationError?: string +} + +export const Tars = ({ + apiConfiguration, + setApiConfigurationField, + routerModels, + 
organizationAllowList, + modelValidationError, +}: TarsProps) => { + const { t } = useAppTranslation() + + const handleInputChange = useCallback( + <K extends keyof ProviderSettings, E>( + field: K, + transform: (event: E) => ProviderSettings[K] = inputEventTransform, + ) => + (event: E | Event) => { + setApiConfigurationField(field, transform(event as E)) + }, + [setApiConfigurationField], + ) + + return ( + <> + +
+ +
+
+
+ {t("settings:providers.apiKeyStorageNotice")} +
+ {!apiConfiguration?.tarsApiKey && ( + + {t("settings:providers.getTarsApiKey")} + + )} + + + ) +} diff --git a/webview-ui/src/components/settings/providers/index.ts b/webview-ui/src/components/settings/providers/index.ts index eedbba0c29..8e3bf8be3c 100644 --- a/webview-ui/src/components/settings/providers/index.ts +++ b/webview-ui/src/components/settings/providers/index.ts @@ -20,6 +20,7 @@ export { OpenRouter } from "./OpenRouter" export { QwenCode } from "./QwenCode" export { Requesty } from "./Requesty" export { SambaNova } from "./SambaNova" +export { Tars } from "./Tars" export { Unbound } from "./Unbound" export { Vertex } from "./Vertex" export { VSCodeLM } from "./VSCodeLM" diff --git a/webview-ui/src/components/ui/hooks/useSelectedModel.ts b/webview-ui/src/components/ui/hooks/useSelectedModel.ts index e9470e0902..7f26438be2 100644 --- a/webview-ui/src/components/ui/hooks/useSelectedModel.ts +++ b/webview-ui/src/components/ui/hooks/useSelectedModel.ts @@ -31,6 +31,7 @@ import { vscodeLlmDefaultModelId, openRouterDefaultModelId, requestyDefaultModelId, + tarsDefaultModelId, glamaDefaultModelId, unboundDefaultModelId, litellmDefaultModelId, @@ -140,6 +141,11 @@ function getSelectedModel({ const info = routerModels.requesty[id] return { id, info } } + case "tars": { + const id = apiConfiguration.tarsModelId ?? tarsDefaultModelId + const info = routerModels.tars?.[id] + return { id, info } + } case "glama": { const id = apiConfiguration.glamaModelId ?? glamaDefaultModelId const info = routerModels.glama[id] diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index d6cef7045d..6669d7e4d0 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -469,7 +469,10 @@ "placeholder": "Per defecte: claude", "maxTokensLabel": "Tokens màxims de sortida", "maxTokensDescription": "Nombre màxim de tokens de sortida per a les respostes de Claude Code. El valor per defecte és 8000." - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index e222810031..47ebd56587 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -469,7 +469,10 @@ "placeholder": "Standard: claude", "maxTokensLabel": "Maximale Ausgabe-Tokens", "maxTokensDescription": "Maximale Anzahl an Ausgabe-Tokens für Claude Code-Antworten. Standard ist 8000." 
- } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 33fba24b8e..4d06fcbc80 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -260,6 +260,9 @@ "getRequestyApiKey": "Get Requesty API Key", "getRequestyBaseUrl": "Base URL", "requestyUseCustomBaseUrl": "Use custom base URL", + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation", "openRouterTransformsText": "Compress prompts and message chains to the context size (OpenRouter Transforms)", "anthropicApiKey": "Anthropic API Key", "getAnthropicApiKey": "Get Anthropic API Key", diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index b2deeda932..5a8ea64228 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -469,7 +469,10 @@ "placeholder": "Por defecto: claude", "maxTokensLabel": "Tokens máximos de salida", "maxTokensDescription": "Número máximo de tokens de salida para las respuestas de Claude Code. El valor predeterminado es 8000." - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index f36a1850dd..f57016a110 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -469,7 +469,10 @@ "placeholder": "Défaut : claude", "maxTokensLabel": "Jetons de sortie max", "maxTokensDescription": "Nombre maximum de jetons de sortie pour les réponses de Claude Code. La valeur par défaut est 8000." - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index 21b390aadd..d006220ef7 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -469,7 +469,10 @@ "placeholder": "डिफ़ॉल्ट: claude", "maxTokensLabel": "अधिकतम आउटपुट टोकन", "maxTokensDescription": "Claude Code प्रतिक्रियाओं के लिए आउटपुट टोकन की अधिकतम संख्या। डिफ़ॉल्ट 8000 है।" - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index 61c7670078..1a81199474 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -473,7 +473,10 @@ "placeholder": "Default: claude", "maxTokensLabel": "Token Output Maks", "maxTokensDescription": "Jumlah maksimum token output untuk respons Claude Code. Default adalah 8000." 
- } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index 3e383fc564..a9c62627cf 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -469,7 +469,10 @@ "placeholder": "Predefinito: claude", "maxTokensLabel": "Token di output massimi", "maxTokensDescription": "Numero massimo di token di output per le risposte di Claude Code. Il valore predefinito è 8000." - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 33dfc8be35..8df0c122f6 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -469,7 +469,10 @@ "placeholder": "デフォルト:claude", "maxTokensLabel": "最大出力トークン", "maxTokensDescription": "Claude Codeレスポンスの最大出力トークン数。デフォルトは8000です。" - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 969da2c9ae..28f7b1dd4e 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -469,7 +469,10 @@ "placeholder": "기본값: claude", "maxTokensLabel": "최대 출력 토큰", "maxTokensDescription": "Claude Code 응답의 최대 출력 토큰 수. 기본값은 8000입니다." - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index aa962a56ab..6da0e5bd0a 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -469,7 +469,10 @@ "placeholder": "Standaard: claude", "maxTokensLabel": "Max Output Tokens", "maxTokensDescription": "Maximaal aantal output-tokens voor Claude Code-reacties. Standaard is 8000." - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index ccc3868fb8..d9fa69067e 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -469,7 +469,10 @@ "placeholder": "Domyślnie: claude", "maxTokensLabel": "Maksymalna liczba tokenów wyjściowych", "maxTokensDescription": "Maksymalna liczba tokenów wyjściowych dla odpowiedzi Claude Code. Domyślnie 8000." - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index b6674e682e..51da0d2b8b 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -469,7 +469,10 @@ "placeholder": "Padrão: claude", "maxTokensLabel": "Tokens de saída máximos", "maxTokensDescription": "Número máximo de tokens de saída para respostas do Claude Code. 
O padrão é 8000." - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index e416777719..ac0f638b8f 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -469,7 +469,10 @@ "placeholder": "По умолчанию: claude", "maxTokensLabel": "Макс. выходных токенов", "maxTokensDescription": "Максимальное количество выходных токенов для ответов Claude Code. По умолчанию 8000." - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 0335802ba0..6ecf33fdf0 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -469,7 +469,10 @@ "placeholder": "Varsayılan: claude", "maxTokensLabel": "Maksimum Çıktı Token sayısı", "maxTokensDescription": "Claude Code yanıtları için maksimum çıktı token sayısı. Varsayılan 8000'dir." - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index 92565cb85e..710091b54b 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -469,7 +469,10 @@ "placeholder": "Mặc định: claude", "maxTokensLabel": "Số token đầu ra tối đa", "maxTokensDescription": "Số lượng token đầu ra tối đa cho các phản hồi của Claude Code. Mặc định là 8000." 
- } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 9a3879523d..d420ecdf62 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -469,7 +469,10 @@ "placeholder": "默认:claude", "maxTokensLabel": "最大输出 Token", "maxTokensDescription": "Claude Code 响应的最大输出 Token 数量。默认为 8000。" - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index b8ade3ca8b..1a22f0c66b 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -469,7 +469,10 @@ "placeholder": "預設:claude", "maxTokensLabel": "最大輸出 Token", "maxTokensDescription": "Claude Code 回應的最大輸出 Token 數量。預設為 8000。" - } + }, + "tarsApiKey": "TARS API Key", + "getTarsApiKey": "Get TARS API Key", + "tarsDocumentationLink": "TARS Documentation" }, "browser": { "enable": { diff --git a/webview-ui/src/utils/__tests__/validate.test.ts b/webview-ui/src/utils/__tests__/validate.test.ts index 2f62dd181d..078956e3bc 100644 --- a/webview-ui/src/utils/__tests__/validate.test.ts +++ b/webview-ui/src/utils/__tests__/validate.test.ts @@ -35,6 +35,7 @@ describe("Model Validation Functions", () => { }, }, requesty: {}, + tars: {}, unbound: {}, litellm: {}, ollama: {}, diff --git a/webview-ui/src/utils/validate.ts b/webview-ui/src/utils/validate.ts index 1cbeba76d0..2e49237e0d 100644 --- a/webview-ui/src/utils/validate.ts +++ b/webview-ui/src/utils/validate.ts @@ -47,6 +47,11 @@ function validateModelsAndKeysProvided(apiConfiguration: ProviderSettings): stri return i18next.t("settings:validation.apiKey") } break + case "tars": + if (!apiConfiguration.tarsApiKey) { + return i18next.t("settings:validation.apiKey") + } + break case "litellm": if (!apiConfiguration.litellmApiKey) { return i18next.t("settings:validation.apiKey") @@ -193,6 +198,8 @@ function getModelIdForProvider(apiConfiguration: ProviderSettings, provider: str return apiConfiguration.unboundModelId case "requesty": return apiConfiguration.requestyModelId + case "tars": + return apiConfiguration.tarsModelId case "litellm": return apiConfiguration.litellmModelId case "openai": @@ -280,6 +287,9 @@ export function validateModelId(apiConfiguration: ProviderSettings, routerModels case "litellm": modelId = apiConfiguration.litellmModelId break + case "tars": + modelId = apiConfiguration.tarsModelId + break case "io-intelligence": modelId = apiConfiguration.ioIntelligenceModelId break @@ -321,7 +331,8 @@ export function getModelValidationError( return orgError.message } - return validateModelId(configWithModelId, routerModels) + const modelError = validateModelId(configWithModelId, routerModels) + return modelError } /**
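
For reference, a minimal usage sketch of the handler introduced above. This is not part of the diff: the import paths and the environment variable name are assumptions, while the option keys (tarsApiKey, tarsModelId), the default model, and the handler methods come from the changes themselves.

import { TarsHandler } from "./src/api/providers/tars"
import type { ApiHandlerOptions } from "./src/shared/api"

// Options mirror the new tarsSchema fields; tarsModelId may be omitted to
// fall back to tarsDefaultModelId ("claude-3-5-haiku-20241022").
const options: ApiHandlerOptions = {
	tarsApiKey: process.env.TARS_API_KEY, // assumed env var
	tarsModelId: "gpt-4o",
}

const handler = new TarsHandler(options)

// completePrompt() resolves to a single completion string;
// createMessage() instead streams text, reasoning, and usage chunks.
const text = await handler.completePrompt("Say hello")
console.log(text)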