From 38a6d2492286f6781f3d187b05eb9f58f9be74f9 Mon Sep 17 00:00:00 2001 From: Roo Code Date: Fri, 5 Sep 2025 02:52:20 +0000 Subject: [PATCH 1/3] feat: add CometAPI as a model provider - Add CometAPI type definitions with support for GPT-5, Claude-4, Gemini-2.5, and other models - Implement CometAPIHandler extending RouterProvider for OpenAI-compatible API - Add CometAPI to provider settings and configuration - Update webview components to support CometAPI provider - Add CometAPI to dynamic providers list for model fetching Implements #7688 --- packages/types/src/provider-settings.ts | 13 ++ packages/types/src/providers/cometapi.ts | 200 ++++++++++++++++++ packages/types/src/providers/index.ts | 1 + src/api/index.ts | 3 + src/api/providers/cometapi.ts | 153 ++++++++++++++ src/api/providers/index.ts | 1 + src/shared/api.ts | 2 + .../components/ui/hooks/useSelectedModel.ts | 9 +- .../src/utils/__tests__/validate.test.ts | 1 + 9 files changed, 382 insertions(+), 1 deletion(-) create mode 100644 packages/types/src/providers/cometapi.ts create mode 100644 src/api/providers/cometapi.ts diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index d1831163f8..3044c15eae 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -34,6 +34,7 @@ import { export const providerNames = [ "anthropic", "claude-code", + "cometapi", "glama", "openrouter", "bedrock", @@ -336,6 +337,12 @@ const vercelAiGatewaySchema = baseProviderSettingsSchema.extend({ vercelAiGatewayModelId: z.string().optional(), }) +const cometApiSchema = baseProviderSettingsSchema.extend({ + cometApiBaseUrl: z.string().optional(), + cometApiApiKey: z.string().optional(), + cometApiModelId: z.string().optional(), +}) + const defaultSchema = z.object({ apiProvider: z.undefined(), }) @@ -343,6 +350,7 @@ const defaultSchema = z.object({ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProvider", [ anthropicSchema.merge(z.object({ apiProvider: z.literal("anthropic") })), claudeCodeSchema.merge(z.object({ apiProvider: z.literal("claude-code") })), + cometApiSchema.merge(z.object({ apiProvider: z.literal("cometapi") })), glamaSchema.merge(z.object({ apiProvider: z.literal("glama") })), openRouterSchema.merge(z.object({ apiProvider: z.literal("openrouter") })), bedrockSchema.merge(z.object({ apiProvider: z.literal("bedrock") })), @@ -384,6 +392,7 @@ export const providerSettingsSchema = z.object({ apiProvider: providerNamesSchema.optional(), ...anthropicSchema.shape, ...claudeCodeSchema.shape, + ...cometApiSchema.shape, ...glamaSchema.shape, ...openRouterSchema.shape, ...bedrockSchema.shape, @@ -418,6 +427,7 @@ export const providerSettingsSchema = z.object({ ...qwenCodeSchema.shape, ...rooSchema.shape, ...vercelAiGatewaySchema.shape, + ...cometApiSchema.shape, ...codebaseIndexProviderSchema.shape, }) @@ -448,6 +458,7 @@ export const MODEL_ID_KEYS: Partial[] = [ "ioIntelligenceModelId", "vercelAiGatewayModelId", "deepInfraModelId", + "cometApiModelId", ] export const getModelId = (settings: ProviderSettings): string | undefined => { @@ -571,6 +582,7 @@ export const MODELS_BY_PROVIDER: Record< unbound: { id: "unbound", label: "Unbound", models: [] }, deepinfra: { id: "deepinfra", label: "DeepInfra", models: [] }, "vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] }, + cometapi: { id: "cometapi", label: "CometAPI", models: [] }, } export const dynamicProviders = [ @@ -582,6 +594,7 @@ export const 
dynamicProviders = [ "unbound", "deepinfra", "vercel-ai-gateway", + "cometapi", ] as const satisfies readonly ProviderName[] export type DynamicProvider = (typeof dynamicProviders)[number] diff --git a/packages/types/src/providers/cometapi.ts b/packages/types/src/providers/cometapi.ts new file mode 100644 index 0000000000..1e5e0418f1 --- /dev/null +++ b/packages/types/src/providers/cometapi.ts @@ -0,0 +1,200 @@ +import type { ModelInfo } from "../model.js" + +// Default fallback values for CometAPI when model metadata is not yet loaded. +export const cometApiDefaultModelId = "gpt-5-chat-latest" + +export const cometApiDefaultModelInfo: ModelInfo = { + maxTokens: 16384, + contextWindow: 128000, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 2.5, + outputPrice: 10, + description: "GPT-5 Chat Latest model with 128K context window.", +} + +// Fallback models for CometAPI when the API is unavailable +export const COMETAPI_MODELS: Record = { + // GPT series + "gpt-5-chat-latest": { + maxTokens: 16384, + contextWindow: 128000, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 2.5, + outputPrice: 10, + description: "GPT-5 Chat Latest - Most advanced GPT model", + }, + "chatgpt-4o-latest": { + maxTokens: 16384, + contextWindow: 128000, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 2.5, + outputPrice: 10, + description: "ChatGPT-4o Latest - Advanced multimodal model", + }, + "gpt-5-mini": { + maxTokens: 16384, + contextWindow: 128000, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0.15, + outputPrice: 0.6, + description: "GPT-5 Mini - Efficient and cost-effective", + }, + "gpt-5-nano": { + maxTokens: 8192, + contextWindow: 128000, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.075, + outputPrice: 0.3, + description: "GPT-5 Nano - Ultra-efficient for simple tasks", + }, + "gpt-4.1-mini": { + maxTokens: 16384, + contextWindow: 128000, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0.15, + outputPrice: 0.6, + description: "GPT-4.1 Mini - Balanced performance and cost", + }, + "gpt-4o-mini": { + maxTokens: 16384, + contextWindow: 128000, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0.15, + outputPrice: 0.6, + description: "GPT-4o Mini - Efficient multimodal model", + }, + + // Claude series + "claude-opus-4-1-20250805": { + maxTokens: 8192, + contextWindow: 200000, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 3, + outputPrice: 15, + description: "Claude Opus 4.1 - Most capable Claude model", + }, + "claude-sonnet-4-20250514": { + maxTokens: 8192, + contextWindow: 200000, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 1.5, + outputPrice: 7.5, + description: "Claude Sonnet 4 - Balanced Claude model", + }, + "claude-3-7-sonnet-latest": { + maxTokens: 8192, + contextWindow: 200000, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 3, + outputPrice: 15, + description: "Claude 3.7 Sonnet - Latest Sonnet version", + }, + "claude-3-5-haiku-latest": { + maxTokens: 8192, + contextWindow: 200000, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 0.25, + outputPrice: 1.25, + description: "Claude 3.5 Haiku - Fast and efficient", + }, + + // Gemini series + "gemini-2.5-pro": { + maxTokens: 8192, + contextWindow: 2097152, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 1.25, + outputPrice: 5, + description: "Gemini 2.5 Pro - Google's most capable model", + }, + 
"gemini-2.5-flash": { + maxTokens: 8192, + contextWindow: 1048576, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0.075, + outputPrice: 0.3, + description: "Gemini 2.5 Flash - Fast and efficient", + }, + "gemini-2.0-flash": { + maxTokens: 8192, + contextWindow: 1048576, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0.075, + outputPrice: 0.3, + description: "Gemini 2.0 Flash - Previous generation flash model", + }, + + // DeepSeek series + "deepseek-v3.1": { + maxTokens: 8192, + contextWindow: 128000, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.14, + outputPrice: 0.28, + description: "DeepSeek V3.1 - Advanced reasoning model", + }, + "deepseek-r1-0528": { + maxTokens: 8192, + contextWindow: 128000, + supportsImages: false, + supportsPromptCache: false, + supportsReasoningEffort: true, + inputPrice: 0.55, + outputPrice: 2.19, + description: "DeepSeek R1 - Reasoning-focused model", + }, + "deepseek-reasoner": { + maxTokens: 8192, + contextWindow: 128000, + supportsImages: false, + supportsPromptCache: false, + supportsReasoningEffort: true, + inputPrice: 0.55, + outputPrice: 2.19, + description: "DeepSeek Reasoner - Advanced reasoning capabilities", + }, + + // Other popular models + "grok-4-0709": { + maxTokens: 8192, + contextWindow: 128000, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 5, + outputPrice: 15, + description: "Grok 4 - xAI's advanced model", + }, + "qwen3-30b-a3b": { + maxTokens: 8192, + contextWindow: 32768, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.5, + outputPrice: 1.5, + description: "Qwen3 30B - Alibaba's large language model", + }, + "qwen3-coder-plus-2025-07-22": { + maxTokens: 8192, + contextWindow: 32768, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.5, + outputPrice: 1.5, + description: "Qwen3 Coder Plus - Specialized for coding tasks", + }, +} diff --git a/packages/types/src/providers/index.ts b/packages/types/src/providers/index.ts index 21e43aaa99..6cdc4f4d1f 100644 --- a/packages/types/src/providers/index.ts +++ b/packages/types/src/providers/index.ts @@ -3,6 +3,7 @@ export * from "./bedrock.js" export * from "./cerebras.js" export * from "./chutes.js" export * from "./claude-code.js" +export * from "./cometapi.js" export * from "./deepseek.js" export * from "./doubao.js" export * from "./featherless.js" diff --git a/src/api/index.ts b/src/api/index.ts index ac00967676..b8451bce65 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -9,6 +9,7 @@ import { AnthropicHandler, AwsBedrockHandler, CerebrasHandler, + CometAPIHandler, OpenRouterHandler, VertexHandler, AnthropicVertexHandler, @@ -95,6 +96,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler { return new AnthropicHandler(options) case "claude-code": return new ClaudeCodeHandler(options) + case "cometapi": + return new CometAPIHandler(options) case "glama": return new GlamaHandler(options) case "openrouter": diff --git a/src/api/providers/cometapi.ts b/src/api/providers/cometapi.ts new file mode 100644 index 0000000000..3bd43f7808 --- /dev/null +++ b/src/api/providers/cometapi.ts @@ -0,0 +1,153 @@ +import { Anthropic } from "@anthropic-ai/sdk" +import OpenAI from "openai" + +import { cometApiDefaultModelId, cometApiDefaultModelInfo, COMETAPI_MODELS } from "@roo-code/types" + +import type { ApiHandlerOptions } from "../../shared/api" +import { calculateApiCostOpenAI } from "../../shared/cost" + +import { ApiStream, ApiStreamUsageChunk 
} from "../transform/stream"
+import { convertToOpenAiMessages } from "../transform/openai-format"
+
+import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import { RouterProvider } from "./router-provider"
+import { getModelParams } from "../transform/model-params"
+import { getModels } from "./fetchers/modelCache"
+
+export class CometAPIHandler extends RouterProvider implements SingleCompletionHandler {
+	constructor(options: ApiHandlerOptions) {
+		super({
+			options: {
+				...options,
+				openAiHeaders: {
+					"X-CometAPI-Source": "roo-code",
+					"X-CometAPI-Version": "2025-09-05",
+				},
+			},
+			name: "cometapi",
+			baseURL: options.cometApiBaseUrl || "https://api.cometapi.com/v1",
+			apiKey: options.cometApiApiKey || "not-provided",
+			modelId: options.cometApiModelId,
+			defaultModelId: cometApiDefaultModelId,
+			defaultModelInfo: cometApiDefaultModelInfo,
+		})
+	}
+
+	public override async fetchModel() {
+		// Try to fetch models from API, fallback to static models if API is unavailable
+		try {
+			this.models = await getModels({
+				provider: this.name,
+				apiKey: this.client.apiKey,
+				baseUrl: this.client.baseURL,
+			})
+		} catch (error) {
+			// Fallback to static models if API is unavailable
+			console.warn("Failed to fetch CometAPI models, using fallback models:", error)
+			this.models = COMETAPI_MODELS
+		}
+		return this.getModel()
+	}
+
+	override getModel() {
+		const id = this.options.cometApiModelId ?? cometApiDefaultModelId
+		const info = this.models[id] ?? cometApiDefaultModelInfo
+
+		const params = getModelParams({
+			format: "openai",
+			modelId: id,
+			model: info,
+			settings: this.options,
+		})
+
+		return { id, info, ...params }
+	}
+
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		_metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
+		// Fetch up-to-date model metadata once before building the request.
+		const { id: modelId, info, reasoningEffort: reasoning_effort } = await this.fetchModel()
+
+		const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
+			model: modelId,
+			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+			stream: true,
+			stream_options: { include_usage: true },
+			reasoning_effort,
+		} as OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming
+
+		if (this.supportsTemperature(modelId)) {
+			requestOptions.temperature = this.options.modelTemperature ??
0 + } + + if (this.options.includeMaxTokens === true && info.maxTokens) { + ;(requestOptions as any).max_completion_tokens = this.options.modelMaxTokens || info.maxTokens + } + + const { data: stream } = await this.client.chat.completions.create(requestOptions).withResponse() + + let lastUsage: OpenAI.CompletionUsage | undefined + for await (const chunk of stream) { + const delta = chunk.choices[0]?.delta + + if (delta?.content) { + yield { type: "text", text: delta.content } + } + + if (delta && "reasoning_content" in delta && delta.reasoning_content) { + yield { type: "reasoning", text: (delta.reasoning_content as string | undefined) || "" } + } + + if (chunk.usage) { + lastUsage = chunk.usage + } + } + + if (lastUsage) { + yield this.processUsageMetrics(lastUsage, info) + } + } + + async completePrompt(prompt: string): Promise { + await this.fetchModel() + const { id: modelId, info } = this.getModel() + + const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = { + model: modelId, + messages: [{ role: "user", content: prompt }], + } + if (this.supportsTemperature(modelId)) { + requestOptions.temperature = this.options.modelTemperature ?? 0 + } + if (this.options.includeMaxTokens === true && info.maxTokens) { + ;(requestOptions as any).max_completion_tokens = this.options.modelMaxTokens || info.maxTokens + } + + const resp = await this.client.chat.completions.create(requestOptions) + return resp.choices[0]?.message?.content || "" + } + + protected processUsageMetrics(usage: any, modelInfo?: any): ApiStreamUsageChunk { + const inputTokens = usage?.prompt_tokens || 0 + const outputTokens = usage?.completion_tokens || 0 + const cacheWriteTokens = usage?.prompt_tokens_details?.cache_write_tokens || 0 + const cacheReadTokens = usage?.prompt_tokens_details?.cached_tokens || 0 + + const totalCost = modelInfo + ? 
calculateApiCostOpenAI(modelInfo, inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens) + : 0 + + return { + type: "usage", + inputTokens, + outputTokens, + cacheWriteTokens: cacheWriteTokens || undefined, + cacheReadTokens: cacheReadTokens || undefined, + totalCost, + } + } +} diff --git a/src/api/providers/index.ts b/src/api/providers/index.ts index 85d877b6bc..5984d74361 100644 --- a/src/api/providers/index.ts +++ b/src/api/providers/index.ts @@ -4,6 +4,7 @@ export { AwsBedrockHandler } from "./bedrock" export { CerebrasHandler } from "./cerebras" export { ChutesHandler } from "./chutes" export { ClaudeCodeHandler } from "./claude-code" +export { CometAPIHandler } from "./cometapi" export { DeepSeekHandler } from "./deepseek" export { DoubaoHandler } from "./doubao" export { MoonshotHandler } from "./moonshot" diff --git a/src/shared/api.ts b/src/shared/api.ts index eb3ae124a8..639e68d36b 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -29,6 +29,7 @@ const routerNames = [ "io-intelligence", "deepinfra", "vercel-ai-gateway", + "cometapi", ] as const export type RouterName = (typeof routerNames)[number] @@ -155,3 +156,4 @@ export type GetModelsOptions = | { provider: "deepinfra"; apiKey?: string; baseUrl?: string } | { provider: "io-intelligence"; apiKey: string } | { provider: "vercel-ai-gateway" } + | { provider: "cometapi"; apiKey?: string; baseUrl?: string } diff --git a/webview-ui/src/components/ui/hooks/useSelectedModel.ts b/webview-ui/src/components/ui/hooks/useSelectedModel.ts index b7fe4ff03d..6b29cf1337 100644 --- a/webview-ui/src/components/ui/hooks/useSelectedModel.ts +++ b/webview-ui/src/components/ui/hooks/useSelectedModel.ts @@ -8,6 +8,8 @@ import { bedrockModels, cerebrasDefaultModelId, cerebrasModels, + cometApiDefaultModelId, + COMETAPI_MODELS, deepSeekDefaultModelId, deepSeekModels, moonshotDefaultModelId, @@ -341,11 +343,16 @@ function getSelectedModel({ const info = routerModels["vercel-ai-gateway"]?.[id] return { id, info } } + case "cometapi": { + const id = apiConfiguration.cometApiModelId ?? cometApiDefaultModelId + const info = routerModels.cometapi?.[id] ?? COMETAPI_MODELS[id as keyof typeof COMETAPI_MODELS] + return { id, info } + } // case "anthropic": // case "human-relay": // case "fake-ai": default: { - provider satisfies "anthropic" | "gemini-cli" | "qwen-code" | "human-relay" | "fake-ai" + provider satisfies "anthropic" | "gemini-cli" | "human-relay" | "fake-ai" const id = apiConfiguration.apiModelId ?? 
anthropicDefaultModelId const baseInfo = anthropicModels[id as keyof typeof anthropicModels] diff --git a/webview-ui/src/utils/__tests__/validate.test.ts b/webview-ui/src/utils/__tests__/validate.test.ts index c9fb7bfd42..1699242ed5 100644 --- a/webview-ui/src/utils/__tests__/validate.test.ts +++ b/webview-ui/src/utils/__tests__/validate.test.ts @@ -42,6 +42,7 @@ describe("Model Validation Functions", () => { deepinfra: {}, "io-intelligence": {}, "vercel-ai-gateway": {}, + cometapi: {}, } const allowAllOrganization: OrganizationAllowList = { From 48f6c2cb84ad085522c76e409467db244ecb5cf2 Mon Sep 17 00:00:00 2001 From: Roo Code Date: Fri, 5 Sep 2025 02:53:55 +0000 Subject: [PATCH 2/3] fix: add CometAPI fetcher and update modelCache --- src/api/providers/fetchers/cometapi.ts | 60 ++++++++++++++++++++++++ src/api/providers/fetchers/modelCache.ts | 4 ++ 2 files changed, 64 insertions(+) create mode 100644 src/api/providers/fetchers/cometapi.ts diff --git a/src/api/providers/fetchers/cometapi.ts b/src/api/providers/fetchers/cometapi.ts new file mode 100644 index 0000000000..dc9714d04a --- /dev/null +++ b/src/api/providers/fetchers/cometapi.ts @@ -0,0 +1,60 @@ +import { ModelRecord } from "../../../shared/api" +import { COMETAPI_MODELS } from "@roo-code/types" + +/** + * Fetch CometAPI models from the API + * @param apiKey - The API key for CometAPI + * @param baseUrl - The base URL for CometAPI (optional) + * @returns The models from CometAPI + */ +export async function getCometAPIModels(apiKey?: string, baseUrl?: string): Promise { + const url = `${baseUrl || "https://api.cometapi.com/v1"}/models` + + try { + if (!apiKey) { + // Return fallback models if no API key is provided + return COMETAPI_MODELS + } + + const response = await fetch(url, { + headers: { + Authorization: `Bearer ${apiKey}`, + Accept: "application/json", + }, + }) + + if (!response.ok) { + console.warn(`Failed to fetch CometAPI models: ${response.status} ${response.statusText}`) + // Return fallback models on error + return COMETAPI_MODELS + } + + const data = await response.json() + + // Transform the API response to match our ModelRecord format + const models: ModelRecord = {} + + if (data.data && Array.isArray(data.data)) { + for (const model of data.data) { + // Use fallback model info if available, otherwise create basic info + const fallbackInfo = COMETAPI_MODELS[model.id] + models[model.id] = fallbackInfo || { + maxTokens: model.max_tokens || 8192, + contextWindow: model.context_length || 128000, + supportsImages: model.supports_images || false, + supportsPromptCache: false, + inputPrice: model.pricing?.prompt || 0, + outputPrice: model.pricing?.completion || 0, + description: model.description || model.id, + } + } + } + + // If no models were returned, use fallback models + return Object.keys(models).length > 0 ? 
models : COMETAPI_MODELS + } catch (error) { + console.error("Error fetching CometAPI models:", error) + // Return fallback models on error + return COMETAPI_MODELS + } +} diff --git a/src/api/providers/fetchers/modelCache.ts b/src/api/providers/fetchers/modelCache.ts index a91cdaf994..d8ea626015 100644 --- a/src/api/providers/fetchers/modelCache.ts +++ b/src/api/providers/fetchers/modelCache.ts @@ -20,6 +20,7 @@ import { getOllamaModels } from "./ollama" import { getLMStudioModels } from "./lmstudio" import { getIOIntelligenceModels } from "./io-intelligence" import { getDeepInfraModels } from "./deepinfra" +import { getCometAPIModels } from "./cometapi" const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 }) async function writeModels(router: RouterName, data: ModelRecord) { @@ -89,6 +90,9 @@ export const getModels = async (options: GetModelsOptions): Promise case "vercel-ai-gateway": models = await getVercelAiGatewayModels() break + case "cometapi": + models = await getCometAPIModels(options.apiKey, options.baseUrl) + break default: { // Ensures router is exhaustively checked if RouterName is a strict union const exhaustiveCheck: never = provider From aa7ed61dfa40308cc06378b1651646bd392e3799 Mon Sep 17 00:00:00 2001 From: Roo Code Date: Fri, 5 Sep 2025 03:02:21 +0000 Subject: [PATCH 3/3] fix: address code review feedback for CometAPI provider - Remove duplicate schema entry in provider-settings.ts - Remove try-catch from fetchModel to match DeepInfraHandler pattern - Add timeout support to RouterProvider base class - Store apiKey and baseURL in RouterProvider for model fetching - Add comprehensive test coverage for CometAPI provider - Fix test mock structure for streaming responses --- packages/types/src/provider-settings.ts | 1 - src/api/providers/__tests__/cometapi.spec.ts | 399 +++++++++++++++++++ src/api/providers/cometapi.ts | 19 +- src/api/providers/router-provider.ts | 10 +- 4 files changed, 414 insertions(+), 15 deletions(-) create mode 100644 src/api/providers/__tests__/cometapi.spec.ts diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 3044c15eae..435b367d20 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -427,7 +427,6 @@ export const providerSettingsSchema = z.object({ ...qwenCodeSchema.shape, ...rooSchema.shape, ...vercelAiGatewaySchema.shape, - ...cometApiSchema.shape, ...codebaseIndexProviderSchema.shape, }) diff --git a/src/api/providers/__tests__/cometapi.spec.ts b/src/api/providers/__tests__/cometapi.spec.ts new file mode 100644 index 0000000000..3de247388e --- /dev/null +++ b/src/api/providers/__tests__/cometapi.spec.ts @@ -0,0 +1,399 @@ +// npx vitest run api/providers/__tests__/cometapi.spec.ts + +import { CometAPIHandler } from "../cometapi" +import { ApiHandlerOptions } from "../../../shared/api" + +// Mock the timeout config utility +vitest.mock("../utils/timeout-config", () => ({ + getApiRequestTimeout: vitest.fn(), +})) + +import { getApiRequestTimeout } from "../utils/timeout-config" + +// Mock the model cache +vitest.mock("../fetchers/modelCache", () => ({ + getModels: vitest.fn(), +})) + +import { getModels } from "../fetchers/modelCache" + +// Mock OpenAI +const mockOpenAIConstructor = vitest.fn() +const mockCreateCompletion = vitest.fn() +const mockCreateStream = vitest.fn() + +vitest.mock("openai", () => { + return { + __esModule: true, + default: vitest.fn().mockImplementation((config) => { + mockOpenAIConstructor(config) + return { + 
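+				// Mimic only the OpenAI SDK surface the handler touches:
+				// chat.completions.create(), plus withResponse() for streaming requests.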
chat: {
+					completions: {
+						create: vitest.fn().mockImplementation((options) => {
+							if (options.stream) {
+								return {
+									withResponse: () => ({
+										data: mockCreateStream(options),
+									}),
+								}
+							}
+							return mockCreateCompletion(options)
+						}),
+					},
+				},
+			}
+		}),
+	}
+})
+
+describe("CometAPIHandler", () => {
+	beforeEach(() => {
+		vitest.clearAllMocks()
+	})
+
+	describe("constructor", () => {
+		it("should initialize with default configuration", () => {
+			;(getApiRequestTimeout as any).mockReturnValue(600000)
+
+			const options: ApiHandlerOptions = {
+				apiModelId: "gpt-5-turbo",
+				cometApiModelId: "gpt-5-turbo",
+				cometApiApiKey: "test-key",
+			}
+
+			new CometAPIHandler(options)
+
+			expect(getApiRequestTimeout).toHaveBeenCalled()
+			expect(mockOpenAIConstructor).toHaveBeenCalledWith(
+				expect.objectContaining({
+					baseURL: "https://api.cometapi.com/v1",
+					apiKey: "test-key",
+					timeout: 600000,
+					defaultHeaders: expect.objectContaining({
+						"X-CometAPI-Source": "roo-code",
+						"X-CometAPI-Version": "2025-09-05",
+					}),
+				}),
+			)
+		})
+
+		it("should use custom base URL when provided", () => {
+			;(getApiRequestTimeout as any).mockReturnValue(600000)
+
+			const options: ApiHandlerOptions = {
+				apiModelId: "claude-4-opus",
+				cometApiModelId: "claude-4-opus",
+				cometApiApiKey: "test-key",
+				cometApiBaseUrl: "https://custom.cometapi.com/v1",
+			}
+
+			new CometAPIHandler(options)
+
+			expect(mockOpenAIConstructor).toHaveBeenCalledWith(
+				expect.objectContaining({
+					baseURL: "https://custom.cometapi.com/v1",
+					apiKey: "test-key",
+				}),
+			)
+		})
+
+		it("should handle missing API key", () => {
+			;(getApiRequestTimeout as any).mockReturnValue(600000)
+
+			const options: ApiHandlerOptions = {
+				apiModelId: "gpt-5-turbo",
+				cometApiModelId: "gpt-5-turbo",
+			}
+
+			new CometAPIHandler(options)
+
+			expect(mockOpenAIConstructor).toHaveBeenCalledWith(
+				expect.objectContaining({
+					apiKey: "not-provided",
+				}),
+			)
+		})
+	})
+
+	describe("fetchModel", () => {
+		it("should fetch models from API", async () => {
+			const mockModels = {
+				"gpt-5-turbo": {
+					maxTokens: 128000,
+					contextWindow: 128000,
+					supportsImages: true,
+					supportsPromptCache: true,
+					inputPrice: 3.0,
+					outputPrice: 15.0,
+				},
+			}
+
+			;(getModels as any).mockResolvedValue(mockModels)
+			;(getApiRequestTimeout as any).mockReturnValue(600000)
+
+			const options: ApiHandlerOptions = {
+				apiModelId: "gpt-5-turbo",
+				cometApiModelId: "gpt-5-turbo",
+				cometApiApiKey: "test-key",
+			}
+
+			const handler = new CometAPIHandler(options)
+			const model = await handler.fetchModel()
+
+			expect(getModels).toHaveBeenCalledWith({
+				provider: "cometapi",
+				apiKey: "test-key",
+				baseUrl: "https://api.cometapi.com/v1",
+			})
+
+			expect(model).toMatchObject({
+				id: "gpt-5-turbo",
+				info: mockModels["gpt-5-turbo"],
+			})
+		})
+
+		it("should propagate model fetch errors", async () => {
+			;(getModels as any).mockRejectedValue(new Error("Network error"))
+			;(getApiRequestTimeout as any).mockReturnValue(600000)
+
+			const options: ApiHandlerOptions = {
+				apiModelId: "gpt-5-turbo",
+				cometApiModelId: "gpt-5-turbo",
+				cometApiApiKey: "test-key",
+			}
+
+			const handler = new CometAPIHandler(options)
+
+			// fetchModel no longer swallows errors: failures from getModels propagate to the caller
+			await expect(handler.fetchModel()).rejects.toThrow("Network error")
+		})
+	})
+
+	describe("createMessage", () => {
+		it("should create streaming message with proper options", async () => {
+			const mockModels = {
+				"gpt-5-turbo": {
+					maxTokens: 128000,
+					contextWindow: 128000,
+					supportsImages: true,
supportsPromptCache: true, + inputPrice: 3.0, + outputPrice: 15.0, + }, + } + + ;(getModels as any).mockResolvedValue(mockModels) + ;(getApiRequestTimeout as any).mockReturnValue(600000) + + const mockStream = (async function* () { + yield { + choices: [{ delta: { content: "Hello" } }], + } + yield { + choices: [{ delta: { content: " world" } }], + } + yield { + choices: [{ delta: {} }], + usage: { + prompt_tokens: 10, + completion_tokens: 5, + }, + } + })() + + mockCreateStream.mockReturnValue(mockStream) + + const options: ApiHandlerOptions = { + apiModelId: "gpt-5-turbo", + cometApiModelId: "gpt-5-turbo", + cometApiApiKey: "test-key", + modelTemperature: 0.7, + includeMaxTokens: true, + modelMaxTokens: 4096, + } + + const handler = new CometAPIHandler(options) + const stream = handler.createMessage("System prompt", [{ role: "user", content: "Hello" }]) + + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks).toEqual([ + { type: "text", text: "Hello" }, + { type: "text", text: " world" }, + expect.objectContaining({ + type: "usage", + inputTokens: 10, + outputTokens: 5, + }), + ]) + + expect(mockCreateStream).toHaveBeenCalledWith( + expect.objectContaining({ + model: "gpt-5-turbo", + stream: true, + stream_options: { include_usage: true }, + temperature: 0.7, + max_completion_tokens: 4096, + }), + ) + }) + + it("should handle reasoning content in stream", async () => { + const mockModels = { + "o1-preview": { + maxTokens: 128000, + contextWindow: 128000, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 15.0, + outputPrice: 60.0, + }, + } + + ;(getModels as any).mockResolvedValue(mockModels) + ;(getApiRequestTimeout as any).mockReturnValue(600000) + + const mockStream = (async function* () { + yield { + choices: [{ delta: { reasoning_content: "Thinking..." } }], + } + yield { + choices: [{ delta: { content: "Answer" } }], + } + })() + + mockCreateStream.mockReturnValue(mockStream) + + const options: ApiHandlerOptions = { + apiModelId: "o1-preview", + cometApiModelId: "o1-preview", + cometApiApiKey: "test-key", + } + + const handler = new CometAPIHandler(options) + const stream = handler.createMessage("System prompt", [{ role: "user", content: "Question" }]) + + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks).toContainEqual({ type: "reasoning", text: "Thinking..." 
}) + expect(chunks).toContainEqual({ type: "text", text: "Answer" }) + }) + }) + + describe("completePrompt", () => { + it("should complete prompt with non-streaming request", async () => { + const mockModels = { + "gpt-5-turbo": { + maxTokens: 128000, + contextWindow: 128000, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 3.0, + outputPrice: 15.0, + }, + } + + ;(getModels as any).mockResolvedValue(mockModels) + ;(getApiRequestTimeout as any).mockReturnValue(600000) + + mockCreateCompletion.mockResolvedValue({ + choices: [{ message: { content: "Response text" } }], + }) + + const options: ApiHandlerOptions = { + apiModelId: "gpt-5-turbo", + cometApiModelId: "gpt-5-turbo", + cometApiApiKey: "test-key", + modelTemperature: 0.5, + } + + const handler = new CometAPIHandler(options) + const result = await handler.completePrompt("Test prompt") + + expect(result).toBe("Response text") + expect(mockCreateCompletion).toHaveBeenCalledWith( + expect.objectContaining({ + model: "gpt-5-turbo", + messages: [{ role: "user", content: "Test prompt" }], + temperature: 0.5, + }), + ) + }) + + it("should handle empty response", async () => { + const mockModels = { + "gpt-5-turbo": { + maxTokens: 128000, + contextWindow: 128000, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 3.0, + outputPrice: 15.0, + }, + } + + ;(getModels as any).mockResolvedValue(mockModels) + ;(getApiRequestTimeout as any).mockReturnValue(600000) + + mockCreateCompletion.mockResolvedValue({ + choices: [], + }) + + const options: ApiHandlerOptions = { + apiModelId: "gpt-5-turbo", + cometApiModelId: "gpt-5-turbo", + cometApiApiKey: "test-key", + } + + const handler = new CometAPIHandler(options) + const result = await handler.completePrompt("Test prompt") + + expect(result).toBe("") + }) + }) + + describe("timeout configuration", () => { + it("should use configured timeout value", () => { + ;(getApiRequestTimeout as any).mockReturnValue(1800000) // 30 minutes + + const options: ApiHandlerOptions = { + apiModelId: "gpt-5-turbo", + cometApiModelId: "gpt-5-turbo", + cometApiApiKey: "test-key", + } + + new CometAPIHandler(options) + + expect(mockOpenAIConstructor).toHaveBeenCalledWith( + expect.objectContaining({ + timeout: 1800000, + }), + ) + }) + + it("should handle zero timeout (no timeout)", () => { + ;(getApiRequestTimeout as any).mockReturnValue(0) + + const options: ApiHandlerOptions = { + apiModelId: "gpt-5-turbo", + cometApiModelId: "gpt-5-turbo", + cometApiApiKey: "test-key", + } + + new CometAPIHandler(options) + + expect(mockOpenAIConstructor).toHaveBeenCalledWith( + expect.objectContaining({ + timeout: 0, + }), + ) + }) + }) +}) diff --git a/src/api/providers/cometapi.ts b/src/api/providers/cometapi.ts index 3bd43f7808..90f9a0cbf6 100644 --- a/src/api/providers/cometapi.ts +++ b/src/api/providers/cometapi.ts @@ -1,7 +1,7 @@ import { Anthropic } from "@anthropic-ai/sdk" import OpenAI from "openai" -import { cometApiDefaultModelId, cometApiDefaultModelInfo, COMETAPI_MODELS } from "@roo-code/types" +import { cometApiDefaultModelId, cometApiDefaultModelInfo } from "@roo-code/types" import type { ApiHandlerOptions } from "../../shared/api" import { calculateApiCostOpenAI } from "../../shared/cost" @@ -34,18 +34,11 @@ export class CometAPIHandler extends RouterProvider implements SingleCompletionH } public override async fetchModel() { - // Try to fetch models from API, fallback to static models if API is unavailable - try { - this.models = await getModels({ - provider: this.name, - apiKey: 
this.client.apiKey, - baseUrl: this.client.baseURL, - }) - } catch (error) { - // Fallback to static models if API is unavailable - console.warn("Failed to fetch CometAPI models, using fallback models:", error) - this.models = COMETAPI_MODELS - } + this.models = await getModels({ + provider: this.name, + apiKey: this.apiKey, + baseUrl: this.baseURL, + }) return this.getModel() } diff --git a/src/api/providers/router-provider.ts b/src/api/providers/router-provider.ts index 25e9a11e1b..e5ef574d05 100644 --- a/src/api/providers/router-provider.ts +++ b/src/api/providers/router-provider.ts @@ -8,6 +8,7 @@ import { BaseProvider } from "./base-provider" import { getModels } from "./fetchers/modelCache" import { DEFAULT_HEADERS } from "./constants" +import { getApiRequestTimeout } from "./utils/timeout-config" type RouterProviderOptions = { name: RouterName @@ -27,6 +28,8 @@ export abstract class RouterProvider extends BaseProvider { protected readonly defaultModelId: string protected readonly defaultModelInfo: ModelInfo protected readonly client: OpenAI + protected readonly apiKey: string + protected readonly baseURL: string constructor({ options, @@ -44,6 +47,10 @@ export abstract class RouterProvider extends BaseProvider { this.modelId = modelId this.defaultModelId = defaultModelId this.defaultModelInfo = defaultModelInfo + this.apiKey = apiKey + this.baseURL = baseURL + + const timeout = getApiRequestTimeout() this.client = new OpenAI({ baseURL, @@ -52,11 +59,12 @@ export abstract class RouterProvider extends BaseProvider { ...DEFAULT_HEADERS, ...(options.openAiHeaders || {}), }, + timeout, }) } public async fetchModel() { - this.models = await getModels({ provider: this.name, apiKey: this.client.apiKey, baseUrl: this.client.baseURL }) + this.models = await getModels({ provider: this.name, apiKey: this.apiKey, baseUrl: this.baseURL }) return this.getModel() }
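
Usage sketch (illustrative, not part of the patch series): with all three
patches applied, the new provider is reachable through the existing
buildApiHandler factory shown in src/api/index.ts. The option keys, the
"cometapi" discriminant, and the streamed chunk shapes below come from the
diffs above; the demoCometAPI wrapper and the COMETAPI_API_KEY environment
variable name are assumptions for the example only.

	import { buildApiHandler } from "./src/api"

	async function demoCometAPI() {
		// "cometapi" is the discriminant added to providerNames in patch 1.
		const handler = buildApiHandler({
			apiProvider: "cometapi",
			cometApiApiKey: process.env.COMETAPI_API_KEY, // handler falls back to "not-provided" if unset
			cometApiModelId: "gpt-5-chat-latest", // cometApiDefaultModelId is used when omitted
		})

		// createMessage yields "text", "reasoning", and "usage" chunks (see CometAPIHandler).
		for await (const chunk of handler.createMessage("You are a helpful assistant.", [
			{ role: "user", content: "Hello!" },
		])) {
			if (chunk.type === "text") process.stdout.write(chunk.text)
		}
	}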