Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
118 changes: 118 additions & 0 deletions packages/types/src/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -31,3 +31,121 @@ export * from "./vercel-ai-gateway.js"
export * from "./zai.js"
export * from "./deepinfra.js"
export * from "./minimax.js"

import { anthropicDefaultModelId } from "./anthropic.js"
import { bedrockDefaultModelId } from "./bedrock.js"
import { cerebrasDefaultModelId } from "./cerebras.js"
import { chutesDefaultModelId } from "./chutes.js"
import { claudeCodeDefaultModelId } from "./claude-code.js"
import { deepSeekDefaultModelId } from "./deepseek.js"
import { doubaoDefaultModelId } from "./doubao.js"
import { featherlessDefaultModelId } from "./featherless.js"
import { fireworksDefaultModelId } from "./fireworks.js"
import { geminiDefaultModelId } from "./gemini.js"
import { glamaDefaultModelId } from "./glama.js"
import { groqDefaultModelId } from "./groq.js"
import { ioIntelligenceDefaultModelId } from "./io-intelligence.js"
import { litellmDefaultModelId } from "./lite-llm.js"
import { mistralDefaultModelId } from "./mistral.js"
import { moonshotDefaultModelId } from "./moonshot.js"
import { openRouterDefaultModelId } from "./openrouter.js"
import { qwenCodeDefaultModelId } from "./qwen-code.js"
import { requestyDefaultModelId } from "./requesty.js"
import { rooDefaultModelId } from "./roo.js"
import { sambaNovaDefaultModelId } from "./sambanova.js"
import { unboundDefaultModelId } from "./unbound.js"
import { vertexDefaultModelId } from "./vertex.js"
import { vscodeLlmDefaultModelId } from "./vscode-llm.js"
import { xaiDefaultModelId } from "./xai.js"
import { vercelAiGatewayDefaultModelId } from "./vercel-ai-gateway.js"
import { internationalZAiDefaultModelId, mainlandZAiDefaultModelId } from "./zai.js"
import { deepInfraDefaultModelId } from "./deepinfra.js"
import { minimaxDefaultModelId } from "./minimax.js"

// Import the ProviderName type from provider-settings to avoid duplication
import type { ProviderName } from "../provider-settings.js"

/**
* Get the default model ID for a given provider.
* This function returns only the provider's default model ID, without considering user configuration.
* Used as a fallback when provider models are still loading.
*/
/**
 * Get the default model ID for a given provider.
 *
 * Returns only the provider's built-in default model ID, without considering
 * user configuration. Used as a fallback when provider models are still loading.
 *
 * @param provider - The provider to look up.
 * @param options - Optional flags. `isChina` selects the mainland-China default
 *   for providers whose default varies by region (currently only "zai").
 * @returns The provider's default model ID, or "" for providers that rely
 *   entirely on user-supplied / dynamically discovered model configuration
 *   (openai, ollama, lmstudio).
 */
export function getProviderDefaultModelId(
	provider: ProviderName,
	// `= {}` is behavior-identical to `= { isChina: false }`: `isChina` is only
	// read as a truthiness check below, so a missing key and `false` are the same.
	options: { isChina?: boolean } = {},
): string {
	switch (provider) {
		case "openrouter":
			return openRouterDefaultModelId
		case "requesty":
			return requestyDefaultModelId
		case "glama":
			return glamaDefaultModelId
		case "unbound":
			return unboundDefaultModelId
		case "litellm":
			return litellmDefaultModelId
		case "xai":
			return xaiDefaultModelId
		case "groq":
			return groqDefaultModelId
		case "huggingface":
			// No huggingface module exports a default; hard-coded here.
			return "meta-llama/Llama-3.3-70B-Instruct"
		case "chutes":
			return chutesDefaultModelId
		case "bedrock":
			return bedrockDefaultModelId
		case "vertex":
			return vertexDefaultModelId
		case "gemini":
			return geminiDefaultModelId
		case "deepseek":
			return deepSeekDefaultModelId
		case "doubao":
			return doubaoDefaultModelId
		case "moonshot":
			return moonshotDefaultModelId
		case "minimax":
			return minimaxDefaultModelId
		case "zai":
			// Z.AI ships different default models for mainland China vs. international.
			// `options` is never nullish here (it has a default value), so no `?.` needed.
			return options.isChina ? mainlandZAiDefaultModelId : internationalZAiDefaultModelId
		case "openai-native":
			return "gpt-4o" // Based on openai-native patterns
		case "mistral":
			return mistralDefaultModelId
		case "openai":
			return "" // OpenAI provider uses custom model configuration
		case "ollama":
			return "" // Ollama uses dynamic model selection
		case "lmstudio":
			return "" // LMStudio uses dynamic model selection
		case "deepinfra":
			return deepInfraDefaultModelId
		case "vscode-lm":
			return vscodeLlmDefaultModelId
		case "claude-code":
			return claudeCodeDefaultModelId
		case "cerebras":
			return cerebrasDefaultModelId
		case "sambanova":
			return sambaNovaDefaultModelId
		case "fireworks":
			return fireworksDefaultModelId
		case "featherless":
			return featherlessDefaultModelId
		case "io-intelligence":
			return ioIntelligenceDefaultModelId
		case "roo":
			return rooDefaultModelId
		case "qwen-code":
			return qwenCodeDefaultModelId
		case "vercel-ai-gateway":
			return vercelAiGatewayDefaultModelId
		// Providers that intentionally share the Anthropic default, plus the
		// catch-all for any future ProviderName not yet handled above.
		case "anthropic":
		case "gemini-cli":
		case "human-relay":
		case "fake-ai":
		default:
			return anthropicDefaultModelId
	}
}
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ describe("useSelectedModel", () => {
})
})

it("should use only specific provider info when base model info is missing", () => {
it("should fall back to default when configured model doesn't exist in available models", () => {
const specificProviderInfo: ModelInfo = {
maxTokens: 8192,
contextWindow: 16384,
Expand All @@ -106,7 +106,18 @@ describe("useSelectedModel", () => {

mockUseRouterModels.mockReturnValue({
data: {
openrouter: {},
openrouter: {
"anthropic/claude-sonnet-4.5": {
maxTokens: 8192,
contextWindow: 200_000,
supportsImages: true,
supportsPromptCache: true,
inputPrice: 3.0,
outputPrice: 15.0,
cacheWritesPrice: 3.75,
cacheReadsPrice: 0.3,
},
},
requesty: {},
glama: {},
unbound: {},
Expand All @@ -127,15 +138,29 @@ describe("useSelectedModel", () => {

const apiConfiguration: ProviderSettings = {
apiProvider: "openrouter",
openRouterModelId: "test-model",
openRouterModelId: "test-model", // This model doesn't exist in available models
openRouterSpecificProvider: "test-provider",
}

const wrapper = createWrapper()
const { result } = renderHook(() => useSelectedModel(apiConfiguration), { wrapper })

expect(result.current.id).toBe("test-model")
expect(result.current.info).toEqual(specificProviderInfo)
// Should fall back to provider default since "test-model" doesn't exist
expect(result.current.id).toBe("anthropic/claude-sonnet-4.5")
// Should still use specific provider info for the default model if specified
expect(result.current.info).toEqual({
...{
maxTokens: 8192,
contextWindow: 200_000,
supportsImages: true,
supportsPromptCache: true,
inputPrice: 3.0,
outputPrice: 15.0,
cacheWritesPrice: 3.75,
cacheReadsPrice: 0.3,
},
...specificProviderInfo,
})
})

it("should demonstrate the merging behavior validates the comment about missing fields", () => {
Expand Down Expand Up @@ -244,12 +269,12 @@ describe("useSelectedModel", () => {
expect(result.current.info).toEqual(baseModelInfo)
})

it("should fall back to default when both base and specific provider info are missing", () => {
it("should fall back to default when configured model and provider don't exist", () => {
mockUseRouterModels.mockReturnValue({
data: {
openrouter: {
"anthropic/claude-sonnet-4": {
// Default model
"anthropic/claude-sonnet-4.5": {
// Default model - using correct default model name
maxTokens: 8192,
contextWindow: 200_000,
supportsImages: true,
Expand Down Expand Up @@ -285,8 +310,19 @@ describe("useSelectedModel", () => {
const wrapper = createWrapper()
const { result } = renderHook(() => useSelectedModel(apiConfiguration), { wrapper })

expect(result.current.id).toBe("non-existent-model")
expect(result.current.info).toBeUndefined()
// Should fall back to provider default since "non-existent-model" doesn't exist
expect(result.current.id).toBe("anthropic/claude-sonnet-4.5")
// Should use base model info since provider doesn't exist
expect(result.current.info).toEqual({
maxTokens: 8192,
contextWindow: 200_000,
supportsImages: true,
supportsPromptCache: true,
inputPrice: 3.0,
outputPrice: 15.0,
cacheWritesPrice: 3.75,
cacheReadsPrice: 0.3,
})
})
})

Expand Down
Loading