Skip to content

Commit 0176cc5

Browse files
committed
Move constants so they can be used by the webview
1 parent 10371e9 commit 0176cc5

File tree

9 files changed

+50
-43
lines changed

9 files changed

+50
-43
lines changed

src/api/providers/__tests__/openai.test.ts

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
import { OpenAiHandler } from "../openai"
22
import { ApiHandlerOptions } from "../../../shared/api"
33
import { Anthropic } from "@anthropic-ai/sdk"
4-
import { DEEP_SEEK_DEFAULT_TEMPERATURE } from "../constants"
54

65
// Mock OpenAI client
76
const mockCreate = jest.fn()

src/api/providers/constants.ts

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,3 @@ export const DEFAULT_HEADERS = {
66
export const ANTHROPIC_DEFAULT_MAX_TOKENS = 8192
77

88
export const DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6
9-
10-
export const AZURE_AI_INFERENCE_PATH = "/models/chat/completions"
11-
12-
export const REASONING_MODELS = new Set(["x-ai/grok-3-mini-beta", "grok-3-mini-beta", "grok-3-mini-fast-beta"])

src/api/providers/fetchers/__tests__/openrouter.test.ts

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,9 @@ import path from "path"
44

55
import { back as nockBack } from "nock"
66

7-
import { getOpenRouterModels, modelsSupportingPromptCache } from "../openrouter"
7+
import { PROMPT_CACHING_MODELS } from "../../../../shared/api"
8+
9+
import { getOpenRouterModels } from "../openrouter"
810

911
nockBack.fixtures = path.join(__dirname, "fixtures")
1012
nockBack.setMode("dryrun")
@@ -21,7 +23,7 @@ describe("OpenRouter API", () => {
2123
.filter(([_, model]) => model.supportsPromptCache)
2224
.map(([id, _]) => id)
2325
.sort(),
24-
).toEqual(Array.from(modelsSupportingPromptCache).sort())
26+
).toEqual(Array.from(PROMPT_CACHING_MODELS).sort())
2527

2628
expect(
2729
Object.entries(models)

src/api/providers/fetchers/openrouter.ts

Lines changed: 0 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -110,26 +110,3 @@ export async function getOpenRouterModels(options?: ApiHandlerOptions) {
110110

111111
return models
112112
}
113-
114-
export const modelsSupportingPromptCache = new Set([
115-
"anthropic/claude-3-haiku",
116-
"anthropic/claude-3-haiku:beta",
117-
"anthropic/claude-3-opus",
118-
"anthropic/claude-3-opus:beta",
119-
"anthropic/claude-3-sonnet",
120-
"anthropic/claude-3-sonnet:beta",
121-
"anthropic/claude-3.5-haiku",
122-
"anthropic/claude-3.5-haiku-20241022",
123-
"anthropic/claude-3.5-haiku-20241022:beta",
124-
"anthropic/claude-3.5-haiku:beta",
125-
"anthropic/claude-3.5-sonnet",
126-
"anthropic/claude-3.5-sonnet-20240620",
127-
"anthropic/claude-3.5-sonnet-20240620:beta",
128-
"anthropic/claude-3.5-sonnet:beta",
129-
"anthropic/claude-3.7-sonnet",
130-
"anthropic/claude-3.7-sonnet:beta",
131-
"anthropic/claude-3.7-sonnet:thinking",
132-
// "google/gemini-2.0-flash-001",
133-
// "google/gemini-flash-1.5",
134-
// "google/gemini-flash-1.5-8b",
135-
])

src/api/providers/openai.ts

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,9 @@ import { convertToSimpleMessages } from "../transform/simple-format"
1515
import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
1616
import { BaseProvider } from "./base-provider"
1717
import { XmlMatcher } from "../../utils/xml-matcher"
18-
import { DEEP_SEEK_DEFAULT_TEMPERATURE, DEFAULT_HEADERS, AZURE_AI_INFERENCE_PATH } from "./constants"
18+
import { DEFAULT_HEADERS, DEEP_SEEK_DEFAULT_TEMPERATURE } from "./constants"
19+
20+
export const AZURE_AI_INFERENCE_PATH = "/models/chat/completions"
1921

2022
export interface OpenAiHandlerOptions extends ApiHandlerOptions {}
2123

src/api/providers/openrouter.ts

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,15 +2,19 @@ import { Anthropic } from "@anthropic-ai/sdk"
22
import { BetaThinkingConfigParam } from "@anthropic-ai/sdk/resources/beta"
33
import OpenAI from "openai"
44

5-
import { ApiHandlerOptions, ModelInfo, openRouterDefaultModelId, openRouterDefaultModelInfo } from "../../shared/api"
5+
import {
6+
ApiHandlerOptions,
7+
openRouterDefaultModelId,
8+
openRouterDefaultModelInfo,
9+
PROMPT_CACHING_MODELS,
10+
} from "../../shared/api"
611
import { convertToOpenAiMessages } from "../transform/openai-format"
712
import { ApiStreamChunk } from "../transform/stream"
813
import { convertToR1Format } from "../transform/r1-format"
914

15+
import { getModelParams, SingleCompletionHandler } from "../index"
1016
import { DEFAULT_HEADERS, DEEP_SEEK_DEFAULT_TEMPERATURE } from "./constants"
11-
import { getModelParams, SingleCompletionHandler } from ".."
1217
import { BaseProvider } from "./base-provider"
13-
import { modelsSupportingPromptCache } from "./fetchers/openrouter"
1418

1519
const OPENROUTER_DEFAULT_PROVIDER_NAME = "[default]"
1620

@@ -78,7 +82,7 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
7882
// Now with Gemini support: https://openrouter.ai/docs/features/prompt-caching
7983
// Note that we don't check the `ModelInfo` object because it is cached
8084
// in the settings for OpenRouter and the value could be stale.
81-
if (modelsSupportingPromptCache.has(modelId)) {
85+
if (PROMPT_CACHING_MODELS.has(modelId)) {
8286
openAiMessages[0] = {
8387
role: "system",
8488
// @ts-ignore-next-line

src/api/providers/xai.ts

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,13 @@
11
import { Anthropic } from "@anthropic-ai/sdk"
22
import OpenAI from "openai"
3-
import { ApiHandlerOptions, XAIModelId, ModelInfo, xaiDefaultModelId, xaiModels } from "../../shared/api"
3+
4+
import { ApiHandlerOptions, XAIModelId, xaiDefaultModelId, xaiModels, REASONING_MODELS } from "../../shared/api"
45
import { ApiStream } from "../transform/stream"
56
import { convertToOpenAiMessages } from "../transform/openai-format"
6-
import { DEFAULT_HEADERS, REASONING_MODELS } from "./constants"
7+
8+
import { SingleCompletionHandler } from "../index"
9+
import { DEFAULT_HEADERS } from "./constants"
710
import { BaseProvider } from "./base-provider"
8-
import { SingleCompletionHandler } from ".."
911

1012
const XAI_DEFAULT_TEMPERATURE = 0
1113

src/shared/api.ts

Lines changed: 29 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,4 @@
11
import { ModelInfo, ProviderName, ProviderSettings } from "../schemas"
2-
import { REASONING_MODELS } from "../api/providers/constants"
3-
4-
export { REASONING_MODELS }
52

63
export type { ModelInfo, ProviderName as ApiProvider }
74

@@ -1396,3 +1393,32 @@ export const vscodeLlmModels = {
13961393
maxInputTokens: number
13971394
}
13981395
>
1396+
1397+
/**
1398+
* Constants
1399+
*/
1400+
1401+
export const REASONING_MODELS = new Set(["x-ai/grok-3-mini-beta", "grok-3-mini-beta", "grok-3-mini-fast-beta"])
1402+
1403+
export const PROMPT_CACHING_MODELS = new Set([
1404+
"anthropic/claude-3-haiku",
1405+
"anthropic/claude-3-haiku:beta",
1406+
"anthropic/claude-3-opus",
1407+
"anthropic/claude-3-opus:beta",
1408+
"anthropic/claude-3-sonnet",
1409+
"anthropic/claude-3-sonnet:beta",
1410+
"anthropic/claude-3.5-haiku",
1411+
"anthropic/claude-3.5-haiku-20241022",
1412+
"anthropic/claude-3.5-haiku-20241022:beta",
1413+
"anthropic/claude-3.5-haiku:beta",
1414+
"anthropic/claude-3.5-sonnet",
1415+
"anthropic/claude-3.5-sonnet-20240620",
1416+
"anthropic/claude-3.5-sonnet-20240620:beta",
1417+
"anthropic/claude-3.5-sonnet:beta",
1418+
"anthropic/claude-3.7-sonnet",
1419+
"anthropic/claude-3.7-sonnet:beta",
1420+
"anthropic/claude-3.7-sonnet:thinking",
1421+
// "google/gemini-2.0-flash-001",
1422+
// "google/gemini-flash-1.5",
1423+
// "google/gemini-flash-1.5-8b",
1424+
])

webview-ui/src/components/settings/constants.ts

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,10 +9,9 @@ import {
99
openAiNativeModels,
1010
vertexModels,
1111
xaiModels,
12-
REASONING_MODELS,
1312
} from "@roo/shared/api"
1413

15-
export { REASONING_MODELS }
14+
export { REASONING_MODELS, PROMPT_CACHING_MODELS } from "@roo/shared/api"
1615

1716
export const MODELS_BY_PROVIDER: Partial<Record<ApiProvider, Record<string, ModelInfo>>> = {
1817
anthropic: anthropicModels,

0 commit comments

Comments (0)