
Commit 75f93c4

feat: add SambaNova provider integration (#6188)

1 parent f3d2504
File tree

34 files changed: +385 -0 lines changed

.github/ISSUE_TEMPLATE/bug_report.yml

Lines changed: 1 addition & 0 deletions
@@ -38,6 +38,7 @@ body:
         - OpenAI Compatible
         - OpenRouter
         - Requesty
+        - SambaNova
         - Unbound
         - VS Code Language Model API
         - xAI (Grok)

packages/types/src/global-settings.ts

Lines changed: 1 addition & 0 deletions
@@ -187,6 +187,7 @@ export const SECRET_STATE_KEYS = [
 	"codebaseIndexGeminiApiKey",
 	"codebaseIndexMistralApiKey",
 	"huggingFaceApiKey",
+	"sambaNovaApiKey",
 ] as const satisfies readonly (keyof ProviderSettings)[]
 export type SecretState = Pick<ProviderSettings, (typeof SECRET_STATE_KEYS)[number]>
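Since SecretState is derived from SECRET_STATE_KEYS via Pick, the new key becomes part of the secret-state type with no further changes. A minimal sketch of what this enables, assuming the picked fields remain optional and the literal value is a placeholder:

import type { SecretState } from "@roo-code/types"

// sambaNovaApiKey now type-checks as a member of SecretState,
// alongside the other provider secrets.
const secrets: SecretState = {
	sambaNovaApiKey: "placeholder-key", // illustrative value, not a real credential
}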

packages/types/src/provider-settings.ts

Lines changed: 7 additions & 0 deletions
@@ -33,6 +33,7 @@ export const providerNames = [
 	"chutes",
 	"litellm",
 	"huggingface",
+	"sambanova",
 ] as const
 
 export const providerNamesSchema = z.enum(providerNames)
@@ -241,6 +242,10 @@ const litellmSchema = baseProviderSettingsSchema.extend({
 	litellmUsePromptCache: z.boolean().optional(),
 })
 
+const sambaNovaSchema = apiModelIdProviderModelSchema.extend({
+	sambaNovaApiKey: z.string().optional(),
+})
+
 const defaultSchema = z.object({
 	apiProvider: z.undefined(),
 })
@@ -271,6 +276,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
 	huggingFaceSchema.merge(z.object({ apiProvider: z.literal("huggingface") })),
 	chutesSchema.merge(z.object({ apiProvider: z.literal("chutes") })),
 	litellmSchema.merge(z.object({ apiProvider: z.literal("litellm") })),
+	sambaNovaSchema.merge(z.object({ apiProvider: z.literal("sambanova") })),
 	defaultSchema,
 ])
@@ -301,6 +307,7 @@ export const providerSettingsSchema = z.object({
 	...huggingFaceSchema.shape,
 	...chutesSchema.shape,
 	...litellmSchema.shape,
+	...sambaNovaSchema.shape,
 	...codebaseIndexProviderSchema.shape,
})
packages/types/src/providers/index.ts

Lines changed: 1 addition & 0 deletions
@@ -15,6 +15,7 @@ export * from "./ollama.js"
 export * from "./openai.js"
 export * from "./openrouter.js"
 export * from "./requesty.js"
+export * from "./sambanova.js"
 export * from "./unbound.js"
 export * from "./vertex.js"
 export * from "./vscode-llm.js"
packages/types/src/providers/sambanova.ts

Lines changed: 90 additions & 0 deletions

@@ -0,0 +1,90 @@
+import type { ModelInfo } from "../model.js"
+
+// https://docs.sambanova.ai/cloud/docs/get-started/supported-models
+export type SambaNovaModelId =
+	| "Meta-Llama-3.1-8B-Instruct"
+	| "Meta-Llama-3.3-70B-Instruct"
+	| "DeepSeek-R1"
+	| "DeepSeek-V3-0324"
+	| "DeepSeek-R1-Distill-Llama-70B"
+	| "Llama-4-Maverick-17B-128E-Instruct"
+	| "Llama-3.3-Swallow-70B-Instruct-v0.4"
+	| "Qwen3-32B"
+
+export const sambaNovaDefaultModelId: SambaNovaModelId = "Meta-Llama-3.3-70B-Instruct"
+
+export const sambaNovaModels = {
+	"Meta-Llama-3.1-8B-Instruct": {
+		maxTokens: 8192,
+		contextWindow: 16384,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.1,
+		outputPrice: 0.2,
+		description: "Meta Llama 3.1 8B Instruct model with 16K context window.",
+	},
+	"Meta-Llama-3.3-70B-Instruct": {
+		maxTokens: 8192,
+		contextWindow: 131072,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.6,
+		outputPrice: 1.2,
+		description: "Meta Llama 3.3 70B Instruct model with 128K context window.",
+	},
+	"DeepSeek-R1": {
+		maxTokens: 8192,
+		contextWindow: 32768,
+		supportsImages: false,
+		supportsPromptCache: false,
+		supportsReasoningBudget: true,
+		inputPrice: 5.0,
+		outputPrice: 7.0,
+		description: "DeepSeek R1 reasoning model with 32K context window.",
+	},
+	"DeepSeek-V3-0324": {
+		maxTokens: 8192,
+		contextWindow: 32768,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 3.0,
+		outputPrice: 4.5,
+		description: "DeepSeek V3 model with 32K context window.",
+	},
+	"DeepSeek-R1-Distill-Llama-70B": {
+		maxTokens: 8192,
+		contextWindow: 131072,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.7,
+		outputPrice: 1.4,
+		description: "DeepSeek R1 distilled Llama 70B model with 128K context window.",
+	},
+	"Llama-4-Maverick-17B-128E-Instruct": {
+		maxTokens: 8192,
+		contextWindow: 131072,
+		supportsImages: true,
+		supportsPromptCache: false,
+		inputPrice: 0.63,
+		outputPrice: 1.8,
+		description: "Meta Llama 4 Maverick 17B 128E Instruct model with 128K context window.",
+	},
+	"Llama-3.3-Swallow-70B-Instruct-v0.4": {
+		maxTokens: 8192,
+		contextWindow: 16384,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.6,
+		outputPrice: 1.2,
+		description: "Tokyotech Llama 3.3 Swallow 70B Instruct v0.4 model with 16K context window.",
+	},
+	"Qwen3-32B": {
+		maxTokens: 8192,
+		contextWindow: 8192,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.4,
+		outputPrice: 0.8,
+		description: "Alibaba Qwen 3 32B model with 8K context window.",
+	},
+} as const satisfies Record<string, ModelInfo>
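Every entry satisfies the shared ModelInfo shape, so callers can read limits and pricing by model id. A hypothetical helper (resolveSambaNovaModel is not part of this commit) sketching the fall-back-to-default lookup that the handler's getModel() tests exercise:

import { sambaNovaModels, sambaNovaDefaultModelId, type SambaNovaModelId } from "@roo-code/types"

// Resolve a possibly-unset or unknown model id to its metadata,
// falling back to the default model.
function resolveSambaNovaModel(id?: string) {
	const modelId: SambaNovaModelId =
		id && id in sambaNovaModels ? (id as SambaNovaModelId) : sambaNovaDefaultModelId
	return { id: modelId, info: sambaNovaModels[modelId] }
}

console.log(resolveSambaNovaModel("DeepSeek-R1").info.contextWindow) // 32768
console.log(resolveSambaNovaModel(undefined).id) // "Meta-Llama-3.3-70B-Instruct"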

src/api/index.ts

Lines changed: 3 additions & 0 deletions
@@ -30,6 +30,7 @@ import {
 	ChutesHandler,
 	LiteLLMHandler,
 	ClaudeCodeHandler,
+	SambaNovaHandler,
 } from "./providers"
 
 export interface SingleCompletionHandler {
@@ -115,6 +116,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
 			return new ChutesHandler(options)
 		case "litellm":
 			return new LiteLLMHandler(options)
+		case "sambanova":
+			return new SambaNovaHandler(options)
 		default:
 			apiProvider satisfies "gemini-cli" | undefined
 			return new AnthropicHandler(options)
src/api/providers/__tests__/sambanova.spec.ts

Lines changed: 154 additions & 0 deletions

@@ -0,0 +1,154 @@
+// npx vitest run src/api/providers/__tests__/sambanova.spec.ts
+
+// Mock vscode first to avoid import errors
+vitest.mock("vscode", () => ({}))
+
+import OpenAI from "openai"
+import { Anthropic } from "@anthropic-ai/sdk"
+
+import { type SambaNovaModelId, sambaNovaDefaultModelId, sambaNovaModels } from "@roo-code/types"
+
+import { SambaNovaHandler } from "../sambanova"
+
+vitest.mock("openai", () => {
+	const createMock = vitest.fn()
+	return {
+		default: vitest.fn(() => ({ chat: { completions: { create: createMock } } })),
+	}
+})
+
+describe("SambaNovaHandler", () => {
+	let handler: SambaNovaHandler
+	let mockCreate: any
+
+	beforeEach(() => {
+		vitest.clearAllMocks()
+		mockCreate = (OpenAI as unknown as any)().chat.completions.create
+		handler = new SambaNovaHandler({ sambaNovaApiKey: "test-sambanova-api-key" })
+	})
+
+	it("should use the correct SambaNova base URL", () => {
+		new SambaNovaHandler({ sambaNovaApiKey: "test-sambanova-api-key" })
+		expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ baseURL: "https://api.sambanova.ai/v1" }))
+	})
+
+	it("should use the provided API key", () => {
+		const sambaNovaApiKey = "test-sambanova-api-key"
+		new SambaNovaHandler({ sambaNovaApiKey })
+		expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: sambaNovaApiKey }))
+	})
+
+	it("should return default model when no model is specified", () => {
+		const model = handler.getModel()
+		expect(model.id).toBe(sambaNovaDefaultModelId)
+		expect(model.info).toEqual(sambaNovaModels[sambaNovaDefaultModelId])
+	})
+
+	it("should return specified model when valid model is provided", () => {
+		const testModelId: SambaNovaModelId = "Meta-Llama-3.3-70B-Instruct"
+		const handlerWithModel = new SambaNovaHandler({
+			apiModelId: testModelId,
+			sambaNovaApiKey: "test-sambanova-api-key",
+		})
+		const model = handlerWithModel.getModel()
+		expect(model.id).toBe(testModelId)
+		expect(model.info).toEqual(sambaNovaModels[testModelId])
+	})
+
+	it("completePrompt method should return text from SambaNova API", async () => {
+		const expectedResponse = "This is a test response from SambaNova"
+		mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: expectedResponse } }] })
+		const result = await handler.completePrompt("test prompt")
+		expect(result).toBe(expectedResponse)
+	})
+
+	it("should handle errors in completePrompt", async () => {
+		const errorMessage = "SambaNova API error"
+		mockCreate.mockRejectedValueOnce(new Error(errorMessage))
+		await expect(handler.completePrompt("test prompt")).rejects.toThrow(
+			`SambaNova completion error: ${errorMessage}`,
+		)
+	})
+
+	it("createMessage should yield text content from stream", async () => {
+		const testContent = "This is test content from SambaNova stream"
+
+		mockCreate.mockImplementationOnce(() => {
+			return {
+				[Symbol.asyncIterator]: () => ({
+					next: vitest
+						.fn()
+						.mockResolvedValueOnce({
+							done: false,
+							value: { choices: [{ delta: { content: testContent } }] },
+						})
+						.mockResolvedValueOnce({ done: true }),
+				}),
+			}
+		})
+
+		const stream = handler.createMessage("system prompt", [])
+		const firstChunk = await stream.next()
+
+		expect(firstChunk.done).toBe(false)
+		expect(firstChunk.value).toEqual({ type: "text", text: testContent })
+	})
+
+	it("createMessage should yield usage data from stream", async () => {
+		mockCreate.mockImplementationOnce(() => {
+			return {
+				[Symbol.asyncIterator]: () => ({
+					next: vitest
+						.fn()
+						.mockResolvedValueOnce({
+							done: false,
+							value: { choices: [{ delta: {} }], usage: { prompt_tokens: 10, completion_tokens: 20 } },
+						})
+						.mockResolvedValueOnce({ done: true }),
+				}),
+			}
+		})
+
+		const stream = handler.createMessage("system prompt", [])
+		const firstChunk = await stream.next()
+
+		expect(firstChunk.done).toBe(false)
+		expect(firstChunk.value).toEqual({ type: "usage", inputTokens: 10, outputTokens: 20 })
+	})
+
+	it("createMessage should pass correct parameters to SambaNova client", async () => {
+		const modelId: SambaNovaModelId = "Meta-Llama-3.3-70B-Instruct"
+		const modelInfo = sambaNovaModels[modelId]
+		const handlerWithModel = new SambaNovaHandler({
+			apiModelId: modelId,
+			sambaNovaApiKey: "test-sambanova-api-key",
+		})
+
+		mockCreate.mockImplementationOnce(() => {
+			return {
+				[Symbol.asyncIterator]: () => ({
+					async next() {
+						return { done: true }
+					},
+				}),
+			}
+		})
+
+		const systemPrompt = "Test system prompt for SambaNova"
+		const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message for SambaNova" }]
+
+		const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages)
+		await messageGenerator.next()
+
+		expect(mockCreate).toHaveBeenCalledWith(
+			expect.objectContaining({
+				model: modelId,
+				max_tokens: modelInfo.maxTokens,
+				temperature: 0.7,
+				messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
+				stream: true,
+				stream_options: { include_usage: true },
+			}),
+		)
+	})
+})

src/api/providers/index.ts

Lines changed: 1 addition & 0 deletions
@@ -19,6 +19,7 @@ export { OpenAiNativeHandler } from "./openai-native"
 export { OpenAiHandler } from "./openai"
 export { OpenRouterHandler } from "./openrouter"
 export { RequestyHandler } from "./requesty"
+export { SambaNovaHandler } from "./sambanova"
 export { UnboundHandler } from "./unbound"
 export { VertexHandler } from "./vertex"
 export { VsCodeLmHandler } from "./vscode-lm"

src/api/providers/sambanova.ts

Lines changed: 19 additions & 0 deletions
@@ -0,0 +1,19 @@
+import { type SambaNovaModelId, sambaNovaDefaultModelId, sambaNovaModels } from "@roo-code/types"
+
+import type { ApiHandlerOptions } from "../../shared/api"
+
+import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider"
+
+export class SambaNovaHandler extends BaseOpenAiCompatibleProvider<SambaNovaModelId> {
+	constructor(options: ApiHandlerOptions) {
+		super({
+			...options,
+			providerName: "SambaNova",
+			baseURL: "https://api.sambanova.ai/v1",
+			apiKey: options.sambaNovaApiKey,
+			defaultProviderModelId: sambaNovaDefaultModelId,
+			providerModels: sambaNovaModels,
+			defaultTemperature: 0.7,
+		})
+	}
+}
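The handler inherits its entire chat surface from BaseOpenAiCompatibleProvider; only the endpoint, key, model table, and default temperature are SambaNova-specific. A usage sketch based on the createMessage/completePrompt surface exercised by sambanova.spec.ts above; the prompt strings and key are illustrative, and the snippet assumes an async context:

const handler = new SambaNovaHandler({ sambaNovaApiKey: "placeholder-key" })

// Streamed chat: chunks follow the { type: "text" } / { type: "usage" }
// shapes asserted in the tests.
for await (const chunk of handler.createMessage("You are terse.", [
	{ role: "user", content: "Say hello." },
])) {
	if (chunk.type === "text") process.stdout.write(chunk.text)
	if (chunk.type === "usage") console.log(chunk.inputTokens, chunk.outputTokens)
}

// Single-shot completion.
const text = await handler.completePrompt("Say hello.")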

src/shared/ProfileValidator.ts

Lines changed: 1 addition & 0 deletions
@@ -65,6 +65,7 @@ export class ProfileValidator {
 			case "deepseek":
 			case "xai":
 			case "groq":
+			case "sambanova":
 			case "chutes":
 				return profile.apiModelId
 			case "litellm":
