
Commit c78fbed

Merge pull request RooCodeInc#658 from samhvw8/feat/roo-code-requesty-provider
feat: add Requesty API provider support
2 parents 059de0f + 2449c49 commit c78fbed

File tree

15 files changed (+545 -21 lines)


src/api/index.ts

Lines changed: 3 additions & 0 deletions
@@ -15,6 +15,7 @@ import { MistralHandler } from "./providers/mistral"
 import { VsCodeLmHandler } from "./providers/vscode-lm"
 import { ApiStream } from "./transform/stream"
 import { UnboundHandler } from "./providers/unbound"
+import { RequestyHandler } from "./providers/requesty"
 
 export interface SingleCompletionHandler {
     completePrompt(prompt: string): Promise<string>
@@ -56,6 +57,8 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
             return new MistralHandler(options)
         case "unbound":
             return new UnboundHandler(options)
+        case "requesty":
+            return new RequestyHandler(options)
         default:
             return new AnthropicHandler(options)
     }
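With this case in place, any configuration that names the Requesty provider is routed to the new handler. A minimal sketch of what that selection might look like from the caller's side (the `apiProvider` discriminator and the import path are assumptions not shown in this diff, and the model id is a placeholder):

    import { buildApiHandler } from "../api" // assumed relative path to src/api/index.ts

    const handler = buildApiHandler({
        apiProvider: "requesty", // assumed discriminator field on ApiConfiguration
        requestyApiKey: "<requesty-api-key>",
        requestyModelId: "openai/gpt-4o-mini", // hypothetical model id
    })
    // handler should be a RequestyHandler, so handler.getModel().id === "openai/gpt-4o-mini"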

src/api/providers/__tests__/requesty.test.ts

Lines changed: 247 additions & 0 deletions
@@ -0,0 +1,247 @@
import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"
import { ApiHandlerOptions, ModelInfo, requestyModelInfoSaneDefaults } from "../../../shared/api"
import { RequestyHandler } from "../requesty"
import { convertToOpenAiMessages } from "../../transform/openai-format"
import { convertToR1Format } from "../../transform/r1-format"

// Mock OpenAI and transform functions
jest.mock("openai")
jest.mock("../../transform/openai-format")
jest.mock("../../transform/r1-format")

describe("RequestyHandler", () => {
    let handler: RequestyHandler
    let mockCreate: jest.Mock

    const defaultOptions: ApiHandlerOptions = {
        requestyApiKey: "test-key",
        requestyModelId: "test-model",
        requestyModelInfo: {
            maxTokens: 1000,
            contextWindow: 4000,
            supportsPromptCache: false,
            supportsImages: true,
            inputPrice: 0,
            outputPrice: 0,
        },
        openAiStreamingEnabled: true,
        includeMaxTokens: true, // Add this to match the implementation
    }

    beforeEach(() => {
        // Clear mocks
        jest.clearAllMocks()

        // Setup mock create function
        mockCreate = jest.fn()

        // Mock OpenAI constructor
        ;(OpenAI as jest.MockedClass<typeof OpenAI>).mockImplementation(
            () =>
                ({
                    chat: {
                        completions: {
                            create: mockCreate,
                        },
                    },
                }) as unknown as OpenAI,
        )

        // Mock transform functions
        ;(convertToOpenAiMessages as jest.Mock).mockImplementation((messages) => messages)
        ;(convertToR1Format as jest.Mock).mockImplementation((messages) => messages)

        // Create handler instance
        handler = new RequestyHandler(defaultOptions)
    })

    describe("constructor", () => {
        it("should initialize with correct options", () => {
            expect(OpenAI).toHaveBeenCalledWith({
                baseURL: "https://router.requesty.ai/v1",
                apiKey: defaultOptions.requestyApiKey,
                defaultHeaders: {
                    "HTTP-Referer": "https://github.com/RooVetGit/Roo-Cline",
                    "X-Title": "Roo Code",
                },
            })
        })
    })

    describe("createMessage", () => {
        const systemPrompt = "You are a helpful assistant"
        const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]

        describe("with streaming enabled", () => {
            beforeEach(() => {
                const stream = {
                    [Symbol.asyncIterator]: async function* () {
                        yield {
                            choices: [{ delta: { content: "Hello" } }],
                        }
                        yield {
                            choices: [{ delta: { content: " world" } }],
                            usage: {
                                prompt_tokens: 10,
                                completion_tokens: 5,
                            },
                        }
                    },
                }
                mockCreate.mockResolvedValue(stream)
            })

            it("should handle streaming response correctly", async () => {
                const stream = handler.createMessage(systemPrompt, messages)
                const results = []

                for await (const chunk of stream) {
                    results.push(chunk)
                }

                expect(results).toEqual([
                    { type: "text", text: "Hello" },
                    { type: "text", text: " world" },
                    {
                        type: "usage",
                        inputTokens: 10,
                        outputTokens: 5,
                        cacheWriteTokens: undefined,
                        cacheReadTokens: undefined,
                    },
                ])

                expect(mockCreate).toHaveBeenCalledWith({
                    model: defaultOptions.requestyModelId,
                    temperature: 0,
                    messages: [
                        { role: "system", content: systemPrompt },
                        { role: "user", content: "Hello" },
                    ],
                    stream: true,
                    stream_options: { include_usage: true },
                    max_tokens: defaultOptions.requestyModelInfo?.maxTokens,
                })
            })

            it("should not include max_tokens when includeMaxTokens is false", async () => {
                handler = new RequestyHandler({
                    ...defaultOptions,
                    includeMaxTokens: false,
                })

                await handler.createMessage(systemPrompt, messages).next()

                expect(mockCreate).toHaveBeenCalledWith(
                    expect.not.objectContaining({
                        max_tokens: expect.any(Number),
                    }),
                )
            })

            it("should handle deepseek-reasoner model format", async () => {
                handler = new RequestyHandler({
                    ...defaultOptions,
                    requestyModelId: "deepseek-reasoner",
                })

                await handler.createMessage(systemPrompt, messages).next()

                expect(convertToR1Format).toHaveBeenCalledWith([{ role: "user", content: systemPrompt }, ...messages])
            })
        })

        describe("with streaming disabled", () => {
            beforeEach(() => {
                handler = new RequestyHandler({
                    ...defaultOptions,
                    openAiStreamingEnabled: false,
                })

                mockCreate.mockResolvedValue({
                    choices: [{ message: { content: "Hello world" } }],
                    usage: {
                        prompt_tokens: 10,
                        completion_tokens: 5,
                    },
                })
            })

            it("should handle non-streaming response correctly", async () => {
                const stream = handler.createMessage(systemPrompt, messages)
                const results = []

                for await (const chunk of stream) {
                    results.push(chunk)
                }

                expect(results).toEqual([
                    { type: "text", text: "Hello world" },
                    {
                        type: "usage",
                        inputTokens: 10,
                        outputTokens: 5,
                    },
                ])

                expect(mockCreate).toHaveBeenCalledWith({
                    model: defaultOptions.requestyModelId,
                    messages: [
                        { role: "user", content: systemPrompt },
                        { role: "user", content: "Hello" },
                    ],
                })
            })
        })
    })

    describe("getModel", () => {
        it("should return correct model information", () => {
            const result = handler.getModel()
            expect(result).toEqual({
                id: defaultOptions.requestyModelId,
                info: defaultOptions.requestyModelInfo,
            })
        })

        it("should use sane defaults when no model info provided", () => {
            handler = new RequestyHandler({
                ...defaultOptions,
                requestyModelInfo: undefined,
            })

            const result = handler.getModel()
            expect(result).toEqual({
                id: defaultOptions.requestyModelId,
                info: requestyModelInfoSaneDefaults,
            })
        })
    })

    describe("completePrompt", () => {
        beforeEach(() => {
            mockCreate.mockResolvedValue({
                choices: [{ message: { content: "Completed response" } }],
            })
        })

        it("should complete prompt successfully", async () => {
            const result = await handler.completePrompt("Test prompt")
            expect(result).toBe("Completed response")
            expect(mockCreate).toHaveBeenCalledWith({
                model: defaultOptions.requestyModelId,
                messages: [{ role: "user", content: "Test prompt" }],
            })
        })

        it("should handle errors correctly", async () => {
            const errorMessage = "API error"
            mockCreate.mockRejectedValue(new Error(errorMessage))

            await expect(handler.completePrompt("Test prompt")).rejects.toThrow(
                `OpenAI completion error: ${errorMessage}`,
            )
        })
    })
})

src/api/providers/deepseek.ts

Lines changed: 3 additions & 3 deletions
@@ -1,9 +1,9 @@
-import { OpenAiHandler } from "./openai"
-import { ApiHandlerOptions, ModelInfo } from "../../shared/api"
+import { OpenAiHandler, OpenAiHandlerOptions } from "./openai"
+import { ModelInfo } from "../../shared/api"
 import { deepSeekModels, deepSeekDefaultModelId } from "../../shared/api"
 
 export class DeepSeekHandler extends OpenAiHandler {
-    constructor(options: ApiHandlerOptions) {
+    constructor(options: OpenAiHandlerOptions) {
         super({
             ...options,
             openAiApiKey: options.deepSeekApiKey ?? "not-provided",

src/api/providers/openai.ts

Lines changed: 18 additions & 14 deletions
@@ -11,16 +11,20 @@ import { ApiHandler, SingleCompletionHandler } from "../index"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { convertToR1Format } from "../transform/r1-format"
 import { convertToSimpleMessages } from "../transform/simple-format"
-import { ApiStream } from "../transform/stream"
+import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
+
+export interface OpenAiHandlerOptions extends ApiHandlerOptions {
+    defaultHeaders?: Record<string, string>
+}
 
 export const DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6
 const OPENAI_DEFAULT_TEMPERATURE = 0
 
 export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
-    protected options: ApiHandlerOptions
+    protected options: OpenAiHandlerOptions
     private client: OpenAI
 
-    constructor(options: ApiHandlerOptions) {
+    constructor(options: OpenAiHandlerOptions) {
         this.options = options
 
         const baseURL = this.options.openAiBaseUrl ?? "https://api.openai.com/v1"
@@ -44,7 +48,7 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
                 apiVersion: this.options.azureApiVersion || azureOpenAiDefaultApiVersion,
             })
         } else {
-            this.client = new OpenAI({ baseURL, apiKey })
+            this.client = new OpenAI({ baseURL, apiKey, defaultHeaders: this.options.defaultHeaders })
         }
     }
 
@@ -103,11 +107,7 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
                     }
                 }
                 if (chunk.usage) {
-                    yield {
-                        type: "usage",
-                        inputTokens: chunk.usage.prompt_tokens || 0,
-                        outputTokens: chunk.usage.completion_tokens || 0,
-                    }
+                    yield this.processUsageMetrics(chunk.usage)
                 }
             }
         } else {
@@ -130,11 +130,15 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
                 type: "text",
                 text: response.choices[0]?.message.content || "",
             }
-            yield {
-                type: "usage",
-                inputTokens: response.usage?.prompt_tokens || 0,
-                outputTokens: response.usage?.completion_tokens || 0,
-            }
+            yield this.processUsageMetrics(response.usage)
+        }
+    }
+
+    protected processUsageMetrics(usage: any): ApiStreamUsageChunk {
+        return {
+            type: "usage",
+            inputTokens: usage?.prompt_tokens || 0,
+            outputTokens: usage?.completion_tokens || 0,
         }
     }
 
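The new `defaultHeaders` option and the protected `processUsageMetrics` hook are the two extension points that make thin OpenAI-compatible wrappers (like the Requesty provider below) possible: a subclass can brand outgoing requests and reshape provider-specific usage fields without re-implementing the streaming loop. A minimal sketch of the header pass-through used directly on `OpenAiHandler`, with option names taken from elsewhere in this diff and placeholder endpoint/model values:

    import { OpenAiHandler } from "./openai"

    const handler = new OpenAiHandler({
        openAiBaseUrl: "https://example-gateway.invalid/v1", // placeholder OpenAI-compatible endpoint
        openAiApiKey: "<gateway-api-key>",
        openAiModelId: "gpt-4o-mini", // placeholder model id
        openAiStreamingEnabled: true,
        defaultHeaders: { "X-Title": "Roo Code" }, // forwarded verbatim to the OpenAI client constructor
    })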

src/api/providers/requesty.ts

Lines changed: 40 additions & 0 deletions
@@ -0,0 +1,40 @@
import { OpenAiHandler, OpenAiHandlerOptions } from "./openai"
import { ModelInfo, requestyModelInfoSaneDefaults, requestyDefaultModelId } from "../../shared/api"
import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"

export class RequestyHandler extends OpenAiHandler {
    constructor(options: OpenAiHandlerOptions) {
        if (!options.requestyApiKey) {
            throw new Error("Requesty API key is required. Please provide it in the settings.")
        }
        super({
            ...options,
            openAiApiKey: options.requestyApiKey,
            openAiModelId: options.requestyModelId ?? requestyDefaultModelId,
            openAiBaseUrl: "https://router.requesty.ai/v1",
            openAiCustomModelInfo: options.requestyModelInfo ?? requestyModelInfoSaneDefaults,
            defaultHeaders: {
                "HTTP-Referer": "https://github.com/RooVetGit/Roo-Cline",
                "X-Title": "Roo Code",
            },
        })
    }

    override getModel(): { id: string; info: ModelInfo } {
        const modelId = this.options.requestyModelId ?? requestyDefaultModelId
        return {
            id: modelId,
            info: this.options.requestyModelInfo ?? requestyModelInfoSaneDefaults,
        }
    }

    protected override processUsageMetrics(usage: any): ApiStreamUsageChunk {
        return {
            type: "usage",
            inputTokens: usage?.prompt_tokens || 0,
            outputTokens: usage?.completion_tokens || 0,
            cacheWriteTokens: usage?.cache_creation_input_tokens,
            cacheReadTokens: usage?.cache_read_input_tokens,
        }
    }
}
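Because `RequestyHandler` delegates all request handling to `OpenAiHandler`, consuming it looks the same as any other provider; the only visible difference is the extra cache-token fields on usage chunks, which depend on what the Requesty router actually reports. A short usage sketch (key and model id are placeholders):

    const handler = new RequestyHandler({
        requestyApiKey: "<requesty-api-key>",
        requestyModelId: "anthropic/claude-3-5-sonnet", // hypothetical model id
        openAiStreamingEnabled: true,
    })

    // inside an async function
    for await (const chunk of handler.createMessage("You are a helpful assistant", [
        { role: "user", content: "Hello" },
    ])) {
        if (chunk.type === "text") process.stdout.write(chunk.text)
        if (chunk.type === "usage") console.log(chunk.cacheReadTokens, chunk.cacheWriteTokens)
    }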
