Commit 91fe764

feat: add Requesty API provider support
- Add RequestyHandler implementation for API integration
- Add RequestyModelPicker component for model selection
- Update shared types and messages for Requesty support
- Update API options to include Requesty provider
1 parent 60ea3c2 commit 91fe764

File tree

12 files changed: +1016 additions, -0 deletions


src/api/index.ts

Lines changed: 3 additions & 0 deletions
@@ -15,6 +15,7 @@ import { MistralHandler } from "./providers/mistral"
 import { VsCodeLmHandler } from "./providers/vscode-lm"
 import { ApiStream } from "./transform/stream"
 import { UnboundHandler } from "./providers/unbound"
+import { RequestyHandler } from "./providers/requesty"

 export interface SingleCompletionHandler {
 	completePrompt(prompt: string): Promise<string>
@@ -56,6 +57,8 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
 			return new MistralHandler(options)
 		case "unbound":
 			return new UnboundHandler(options)
+		case "requesty":
+			return new RequestyHandler(options)
 		default:
 			return new AnthropicHandler(options)
 	}
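
For orientation, selecting the new provider goes through buildApiHandler like any other handler. A minimal sketch follows; the apiProvider discriminator field and the import path for ApiConfiguration are assumptions, since this commit only shows the "requesty" switch case and the requestyApiKey / requestyModelId fields.

import { buildApiHandler } from "../api" // i.e. src/api/index.ts; relative path depends on the caller
import type { ApiConfiguration } from "../shared/api" // import path assumed, not shown in this diff

const config = {
	apiProvider: "requesty", // assumed discriminator matching the switch case above
	requestyApiKey: process.env.REQUESTY_API_KEY,
	requestyModelId: "some-model-id", // placeholder value
} as ApiConfiguration // cast because the full ApiConfiguration shape is not part of this diff

const handler = buildApiHandler(config)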
src/api/providers/__tests__/requesty.test.ts

Lines changed: 247 additions & 0 deletions
@@ -0,0 +1,247 @@
import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"
import { ApiHandlerOptions, ModelInfo, requestyModelInfoSaneDefaults } from "../../../shared/api"
import { RequestyHandler } from "../requesty"
import { convertToOpenAiMessages } from "../../transform/openai-format"
import { convertToR1Format } from "../../transform/r1-format"

// Mock OpenAI and transform functions
jest.mock("openai")
jest.mock("../../transform/openai-format")
jest.mock("../../transform/r1-format")

describe("RequestyHandler", () => {
	let handler: RequestyHandler
	let mockCreate: jest.Mock

	const defaultOptions: ApiHandlerOptions = {
		requestyApiKey: "test-key",
		requestyModelId: "test-model",
		requestyModelInfo: {
			maxTokens: 1000,
			contextWindow: 4000,
			supportsPromptCache: false,
			supportsImages: true,
			inputPrice: 0,
			outputPrice: 0,
		},
		openAiStreamingEnabled: true,
		includeMaxTokens: true, // Add this to match the implementation
	}

	beforeEach(() => {
		// Clear mocks
		jest.clearAllMocks()

		// Setup mock create function
		mockCreate = jest.fn()

		// Mock OpenAI constructor
		;(OpenAI as jest.MockedClass<typeof OpenAI>).mockImplementation(
			() =>
				({
					chat: {
						completions: {
							create: mockCreate,
						},
					},
				}) as unknown as OpenAI,
		)

		// Mock transform functions
		;(convertToOpenAiMessages as jest.Mock).mockImplementation((messages) => messages)
		;(convertToR1Format as jest.Mock).mockImplementation((messages) => messages)

		// Create handler instance
		handler = new RequestyHandler(defaultOptions)
	})

	describe("constructor", () => {
		it("should initialize with correct options", () => {
			expect(OpenAI).toHaveBeenCalledWith({
				baseURL: "https://router.requesty.ai/v1",
				apiKey: defaultOptions.requestyApiKey,
				defaultHeaders: {
					"HTTP-Referer": "https://github.com/RooVetGit/Roo-Cline",
					"X-Title": "Roo Code",
				},
			})
		})
	})

	describe("createMessage", () => {
		const systemPrompt = "You are a helpful assistant"
		const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]

		describe("with streaming enabled", () => {
			beforeEach(() => {
				const stream = {
					[Symbol.asyncIterator]: async function* () {
						yield {
							choices: [{ delta: { content: "Hello" } }],
						}
						yield {
							choices: [{ delta: { content: " world" } }],
							usage: {
								prompt_tokens: 10,
								completion_tokens: 5,
							},
						}
					},
				}
				mockCreate.mockResolvedValue(stream)
			})

			it("should handle streaming response correctly", async () => {
				const stream = handler.createMessage(systemPrompt, messages)
				const results = []

				for await (const chunk of stream) {
					results.push(chunk)
				}

				expect(results).toEqual([
					{ type: "text", text: "Hello" },
					{ type: "text", text: " world" },
					{
						type: "usage",
						inputTokens: 10,
						outputTokens: 5,
						cacheWriteTokens: undefined,
						cacheReadTokens: undefined,
					},
				])

				expect(mockCreate).toHaveBeenCalledWith({
					model: defaultOptions.requestyModelId,
					temperature: 0,
					messages: [
						{ role: "system", content: systemPrompt },
						{ role: "user", content: "Hello" },
					],
					stream: true,
					stream_options: { include_usage: true },
					max_tokens: defaultOptions.requestyModelInfo?.maxTokens,
				})
			})

			it("should not include max_tokens when includeMaxTokens is false", async () => {
				handler = new RequestyHandler({
					...defaultOptions,
					includeMaxTokens: false,
				})

				await handler.createMessage(systemPrompt, messages).next()

				expect(mockCreate).toHaveBeenCalledWith(
					expect.not.objectContaining({
						max_tokens: expect.any(Number),
					}),
				)
			})

			it("should handle deepseek-reasoner model format", async () => {
				handler = new RequestyHandler({
					...defaultOptions,
					requestyModelId: "deepseek-reasoner",
				})

				await handler.createMessage(systemPrompt, messages).next()

				expect(convertToR1Format).toHaveBeenCalledWith([{ role: "user", content: systemPrompt }, ...messages])
			})
		})

		describe("with streaming disabled", () => {
			beforeEach(() => {
				handler = new RequestyHandler({
					...defaultOptions,
					openAiStreamingEnabled: false,
				})

				mockCreate.mockResolvedValue({
					choices: [{ message: { content: "Hello world" } }],
					usage: {
						prompt_tokens: 10,
						completion_tokens: 5,
					},
				})
			})

			it("should handle non-streaming response correctly", async () => {
				const stream = handler.createMessage(systemPrompt, messages)
				const results = []

				for await (const chunk of stream) {
					results.push(chunk)
				}

				expect(results).toEqual([
					{ type: "text", text: "Hello world" },
					{
						type: "usage",
						inputTokens: 10,
						outputTokens: 5,
					},
				])

				expect(mockCreate).toHaveBeenCalledWith({
					model: defaultOptions.requestyModelId,
					messages: [
						{ role: "user", content: systemPrompt },
						{ role: "user", content: "Hello" },
					],
				})
			})
		})
	})

	describe("getModel", () => {
		it("should return correct model information", () => {
			const result = handler.getModel()
			expect(result).toEqual({
				id: defaultOptions.requestyModelId,
				info: defaultOptions.requestyModelInfo,
			})
		})

		it("should use sane defaults when no model info provided", () => {
			handler = new RequestyHandler({
				...defaultOptions,
				requestyModelInfo: undefined,
			})

			const result = handler.getModel()
			expect(result).toEqual({
				id: defaultOptions.requestyModelId,
				info: requestyModelInfoSaneDefaults,
			})
		})
	})

	describe("completePrompt", () => {
		beforeEach(() => {
			mockCreate.mockResolvedValue({
				choices: [{ message: { content: "Completed response" } }],
			})
		})

		it("should complete prompt successfully", async () => {
			const result = await handler.completePrompt("Test prompt")
			expect(result).toBe("Completed response")
			expect(mockCreate).toHaveBeenCalledWith({
				model: defaultOptions.requestyModelId,
				messages: [{ role: "user", content: "Test prompt" }],
			})
		})

		it("should handle errors correctly", async () => {
			const errorMessage = "API error"
			mockCreate.mockRejectedValue(new Error(errorMessage))

			await expect(handler.completePrompt("Test prompt")).rejects.toThrow(
				`OpenAI completion error: ${errorMessage}`,
			)
		})
	})
})

src/api/providers/requesty.ts

Lines changed: 129 additions & 0 deletions
@@ -0,0 +1,129 @@
import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"

import { ApiHandlerOptions, ModelInfo, requestyModelInfoSaneDefaults } from "../../shared/api"
import { ApiHandler, SingleCompletionHandler } from "../index"
import { convertToOpenAiMessages } from "../transform/openai-format"
import { convertToR1Format } from "../transform/r1-format"
import { ApiStream } from "../transform/stream"

export class RequestyHandler implements ApiHandler, SingleCompletionHandler {
	protected options: ApiHandlerOptions
	private client: OpenAI

	constructor(options: ApiHandlerOptions) {
		this.options = options
		this.client = new OpenAI({
			baseURL: "https://router.requesty.ai/v1",
			apiKey: this.options.requestyApiKey,
			defaultHeaders: {
				"HTTP-Referer": "https://github.com/RooVetGit/Roo-Cline",
				"X-Title": "Roo Code",
			},
		})
	}

	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
		const modelInfo = this.getModel().info
		const modelId = this.options.requestyModelId ?? ""

		const deepseekReasoner = modelId.includes("deepseek-reasoner")

		if (this.options.openAiStreamingEnabled ?? true) {
			const systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = {
				role: "system",
				content: systemPrompt,
			}
			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
				model: modelId,
				temperature: 0,
				messages: deepseekReasoner
					? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
					: [systemMessage, ...convertToOpenAiMessages(messages)],
				stream: true as const,
				stream_options: { include_usage: true },
			}
			if (this.options.includeMaxTokens) {
				requestOptions.max_tokens = modelInfo.maxTokens
			}

			const stream = await this.client.chat.completions.create(requestOptions)

			for await (const chunk of stream) {
				const delta = chunk.choices[0]?.delta ?? {}

				if (delta.content) {
					yield {
						type: "text",
						text: delta.content,
					}
				}

				if ("reasoning_content" in delta && delta.reasoning_content) {
					yield {
						type: "reasoning",
						text: (delta.reasoning_content as string | undefined) || "",
					}
				}
				if (chunk.usage) {
					yield {
						type: "usage",
						inputTokens: chunk.usage.prompt_tokens || 0,
						outputTokens: chunk.usage.completion_tokens || 0,
						cacheWriteTokens: (chunk.usage as any).cache_creation_input_tokens || undefined,
						cacheReadTokens: (chunk.usage as any).cache_read_input_tokens || undefined,
					}
				}
			}
		} else {
			// o1 for instance doesn't support streaming, non-1 temp, or system prompt
			const systemMessage: OpenAI.Chat.ChatCompletionUserMessageParam = {
				role: "user",
				content: systemPrompt,
			}

			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
				model: modelId,
				messages: deepseekReasoner
					? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
					: [systemMessage, ...convertToOpenAiMessages(messages)],
			}

			const response = await this.client.chat.completions.create(requestOptions)

			yield {
				type: "text",
				text: response.choices[0]?.message.content || "",
			}
			yield {
				type: "usage",
				inputTokens: response.usage?.prompt_tokens || 0,
				outputTokens: response.usage?.completion_tokens || 0,
			}
		}
	}

	getModel(): { id: string; info: ModelInfo } {
		return {
			id: this.options.requestyModelId ?? "",
			info: this.options.requestyModelInfo ?? requestyModelInfoSaneDefaults,
		}
	}

	async completePrompt(prompt: string): Promise<string> {
		try {
			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
				model: this.getModel().id,
				messages: [{ role: "user", content: prompt }],
			}

			const response = await this.client.chat.completions.create(requestOptions)
			return response.choices[0]?.message.content || ""
		} catch (error) {
			if (error instanceof Error) {
				throw new Error(`OpenAI completion error: ${error.message}`)
			}
			throw error
		}
	}
}
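
For a sense of how the handler is consumed, here is a minimal sketch that drives the ApiStream generator directly. The key, model id, and the Node/ESM context around the loop are placeholders, not values or usage patterns taken from this commit.

import { RequestyHandler } from "./requesty"

// Placeholder options; only fields that RequestyHandler reads above are set here.
const handler = new RequestyHandler({
	requestyApiKey: "YOUR_REQUESTY_KEY",
	requestyModelId: "some-model-id",
	openAiStreamingEnabled: true,
})

for await (const chunk of handler.createMessage("You are a helpful assistant", [
	{ role: "user", content: "Hello" },
])) {
	if (chunk.type === "text") process.stdout.write(chunk.text)
	if (chunk.type === "usage") console.log(`\n${chunk.inputTokens} in / ${chunk.outputTokens} out tokens`)
}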
