
Commit 41fcf85

refactor(api): improve OpenAI handler inheritance
- Add OpenAiHandlerOptions interface for configuration
- Extract processUsageMetrics to base class for reuse
- Update RequestyHandler to extend OpenAiHandler
- Add proper type safety for metrics handling
- Clean up code duplication across handlers
1 parent 91fe764 commit 41fcf85
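
At its core this is a template-method extraction: the base handler yields whatever `processUsageMetrics()` returns, so a subclass can reshape usage reporting without re-implementing the request and streaming logic. A minimal standalone sketch of the pattern (simplified names, not the commit's exact code):

```typescript
// Minimal sketch of the pattern this commit applies (illustrative only):
// the base class yields whatever processUsageMetrics() returns, so subclasses
// reshape usage reporting without re-implementing request/stream handling.
interface UsageChunk {
	type: "usage"
	inputTokens: number
	outputTokens: number
	cacheReadTokens?: number
}

class BaseHandler {
	protected processUsageMetrics(usage: any): UsageChunk {
		return {
			type: "usage",
			inputTokens: usage?.prompt_tokens || 0,
			outputTokens: usage?.completion_tokens || 0,
		}
	}
}

class CachingHandler extends BaseHandler {
	protected override processUsageMetrics(usage: any): UsageChunk {
		// Reuse the base mapping, then add a provider-specific field.
		return {
			...super.processUsageMetrics(usage),
			cacheReadTokens: usage?.cache_read_input_tokens,
		}
	}
}
```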

3 files changed: +46, -131 lines


src/api/providers/deepseek.ts

Lines changed: 3 additions & 3 deletions
```diff
@@ -1,9 +1,9 @@
-import { OpenAiHandler } from "./openai"
-import { ApiHandlerOptions, ModelInfo } from "../../shared/api"
+import { OpenAiHandler, OpenAiHandlerOptions } from "./openai"
+import { ModelInfo } from "../../shared/api"
 import { deepSeekModels, deepSeekDefaultModelId } from "../../shared/api"
 
 export class DeepSeekHandler extends OpenAiHandler {
-	constructor(options: ApiHandlerOptions) {
+	constructor(options: OpenAiHandlerOptions) {
 		super({
 			...options,
 			openAiApiKey: options.deepSeekApiKey ?? "not-provided",
```
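
For orientation, a hedged usage sketch of the updated handler; `deepSeekApiKey` comes from this diff, while `apiModelId` and the environment variable are assumptions about the surrounding codebase:

```typescript
import { DeepSeekHandler } from "./deepseek"

// Hypothetical usage: apiModelId is an assumed ApiHandlerOptions field and may
// be named differently in the real codebase.
const deepseek = new DeepSeekHandler({
	deepSeekApiKey: process.env.DEEPSEEK_API_KEY, // falls back to "not-provided" if undefined
	apiModelId: "deepseek-chat",
})
```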

src/api/providers/openai.ts

Lines changed: 18 additions & 14 deletions
```diff
@@ -11,13 +11,17 @@ import { ApiHandler, SingleCompletionHandler } from "../index"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { convertToR1Format } from "../transform/r1-format"
 import { convertToSimpleMessages } from "../transform/simple-format"
-import { ApiStream } from "../transform/stream"
+import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
+
+export interface OpenAiHandlerOptions extends ApiHandlerOptions {
+	defaultHeaders?: Record<string, string>
+}
 
 export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
-	protected options: ApiHandlerOptions
+	protected options: OpenAiHandlerOptions
 	private client: OpenAI
 
-	constructor(options: ApiHandlerOptions) {
+	constructor(options: OpenAiHandlerOptions) {
 		this.options = options
 
 		const baseURL = this.options.openAiBaseUrl ?? "https://api.openai.com/v1"
@@ -41,7 +45,7 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
 				apiVersion: this.options.azureApiVersion || azureOpenAiDefaultApiVersion,
 			})
 		} else {
-			this.client = new OpenAI({ baseURL, apiKey })
+			this.client = new OpenAI({ baseURL, apiKey, defaultHeaders: this.options.defaultHeaders })
 		}
 	}
 
@@ -98,11 +102,7 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
 					}
 				}
 				if (chunk.usage) {
-					yield {
-						type: "usage",
-						inputTokens: chunk.usage.prompt_tokens || 0,
-						outputTokens: chunk.usage.completion_tokens || 0,
-					}
+					yield this.processUsageMetrics(chunk.usage)
 				}
 			}
 		} else {
@@ -125,11 +125,15 @@
 				type: "text",
 				text: response.choices[0]?.message.content || "",
 			}
-			yield {
-				type: "usage",
-				inputTokens: response.usage?.prompt_tokens || 0,
-				outputTokens: response.usage?.completion_tokens || 0,
-			}
+			yield this.processUsageMetrics(response.usage)
+		}
+	}
+
+	protected processUsageMetrics(usage: any): ApiStreamUsageChunk {
+		return {
+			type: "usage",
+			inputTokens: usage?.prompt_tokens || 0,
+			outputTokens: usage?.completion_tokens || 0,
 		}
 	}
 
```
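
Together, the two new surfaces in this file, `defaultHeaders` on `OpenAiHandlerOptions` and the protected `processUsageMetrics()`, give subclasses a narrow extension point. A hedged sketch of a provider using both; the class is hypothetical, but every name it touches appears in the hunks above:

```typescript
import { OpenAiHandler, OpenAiHandlerOptions } from "./openai"
import { ApiStreamUsageChunk } from "../transform/stream"

// Hypothetical provider exercising both extension points from the hunks above.
class ExampleProviderHandler extends OpenAiHandler {
	constructor(options: OpenAiHandlerOptions) {
		super({
			...options,
			// Forwarded verbatim to the OpenAI client in the non-Azure branch.
			defaultHeaders: { "X-Example-Header": "demo" },
		})
	}

	protected override processUsageMetrics(usage: any): ApiStreamUsageChunk {
		// Start from the base prompt/completion token mapping, then extend it.
		return {
			...super.processUsageMetrics(usage),
			cacheReadTokens: usage?.cache_read_input_tokens,
		}
	}
}
```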

src/api/providers/requesty.ts

Lines changed: 25 additions & 114 deletions
```diff
@@ -1,129 +1,40 @@
-import { Anthropic } from "@anthropic-ai/sdk"
-import OpenAI from "openai"
-
-import { ApiHandlerOptions, ModelInfo, requestyModelInfoSaneDefaults } from "../../shared/api"
-import { ApiHandler, SingleCompletionHandler } from "../index"
-import { convertToOpenAiMessages } from "../transform/openai-format"
-import { convertToR1Format } from "../transform/r1-format"
-import { ApiStream } from "../transform/stream"
-
-export class RequestyHandler implements ApiHandler, SingleCompletionHandler {
-	protected options: ApiHandlerOptions
-	private client: OpenAI
-
-	constructor(options: ApiHandlerOptions) {
-		this.options = options
-		this.client = new OpenAI({
-			baseURL: "https://router.requesty.ai/v1",
-			apiKey: this.options.requestyApiKey,
+import { OpenAiHandler, OpenAiHandlerOptions } from "./openai"
+import { ModelInfo, requestyModelInfoSaneDefaults, requestyDefaultModelId } from "../../shared/api"
+import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
+
+export class RequestyHandler extends OpenAiHandler {
+	constructor(options: OpenAiHandlerOptions) {
+		if (!options.requestyApiKey) {
+			throw new Error("Requesty API key is required. Please provide it in the settings.")
+		}
+		super({
+			...options,
+			openAiApiKey: options.requestyApiKey,
+			openAiModelId: options.requestyModelId ?? requestyDefaultModelId,
+			openAiBaseUrl: "https://router.requesty.ai/v1",
+			openAiCustomModelInfo: options.requestyModelInfo ?? requestyModelInfoSaneDefaults,
 			defaultHeaders: {
 				"HTTP-Referer": "https://github.com/RooVetGit/Roo-Cline",
 				"X-Title": "Roo Code",
 			},
 		})
 	}
 
-	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
-		const modelInfo = this.getModel().info
-		const modelId = this.options.requestyModelId ?? ""
-
-		const deepseekReasoner = modelId.includes("deepseek-reasoner")
-
-		if (this.options.openAiStreamingEnabled ?? true) {
-			const systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = {
-				role: "system",
-				content: systemPrompt,
-			}
-			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
-				model: modelId,
-				temperature: 0,
-				messages: deepseekReasoner
-					? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
-					: [systemMessage, ...convertToOpenAiMessages(messages)],
-				stream: true as const,
-				stream_options: { include_usage: true },
-			}
-			if (this.options.includeMaxTokens) {
-				requestOptions.max_tokens = modelInfo.maxTokens
-			}
-
-			const stream = await this.client.chat.completions.create(requestOptions)
-
-			for await (const chunk of stream) {
-				const delta = chunk.choices[0]?.delta ?? {}
-
-				if (delta.content) {
-					yield {
-						type: "text",
-						text: delta.content,
-					}
-				}
-
-				if ("reasoning_content" in delta && delta.reasoning_content) {
-					yield {
-						type: "reasoning",
-						text: (delta.reasoning_content as string | undefined) || "",
-					}
-				}
-				if (chunk.usage) {
-					yield {
-						type: "usage",
-						inputTokens: chunk.usage.prompt_tokens || 0,
-						outputTokens: chunk.usage.completion_tokens || 0,
-						cacheWriteTokens: (chunk.usage as any).cache_creation_input_tokens || undefined,
-						cacheReadTokens: (chunk.usage as any).cache_read_input_tokens || undefined,
-					}
-				}
-			}
-		} else {
-			// o1 for instance doesnt support streaming, non-1 temp, or system prompt
-			const systemMessage: OpenAI.Chat.ChatCompletionUserMessageParam = {
-				role: "user",
-				content: systemPrompt,
-			}
-
-			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
-				model: modelId,
-				messages: deepseekReasoner
-					? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
-					: [systemMessage, ...convertToOpenAiMessages(messages)],
-			}
-
-			const response = await this.client.chat.completions.create(requestOptions)
-
-			yield {
-				type: "text",
-				text: response.choices[0]?.message.content || "",
-			}
-			yield {
-				type: "usage",
-				inputTokens: response.usage?.prompt_tokens || 0,
-				outputTokens: response.usage?.completion_tokens || 0,
-			}
-		}
-	}
-
-	getModel(): { id: string; info: ModelInfo } {
+	override getModel(): { id: string; info: ModelInfo } {
+		const modelId = this.options.requestyModelId ?? requestyDefaultModelId
 		return {
-			id: this.options.requestyModelId ?? "",
+			id: modelId,
 			info: this.options.requestyModelInfo ?? requestyModelInfoSaneDefaults,
 		}
 	}
 
-	async completePrompt(prompt: string): Promise<string> {
-		try {
-			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
-				model: this.getModel().id,
-				messages: [{ role: "user", content: prompt }],
-			}
-
-			const response = await this.client.chat.completions.create(requestOptions)
-			return response.choices[0]?.message.content || ""
-		} catch (error) {
-			if (error instanceof Error) {
-				throw new Error(`OpenAI completion error: ${error.message}`)
-			}
-			throw error
+	protected override processUsageMetrics(usage: any): ApiStreamUsageChunk {
+		return {
+			type: "usage",
+			inputTokens: usage?.prompt_tokens || 0,
+			outputTokens: usage?.completion_tokens || 0,
+			cacheWriteTokens: usage?.cache_creation_input_tokens,
+			cacheReadTokens: usage?.cache_read_input_tokens,
 		}
 	}
 }
```
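
Finally, a consumption sketch for the slimmed-down handler; `requestyApiKey` and `requestyModelId` come from this diff, while the model id, prompt values, import path, and `demo` wrapper are illustrative assumptions:

```typescript
import { Anthropic } from "@anthropic-ai/sdk"
import { RequestyHandler } from "./requesty"

// Hypothetical usage; the model id and prompt values are illustrative only.
async function demo() {
	const requesty = new RequestyHandler({
		requestyApiKey: process.env.REQUESTY_API_KEY, // constructor now throws if this is missing
		requestyModelId: "anthropic/claude-3-5-sonnet",
	})

	const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "hello" }]

	// createMessage is inherited unchanged from OpenAiHandler.
	for await (const chunk of requesty.createMessage("You are terse.", messages)) {
		if (chunk.type === "usage") {
			// The override surfaces cache metrics alongside the base token counts.
			console.log(chunk.inputTokens, chunk.outputTokens, chunk.cacheReadTokens)
		}
	}
}
```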
