
Commit 7383a05

Merge pull request #282 from RooVetGit/open_ai_streaming_toggle
Streaming checkbox for OpenAI-compatible providers
2 parents: 124dc46 + 38df02c

File tree

7 files changed: +260 -45 lines changed


.changeset/light-shoes-rescue.md

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+---
+"roo-cline": patch
+---
+
+Checkbox to disable streaming for OpenAI-compatible providers
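For orientation before reading the diffs: the new openAiStreamingEnabled flag decides whether the handler issues a streaming request or a single non-streaming chat completion. A minimal sketch of the two request shapes it toggles between (illustrative only; the real logic is in src/api/providers/openai.ts below):

	// Sketch, assuming an OpenAI-compatible /chat/completions endpoint.
	const request = (options.openAiStreamingEnabled ?? true)
		? { model, messages, temperature: 0, stream: true, stream_options: { include_usage: true } }
		: { model, messages } // one-shot response; token counts come from response.usage

Either way the handler yields the same "text" and "usage" chunks to its caller.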

README.md

Lines changed: 1 addition & 0 deletions
@@ -18,6 +18,7 @@ A fork of Cline, an autonomous coding agent, with some additional experimental features
 - Support for Amazon Nova and Meta 3, 3.1, and 3.2 models via AWS Bedrock
 - Support for Glama
 - Support for listing models from OpenAI-compatible providers
+- Support for adding OpenAI-compatible models with or without streaming
 - Per-tool MCP auto-approval
 - Enable/disable individual MCP servers
 - Enable/disable the MCP feature overall

Lines changed: 192 additions & 0 deletions
@@ -0,0 +1,192 @@
+import { OpenAiHandler } from '../openai'
+import { ApiHandlerOptions, openAiModelInfoSaneDefaults } from '../../../shared/api'
+import OpenAI, { AzureOpenAI } from 'openai'
+import { Anthropic } from '@anthropic-ai/sdk'
+
+// Mock dependencies
+jest.mock('openai')
+
+describe('OpenAiHandler', () => {
+	const mockOptions: ApiHandlerOptions = {
+		openAiApiKey: 'test-key',
+		openAiModelId: 'gpt-4',
+		openAiStreamingEnabled: true,
+		openAiBaseUrl: 'https://api.openai.com/v1'
+	}
+
+	beforeEach(() => {
+		jest.clearAllMocks()
+	})
+
+	test('constructor initializes with correct options', () => {
+		const handler = new OpenAiHandler(mockOptions)
+		expect(handler).toBeInstanceOf(OpenAiHandler)
+		expect(OpenAI).toHaveBeenCalledWith({
+			apiKey: mockOptions.openAiApiKey,
+			baseURL: mockOptions.openAiBaseUrl
+		})
+	})
+
+	test('constructor initializes Azure client when Azure URL is provided', () => {
+		const azureOptions: ApiHandlerOptions = {
+			...mockOptions,
+			openAiBaseUrl: 'https://example.azure.com',
+			azureApiVersion: '2023-05-15'
+		}
+		const handler = new OpenAiHandler(azureOptions)
+		expect(handler).toBeInstanceOf(OpenAiHandler)
+		expect(AzureOpenAI).toHaveBeenCalledWith({
+			baseURL: azureOptions.openAiBaseUrl,
+			apiKey: azureOptions.openAiApiKey,
+			apiVersion: azureOptions.azureApiVersion
+		})
+	})
+
+	test('getModel returns correct model info', () => {
+		const handler = new OpenAiHandler(mockOptions)
+		const result = handler.getModel()
+
+		expect(result).toEqual({
+			id: mockOptions.openAiModelId,
+			info: openAiModelInfoSaneDefaults
+		})
+	})
+
+	test('createMessage handles streaming correctly when enabled', async () => {
+		const handler = new OpenAiHandler({
+			...mockOptions,
+			openAiStreamingEnabled: true,
+			includeMaxTokens: true
+		})
+
+		const mockStream = {
+			async *[Symbol.asyncIterator]() {
+				yield {
+					choices: [{
+						delta: {
+							content: 'test response'
+						}
+					}],
+					usage: {
+						prompt_tokens: 10,
+						completion_tokens: 5
+					}
+				}
+			}
+		}
+
+		const mockCreate = jest.fn().mockResolvedValue(mockStream)
+		;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
+			completions: { create: mockCreate }
+		} as any
+
+		const systemPrompt = 'test system prompt'
+		const messages: Anthropic.Messages.MessageParam[] = [
+			{ role: 'user', content: 'test message' }
+		]
+
+		const generator = handler.createMessage(systemPrompt, messages)
+		const chunks = []
+
+		for await (const chunk of generator) {
+			chunks.push(chunk)
+		}
+
+		expect(chunks).toEqual([
+			{
+				type: 'text',
+				text: 'test response'
+			},
+			{
+				type: 'usage',
+				inputTokens: 10,
+				outputTokens: 5
+			}
+		])
+
+		expect(mockCreate).toHaveBeenCalledWith({
+			model: mockOptions.openAiModelId,
+			messages: [
+				{ role: 'system', content: systemPrompt },
+				{ role: 'user', content: 'test message' }
+			],
+			temperature: 0,
+			stream: true,
+			stream_options: { include_usage: true },
+			max_tokens: openAiModelInfoSaneDefaults.maxTokens
+		})
+	})
+
+	test('createMessage handles non-streaming correctly when disabled', async () => {
+		const handler = new OpenAiHandler({
+			...mockOptions,
+			openAiStreamingEnabled: false
+		})
+
+		const mockResponse = {
+			choices: [{
+				message: {
+					content: 'test response'
+				}
+			}],
+			usage: {
+				prompt_tokens: 10,
+				completion_tokens: 5
+			}
+		}
+
+		const mockCreate = jest.fn().mockResolvedValue(mockResponse)
+		;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
+			completions: { create: mockCreate }
+		} as any
+
+		const systemPrompt = 'test system prompt'
+		const messages: Anthropic.Messages.MessageParam[] = [
+			{ role: 'user', content: 'test message' }
+		]
+
+		const generator = handler.createMessage(systemPrompt, messages)
+		const chunks = []
+
+		for await (const chunk of generator) {
+			chunks.push(chunk)
+		}
+
+		expect(chunks).toEqual([
+			{
+				type: 'text',
+				text: 'test response'
+			},
+			{
+				type: 'usage',
+				inputTokens: 10,
+				outputTokens: 5
+			}
+		])
+
+		expect(mockCreate).toHaveBeenCalledWith({
+			model: mockOptions.openAiModelId,
+			messages: [
+				{ role: 'user', content: systemPrompt },
+				{ role: 'user', content: 'test message' }
+			]
+		})
+	})
+
+	test('createMessage handles API errors', async () => {
+		const handler = new OpenAiHandler(mockOptions)
+		const mockStream = {
+			async *[Symbol.asyncIterator]() {
+				throw new Error('API Error')
+			}
+		}
+
+		const mockCreate = jest.fn().mockResolvedValue(mockStream)
+		;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
+			completions: { create: mockCreate }
+		} as any
+
+		const generator = handler.createMessage('test', [])
+		await expect(generator.next()).rejects.toThrow('API Error')
+	})
+})
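Assuming the repository's standard Jest setup, and assuming the suite lives at the path implied by its relative imports (../openai and ../../../shared/api resolve from src/api/providers/__tests__/), it can be run on its own with:

	npx jest src/api/providers/__tests__/openai.test.ts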

src/api/providers/openai.ts

Lines changed: 52 additions & 30 deletions
@@ -32,43 +32,65 @@ export class OpenAiHandler implements ApiHandler {
 		}
 	}
 
-	// Include stream_options for OpenAI Compatible providers if the checkbox is checked
 	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
-		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
-			{ role: "system", content: systemPrompt },
-			...convertToOpenAiMessages(messages),
-		]
 		const modelInfo = this.getModel().info
-		const requestOptions: OpenAI.Chat.ChatCompletionCreateParams = {
-			model: this.options.openAiModelId ?? "",
-			messages: openAiMessages,
-			temperature: 0,
-			stream: true,
-		}
-		if (this.options.includeMaxTokens) {
-			requestOptions.max_tokens = modelInfo.maxTokens
-		}
+		const modelId = this.options.openAiModelId ?? ""
 
-		if (this.options.includeStreamOptions ?? true) {
-			requestOptions.stream_options = { include_usage: true }
-		}
+		if (this.options.openAiStreamingEnabled ?? true) {
+			const systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = {
+				role: "system",
+				content: systemPrompt
+			}
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
+				model: modelId,
+				temperature: 0,
+				messages: [systemMessage, ...convertToOpenAiMessages(messages)],
+				stream: true as const,
+				stream_options: { include_usage: true },
+			}
+			if (this.options.includeMaxTokens) {
+				requestOptions.max_tokens = modelInfo.maxTokens
+			}
+
+			const stream = await this.client.chat.completions.create(requestOptions)
 
-		const stream = await this.client.chat.completions.create(requestOptions)
-		for await (const chunk of stream) {
-			const delta = chunk.choices[0]?.delta
-			if (delta?.content) {
-				yield {
-					type: "text",
-					text: delta.content,
+			for await (const chunk of stream) {
+				const delta = chunk.choices[0]?.delta
+				if (delta?.content) {
+					yield {
+						type: "text",
+						text: delta.content,
+					}
 				}
-			}
-			if (chunk.usage) {
-				yield {
-					type: "usage",
-					inputTokens: chunk.usage.prompt_tokens || 0,
-					outputTokens: chunk.usage.completion_tokens || 0,
+				if (chunk.usage) {
+					yield {
+						type: "usage",
+						inputTokens: chunk.usage.prompt_tokens || 0,
+						outputTokens: chunk.usage.completion_tokens || 0,
+					}
 				}
 			}
+		} else {
+			// o1, for instance, doesn't support streaming, non-1 temperature, or a system prompt
+			const systemMessage: OpenAI.Chat.ChatCompletionUserMessageParam = {
+				role: "user",
+				content: systemPrompt
+			}
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+				model: modelId,
+				messages: [systemMessage, ...convertToOpenAiMessages(messages)],
+			}
+			const response = await this.client.chat.completions.create(requestOptions)
+
+			yield {
+				type: "text",
+				text: response.choices[0]?.message.content || "",
+			}
+			yield {
+				type: "usage",
+				inputTokens: response.usage?.prompt_tokens || 0,
+				outputTokens: response.usage?.completion_tokens || 0,
+			}
 		}
 	}
 
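Both branches of createMessage yield the same chunk shapes, so callers consume the handler identically whether streaming is on or off. A minimal usage sketch (OpenAiHandler, its options, and the chunk fields come from this PR; the surrounding script and import path are illustrative):

	import { OpenAiHandler } from "./src/api/providers/openai"

	// Hypothetical configuration for an OpenAI-compatible endpoint.
	const handler = new OpenAiHandler({
		openAiApiKey: "sk-...", // placeholder key
		openAiBaseUrl: "https://api.openai.com/v1",
		openAiModelId: "gpt-4",
		openAiStreamingEnabled: false, // the new toggle; treated as true when unset
	})

	const messages = [{ role: "user" as const, content: "Hello" }]
	for await (const chunk of handler.createMessage("You are a helpful assistant.", messages)) {
		if (chunk.type === "text") process.stdout.write(chunk.text)
		else if (chunk.type === "usage") console.log(`in=${chunk.inputTokens}, out=${chunk.outputTokens}`)
	}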
src/core/webview/ClineProvider.ts

Lines changed: 6 additions & 6 deletions
@@ -66,7 +66,7 @@ type GlobalStateKey =
 	| "lmStudioBaseUrl"
 	| "anthropicBaseUrl"
 	| "azureApiVersion"
-	| "includeStreamOptions"
+	| "openAiStreamingEnabled"
 	| "openRouterModelId"
 	| "openRouterModelInfo"
 	| "openRouterUseMiddleOutTransform"
@@ -447,7 +447,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 					geminiApiKey,
 					openAiNativeApiKey,
 					azureApiVersion,
-					includeStreamOptions,
+					openAiStreamingEnabled,
 					openRouterModelId,
 					openRouterModelInfo,
 					openRouterUseMiddleOutTransform,
@@ -478,7 +478,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 				await this.storeSecret("openAiNativeApiKey", openAiNativeApiKey)
 				await this.storeSecret("deepSeekApiKey", message.apiConfiguration.deepSeekApiKey)
 				await this.updateGlobalState("azureApiVersion", azureApiVersion)
-				await this.updateGlobalState("includeStreamOptions", includeStreamOptions)
+				await this.updateGlobalState("openAiStreamingEnabled", openAiStreamingEnabled)
 				await this.updateGlobalState("openRouterModelId", openRouterModelId)
 				await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
 				await this.updateGlobalState("openRouterUseMiddleOutTransform", openRouterUseMiddleOutTransform)
@@ -1295,7 +1295,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			openAiNativeApiKey,
 			deepSeekApiKey,
 			azureApiVersion,
-			includeStreamOptions,
+			openAiStreamingEnabled,
 			openRouterModelId,
 			openRouterModelInfo,
 			openRouterUseMiddleOutTransform,
@@ -1345,7 +1345,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			this.getSecret("openAiNativeApiKey") as Promise<string | undefined>,
 			this.getSecret("deepSeekApiKey") as Promise<string | undefined>,
 			this.getGlobalState("azureApiVersion") as Promise<string | undefined>,
-			this.getGlobalState("includeStreamOptions") as Promise<boolean | undefined>,
+			this.getGlobalState("openAiStreamingEnabled") as Promise<boolean | undefined>,
 			this.getGlobalState("openRouterModelId") as Promise<string | undefined>,
 			this.getGlobalState("openRouterModelInfo") as Promise<ModelInfo | undefined>,
 			this.getGlobalState("openRouterUseMiddleOutTransform") as Promise<boolean | undefined>,
@@ -1412,7 +1412,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
 			openAiNativeApiKey,
 			deepSeekApiKey,
 			azureApiVersion,
-			includeStreamOptions,
+			openAiStreamingEnabled,
 			openRouterModelId,
 			openRouterModelInfo,
 			openRouterUseMiddleOutTransform,
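The ClineProvider changes are a mechanical rename of the persisted key: the boolean round-trips through VS Code global state under the new name. A sketch of the round-trip as it would run inside ClineProvider (helpers as used above):

	await this.updateGlobalState("openAiStreamingEnabled", enabled)
	const stored = (await this.getGlobalState("openAiStreamingEnabled")) as boolean | undefined
	const effective = stored ?? true // an unset key keeps streaming enabled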

src/shared/api.ts

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ export interface ApiHandlerOptions {
 	openAiNativeApiKey?: string
 	azureApiVersion?: string
 	openRouterUseMiddleOutTransform?: boolean
-	includeStreamOptions?: boolean
+	openAiStreamingEnabled?: boolean
 	setAzureApiVersion?: boolean
 	deepSeekBaseUrl?: string
 	deepSeekApiKey?: string
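Because every read of the renamed flag applies a "?? true" default (see openai.ts above and ApiOptions.tsx below), existing configurations that never stored the old includeStreamOptions value keep streaming enabled after this rename.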

webview-ui/src/components/settings/ApiOptions.tsx

Lines changed: 3 additions & 8 deletions
@@ -477,21 +477,16 @@ const ApiOptions = ({ showModelOptions, apiErrorMessage, modelIdErrorMessage }:
 				<OpenAiModelPicker />
 				<div style={{ display: 'flex', alignItems: 'center' }}>
 					<VSCodeCheckbox
-						checked={apiConfiguration?.includeStreamOptions ?? true}
+						checked={apiConfiguration?.openAiStreamingEnabled ?? true}
 						onChange={(e: any) => {
 							const isChecked = e.target.checked
 							setApiConfiguration({
 								...apiConfiguration,
-								includeStreamOptions: isChecked
+								openAiStreamingEnabled: isChecked
 							})
 						}}>
-						Include stream options
+						Enable streaming
 					</VSCodeCheckbox>
-					<span
-						className="codicon codicon-info"
-						title="Stream options are for { include_usage: true }. Some providers may not support this option."
-						style={{ marginLeft: '5px', cursor: 'help' }}
-					></span>
 				</div>
 				<VSCodeCheckbox
 					checked={azureApiVersionSelected}
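End to end, the setting flows: the checkbox's onChange writes openAiStreamingEnabled into the webview's apiConfiguration, ClineProvider persists it via updateGlobalState("openAiStreamingEnabled", ...), and OpenAiHandler.createMessage reads it to choose the streaming or non-streaming branch. The old info tooltip was removed along with the "Include stream options" label, since the checkbox now governs streaming as a whole rather than only the stream_options field.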
