
Commit dc3f06c

Make Grok3 streaming work with OpenAI Compatible

xAI's OpenAI-compatible endpoint does not accept the stream_options parameter on streaming requests, so the handler now detects an x.ai base URL and omits that parameter.

1 parent: 5fa555e

2 files changed (+51, −2 lines)


src/api/providers/__tests__/openai.test.ts

Lines changed: 40 additions & 0 deletions
```diff
@@ -352,4 +352,44 @@ describe("OpenAiHandler", () => {
 			)
 		})
 	})
+
+	describe("Grok xAI Provider", () => {
+		const grokOptions = {
+			...mockOptions,
+			openAiBaseUrl: "https://api.x.ai/v1",
+			openAiModelId: "grok-1",
+		}
+
+		it("should initialize with Grok xAI configuration", () => {
+			const grokHandler = new OpenAiHandler(grokOptions)
+			expect(grokHandler).toBeInstanceOf(OpenAiHandler)
+			expect(grokHandler.getModel().id).toBe(grokOptions.openAiModelId)
+		})
+
+		it("should exclude stream_options when streaming with Grok xAI", async () => {
+			const grokHandler = new OpenAiHandler(grokOptions)
+			const systemPrompt = "You are a helpful assistant."
+			const messages: Anthropic.Messages.MessageParam[] = [
+				{
+					role: "user",
+					content: "Hello!",
+				},
+			]
+
+			const stream = grokHandler.createMessage(systemPrompt, messages)
+			await stream.next()
+
+			expect(mockCreate).toHaveBeenCalledWith(
+				expect.objectContaining({
+					model: grokOptions.openAiModelId,
+					stream: true,
+				}),
+				{},
+			)
+
+			const mockCalls = mockCreate.mock.calls
+			const lastCall = mockCalls[mockCalls.length - 1]
+			expect(lastCall[0]).not.toHaveProperty("stream_options")
+		})
+	})
 })
```
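For orientation, here is a sketch of how a caller would point the handler at Grok, mirroring the grokOptions fixture above. Only openAiBaseUrl and openAiModelId come from the test; the import path and any other required options (API key and so on) are assumptions and elided here.

```ts
import { OpenAiHandler } from "../openai" // assumed path, relative to the test file

// Mirrors grokOptions from the test; other required options are elided.
const grokHandler = new OpenAiHandler({
	openAiBaseUrl: "https://api.x.ai/v1",
	openAiModelId: "grok-1",
})

console.log(grokHandler.getModel().id) // "grok-1"
```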

src/api/providers/openai.ts

Lines changed: 11 additions & 2 deletions
```diff
@@ -137,12 +137,14 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			}
 		}
 
+		const isGrokXAI = this._isGrokXAI(this.options.openAiBaseUrl)
+
 		const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 			model: modelId,
 			temperature: this.options.modelTemperature ?? (deepseekReasoner ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
 			messages: convertedMessages,
 			stream: true as const,
-			stream_options: { include_usage: true },
+			...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
 		}
 		if (this.options.includeMaxTokens) {
 			requestOptions.max_tokens = modelInfo.maxTokens
@@ -265,6 +267,8 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		if (this.options.openAiStreamingEnabled ?? true) {
 			const methodIsAzureAiInference = this._isAzureAiInference(this.options.openAiBaseUrl)
 
+			const isGrokXAI = this._isGrokXAI(this.options.openAiBaseUrl)
+
 			const stream = await this.client.chat.completions.create(
 				{
 					model: modelId,
@@ -276,7 +280,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 						...convertToOpenAiMessages(messages),
 					],
 					stream: true,
-					stream_options: { include_usage: true },
+					...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
 					reasoning_effort: this.getModel().info.reasoningEffort,
 				},
 				methodIsAzureAiInference ? { path: AZURE_AI_INFERENCE_PATH } : {},
@@ -337,6 +341,11 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		}
 	}
 
+	private _isGrokXAI(baseUrl?: string): boolean {
+		const urlHost = this._getUrlHost(baseUrl)
+		return urlHost.includes("x.ai")
+	}
+
 	private _isAzureAiInference(baseUrl?: string): boolean {
 		const urlHost = this._getUrlHost(baseUrl)
 		return urlHost.endsWith(".services.ai.azure.com")
```