@@ -2,11 +2,10 @@ import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
 import { ApiHandlerOptions, XAIModelId, ModelInfo, xaiDefaultModelId, xaiModels } from "../../shared/api"
 import { ApiStream } from "../transform/stream"
-import { BaseProvider } from "./base-provider"
 import { convertToOpenAiMessages } from "../transform/openai-format"
+import { DEFAULT_HEADERS, REASONING_MODELS } from "./constants"
+import { BaseProvider } from "./base-provider"
 import { SingleCompletionHandler } from ".."
-import { ChatCompletionReasoningEffort } from "openai/resources/chat/completions.mjs"
-import { defaultHeaders } from "./openai"
 
 export class XAIHandler extends BaseProvider implements SingleCompletionHandler {
 	protected options: ApiHandlerOptions
@@ -18,35 +17,29 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 		this.client = new OpenAI({
 			baseURL: "https://api.x.ai/v1",
 			apiKey: this.options.xaiApiKey ?? "not-provided",
-			defaultHeaders: defaultHeaders,
+			defaultHeaders: DEFAULT_HEADERS,
 		})
 	}
 
-	override getModel(): { id: string; info: ModelInfo } {
+	override getModel() {
 		const modelId = this.options.apiModelId
+
 		if (modelId && modelId in xaiModels) {
 			const id = modelId as XAIModelId
-			return { id, info: xaiModels[id] }
+			return { id, info: xaiModels[id], reasoningEffort: REASONING_MODELS.has(id) ? this.options.reasoningEffort : undefined }
 		}
+
 		return {
 			id: xaiDefaultModelId,
 			info: xaiModels[xaiDefaultModelId],
+			reasoningEffort: REASONING_MODELS.has(xaiDefaultModelId) ? this.options.reasoningEffort : undefined,
 		}
 	}
 
 	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
-		const modelId = this.getModel().id
-		const modelInfo = this.getModel().info
+		const { id: modelId, info: modelInfo, reasoningEffort } = this.getModel()
 
-		// Special handling for Grok-3-mini models which support reasoning_effort
-		let reasoningEffort: ChatCompletionReasoningEffort | undefined
-		if (modelId.includes("3-mini") && this.options.reasoningEffort) {
-			if (["low", "high"].includes(this.options.reasoningEffort)) {
-				reasoningEffort = this.options.reasoningEffort as ChatCompletionReasoningEffort
-			}
-		}
-
-		// Use the OpenAI-compatible API
+		// Use the OpenAI-compatible API.
 		const stream = await this.client.chat.completions.create({
 			model: modelId,
 			max_tokens: modelInfo.maxTokens,
@@ -59,6 +52,7 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 
 		for await (const chunk of stream) {
 			const delta = chunk.choices[0]?.delta
+
 			if (delta?.content) {
 				yield {
 					type: "text",
@@ -78,7 +72,7 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 					type: "usage",
 					inputTokens: chunk.usage.prompt_tokens || 0,
 					outputTokens: chunk.usage.completion_tokens || 0,
-					// X.AI might include these fields in the future, handle them if present
+					// X.AI might include these fields in the future, so handle them if present.
 					cacheReadTokens:
 						"cache_read_input_tokens" in chunk.usage ? (chunk.usage as any).cache_read_input_tokens : 0,
 					cacheWriteTokens:
@@ -91,16 +85,21 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
 	}
 
 	async completePrompt(prompt: string): Promise<string> {
+		const { id: modelId, reasoningEffort } = this.getModel()
+
 		try {
 			const response = await this.client.chat.completions.create({
-				model: this.getModel().id,
+				model: modelId,
 				messages: [{ role: "user", content: prompt }],
+				...(reasoningEffort ? { reasoning_effort: reasoningEffort } : {}),
 			})
+
 			return response.choices[0]?.message.content || ""
 		} catch (error) {
 			if (error instanceof Error) {
 				throw new Error(`xAI completion error: ${error.message}`)
 			}
+
 			throw error
 		}
 	}
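
The new imports reference ./constants, which this diff does not show. Below is a minimal sketch of what that module presumably exports; only the two export names come from the diff, while the header values and model IDs are illustrative assumptions (REASONING_MODELS is assumed to be a Set covering the Grok 3 mini IDs that the removed includes("3-mini") check used to match):

// constants.ts -- sketch, assumed shape
export const DEFAULT_HEADERS = {
	"X-Title": "Example App", // placeholder value, not the project's actual header
}

// Model IDs assumed to accept the reasoning_effort parameter; the real set may differ.
export const REASONING_MODELS = new Set<string>(["grok-3-mini-beta", "grok-3-mini-fast-beta"])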
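
The conditional spread in completePrompt only adds reasoning_effort when getModel() yields one, so models outside REASONING_MODELS never receive the field. A standalone sketch of the same pattern against the xAI endpoint, assuming a grok-3-mini-beta model ID and the "low" | "high" effort values the removed inline check allowed:

import OpenAI from "openai"

// Sketch only: the model ID and effort values are assumptions, not taken from the diff.
const client = new OpenAI({ baseURL: "https://api.x.ai/v1", apiKey: process.env.XAI_API_KEY })

async function ask(prompt: string, effort?: "low" | "high"): Promise<string> {
	const response = await client.chat.completions.create({
		model: "grok-3-mini-beta", // assumed reasoning-capable model ID
		messages: [{ role: "user", content: prompt }],
		// Spread an empty object when no effort is set, so the request omits
		// the key entirely instead of sending `reasoning_effort: undefined`.
		...(effort ? { reasoning_effort: effort } : {}),
	})

	return response.choices[0]?.message.content ?? ""
}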