@@ -55,10 +55,20 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
5555 baseURL,
5656 apiKey,
5757 apiVersion : this . options . azureApiVersion || azureOpenAiDefaultApiVersion ,
58- defaultHeaders,
58+ defaultHeaders : {
59+ ...defaultHeaders ,
60+ ...( this . options . openAiHostHeader ? { Host : this . options . openAiHostHeader } : { } ) ,
61+ } ,
5962 } )
6063 } else {
61- this . client = new OpenAI ( { baseURL, apiKey, defaultHeaders } )
64+ this . client = new OpenAI ( {
65+ baseURL,
66+ apiKey,
67+ defaultHeaders : {
68+ ...defaultHeaders ,
69+ ...( this . options . openAiHostHeader ? { Host : this . options . openAiHostHeader } : { } ) ,
70+ } ,
71+ } )
6272 }
6373 }
6474
@@ -67,6 +77,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
6777 const modelUrl = this . options . openAiBaseUrl ?? ""
6878 const modelId = this . options . openAiModelId ?? ""
6979 const enabledR1Format = this . options . openAiR1FormatEnabled ?? false
80+ const enabledLegacyFormat = this . options . openAiLegacyFormat ?? false
7081 const isAzureAiInference = this . _isAzureAiInference ( modelUrl )
7182 const urlHost = this . _getUrlHost ( modelUrl )
7283 const deepseekReasoner = modelId . includes ( "deepseek-reasoner" ) || enabledR1Format
@@ -85,7 +96,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
8596 let convertedMessages
8697 if ( deepseekReasoner ) {
8798 convertedMessages = convertToR1Format ( [ { role : "user" , content : systemPrompt } , ...messages ] )
88- } else if ( ark ) {
99+ } else if ( ark || enabledLegacyFormat ) {
89100 convertedMessages = [ systemMessage , ...convertToSimpleMessages ( messages ) ]
90101 } else {
91102 if ( modelInfo . supportsPromptCache ) {
@@ -190,7 +201,9 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
190201 model : modelId ,
191202 messages : deepseekReasoner
192203 ? convertToR1Format ( [ { role : "user" , content : systemPrompt } , ...messages ] )
193- : [ systemMessage , ...convertToOpenAiMessages ( messages ) ] ,
204+ : enabledLegacyFormat
205+ ? [ systemMessage , ...convertToSimpleMessages ( messages ) ]
206+ : [ systemMessage , ...convertToOpenAiMessages ( messages ) ] ,
194207 }
195208
196209 const response = await this . client . chat . completions . create (
@@ -330,7 +343,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
330343 }
331344}
332345
333- export async function getOpenAiModels ( baseUrl ?: string , apiKey ?: string ) {
346+ export async function getOpenAiModels ( baseUrl ?: string , apiKey ?: string , hostHeader ?: string ) {
334347 try {
335348 if ( ! baseUrl ) {
336349 return [ ]
@@ -341,9 +354,18 @@ export async function getOpenAiModels(baseUrl?: string, apiKey?: string) {
341354 }
342355
343356 const config : Record < string , any > = { }
357+ const headers : Record < string , string > = { }
344358
345359 if ( apiKey ) {
346- config [ "headers" ] = { Authorization : `Bearer ${ apiKey } ` }
360+ headers [ "Authorization" ] = `Bearer ${ apiKey } `
361+ }
362+
363+ if ( hostHeader ) {
364+ headers [ "Host" ] = hostHeader
365+ }
366+
367+ if ( Object . keys ( headers ) . length > 0 ) {
368+ config [ "headers" ] = headers
347369 }
348370
349371 const response = await axios . get ( `${ baseUrl } /models` , config )