@@ -51,7 +51,7 @@ type LLMResponse = {
   reasoningText: string;
   toolCalls?: ChatCompletionMessageToolCall[];
   finish_reason: CompletionFinishReason;
-  getEmptyResponseTip: () => string;
+  responseEmptyTip?: string;
   usage: {
     inputTokens: number;
     outputTokens: number;
@@ -92,7 +92,7 @@ export const createLLMResponse = async <T extends CompletionsBodyType>(
   });

   // console.log(JSON.stringify(requestBody, null, 2));
-  const { response, isStreamResponse, getEmptyResponseTip } = await createChatCompletion({
+  const { response, isStreamResponse } = await createChatCompletion({
     body: requestBody,
     modelData,
     userKey,
@@ -151,9 +151,33 @@ export const createLLMResponse = async <T extends CompletionsBodyType>(
     usage?.prompt_tokens || (await countGptMessagesTokens(requestBody.messages, requestBody.tools));
   const outputTokens = usage?.completion_tokens || (await countGptMessagesTokens(assistantMessage));

+  const getEmptyResponseTip = () => {
+    if (userKey?.baseUrl) {
+      addLog.warn(`User LLM response empty`, {
+        baseUrl: userKey?.baseUrl,
+        requestBody,
+        finish_reason
+      });
+      return `您的 OpenAI key 没有响应: ${JSON.stringify(body)}`;
+    } else {
+      addLog.error(`LLM response empty`, {
+        message: '',
+        data: requestBody,
+        finish_reason
+      });
+    }
+    return i18nT('chat:LLM_model_response_empty');
+  };
+  const isNotResponse =
+    !answerText &&
+    !reasoningText &&
+    !toolCalls?.length &&
+    (finish_reason === 'stop' || !finish_reason);
+  const responseEmptyTip = isNotResponse ? getEmptyResponseTip() : undefined;
+
   return {
     isStreamResponse,
-    getEmptyResponseTip,
+    responseEmptyTip,
     answerText,
     reasoningText,
     toolCalls,
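A minimal sketch of what this buys the caller: the emptiness check (no answer text, no reasoning, no tool calls, and `finish_reason` of `'stop'` or missing) now runs inside `createLLMResponse`, so consumers read the precomputed `responseEmptyTip` instead of deciding when to call `getEmptyResponseTip()` themselves. The call shape below is assumed, not taken from this diff:

```ts
// Hypothetical consumer of the new return value.
const { answerText, responseEmptyTip } = await createLLMResponse(params);

// Before: detect emptiness yourself, then call getEmptyResponseTip().
// After: a defined responseEmptyTip already means "the model returned nothing".
if (responseEmptyTip) {
  throw new Error(responseEmptyTip);
}
```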
@@ -535,7 +559,8 @@ const llmCompletionsBodyFormat = async <T extends CompletionsBodyType>({
     maxToken: body.max_tokens || undefined
   });

-  const requestBody = {
+  const formatStop = stop?.split('|').filter((item) => !!item.trim());
+  let requestBody = {
     ...body,
     max_tokens: maxTokens,
     model: modelData.model,
@@ -546,16 +571,20 @@ const llmCompletionsBodyFormat = async <T extends CompletionsBodyType>({
             temperature: body.temperature
           })
         : undefined,
-    ...modelData?.defaultConfig,
     response_format,
-    stop: stop?.split('|').filter((item) => !!item.trim()),
+    stop: formatStop?.length ? formatStop : undefined,
     ...(toolCallMode === 'toolChoice' && {
       tools,
       tool_choice,
       parallel_tool_calls
     })
   } as T;

+  // Filter null value
+  requestBody = Object.fromEntries(
+    Object.entries(requestBody).filter(([_, value]) => value !== null)
+  ) as T;
+
   // field map
   if (modelData.fieldMap) {
     Object.entries(modelData.fieldMap).forEach(([sourceKey, targetKey]) => {
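The `stop` handling also tightens up: previously `stop: ''` yielded `[]` (since `''.split('|')` is `['']` and the blank entry is filtered out), and that empty array was still sent to the provider; hoisting the split into `formatStop` and mapping an empty result to `undefined` omits the field entirely. A small illustration with a hypothetical helper name:

```ts
// Sketch of the new stop normalization.
const toStop = (stop?: string) => {
  const parts = stop?.split('|').filter((item) => !!item.trim());
  return parts?.length ? parts : undefined;
};

toStop('###|END'); // => ['###', 'END']
toStop('');        // => undefined (previously [] was sent)
toStop(undefined); // => undefined
```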
@@ -566,6 +595,11 @@ const llmCompletionsBodyFormat = async <T extends CompletionsBodyType>({
     });
   }

+  requestBody = {
+    ...requestBody,
+    ...modelData?.defaultConfig
+  };
+
   return {
     requestBody: requestBody as unknown as InferCompletionsBody<T>,
     modelData
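Two behavior changes are worth noting here. First, `modelData?.defaultConfig` was previously spread near the top of the object literal, where any later key could override it; it is now merged last, after the null filter and the fieldMap renaming, so per-model defaults win and are never stripped or remapped. Second, the null filter removes keys that are explicitly `null` while keeping `undefined` ones (which `JSON.stringify` drops anyway). A standalone sketch of that filter, with a hypothetical helper name:

```ts
// Illustration of the null-value filter used above; stripNulls is an assumed name.
const stripNulls = <T extends Record<string, unknown>>(obj: T): T =>
  Object.fromEntries(Object.entries(obj).filter(([_, value]) => value !== null)) as T;

stripNulls({ temperature: 0.7, stop: undefined, tool_choice: null });
// => { temperature: 0.7, stop: undefined } — null is gone, undefined is kept
```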
@@ -584,18 +618,14 @@ const createChatCompletion = async ({
   timeout?: number;
   options?: OpenAI.RequestOptions;
 }): Promise<
-  {
-    getEmptyResponseTip: () => string;
-  } & (
-    | {
-        response: StreamChatType;
-        isStreamResponse: true;
-      }
-    | {
-        response: UnStreamChatType;
-        isStreamResponse: false;
-      }
-  )
+  | {
+      response: StreamChatType;
+      isStreamResponse: true;
+    }
+  | {
+      response: UnStreamChatType;
+      isStreamResponse: false;
+    }
 > => {
   try {
     if (!modelData) {
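Dropping `getEmptyResponseTip` from the intersection leaves a plain discriminated union keyed on `isStreamResponse`, which TypeScript narrows directly at the branch. A minimal consumer sketch, assuming `StreamChatType` is async-iterable and `UnStreamChatType` is a complete chat completion:

```ts
// Hypothetical caller; StreamChatType/UnStreamChatType come from this file.
const result = await createChatCompletion({ body: requestBody, modelData, userKey });

if (result.isStreamResponse) {
  // Narrowed to { response: StreamChatType; isStreamResponse: true }
  for await (const chunk of result.response) {
    // consume streamed deltas
  }
} else {
  // Narrowed to { response: UnStreamChatType; isStreamResponse: false }
  const message = result.response.choices[0]?.message;
}
```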
@@ -627,34 +657,16 @@ const createChatCompletion = async ({
     response !== null &&
     ('iterator' in response || 'controller' in response);

-    const getEmptyResponseTip = () => {
-      if (userKey?.baseUrl) {
-        addLog.warn(`User LLM response empty`, {
-          baseUrl: userKey?.baseUrl,
-          requestBody: body
-        });
-        return `您的 OpenAI key 没有响应: ${JSON.stringify(body)}`;
-      } else {
-        addLog.error(`LLM response empty`, {
-          message: '',
-          data: body
-        });
-      }
-      return i18nT('chat:LLM_model_response_empty');
-    };
-
    if (isStreamResponse) {
      return {
        response,
-        isStreamResponse: true,
-        getEmptyResponseTip
+        isStreamResponse: true
      };
    }

    return {
      response,
-      isStreamResponse: false,
-      getEmptyResponseTip
+      isStreamResponse: false
    };
  } catch (error) {
    if (userKey?.baseUrl) {