Skip to content

Commit 5d3cd58

Browse files
committed
feat: Support reasoning content (WIP)
1 parent b9d5d41 commit 5d3cd58

File tree

2 files changed

+3
-2
lines changed

2 files changed

+3
-2
lines changed

apps/setting/models_provider/impl/base_chat_open_ai.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,7 @@ def _stream(
104104
if generation_chunk.message.usage_metadata is not None:
105105
self.usage_metadata = generation_chunk.message.usage_metadata
106106
# custom code
107-
if chunk['choices'][0]['delta']['reasoning_content']:
107+
if 'reasoning_content' in chunk['choices'][0]['delta']:
108108
generation_chunk.message.additional_kwargs["reasoning_content"] = chunk['choices'][0]['delta']['reasoning_content']
109109

110110
default_chunk_class = generation_chunk.message.__class__

apps/setting/models_provider/impl/openai_model_provider/model/llm.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,14 +13,15 @@
1313

1414
from common.config.tokenizer_manage_config import TokenizerManage
1515
from setting.models_provider.base_model_provider import MaxKBBaseModel
16+
from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
1617

1718

1819
def custom_get_token_ids(text: str):
1920
tokenizer = TokenizerManage.get_tokenizer()
2021
return tokenizer.encode(text)
2122

2223

23-
class OpenAIChatModel(MaxKBBaseModel, ChatOpenAI):
24+
class OpenAIChatModel(MaxKBBaseModel, BaseChatOpenAI):
2425

2526
@staticmethod
2627
def is_cache_model():

0 commit comments

Comments
 (0)