Commit 5be257e

refactor: update QwenChatModel to use BaseChatOpenAI and remove unused methods
--bug=1052269 --user=刘瑞斌 [Model] After connecting the Qwen model and setting the web-search parameter, it does not take effect in MaxKB question answering, although calling it via Python code works. https://www.tapd.cn/57709429/s/1662132
1 parent ed8173e commit 5be257e
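
The linked report says the web-search setting works when Qwen is called directly from Python but not through MaxKB. For context, here is a minimal sketch of such a direct call, assuming DashScope's OpenAI-compatible endpoint and its documented enable_search extension; the model name and key are placeholders, not taken from this commit:

# Direct-call sketch (assumption, not part of this commit): the openai>=1.x
# SDK forwards vendor extensions such as enable_search via extra_body.
from openai import OpenAI

client = OpenAI(
    api_key="sk-...",  # placeholder DashScope API key
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)
response = client.chat.completions.create(
    model="qwen-plus",  # placeholder model name
    messages=[{"role": "user", "content": "What is in the news today?"}],
    extra_body={"enable_search": True},  # web-search switch per DashScope docs
)
print(response.choices[0].message.content)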

File tree

  • apps/setting/models_provider/impl/qwen_model_provider/model
    • image.py
    • llm.py

2 files changed: +11 -86 lines


apps/setting/models_provider/impl/qwen_model_provider/model/image.py

Lines changed: 3 additions & 0 deletions
@@ -7,6 +7,9 @@
 
 
 class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
+    @staticmethod
+    def is_cache_model():
+        return False
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
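
The added is_cache_model() override mirrors the one QwenChatModel already has in llm.py below. MaxKBBaseModel's internals are not shown in this diff, so the following is only a plausible sketch of how a factory typically consults such a flag; every name in it is an assumption:

# Hypothetical factory (assumed, not MaxKB's actual code): a class returning
# False from is_cache_model() is rebuilt on every request instead of being
# served from a shared instance cache.
_instances: dict = {}

def build_model(model_cls, cache_key, *args, **kwargs):
    if model_cls.is_cache_model() and cache_key in _instances:
        return _instances[cache_key]  # reuse the cached instance
    instance = model_cls.new_instance(*args, **kwargs)
    if model_cls.is_cache_model():
        _instances[cache_key] = instance  # cache only when the class allows it
    return instance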

apps/setting/models_provider/impl/qwen_model_provider/model/llm.py

Lines changed: 8 additions & 86 deletions
@@ -6,20 +6,13 @@
 @date:2024/4/28 11:44
 @desc:
 """
-from typing import List, Dict, Optional, Iterator, Any, cast
-
-from langchain_community.chat_models import ChatTongyi
-from langchain_community.llms.tongyi import generate_with_last_element_mark
-from langchain_core.callbacks import CallbackManagerForLLMRun
-from langchain_core.language_models import LanguageModelInput
-from langchain_core.messages import BaseMessage
-from langchain_core.outputs import ChatGenerationChunk, ChatGeneration
-from langchain_core.runnables import RunnableConfig, ensure_config
+from typing import Dict
 
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-class QwenChatModel(MaxKBBaseModel, ChatTongyi):
+class QwenChatModel(MaxKBBaseModel, BaseChatOpenAI):
     @staticmethod
     def is_cache_model():
         return False
@@ -29,81 +22,10 @@ def new_instance(model_type, model_name, model_credential: Dict[str, object], **
         optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
         chat_tong_yi = QwenChatModel(
             model_name=model_name,
-            dashscope_api_key=model_credential.get('api_key'),
-            model_kwargs=optional_params,
+            openai_api_key=model_credential.get('api_key'),
+            openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
+            streaming=True,
+            stream_usage=True,
+            **optional_params,
         )
         return chat_tong_yi
-
-    usage_metadata: dict = {}
-
-    def get_last_generation_info(self) -> Optional[Dict[str, Any]]:
-        return self.usage_metadata
-
-    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
-        return self.usage_metadata.get('input_tokens', 0)
-
-    def get_num_tokens(self, text: str) -> int:
-        return self.usage_metadata.get('output_tokens', 0)
-
-    def _stream(
-            self,
-            messages: List[BaseMessage],
-            stop: Optional[List[str]] = None,
-            run_manager: Optional[CallbackManagerForLLMRun] = None,
-            **kwargs: Any,
-    ) -> Iterator[ChatGenerationChunk]:
-        params: Dict[str, Any] = self._invocation_params(
-            messages=messages, stop=stop, stream=True, **kwargs
-        )
-
-        for stream_resp, is_last_chunk in generate_with_last_element_mark(
-                self.stream_completion_with_retry(**params)
-        ):
-            choice = stream_resp["output"]["choices"][0]
-            message = choice["message"]
-            if (
-                    choice["finish_reason"] == "stop"
-                    and message["content"] == ""
-            ) or (choice["finish_reason"] == "length"):
-                token_usage = stream_resp["usage"]
-                self.usage_metadata = token_usage
-            if (
-                    choice["finish_reason"] == "null"
-                    and message["content"] == ""
-                    and "tool_calls" not in message
-            ):
-                continue
-
-            chunk = ChatGenerationChunk(
-                **self._chat_generation_from_qwen_resp(
-                    stream_resp, is_chunk=True, is_last_chunk=is_last_chunk
-                )
-            )
-            if run_manager:
-                run_manager.on_llm_new_token(chunk.text, chunk=chunk)
-            yield chunk
-
-    def invoke(
-            self,
-            input: LanguageModelInput,
-            config: Optional[RunnableConfig] = None,
-            *,
-            stop: Optional[List[str]] = None,
-            **kwargs: Any,
-    ) -> BaseMessage:
-        config = ensure_config(config)
-        chat_result = cast(
-            ChatGeneration,
-            self.generate_prompt(
-                [self._convert_input(input)],
-                stop=stop,
-                callbacks=config.get("callbacks"),
-                tags=config.get("tags"),
-                metadata=config.get("metadata"),
-                run_name=config.get("run_name"),
-                run_id=config.pop("run_id", None),
-                **kwargs,
-            ).generations[0][0],
-        ).message
-        self.usage_metadata = chat_result.response_metadata['token_usage']
-        return chat_result
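
Two things change in new_instance: the model now targets DashScope's OpenAI-compatible endpoint, and optional_params is spread into the constructor (**optional_params) rather than nested under model_kwargs, so per-model options travel as real constructor arguments. Below is a hedged sketch of the equivalent call path using langchain_openai's ChatOpenAI, on the assumption that BaseChatOpenAI builds on it; the extra_body content and model name are illustrative, not from this commit:

# Sketch only: construct the client the way the refactored factory does,
# assuming BaseChatOpenAI extends langchain_openai.ChatOpenAI.
from langchain_openai import ChatOpenAI

chat = ChatOpenAI(
    model="qwen-plus",  # placeholder model name
    openai_api_key="sk-...",  # placeholder DashScope API key
    openai_api_base="https://dashscope.aliyuncs.com/compatible-mode/v1",
    streaming=True,
    stream_usage=True,  # report token usage in streamed responses
    extra_body={"enable_search": True},  # assumed example of an optional param
)
print(chat.invoke("What is in the news today?").content)

This also suggests why the hand-written _stream, invoke, and token-counting helpers could be removed: they appear to exist only to patch usage tracking onto ChatTongyi, while the OpenAI-style base class handles streaming, invocation, and usage metadata itself.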
