Skip to content

Commit 83ace97

Browse files
committed
refactor: model
1 parent 7f492b4 commit 83ace97

File tree

16 files changed

+24
-37
lines changed

16 files changed

+24
-37
lines changed

apps/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,6 @@ def new_instance(model_type, model_name, model_credential: Dict[str, object], **
         # stream_options={"include_usage": True},
         streaming=True,
         stream_usage=True,
-        **optional_params,
+        extra_body=optional_params
     )
     return chat_tong_yi

apps/models_provider/impl/ollama_model_provider/model/image.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,5 +28,5 @@ def new_instance(model_type, model_name, model_credential: Dict[str, object], **
         # stream_options={"include_usage": True},
         streaming=True,
         stream_usage=True,
-        **optional_params,
+        extra_body=optional_params
     )

apps/models_provider/impl/openai_model_provider/model/image.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,5 +16,5 @@ def new_instance(model_type, model_name, model_credential: Dict[str, object], **
         # stream_options={"include_usage": True},
         streaming=True,
         stream_usage=True,
-        **optional_params,
+        extra_body=optional_params
     )

apps/models_provider/impl/openai_model_provider/model/llm.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,8 @@ def new_instance(model_type, model_name, model_credential: Dict[str, object], **
         streaming = False
     chat_open_ai = OpenAIChatModel(
         model=model_name,
-        openai_api_base=model_credential.get('api_base'),
-        openai_api_key=model_credential.get('api_key'),
+        api_base=model_credential.get('api_base'),
+        api_key=model_credential.get('api_key'),
         extra_body=optional_params,
         streaming=streaming,
         custom_get_token_ids=custom_get_token_ids

apps/models_provider/impl/qwen_model_provider/__init__.py

Whitespace-only changes.

apps/models_provider/impl/qwen_model_provider/credential/__init__.py

Whitespace-only changes.

apps/models_provider/impl/qwen_model_provider/model/__init__.py

Whitespace-only changes.

apps/models_provider/impl/siliconCloud_model_provider/model/image.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,5 +16,5 @@ def new_instance(model_type, model_name, model_credential: Dict[str, object], **
         # stream_options={"include_usage": True},
         streaming=True,
         stream_usage=True,
-        **optional_params,
+        extra_body=optional_params
     )

apps/models_provider/impl/tencent_cloud_model_provider/model/llm.py

Lines changed: 1 addition & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -33,21 +33,8 @@ def new_instance(model_type, model_name, model_credential: Dict[str, object], **
         model=model_name,
         openai_api_base=model_credential.get('api_base'),
         openai_api_key=model_credential.get('api_key'),
-        **optional_params,
+        extra_body=optional_params,
         custom_get_token_ids=custom_get_token_ids
     )
     return azure_chat_open_ai

-    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
-        try:
-            return super().get_num_tokens_from_messages(messages)
-        except Exception as e:
-            tokenizer = TokenizerManage.get_tokenizer()
-            return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
-
-    def get_num_tokens(self, text: str) -> int:
-        try:
-            return super().get_num_tokens(text)
-        except Exception as e:
-            tokenizer = TokenizerManage.get_tokenizer()
-            return len(tokenizer.encode(text))

apps/models_provider/impl/tencent_model_provider/model/image.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,10 +11,10 @@ def new_instance(model_type, model_name, model_credential: Dict[str, object], **
     optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
     return TencentVision(
         model_name=model_name,
-        openai_api_base='https://api.hunyuan.cloud.tencent.com/v1',
-        openai_api_key=model_credential.get('api_key'),
+        api_base='https://api.hunyuan.cloud.tencent.com/v1',
+        api_key=model_credential.get('api_key'),
         # stream_options={"include_usage": True},
         streaming=True,
         stream_usage=True,
-        **optional_params,
+        extra_body=optional_params
     )

0 commit comments

Comments
 (0)