77import uuid
88import warnings
99from pydantic import BaseModel
10- from langchain_community .chat_models import ErnieBotChat
11- from langchain_nvidia_ai_endpoints import ChatNVIDIA
1210from langchain .chat_models import init_chat_model
1311from ..helpers import models_tokens
1412from ..models import (
@@ -147,16 +145,17 @@ def handle_model(model_name, provider, token_key, default_token=8192):
147145 warnings .simplefilter ("ignore" )
148146 return init_chat_model (** llm_params )
149147
150- known_models = ["chatgpt" ,"gpt" ,"openai" , "azure_openai" , "google_genai" , "ollama" , "oneapi" , "nvidia" , "groq" , "google_vertexai" , "bedrock" , "mistralai" , "hugging_face" , "deepseek" , "ernie" , "fireworks" ]
148+ known_models = ["chatgpt" ,"gpt" ,"openai" , "azure_openai" , "google_genai" ,
149+ "ollama" , "oneapi" , "nvidia" , "groq" , "google_vertexai" ,
150+ "bedrock" , "mistralai" , "hugging_face" , "deepseek" , "ernie" , "fireworks" ]
151151
152152 if llm_params ["model" ].split ("/" )[0 ] not in known_models and llm_params ["model" ].split ("-" )[0 ] not in known_models :
153153 raise ValueError (f"Model '{ llm_params ['model' ]} ' is not supported" )
154154
155155 try :
156156 if "azure" in llm_params ["model" ]:
157157 model_name = llm_params ["model" ].split ("/" )[- 1 ]
158- return handle_model (model_name , "azure_openai" , model_name )
159-
158+ return handle_model (model_name , "azure_openai" , model_name )
160159 if "fireworks" in llm_params ["model" ]:
161160 model_name = "/" .join (llm_params ["model" ].split ("/" )[1 :])
162161 token_key = llm_params ["model" ].split ("/" )[- 1 ]
@@ -188,7 +187,6 @@ def handle_model(model_name, provider, token_key, default_token=8192):
188187 model_name = llm_params ["model" ].split ("/" )[- 1 ]
189188 return handle_model (model_name , "mistralai" , model_name )
190189
191- # Instantiate the language model based on the model name (models that do not use the common interface)
192190 elif "deepseek" in llm_params ["model" ]:
193191 try :
194192 self .model_token = models_tokens ["deepseek" ][llm_params ["model" ]]
@@ -198,6 +196,8 @@ def handle_model(model_name, provider, token_key, default_token=8192):
198196 return DeepSeek (llm_params )
199197
200198 elif "ernie" in llm_params ["model" ]:
199+ from langchain_community .chat_models import ErnieBotChat
200+
201201 try :
202202 self .model_token = models_tokens ["ernie" ][llm_params ["model" ]]
203203 except KeyError :
@@ -215,6 +215,8 @@ def handle_model(model_name, provider, token_key, default_token=8192):
215215 return OneApi (llm_params )
216216
217217 elif "nvidia" in llm_params ["model" ]:
218+ from langchain_nvidia_ai_endpoints import ChatNVIDIA
219+
218220 try :
219221 self .model_token = models_tokens ["nvidia" ][llm_params ["model" ].split ("/" )[- 1 ]]
220222 llm_params ["model" ] = "/" .join (llm_params ["model" ].split ("/" )[1 :])