 from langchain_aws import BedrockEmbeddings
 from langchain_community.embeddings import HuggingFaceHubEmbeddings, OllamaEmbeddings
 from langchain_google_genai import GoogleGenerativeAIEmbeddings
+from langchain_google_vertexai import VertexAIEmbeddings
 from langchain_google_genai.embeddings import GoogleGenerativeAIEmbeddings
 from langchain_fireworks import FireworksEmbeddings
 from langchain_openai import AzureOpenAIEmbeddings, OpenAIEmbeddings
-
 from ..helpers import models_tokens
 from ..models import (
     Anthropic,
     Ollama,
     OpenAI,
     OneApi,
-    Fireworks
+    Fireworks,
+    VertexAI
 )
 from ..models.ernie import Ernie
 from ..utils.logging import set_verbosity_debug, set_verbosity_warning, set_verbosity_info
@@ -73,7 +74,7 @@ def __init__(self, prompt: str, config: dict,
         self.config = config
         self.schema = schema
         self.llm_model = self._create_llm(config["llm"], chat=True)
-        self.embedder_model = self._create_default_embedder(llm_config=config["llm"] ) if "embeddings" not in config else self._create_embedder(
+        self.embedder_model = self._create_default_embedder(llm_config=config["llm"]) if "embeddings" not in config else self._create_embedder(
             config["embeddings"])
         self.verbose = False if config is None else config.get(
             "verbose", False)
@@ -179,7 +180,6 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
             except KeyError as exc:
                 raise KeyError("Model not supported") from exc
             return AzureOpenAI(llm_params)
-
         elif "gemini" in llm_params["model"]:
             llm_params["model"] = llm_params["model"].split("/")[-1]
             try:
@@ -194,6 +194,12 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
             except KeyError as exc:
                 raise KeyError("Model not supported") from exc
             return Anthropic(llm_params)
+        elif llm_params["model"].startswith("vertexai"):
+            try:
+                self.model_token = models_tokens["vertexai"][llm_params["model"]]
+            except KeyError as exc:
+                raise KeyError("Model not supported") from exc
+            return VertexAI(llm_params)
         elif "ollama" in llm_params["model"]:
             llm_params["model"] = llm_params["model"].split("ollama/")[-1]
 
@@ -287,9 +293,12 @@ def _create_default_embedder(self, llm_config=None) -> object:
                 google_api_key=llm_config["api_key"], model="models/embedding-001"
             )
         if isinstance(self.llm_model, OpenAI):
-            return OpenAIEmbeddings(api_key=self.llm_model.openai_api_key, base_url=self.llm_model.openai_api_base)
+            return OpenAIEmbeddings(api_key=self.llm_model.openai_api_key,
+                                    base_url=self.llm_model.openai_api_base)
         elif isinstance(self.llm_model, DeepSeek):
             return OpenAIEmbeddings(api_key=self.llm_model.openai_api_key)
+        elif isinstance(self.llm_model, VertexAI):
+            return VertexAIEmbeddings()
         elif isinstance(self.llm_model, AzureOpenAIEmbeddings):
             return self.llm_model
         elif isinstance(self.llm_model, AzureOpenAI):
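
For context, a minimal usage sketch of the new Vertex AI path added by this diff. It assumes a concrete graph such as `SmartScraperGraph` built on this `AbstractGraph`, a model string (here simply `"vertexai"`) that is actually registered under `models_tokens["vertexai"]`, and Google Cloud Application Default Credentials already configured in the environment; none of these specifics are confirmed by the diff itself.

```python
# Hypothetical sketch, not part of the diff.
# Assumptions: SmartScraperGraph subclasses AbstractGraph, the model string
# below exists as a key in models_tokens["vertexai"], and Google Cloud
# Application Default Credentials are available.
from scrapegraphai.graphs import SmartScraperGraph

graph_config = {
    "llm": {
        # Matches the new branch: llm_params["model"].startswith("vertexai")
        "model": "vertexai",
    },
    # No "embeddings" key, so _create_default_embedder() is used and returns
    # VertexAIEmbeddings() because the LLM is a VertexAI instance.
    "verbose": True,
}

scraper = SmartScraperGraph(
    prompt="List the article titles on the page",
    source="https://example.com",
    config=graph_config,
)
print(scraper.run())
```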