11"""
22AbstractGraph Module
33"""
4+
45from abc import ABC , abstractmethod
56from typing import Optional
7+
68from langchain_aws import BedrockEmbeddings
7- from langchain_openai import AzureOpenAIEmbeddings , OpenAIEmbeddings
89from langchain_community .embeddings import HuggingFaceHubEmbeddings , OllamaEmbeddings
910from langchain_google_genai import GoogleGenerativeAIEmbeddings
10- from ..helpers import models_tokens
11- from ..models import AzureOpenAI , Bedrock , Gemini , Groq , HuggingFace , Ollama , OpenAI , Anthropic , DeepSeek
1211from langchain_google_genai .embeddings import GoogleGenerativeAIEmbeddings
12+ from langchain_openai import AzureOpenAIEmbeddings , OpenAIEmbeddings
13+
14+ from ..helpers import models_tokens
15+ from ..models import (
16+ Anthropic ,
17+ AzureOpenAI ,
18+ Bedrock ,
19+ Gemini ,
20+ Groq ,
21+ HuggingFace ,
22+ Ollama ,
23+ OpenAI ,
24+ )
25+ from ..utils .logging import set_verbosity_debug , set_verbosity_warning
1326
1427from ..helpers import models_tokens
1528from ..models import AzureOpenAI , Bedrock , Gemini , Groq , HuggingFace , Ollama , OpenAI , Anthropic , DeepSeek
@@ -67,10 +80,15 @@ def __init__(self, prompt: str, config: dict, source: Optional[str] = None, sche
         self.execution_info = None
 
         # Set common configuration parameters
-        self.verbose = False if config is None else config.get(
-            "verbose", False)
-        self.headless = True if config is None else config.get(
-            "headless", True)
+
+        verbose = bool(config and config.get("verbose"))
+
+        if verbose:
+            set_verbosity_debug()
+        else:
+            set_verbosity_warning()
+
+        self.headless = True if config is None else config.get("headless", True)
         self.loader_kwargs = config.get("loader_kwargs", {})
 
         common_params = {
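
Review note: with this hunk `verbose` is no longer stored on the instance; the flag now just selects a global logging verbosity through the two helpers imported from `..utils.logging` above. A minimal sketch of the resulting behavior, using only names introduced in this diff:

    # Illustrative only: what the reworked __init__ does with `verbose`.
    # set_verbosity_debug / set_verbosity_warning are the helpers this PR
    # imports from ..utils.logging.
    config = {"verbose": True, "headless": False}

    verbose = bool(config and config.get("verbose"))  # False when config is None
    if verbose:
        set_verbosity_debug()    # library logs at DEBUG level
    else:
        set_verbosity_warning()  # default: warnings and errors only
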
@@ -96,22 +114,22 @@ def set_common_params(self, params: dict, overwrite=False):
 
     def _set_model_token(self, llm):
 
-        if 'Azure' in str(type(llm)):
+        if "Azure" in str(type(llm)):
             try:
                 self.model_token = models_tokens["azure"][llm.model_name]
             except KeyError:
                 raise KeyError("Model not supported")
 
-        elif 'HuggingFaceEndpoint' in str(type(llm)):
-            if 'mistral' in llm.repo_id:
+        elif "HuggingFaceEndpoint" in str(type(llm)):
+            if "mistral" in llm.repo_id:
                 try:
-                    self.model_token = models_tokens['mistral'][llm.repo_id]
+                    self.model_token = models_tokens["mistral"][llm.repo_id]
                 except KeyError:
                     raise KeyError("Model not supported")
-        elif 'Google' in str(type(llm)):
+        elif "Google" in str(type(llm)):
             try:
-                if 'gemini' in llm.model:
-                    self.model_token = models_tokens['gemini'][llm.model]
+                if "gemini" in llm.model:
+                    self.model_token = models_tokens["gemini"][llm.model]
             except KeyError:
                 raise KeyError("Model not supported")
 
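
For context on the lookups in this method: `models_tokens` (from `..helpers`) is a nested dict mapping provider name to model name to context-window size, so each branch reduces to a plain dict lookup. A sketch with made-up values — the real table lives in `..helpers`:

    # Hypothetical excerpt; the actual models and sizes are defined in ..helpers.
    models_tokens = {
        "azure": {"gpt-3.5-turbo": 4096},
        "mistral": {"mistralai/Mistral-7B-Instruct-v0.2": 32000},
        "gemini": {"gemini-pro": 32768},
    }

    # Mirrors the Azure branch above: unknown models surface as KeyError.
    try:
        model_token = models_tokens["azure"]["gpt-3.5-turbo"]
    except KeyError:
        raise KeyError("Model not supported")
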
@@ -129,17 +147,14 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
             KeyError: If the model is not supported.
         """
 
-        llm_defaults = {
-            "temperature": 0,
-            "streaming": False
-        }
+        llm_defaults = {"temperature": 0, "streaming": False}
         llm_params = {**llm_defaults, **llm_config}
 
         # If model instance is passed directly instead of the model details
-        if 'model_instance' in llm_params:
+        if "model_instance" in llm_params:
             if chat:
-                self._set_model_token(llm_params['model_instance'])
-            return llm_params['model_instance']
+                self._set_model_token(llm_params["model_instance"])
+            return llm_params["model_instance"]
 
         # Instantiate the language model based on the model name
         if "gpt-" in llm_params["model"]:
@@ -208,19 +223,21 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
         elif "bedrock" in llm_params["model"]:
             llm_params["model"] = llm_params["model"].split("/")[-1]
             model_id = llm_params["model"]
-            client = llm_params.get('client', None)
+            client = llm_params.get("client", None)
             try:
                 self.model_token = models_tokens["bedrock"][llm_params["model"]]
             except KeyError:
                 print("model not found, using default token size (8192)")
                 self.model_token = 8192
-            return Bedrock({
-                "client": client,
-                "model_id": model_id,
-                "model_kwargs": {
-                    "temperature": llm_params["temperature"],
+            return Bedrock(
+                {
+                    "client": client,
+                    "model_id": model_id,
+                    "model_kwargs": {
+                        "temperature": llm_params["temperature"],
+                    },
                 }
-            })
+            )
         elif "claude-3-" in llm_params["model"]:
             try:
                 self.model_token = models_tokens["claude"]["claude3"]
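
One detail of the bedrock branch that is easy to miss in the reflow: the provider prefix is stripped from the configured model string before both the token lookup and the `Bedrock` construction. For example (the model id is illustrative):

    # Illustrative: how a "bedrock/..." model string is reduced to a model_id.
    llm_config = {"model": "bedrock/anthropic.claude-v2"}  # hypothetical id
    model_id = llm_config["model"].split("/")[-1]  # -> "anthropic.claude-v2"
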
@@ -236,8 +253,7 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
                 self.model_token = 8192
             return DeepSeek(llm_params)
         else:
-            raise ValueError(
-                "Model provided by the configuration not supported")
+            raise ValueError("Model provided by the configuration not supported")
 
     def _create_default_embedder(self, llm_config=None) -> object:
         """
@@ -250,8 +266,9 @@ def _create_default_embedder(self, llm_config=None) -> object:
             ValueError: If the model is not supported.
         """
         if isinstance(self.llm_model, Gemini):
-            return GoogleGenerativeAIEmbeddings(google_api_key=llm_config['api_key'],
-                                                model="models/embedding-001")
+            return GoogleGenerativeAIEmbeddings(
+                google_api_key=llm_config["api_key"], model="models/embedding-001"
+            )
         if isinstance(self.llm_model, OpenAI):
             return OpenAIEmbeddings(api_key=self.llm_model.openai_api_key)
         elif isinstance(self.llm_model, DeepSeek):
@@ -288,8 +305,8 @@ def _create_embedder(self, embedder_config: dict) -> object:
         Raises:
             KeyError: If the model is not supported.
         """
-        if 'model_instance' in embedder_config:
-            return embedder_config['model_instance']
+        if "model_instance" in embedder_config:
+            return embedder_config["model_instance"]
         # Instantiate the embedding model based on the model name
         if "openai" in embedder_config["model"]:
             return OpenAIEmbeddings(api_key=embedder_config["api_key"])
@@ -306,25 +323,27 @@ def _create_embedder(self, embedder_config: dict) -> object:
             try:
                 models_tokens["hugging_face"][embedder_config["model"]]
             except KeyError as exc:
-                raise KeyError("Model not supported")from exc
+                raise KeyError("Model not supported") from exc
             return HuggingFaceHubEmbeddings(model=embedder_config["model"])
         elif "gemini" in embedder_config["model"]:
             try:
                 models_tokens["gemini"][embedder_config["model"]]
             except KeyError as exc:
-                raise KeyError("Model not supported")from exc
+                raise KeyError("Model not supported") from exc
             return GoogleGenerativeAIEmbeddings(model=embedder_config["model"])
         elif "bedrock" in embedder_config["model"]:
             embedder_config["model"] = embedder_config["model"].split("/")[-1]
-            client = embedder_config.get('client', None)
+            client = embedder_config.get("client", None)
             try:
                 models_tokens["bedrock"][embedder_config["model"]]
             except KeyError as exc:
                 raise KeyError("Model not supported") from exc
-            return BedrockEmbeddings(client=client, model_id=embedder_config["model"])
+            return BedrockEmbeddings(client=client, model_id=embedder_config["model"])
+        else:
+            raise ValueError("Model provided by the configuration not supported")
 
     def get_state(self, key=None) -> dict:
-        """""
+        """ ""
        Get the final state of the graph.
 
        Args:
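
A behavioral note on the `_create_embedder` hunk above: previously an unrecognized embedder model fell off the end of the if/elif chain and the method returned None; the added `else` now fails fast. What callers should expect after this change, sketched with a made-up model name:

    # Hypothetical unsupported embedder config; the added else-branch raises
    # ValueError instead of silently returning None.
    embedder_config = {"model": "unknown-provider/some-model"}
    try:
        embedder = graph._create_embedder(embedder_config)  # graph: an AbstractGraph subclass instance
    except ValueError as err:
        print(err)  # "Model provided by the configuration not supported"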