@@ -63,13 +63,10 @@ def __init__(self, prompt: str, config: dict,
         self.cache_path = self.config.get("cache_path", False)
         self.browser_base = self.config.get("browser_base")
 
-        # Create the graph
         self.graph = self._create_graph()
         self.final_state = None
         self.execution_info = None
 
-        # Set common configuration parameters
-
         verbose = bool(config and config.get("verbose"))
 
         if verbose:
@@ -87,12 +84,10 @@ def __init__(self, prompt: str, config: dict,
 
         self.set_common_params(common_params, overwrite=True)
 
-        # set burr config
        self.burr_kwargs = config.get("burr_kwargs", None)
        if self.burr_kwargs is not None:
            self.graph.use_burr = True
            if "app_instance_id" not in self.burr_kwargs:
-                # set a random uuid for the app_instance_id to avoid conflicts
                self.burr_kwargs["app_instance_id"] = str(uuid.uuid4())
 
            self.graph.burr_config = self.burr_kwargs
@@ -125,7 +120,6 @@ def _create_llm(self, llm_config: dict) -> object:
        llm_defaults = {"temperature": 0, "streaming": False}
        llm_params = {**llm_defaults, **llm_config}
 
-        # If model instance is passed directly instead of the model details
        if "model_instance" in llm_params:
            try:
                self.model_token = llm_params["model_tokens"]
@@ -145,18 +139,14 @@ def handle_model(model_name, provider, token_key, default_token=8192):
                warnings.simplefilter("ignore")
                return init_chat_model(**llm_params)
 
-        known_models = ["chatgpt", "gpt", "openai", "azure_openai", "google_genai",
-                        "ollama", "oneapi", "nvidia", "groq", "google_vertexai",
-                        "bedrock", "mistralai", "hugging_face", "deepseek", "ernie", "fireworks"]
-
+        known_models = {"chatgpt", "gpt", "openai", "azure_openai", "google_genai",
+                        "ollama", "oneapi", "nvidia", "groq", "google_vertexai",
+                        "bedrock", "mistralai", "hugging_face", "deepseek", "ernie", "fireworks"}
 
        if llm_params["model"].split("/")[0] not in known_models and llm_params["model"].split("-")[0] not in known_models:
            raise ValueError(f"Model '{llm_params['model']}' is not supported")
 
        try:
-            if "azure" in llm_params["model"]:
-                model_name = llm_params["model"].split("/")[-1]
-                return handle_model(model_name, "azure_openai", model_name)
            if "fireworks" in llm_params["model"]:
                model_name = "/".join(llm_params["model"].split("/")[1:])
                token_key = llm_params["model"].split("/")[-1]
@@ -207,7 +197,6 @@ def handle_model(model_name, provider, token_key, default_token=8192):
                return ErnieBotChat(llm_params)
 
            elif "oneapi" in llm_params["model"]:
-                # take the model after the last dash
                llm_params["model"] = llm_params["model"].split("/")[-1]
                try:
                    self.model_token = models_tokens["oneapi"][llm_params["model"]]
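
Note on the known_models change above: switching from a list to a set keeps the membership semantics identical while making the prefix lookup O(1) on average. A minimal, self-contained sketch of how that guard behaves (illustrative only; is_supported is a hypothetical helper, not part of this codebase):

    known_models = {"chatgpt", "gpt", "openai", "azure_openai", "google_genai",
                    "ollama", "oneapi", "nvidia", "groq", "google_vertexai",
                    "bedrock", "mistralai", "hugging_face", "deepseek", "ernie", "fireworks"}

    def is_supported(model: str) -> bool:
        # Mirrors the guard in _create_llm: the string before the first "/"
        # (provider) or before the first "-" (model family) must be known.
        return (model.split("/")[0] in known_models
                or model.split("-")[0] in known_models)

    assert is_supported("openai/gpt-4o")      # provider prefix matches
    assert is_supported("gpt-4o")             # family prefix ("gpt") matches
    assert not is_supported("unknown/model")  # neither prefix is known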