 from importlib.metadata import version
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypedDict, Union, cast
 
+from pydantic import BaseModel
+
 from literalai.helper import ensure_values_serializable
 from literalai.observability.generation import (
     ChatGeneration,
@@ -148,6 +150,8 @@ def process_content(self, content: Any, root=True):
             return [self._convert_message(m) for m in content]
         elif self._is_message(content):
             return self._convert_message(content)
+        elif isinstance(content, BaseModel):
+            return content.model_dump()
         elif isinstance(content, dict):
             processed_dict = {}
             for key, value in content.items():
@@ -186,7 +190,9 @@ def _build_llm_settings(
         }
 
         # make sure there is no api key specification
-        settings = {k: v for k, v in merged.items() if not k.endswith("_api_key")}
+        settings = self.process_content(
+            {k: v for k, v in merged.items() if not k.endswith("_api_key")}
+        )
         model_keys = ["azure_deployment", "deployment_name", "model", "model_name"]
         model = next((settings[k] for k in model_keys if k in settings), None)
         tools = None
@@ -203,6 +209,7 @@ def _build_llm_settings(
203209 "RunnableParallel" ,
204210 "RunnableAssign" ,
205211 "RunnableLambda" ,
212+ "structured_outputs_parser" ,
206213 "<lambda>" ,
207214 ]
208215 DEFAULT_TO_KEEP = ["retriever" , "llm" , "agent" , "chain" , "tool" ]
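
Note on the change: the new elif branch relies on pydantic v2's model_dump() to turn BaseModel instances into plain dictionaries, and _build_llm_settings now routes the api-key-stripped settings through process_content so that nested models (such as structured-output schemas) serialize cleanly. The sketch below is not the handler's actual code; it is a minimal standalone illustration of that behaviour, where SamplingParams and scrub_settings are made-up names used only for this example.

    from typing import Any

    from pydantic import BaseModel


    class SamplingParams(BaseModel):
        # Hypothetical settings payload, only for illustration.
        temperature: float = 0.2
        max_tokens: int = 256


    def scrub_settings(merged: dict) -> dict:
        """Drop *_api_key entries, then make the remaining values JSON-friendly:
        pydantic models become plain dicts, containers are walked recursively."""

        def process(value: Any) -> Any:
            if isinstance(value, BaseModel):
                return value.model_dump()  # pydantic v2 API
            if isinstance(value, dict):
                return {k: process(v) for k, v in value.items()}
            if isinstance(value, list):
                return [process(v) for v in value]
            return value

        return process({k: v for k, v in merged.items() if not k.endswith("_api_key")})


    if __name__ == "__main__":
        merged = {
            "model": "gpt-4o-mini",
            "openai_api_key": "not-a-real-key",  # stripped by the filter
            "params": SamplingParams(),          # dumped to a plain dict
        }
        print(scrub_settings(merged))
        # {'model': 'gpt-4o-mini', 'params': {'temperature': 0.2, 'max_tokens': 256}}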