3232
3333
class RunMetadata(TypedDict, total=False):
    """Per-run metadata captured by the callback handler.

    All keys are optional (``total=False``); a span run records only
    name/input/start_time, while an LLM run also records provider, model,
    model_params and base_url.
    """

    input: Any  # input of the run: messages, prompt variables, etc.
    name: str  # name of the run: chain name, model name, etc.
    provider: str  # provider of the run: OpenAI, Anthropic
    model: str  # model used in the run
    model_params: Dict[str, Any]  # model parameters: temperature, max_tokens, etc.
    base_url: str  # base URL of the provider's API used in the run
    start_time: float  # start time of the run (epoch seconds)
    end_time: float  # end time of the run (epoch seconds)
4251
4352
4453RunStorage = Dict [UUID , RunMetadata ]
@@ -119,8 +128,7 @@ def on_chain_start(
119128 self ._log_debug_event ("on_chain_start" , run_id , parent_run_id , inputs = inputs )
120129 self ._set_parent_of_run (run_id , parent_run_id )
121130 if parent_run_id is None and self ._trace_name is None :
122- self ._trace_name = self ._get_langchain_run_name (serialized , ** kwargs )
123- self ._trace_input = inputs
131+ self ._set_span_metadata (run_id , self ._get_langchain_run_name (serialized , ** kwargs ), inputs )
124132
125133 def on_chat_model_start (
126134 self ,
@@ -134,7 +142,7 @@ def on_chat_model_start(
134142 self ._log_debug_event ("on_chat_model_start" , run_id , parent_run_id , messages = messages )
135143 self ._set_parent_of_run (run_id , parent_run_id )
136144 input = [_convert_message_to_dict (message ) for row in messages for message in row ]
137- self ._set_run_metadata (serialized , run_id , input , ** kwargs )
145+ self ._set_llm_metadata (serialized , run_id , input , ** kwargs )
138146
139147 def on_llm_start (
140148 self ,
@@ -147,7 +155,7 @@ def on_llm_start(
147155 ):
148156 self ._log_debug_event ("on_llm_start" , run_id , parent_run_id , prompts = prompts )
149157 self ._set_parent_of_run (run_id , parent_run_id )
150- self ._set_run_metadata (serialized , run_id , prompts , ** kwargs )
158+ self ._set_llm_metadata (serialized , run_id , prompts , ** kwargs )
151159
152160 def on_llm_new_token (
153161 self ,
@@ -204,7 +212,7 @@ def on_chain_end(
204212 self ._pop_parent_of_run (run_id )
205213
206214 if parent_run_id is None :
207- self ._capture_trace (run_id , outputs = outputs )
215+ self ._pop_trace_and_capture (run_id , outputs = outputs )
208216
209217 def on_chain_error (
210218 self ,
@@ -218,7 +226,7 @@ def on_chain_error(
218226 self ._pop_parent_of_run (run_id )
219227
220228 if parent_run_id is None :
221- self ._capture_trace (run_id , outputs = None )
229+ self ._pop_trace_and_capture (run_id , outputs = None )
222230
223231 def on_llm_end (
224232 self ,
@@ -253,7 +261,7 @@ def on_llm_end(
253261 "$ai_provider" : run .get ("provider" ),
254262 "$ai_model" : run .get ("model" ),
255263 "$ai_model_parameters" : run .get ("model_params" ),
256- "$ai_input" : with_privacy_mode (self ._client , self ._privacy_mode , run .get ("messages " )),
264+ "$ai_input" : with_privacy_mode (self ._client , self ._privacy_mode , run .get ("input " )),
257265 "$ai_output_choices" : with_privacy_mode (self ._client , self ._privacy_mode , output ),
258266 "$ai_http_status" : 200 ,
259267 "$ai_input_tokens" : input_tokens ,
@@ -292,7 +300,7 @@ def on_llm_error(
292300 "$ai_provider" : run .get ("provider" ),
293301 "$ai_model" : run .get ("model" ),
294302 "$ai_model_parameters" : run .get ("model_params" ),
295- "$ai_input" : with_privacy_mode (self ._client , self ._privacy_mode , run .get ("messages " )),
303+ "$ai_input" : with_privacy_mode (self ._client , self ._privacy_mode , run .get ("input " )),
296304 "$ai_http_status" : _get_http_status (error ),
297305 "$ai_latency" : latency ,
298306 "$ai_trace_id" : trace_id ,
@@ -377,7 +385,14 @@ def _find_root_run(self, run_id: UUID) -> UUID:
377385 id = self ._parent_tree [id ]
378386 return id
379387
380- def _set_run_metadata (
388+ def _set_span_metadata (self , run_id : UUID , name : str , input : Any ):
389+ self ._runs [run_id ] = {
390+ "name" : name ,
391+ "input" : input ,
392+ "start_time" : time .time (),
393+ }
394+
395+ def _set_llm_metadata (
381396 self ,
382397 serialized : Dict [str , Any ],
383398 run_id : UUID ,
@@ -387,7 +402,7 @@ def _set_run_metadata(
387402 ** kwargs ,
388403 ):
389404 run : RunMetadata = {
390- "messages " : messages ,
405+ "input " : messages ,
391406 "start_time" : time .time (),
392407 }
393408 if isinstance (invocation_params , dict ):
@@ -450,12 +465,16 @@ def _get_langchain_run_name(self, serialized: Optional[Dict[str, Any]], **kwargs
450465 except (KeyError , TypeError ):
451466 pass
452467
453- def _capture_trace (self , run_id : UUID , * , outputs : Optional [Dict [str , Any ]]):
468+ def _pop_trace_and_capture (self , run_id : UUID , * , outputs : Optional [Dict [str , Any ]]):
454469 trace_id = self ._get_trace_id (run_id )
470+ run = self ._pop_run_metadata (run_id )
471+ if not run :
472+ return
455473 event_properties = {
456- "$ai_trace_name" : self . _trace_name ,
474+ "$ai_trace_name" : run . get ( "name" ) ,
457475 "$ai_trace_id" : trace_id ,
458- "$ai_input_state" : with_privacy_mode (self ._client , self ._privacy_mode , self ._trace_input ),
476+ "$ai_input_state" : with_privacy_mode (self ._client , self ._privacy_mode , run .get ("input" )),
477+ "$ai_latency" : run .get ("end_time" , 0 ) - run .get ("start_time" , 0 ),
459478 ** self ._properties ,
460479 }
461480 if outputs is not None :
0 commit comments