 from langchain_core.messages import BaseMessage  # type: ignore
 from langchain_core.outputs import LLMResult  # type: ignore
 
-from opentelemetry.instrumentation.langchain.span_manager import _SpanManager
-from opentelemetry.semconv._incubating.attributes import (
-    gen_ai_attributes as GenAI,
-)
-from opentelemetry.trace import Tracer
+from opentelemetry.instrumentation.langchain.invocation_manager import _InvocationManager
 
+from opentelemetry.util.genai.handler import TelemetryHandler
+from opentelemetry.util.genai.types import LLMInvocation, Text, InputMessage, OutputMessage, Error
 
 class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler):  # type: ignore[misc]
     """
@@ -35,13 +33,11 @@ class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler): # type: ignor
 
     def __init__(
         self,
-        tracer: Tracer,
+        telemetry_handler: TelemetryHandler
     ) -> None:
         super().__init__()  # type: ignore
-
-        self.span_manager = _SpanManager(
-            tracer=tracer,
-        )
+        self._telemetry_handler = telemetry_handler
+        self._invocation_manager = _InvocationManager()
 
     def on_chat_model_start(
         self,
@@ -82,59 +78,68 @@ def on_chat_model_start(
         if request_model == "unknown":
             return
 
-        span = self.span_manager.create_chat_span(
-            run_id=run_id,
-            parent_run_id=parent_run_id,
-            request_model=request_model,
-        )
-
-        if params is not None:
-            top_p = params.get("top_p")
-            if top_p is not None:
-                span.set_attribute(GenAI.GEN_AI_REQUEST_TOP_P, top_p)
-            frequency_penalty = params.get("frequency_penalty")
-            if frequency_penalty is not None:
-                span.set_attribute(
-                    GenAI.GEN_AI_REQUEST_FREQUENCY_PENALTY, frequency_penalty
-                )
-            presence_penalty = params.get("presence_penalty")
-            if presence_penalty is not None:
-                span.set_attribute(
-                    GenAI.GEN_AI_REQUEST_PRESENCE_PENALTY, presence_penalty
-                )
-            stop_sequences = params.get("stop")
-            if stop_sequences is not None:
-                span.set_attribute(
-                    GenAI.GEN_AI_REQUEST_STOP_SEQUENCES, stop_sequences
-                )
-            seed = params.get("seed")
-            if seed is not None:
-                span.set_attribute(GenAI.GEN_AI_REQUEST_SEED, seed)
-            # ChatOpenAI
-            temperature = params.get("temperature")
-            if temperature is not None:
-                span.set_attribute(
-                    GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature
-                )
-            # ChatOpenAI
-            max_tokens = params.get("max_completion_tokens")
-            if max_tokens is not None:
-                span.set_attribute(GenAI.GEN_AI_REQUEST_MAX_TOKENS, max_tokens)
-
+        # TODO: uncomment and modify following after PR #3862 is merged
+        # if params is not None:
+        #     top_p = params.get("top_p")
+        #     if top_p is not None:
+        #         span.set_attribute(GenAI.GEN_AI_REQUEST_TOP_P, top_p)
+        #     frequency_penalty = params.get("frequency_penalty")
+        #     if frequency_penalty is not None:
+        #         span.set_attribute(
+        #             GenAI.GEN_AI_REQUEST_FREQUENCY_PENALTY, frequency_penalty
+        #         )
+        #     presence_penalty = params.get("presence_penalty")
+        #     if presence_penalty is not None:
+        #         span.set_attribute(
+        #             GenAI.GEN_AI_REQUEST_PRESENCE_PENALTY, presence_penalty
+        #         )
+        #     stop_sequences = params.get("stop")
+        #     if stop_sequences is not None:
+        #         span.set_attribute(
+        #             GenAI.GEN_AI_REQUEST_STOP_SEQUENCES, stop_sequences
+        #         )
+        #     seed = params.get("seed")
+        #     if seed is not None:
+        #         span.set_attribute(GenAI.GEN_AI_REQUEST_SEED, seed)
+        #     # ChatOpenAI
+        #     temperature = params.get("temperature")
+        #     if temperature is not None:
+        #         span.set_attribute(
+        #             GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature
+        #         )
+        #     # ChatOpenAI
+        #     max_tokens = params.get("max_completion_tokens")
+        #     if max_tokens is not None:
+        #         span.set_attribute(GenAI.GEN_AI_REQUEST_MAX_TOKENS, max_tokens)
+        #
+        provider = "unknown"
         if metadata is not None:
             provider = metadata.get("ls_provider")
-            if provider is not None:
-                span.set_attribute("gen_ai.provider.name", provider)
-            # ChatBedrock
-            temperature = metadata.get("ls_temperature")
-            if temperature is not None:
-                span.set_attribute(
-                    GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature
-                )
-            # ChatBedrock
-            max_tokens = metadata.get("ls_max_tokens")
-            if max_tokens is not None:
-                span.set_attribute(GenAI.GEN_AI_REQUEST_MAX_TOKENS, max_tokens)
+
+            # TODO: uncomment and modify following after PR #3862 is merged
+
+            # # ChatBedrock
+            # temperature = metadata.get("ls_temperature")
+            # if temperature is not None:
+            #     span.set_attribute(
+            #         GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature
+            #     )
+            # # ChatBedrock
+            # max_tokens = metadata.get("ls_max_tokens")
+            # if max_tokens is not None:
+            #     span.set_attribute(GenAI.GEN_AI_REQUEST_MAX_TOKENS, max_tokens)
+
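+        # messages is a batch: each inner list holds the chat history for one prompt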
+        input_messages: list[InputMessage] = []
+        for sub_messages in messages:
+            for message in sub_messages:
+                content = get_property_value(message, "content")
+                role = get_property_value(message, "type")
+                parts = [Text(content=content, type="text")]
+                input_messages.append(InputMessage(parts=parts, role=role))
+
+        llm_invocation = LLMInvocation(request_model=request_model, input_messages=input_messages, provider=provider)
+        llm_invocation = self._telemetry_handler.start_llm(invocation=llm_invocation)
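+        # Cache the invocation under run_id so on_llm_end / on_llm_error can retrieve it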
+        self._invocation_manager.add_invocation_state(run_id=run_id, parent_run_id=parent_run_id, invocation=llm_invocation)
 
     def on_llm_end(
         self,
@@ -144,25 +149,28 @@ def on_llm_end(
         parent_run_id: UUID | None,
         **kwargs: Any,
     ) -> None:
-        span = self.span_manager.get_span(run_id)
+        invocation = self._invocation_manager.get_invocation(run_id=run_id)
 
-        if span is None:
-            # If the span does not exist, we cannot set attributes or end it
+        if invocation is None or not isinstance(invocation, LLMInvocation):
+            # If the invocation does not exist, we cannot set attributes or end it
             return
 
-        finish_reasons: list[str] = []
+        llm_invocation: LLMInvocation = invocation
+
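+        # Walk the response generations to collect output messages, finish reasons and token usage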
+        output_messages: list[OutputMessage] = []
         for generation in getattr(response, "generations", []):  # type: ignore
             for chat_generation in generation:
+                # Get finish reason
+                finish_reason = "unknown"  # default so the OutputMessage below never reads an unbound name
                 generation_info = getattr(
                     chat_generation, "generation_info", None
                 )
                 if generation_info is not None:
                     finish_reason = generation_info.get(
                         "finish_reason", "unknown"
                     )
-                    if finish_reason is not None:
-                        finish_reasons.append(str(finish_reason))
+
                 if chat_generation.message:
+                    # Get finish reason if generation_info is None above
                     if (
                         generation_info is None
                         and chat_generation.message.response_metadata
@@ -172,46 +180,46 @@ def on_llm_end(
172180 "stopReason" , "unknown"
173181 )
174182 )
175- if finish_reason is not None :
176- finish_reasons .append (str (finish_reason ))
183+
184+ # Get message content
185+ parts = [Text (content = get_property_value (chat_generation .message , "content" ), type = "text" )]
186+ role = get_property_value (chat_generation .message , "type" )
187+ output_message = OutputMessage (role = role , parts = parts , finish_reason = finish_reason )
188+ output_messages .append (output_message )
189+
190+ # Get token usage if available
177191 if chat_generation .message .usage_metadata :
178192 input_tokens = (
179193 chat_generation .message .usage_metadata .get (
180194 "input_tokens" , 0
181195 )
182196 )
197+ llm_invocation .input_tokens = input_tokens
198+
183199 output_tokens = (
184200 chat_generation .message .usage_metadata .get (
185201 "output_tokens" , 0
186202 )
187203 )
188- span .set_attribute (
189- GenAI .GEN_AI_USAGE_INPUT_TOKENS , input_tokens
190- )
191- span .set_attribute (
192- GenAI .GEN_AI_USAGE_OUTPUT_TOKENS , output_tokens
193- )
204+ llm_invocation .output_tokens = output_tokens
194205
195- span .set_attribute (
196- GenAI .GEN_AI_RESPONSE_FINISH_REASONS , finish_reasons
197- )
206+ llm_invocation .output_messages = output_messages
198207
199208 llm_output = getattr (response , "llm_output" , None ) # type: ignore
200209 if llm_output is not None :
201210 response_model = llm_output .get ("model_name" ) or llm_output .get (
202211 "model"
203212 )
204213 if response_model is not None :
205- span .set_attribute (
206- GenAI .GEN_AI_RESPONSE_MODEL , str (response_model )
207- )
214+ llm_invocation .response_model_name = str (response_model )
208215
209216 response_id = llm_output .get ("id" )
210217 if response_id is not None :
211- span . set_attribute ( GenAI . GEN_AI_RESPONSE_ID , str (response_id ) )
218+ llm_invocation . response_id = str (response_id )
212219
213- # End the LLM span
214- self .span_manager .end_span (run_id )
+        invocation = self._telemetry_handler.stop_llm(invocation=invocation)
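+        # Drop the cached per-run state once the invocation's span has stopped recording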
+        if not invocation.span.is_recording():
+            self._invocation_manager.delete_invocation_state(run_id=run_id)
 
     def on_llm_error(
         self,
@@ -221,4 +229,22 @@ def on_llm_error(
         parent_run_id: UUID | None,
         **kwargs: Any,
     ) -> None:
-        self.span_manager.handle_error(error, run_id)
+        invocation = self._invocation_manager.get_invocation(run_id=run_id)
+
+        if invocation is None or not isinstance(invocation, LLMInvocation):
+            # If the invocation does not exist, we cannot set attributes or end it
+            return
+
+        invocation: LLMInvocation = invocation
+
+        error = Error(message=str(error), type=type(error))
+        invocation = self._telemetry_handler.fail_llm(invocation=invocation, error=error)
+        if not invocation.span.is_recording():
+            self._invocation_manager.delete_invocation_state(run_id=run_id)
+
+
+def get_property_value(obj, property_name):
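+    """Return property_name from obj, reading a dict key or an attribute; None if absent."""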
+    if isinstance(obj, dict):
+        return obj.get(property_name, None)
+
+    return getattr(obj, property_name, None)
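
A minimal wiring sketch (not part of the diff) showing how the reworked handler might be used; the TelemetryHandler() default construction and the chat_model / messages names are assumptions for illustration, not part of this PR:

    # Hypothetical usage of the callback handler changed above.
    from opentelemetry.util.genai.handler import TelemetryHandler

    handler = OpenTelemetryLangChainCallbackHandler(
        telemetry_handler=TelemetryHandler(),
    )
    # Registering the handler as a LangChain callback makes each chat-model
    # run start, stop, or fail an LLMInvocation via the telemetry handler:
    # response = chat_model.invoke(messages, config={"callbacks": [handler]})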