@@ -27,7 +27,7 @@ class _SpanState:
     children: List[UUID] = field(default_factory=list)


-class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler): # type: ignore[misc]
+class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler):  # type: ignore[misc]
     """
     A callback handler for LangChain that uses OpenTelemetry to create spans for LLM calls (and, in future, for chains, tools, etc.).
     """
@@ -36,7 +36,7 @@ def __init__(
         self,
         tracer: Tracer,
     ) -> None:
-        super().__init__() # type: ignore
+        super().__init__()  # type: ignore
         self._tracer = tracer

         # Map from run_id -> _SpanState, to keep track of spans and parent/child relationships
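
[Note] A standalone sketch of that run-id bookkeeping, reusing the _SpanState shape from the first hunk (the Optional[Span] default is mine, to keep the snippet runnable without a tracer):

    from dataclasses import dataclass, field
    from typing import Dict, List, Optional
    from uuid import UUID, uuid4

    from opentelemetry.trace import Span

    @dataclass
    class _SpanState:
        span: Optional[Span] = None                       # span for this run
        children: List[UUID] = field(default_factory=list)

    spans: Dict[UUID, _SpanState] = {}
    parent_id, child_id = uuid4(), uuid4()                # stand-ins for LangChain run_ids
    spans[parent_id] = _SpanState()
    spans[child_id] = _SpanState()
    spans[parent_id].children.append(child_id)            # parent/child link recorded by run_id
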
@@ -53,7 +53,9 @@ def _create_span(
         if parent_run_id is not None and parent_run_id in self.spans:
             parent_span = self.spans[parent_run_id].span
             ctx = set_span_in_context(parent_span)
-            span = self._tracer.start_span(name=span_name, kind=kind, context=ctx)
+            span = self._tracer.start_span(
+                name=span_name, kind=kind, context=ctx
+            )
         else:
             # top-level or missing parent
             span = self._tracer.start_span(name=span_name, kind=kind)
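
[Note] The set_span_in_context call is what makes the parent/child relationship explicit instead of relying on the ambient context. A self-contained illustration using only the stock OpenTelemetry API:

    from opentelemetry import trace
    from opentelemetry.trace import set_span_in_context

    tracer = trace.get_tracer(__name__)

    parent = tracer.start_span("chain")            # span tracked for the parent run_id
    ctx = set_span_in_context(parent)              # context object carrying the parent span
    child = tracer.start_span("llm", context=ctx)  # parented without touching the ambient context
    child.end()
    parent.end()
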
@@ -72,14 +74,16 @@ def _create_llm_span(
         parent_run_id: Optional[UUID],
         name: str,
     ) -> Span:
-
         span = self._create_span(
             run_id=run_id,
             parent_run_id=parent_run_id,
             span_name=f"{name}.{GenAI.GenAiOperationNameValues.CHAT.value}",
             kind=SpanKind.CLIENT,
         )
-        span.set_attribute(GenAI.GEN_AI_OPERATION_NAME, GenAI.GenAiOperationNameValues.CHAT.value)
+        span.set_attribute(
+            GenAI.GEN_AI_OPERATION_NAME,
+            GenAI.GenAiOperationNameValues.CHAT.value,
+        )
         span.set_attribute(GenAI.GEN_AI_SYSTEM, name)

         return span
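
[Note] Those GenAI constants are plain string attribute keys from the (incubating) GenAI semantic conventions; the import path below is an assumption based on the opentelemetry-semconv package layout:

    from opentelemetry.semconv._incubating.attributes import gen_ai_attributes as GenAI

    print(GenAI.GEN_AI_OPERATION_NAME)                # "gen_ai.operation.name"
    print(GenAI.GenAiOperationNameValues.CHAT.value)  # "chat"
    print(GenAI.GEN_AI_SYSTEM)                        # "gen_ai.system"
    # So for name="openai" the span is named "openai.chat" with those two attributes set.
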
@@ -99,15 +103,15 @@ def _get_span(self, run_id: UUID) -> Span:

     @dont_throw
     def on_chat_model_start(
-            self,
-            serialized: Dict[str, Any],
-            messages: List[List[BaseMessage]], # type: ignore
-            *,
-            run_id: UUID,
-            tags: Optional[List[str]] = None,
-            parent_run_id: Optional[UUID] = None,
-            metadata: Optional[Dict[str, Any]] = None,
-            **kwargs: Any,
+        self,
+        serialized: Dict[str, Any],
+        messages: List[List[BaseMessage]],  # type: ignore
+        *,
+        run_id: UUID,
+        tags: Optional[List[str]] = None,
+        parent_run_id: Optional[UUID] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+        **kwargs: Any,
     ) -> None:
         name = serialized.get("name") or kwargs.get("name") or "ChatLLM"
         span = self._create_llm_span(
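
[Note] In LangChain callbacks, serialized usually carries the chat model's class name under "name"; the fallback chain behaves as below (example values assumed):

    serialized = {"name": "ChatOpenAI"}  # typical payload; key presence is LangChain-dependent
    kwargs = {}
    name = serialized.get("name") or kwargs.get("name") or "ChatLLM"
    assert name == "ChatOpenAI"          # "ChatLLM" only applies when both sources are empty
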
@@ -126,18 +130,23 @@ def on_chat_model_start(
             span.set_attribute(GenAI.GEN_AI_REQUEST_TOP_P, top_p)
         frequency_penalty = invocation_params.get("frequency_penalty")
         if frequency_penalty is not None:
-            span.set_attribute(GenAI.GEN_AI_REQUEST_FREQUENCY_PENALTY, frequency_penalty)
+            span.set_attribute(
+                GenAI.GEN_AI_REQUEST_FREQUENCY_PENALTY, frequency_penalty
+            )
         presence_penalty = invocation_params.get("presence_penalty")
         if presence_penalty is not None:
-            span.set_attribute(GenAI.GEN_AI_REQUEST_PRESENCE_PENALTY, presence_penalty)
+            span.set_attribute(
+                GenAI.GEN_AI_REQUEST_PRESENCE_PENALTY, presence_penalty
+            )
         stop_sequences = invocation_params.get("stop")
         if stop_sequences is not None:
-            span.set_attribute(GenAI.GEN_AI_REQUEST_STOP_SEQUENCES, stop_sequences)
+            span.set_attribute(
+                GenAI.GEN_AI_REQUEST_STOP_SEQUENCES, stop_sequences
+            )
         seed = invocation_params.get("seed")
         if seed is not None:
             span.set_attribute(GenAI.GEN_AI_REQUEST_SEED, seed)

-
         if metadata is not None:
             max_tokens = metadata.get("ls_max_tokens")
             if max_tokens is not None:
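
[Note] The invocation_params dict is pulled from the callback's kwargs (its assignment falls in the lines elided between hunks), and metadata carries LangSmith-style ls_* keys, as read in the hunk above and the next one. Assumed, representative shapes:

    invocation_params = {          # representative values, not exhaustive
        "top_p": 1.0,
        "frequency_penalty": 0.0,
        "presence_penalty": 0.0,
        "stop": ["\n\n"],
        "seed": 1234,
    }
    metadata = {
        "ls_provider": "openai",   # mapped to "gen_ai.provider.name"
        "ls_max_tokens": 256,
        "ls_temperature": 0.7,
    }
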
@@ -148,35 +157,45 @@ def on_chat_model_start(
                 span.set_attribute("gen_ai.provider.name", provider)
             temperature = metadata.get("ls_temperature")
             if temperature is not None:
-                span.set_attribute(GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature)
+                span.set_attribute(
+                    GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature
+                )

     @dont_throw
     def on_llm_end(
-            self,
-            response: LLMResult, # type: ignore
-            *,
-            run_id: UUID,
-            parent_run_id: Optional[UUID] = None,
-            **kwargs: Any,
+        self,
+        response: LLMResult,  # type: ignore
+        *,
+        run_id: UUID,
+        parent_run_id: Optional[UUID] = None,
+        **kwargs: Any,
     ) -> None:
         span = self._get_span(run_id)

         finish_reasons: List[str] = []
-        for generation in getattr(response, "generations", []): # type: ignore
+        for generation in getattr(response, "generations", []):  # type: ignore
             for chat_generation in generation:
-                generation_info = getattr(chat_generation, "generation_info", None)
+                generation_info = getattr(
+                    chat_generation, "generation_info", None
+                )
                 if generation_info is not None:
                     finish_reason = generation_info.get("finish_reason")
                     if finish_reason is not None:
                         finish_reasons.append(str(finish_reason) or "error")

-        span.set_attribute(GenAI.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons)
+        span.set_attribute(
+            GenAI.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons
+        )

-        llm_output = getattr(response, "llm_output", None) # type: ignore
+        llm_output = getattr(response, "llm_output", None)  # type: ignore
         if llm_output is not None:
-            response_model = llm_output.get("model_name") or llm_output.get("model")
+            response_model = llm_output.get("model_name") or llm_output.get(
+                "model"
+            )
             if response_model is not None:
-                span.set_attribute(GenAI.GEN_AI_RESPONSE_MODEL, str(response_model))
+                span.set_attribute(
+                    GenAI.GEN_AI_RESPONSE_MODEL, str(response_model)
+                )

             response_id = llm_output.get("id")
             if response_id is not None:
@@ -187,27 +206,35 @@ def on_llm_end(
             if usage:
                 prompt_tokens = usage.get("prompt_tokens", 0)
                 completion_tokens = usage.get("completion_tokens", 0)
-                span.set_attribute(GenAI.GEN_AI_USAGE_INPUT_TOKENS,
-                                   int(prompt_tokens) if prompt_tokens is not None else 0)
-                span.set_attribute(GenAI.GEN_AI_USAGE_OUTPUT_TOKENS,
-                                   int(completion_tokens) if completion_tokens is not None else 0)
+                span.set_attribute(
+                    GenAI.GEN_AI_USAGE_INPUT_TOKENS,
+                    int(prompt_tokens) if prompt_tokens is not None else 0,
+                )
+                span.set_attribute(
+                    GenAI.GEN_AI_USAGE_OUTPUT_TOKENS,
+                    int(completion_tokens)
+                    if completion_tokens is not None
+                    else 0,
+                )

         # End the LLM span
         self._end_span(run_id)

     @dont_throw
     def on_llm_error(
-            self,
-            error: BaseException,
-            *,
-            run_id: UUID,
-            parent_run_id: Optional[UUID] = None,
-            **kwargs: Any,
+        self,
+        error: BaseException,
+        *,
+        run_id: UUID,
+        parent_run_id: Optional[UUID] = None,
+        **kwargs: Any,
     ) -> None:
         self._handle_error(error, run_id)

     def _handle_error(self, error: BaseException, run_id: UUID):
         span = self._get_span(run_id)
         span.set_status(Status(StatusCode.ERROR, str(error)))
-        span.set_attribute(ErrorAttributes.ERROR_TYPE, type(error).__qualname__)
+        span.set_attribute(
+            ErrorAttributes.ERROR_TYPE, type(error).__qualname__
+        )
         self._end_span(run_id)
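
[Note] For reference, on_llm_end walks an LLMResult whose relevant fields look roughly like this. The exact key the handler reads usage from falls outside these hunks, so "token_usage" below is an assumption based on common provider output; all values are illustrative:

    llm_output = {
        "model_name": "gpt-4o-mini",      # -> gen_ai.response.model
        "id": "chatcmpl-123",             # hypothetical response id
        "token_usage": {                  # key name assumed; consulted as `usage`
            "prompt_tokens": 12,          # -> gen_ai.usage.input_tokens
            "completion_tokens": 34,      # -> gen_ai.usage.output_tokens
        },
    }
    generations = [[{"generation_info": {"finish_reason": "stop"}}]]
    # -> gen_ai.response.finish_reasons = ["stop"]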