@@ -27,7 +27,7 @@ class _SpanState:
     children: List[UUID] = field(default_factory=list)


-class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler): # type: ignore[misc]
+class OpenTelemetryLangChainCallbackHandler(BaseCallbackHandler):  # type: ignore[misc]
     """
     A callback handler for LangChain that uses OpenTelemetry to create spans for LLM calls, and for chains, tools, etc. in the future.
     """
@@ -36,7 +36,7 @@ def __init__(
         self,
         tracer: Tracer,
     ) -> None:
-        super().__init__() # type: ignore
+        super().__init__()  # type: ignore
         self._tracer = tracer

         # Map from run_id -> _SpanState, to keep track of spans and parent/child relationships
@@ -53,7 +53,9 @@ def _create_span(
         if parent_run_id is not None and parent_run_id in self.spans:
             parent_span = self.spans[parent_run_id].span
             ctx = set_span_in_context(parent_span)
-            span = self._tracer.start_span(name=span_name, kind=kind, context=ctx)
+            span = self._tracer.start_span(
+                name=span_name, kind=kind, context=ctx
+            )
         else:
             # top-level or missing parent
             span = self._tracer.start_span(name=span_name, kind=kind)
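For reference, here is a minimal standalone sketch (not part of this diff) of the parenting pattern used in _create_span, using only the public OpenTelemetry API; the tracer name "demo" and the span names are illustrative assumptions:

from opentelemetry import trace
from opentelemetry.trace import SpanKind, set_span_in_context

tracer = trace.get_tracer("demo")

# Start a root span, then hand it to the child via an explicit Context,
# mirroring the parent_run_id branch above.
parent = tracer.start_span("chain.root")
ctx = set_span_in_context(parent)
child = tracer.start_span(name="llm.chat", kind=SpanKind.CLIENT, context=ctx)
child.end()
parent.end()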
@@ -72,14 +74,16 @@ def _create_llm_span(
         parent_run_id: Optional[UUID],
         name: str,
     ) -> Span:
-
         span = self._create_span(
             run_id=run_id,
             parent_run_id=parent_run_id,
             span_name=f"{name}.{GenAI.GenAiOperationNameValues.CHAT.value}",
             kind=SpanKind.CLIENT,
         )
-        span.set_attribute(GenAI.GEN_AI_OPERATION_NAME, GenAI.GenAiOperationNameValues.CHAT.value)
+        span.set_attribute(
+            GenAI.GEN_AI_OPERATION_NAME,
+            GenAI.GenAiOperationNameValues.CHAT.value,
+        )
         span.set_attribute(GenAI.GEN_AI_SYSTEM, name)

         return span
@@ -99,15 +103,15 @@ def _get_span(self, run_id: UUID) -> Span:

     @dont_throw
     def on_chat_model_start(
-            self,
-            serialized: Dict[str, Any],
-            messages: List[List[BaseMessage]], # type: ignore
-            *,
-            run_id: UUID,
-            tags: Optional[List[str]] = None,
-            parent_run_id: Optional[UUID] = None,
-            metadata: Optional[Dict[str, Any]] = None,
-            **kwargs: Any,
+        self,
+        serialized: Dict[str, Any],
+        messages: List[List[BaseMessage]],  # type: ignore
+        *,
+        run_id: UUID,
+        tags: Optional[List[str]] = None,
+        parent_run_id: Optional[UUID] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+        **kwargs: Any,
     ) -> None:
         name = serialized.get("name") or kwargs.get("name") or "ChatLLM"
         span = self._create_llm_span(
@@ -126,18 +130,23 @@ def on_chat_model_start(
                 span.set_attribute(GenAI.GEN_AI_REQUEST_TOP_P, top_p)
             frequency_penalty = invocation_params.get("frequency_penalty")
             if frequency_penalty is not None:
-                span.set_attribute(GenAI.GEN_AI_REQUEST_FREQUENCY_PENALTY, frequency_penalty)
+                span.set_attribute(
+                    GenAI.GEN_AI_REQUEST_FREQUENCY_PENALTY, frequency_penalty
+                )
             presence_penalty = invocation_params.get("presence_penalty")
             if presence_penalty is not None:
-                span.set_attribute(GenAI.GEN_AI_REQUEST_PRESENCE_PENALTY, presence_penalty)
+                span.set_attribute(
+                    GenAI.GEN_AI_REQUEST_PRESENCE_PENALTY, presence_penalty
+                )
             stop_sequences = invocation_params.get("stop")
             if stop_sequences is not None:
-                span.set_attribute(GenAI.GEN_AI_REQUEST_STOP_SEQUENCES, stop_sequences)
+                span.set_attribute(
+                    GenAI.GEN_AI_REQUEST_STOP_SEQUENCES, stop_sequences
+                )
             seed = invocation_params.get("seed")
             if seed is not None:
                 span.set_attribute(GenAI.GEN_AI_REQUEST_SEED, seed)

-
         if metadata is not None:
             max_tokens = metadata.get("ls_max_tokens")
             if max_tokens is not None:
@@ -148,35 +157,45 @@ def on_chat_model_start(
                 span.set_attribute("gen_ai.provider.name", provider)
             temperature = metadata.get("ls_temperature")
             if temperature is not None:
-                span.set_attribute(GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature)
+                span.set_attribute(
+                    GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature
+                )

     @dont_throw
     def on_llm_end(
-            self,
-            response: LLMResult, # type: ignore
-            *,
-            run_id: UUID,
-            parent_run_id: Optional[UUID] = None,
-            **kwargs: Any,
+        self,
+        response: LLMResult,  # type: ignore
+        *,
+        run_id: UUID,
+        parent_run_id: Optional[UUID] = None,
+        **kwargs: Any,
     ) -> None:
         span = self._get_span(run_id)

         finish_reasons: List[str] = []
-        for generation in getattr(response, "generations", []): # type: ignore
+        for generation in getattr(response, "generations", []):  # type: ignore
             for chat_generation in generation:
-                generation_info = getattr(chat_generation, "generation_info", None)
+                generation_info = getattr(
+                    chat_generation, "generation_info", None
+                )
                 if generation_info is not None:
                     finish_reason = generation_info.get("finish_reason")
                     if finish_reason is not None:
                         finish_reasons.append(str(finish_reason) or "error")

-        span.set_attribute(GenAI.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons)
+        span.set_attribute(
+            GenAI.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons
+        )

-        llm_output = getattr(response, "llm_output", None) # type: ignore
+        llm_output = getattr(response, "llm_output", None)  # type: ignore
         if llm_output is not None:
-            response_model = llm_output.get("model_name") or llm_output.get("model")
+            response_model = llm_output.get("model_name") or llm_output.get(
+                "model"
+            )
             if response_model is not None:
-                span.set_attribute(GenAI.GEN_AI_RESPONSE_MODEL, str(response_model))
+                span.set_attribute(
+                    GenAI.GEN_AI_RESPONSE_MODEL, str(response_model)
+                )

             response_id = llm_output.get("id")
             if response_id is not None:
@@ -187,27 +206,35 @@ def on_llm_end(
             if usage:
                 prompt_tokens = usage.get("prompt_tokens", 0)
                 completion_tokens = usage.get("completion_tokens", 0)
-                span.set_attribute(GenAI.GEN_AI_USAGE_INPUT_TOKENS,
-                    int(prompt_tokens) if prompt_tokens is not None else 0)
-                span.set_attribute(GenAI.GEN_AI_USAGE_OUTPUT_TOKENS,
-                    int(completion_tokens) if completion_tokens is not None else 0)
+                span.set_attribute(
+                    GenAI.GEN_AI_USAGE_INPUT_TOKENS,
+                    int(prompt_tokens) if prompt_tokens is not None else 0,
+                )
+                span.set_attribute(
+                    GenAI.GEN_AI_USAGE_OUTPUT_TOKENS,
+                    int(completion_tokens)
+                    if completion_tokens is not None
+                    else 0,
+                )

         # End the LLM span
         self._end_span(run_id)

     @dont_throw
     def on_llm_error(
-            self,
-            error: BaseException,
-            *,
-            run_id: UUID,
-            parent_run_id: Optional[UUID] = None,
-            **kwargs: Any,
+        self,
+        error: BaseException,
+        *,
+        run_id: UUID,
+        parent_run_id: Optional[UUID] = None,
+        **kwargs: Any,
     ) -> None:
         self._handle_error(error, run_id)

     def _handle_error(self, error: BaseException, run_id: UUID):
         span = self._get_span(run_id)
         span.set_status(Status(StatusCode.ERROR, str(error)))
-        span.set_attribute(ErrorAttributes.ERROR_TYPE, type(error).__qualname__)
+        span.set_attribute(
+            ErrorAttributes.ERROR_TYPE, type(error).__qualname__
+        )
         self._end_span(run_id)