@@ -50,12 +50,10 @@ def traced_method(wrapped, instance, args, kwargs):
             **get_langtrace_attributes(version, service_provider),
             **get_llm_request_attributes(kwargs),
             **get_llm_url(instance),
-            SpanAttributes.LLM_URL.value: APIS["RERANK"]["URL"],
-            SpanAttributes.LLM_PATH.value: APIS["RERANK"]["ENDPOINT"],
-            SpanAttributes.LLM_REQUEST_DOCUMENTS.value: json.dumps(
-                kwargs.get("documents")
-            ),
-            SpanAttributes.LLM_COHERE_RERANK_QUERY.value: kwargs.get("query"),
+            SpanAttributes.LLM_URL: APIS["RERANK"]["URL"],
+            SpanAttributes.LLM_PATH: APIS["RERANK"]["ENDPOINT"],
+            SpanAttributes.LLM_REQUEST_DOCUMENTS: json.dumps(kwargs.get("documents")),
+            SpanAttributes.LLM_COHERE_RERANK_QUERY: kwargs.get("query"),
             **get_extra_attributes(),
         }

@@ -73,13 +71,11 @@ def traced_method(wrapped, instance, args, kwargs):
                     for _, doc in enumerate(result.results):
                         results.append(doc.json())
                     span.set_attribute(
-                        SpanAttributes.LLM_COHERE_RERANK_RESULTS.value, json.dumps(results)
+                        SpanAttributes.LLM_COHERE_RERANK_RESULTS, json.dumps(results)
                     )

                 if (hasattr(result, "response_id")) and (result.response_id is not None):
-                    span.set_attribute(
-                        SpanAttributes.LLM_RESPONSE_ID.value, result.response_id
-                    )
+                    span.set_attribute(SpanAttributes.LLM_RESPONSE_ID, result.response_id)

                 if hasattr(result, "meta") and result.meta is not None:
                     if (
@@ -89,16 +85,16 @@ def traced_method(wrapped, instance, args, kwargs):
                         usage = result.meta.billed_units
                         if usage is not None:
                             span.set_attribute(
-                                SpanAttributes.LLM_USAGE_PROMPT_TOKENS.value,
+                                SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
                                 usage.input_tokens or 0,
                             )
                             span.set_attribute(
-                                SpanAttributes.LLM_USAGE_COMPLETION_TOKENS.value,
+                                SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
                                 usage.output_tokens or 0,
                             )

                             span.set_attribute(
-                                SpanAttributes.LLM_USAGE_TOTAL_TOKENS.value,
+                                SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
                                 (usage.input_tokens or 0) + (usage.output_tokens or 0),
                             )

@@ -130,18 +126,14 @@ def traced_method(wrapped, instance, args, kwargs):
             **get_langtrace_attributes(version, service_provider),
             **get_llm_request_attributes(kwargs),
             **get_llm_url(instance),
-            SpanAttributes.LLM_URL.value: APIS["EMBED"]["URL"],
-            SpanAttributes.LLM_PATH.value: APIS["EMBED"]["ENDPOINT"],
-            SpanAttributes.LLM_REQUEST_EMBEDDING_INPUTS.value: json.dumps(
+            SpanAttributes.LLM_URL: APIS["EMBED"]["URL"],
+            SpanAttributes.LLM_PATH: APIS["EMBED"]["ENDPOINT"],
+            SpanAttributes.LLM_REQUEST_EMBEDDING_INPUTS: json.dumps(
                 kwargs.get("texts")
             ),
-            SpanAttributes.LLM_REQUEST_EMBEDDING_DATASET_ID.value: kwargs.get(
-                "dataset_id"
-            ),
-            SpanAttributes.LLM_REQUEST_EMBEDDING_INPUT_TYPE.value: kwargs.get(
-                "input_type"
-            ),
-            SpanAttributes.LLM_REQUEST_EMBEDDING_JOB_NAME.value: kwargs.get("name"),
+            SpanAttributes.LLM_REQUEST_EMBEDDING_DATASET_ID: kwargs.get("dataset_id"),
+            SpanAttributes.LLM_REQUEST_EMBEDDING_INPUT_TYPE: kwargs.get("input_type"),
+            SpanAttributes.LLM_REQUEST_EMBEDDING_JOB_NAME: kwargs.get("name"),
             **get_extra_attributes(),
         }

@@ -211,8 +203,8 @@ def traced_method(wrapped, instance, args, kwargs):
             **get_langtrace_attributes(version, service_provider),
             **get_llm_request_attributes(kwargs, prompts=prompts),
             **get_llm_url(instance),
-            SpanAttributes.LLM_URL.value: APIS["CHAT_CREATE"]["URL"],
-            SpanAttributes.LLM_PATH.value: APIS["CHAT_CREATE"]["ENDPOINT"],
+            SpanAttributes.LLM_URL: APIS["CHAT_CREATE"]["URL"],
+            SpanAttributes.LLM_PATH: APIS["CHAT_CREATE"]["ENDPOINT"],
             **get_extra_attributes(),
         }

@@ -248,17 +240,15 @@ def traced_method(wrapped, instance, args, kwargs):
                     result.generation_id is not None
                 ):
                     span.set_attribute(
-                        SpanAttributes.LLM_GENERATION_ID.value, result.generation_id
+                        SpanAttributes.LLM_GENERATION_ID, result.generation_id
                     )
                 if (hasattr(result, "response_id")) and (result.response_id is not None):
-                    span.set_attribute(
-                        SpanAttributes.LLM_RESPONSE_ID.value, result.response_id
-                    )
+                    span.set_attribute(SpanAttributes.LLM_RESPONSE_ID, result.response_id)
                 if (hasattr(result, "is_search_required")) and (
                     result.is_search_required is not None
                 ):
                     span.set_attribute(
-                        SpanAttributes.LLM_REQUEST_SEARCH_REQUIRED.value,
+                        SpanAttributes.LLM_REQUEST_SEARCH_REQUIRED,
                         result.is_search_required,
                     )

@@ -289,27 +279,25 @@ def traced_method(wrapped, instance, args, kwargs):
                             for item in result.chat_history
                         ]
                         span.set_attribute(
-                            SpanAttributes.LLM_COMPLETIONS.value, json.dumps(responses)
+                            SpanAttributes.LLM_COMPLETIONS, json.dumps(responses)
                         )
                     else:
                         responses = [{"role": "CHATBOT", "content": result.text}]
                         span.set_attribute(
-                            SpanAttributes.LLM_COMPLETIONS.value, json.dumps(responses)
+                            SpanAttributes.LLM_COMPLETIONS, json.dumps(responses)
                         )
                 elif hasattr(result, "tool_calls") and result.tool_calls is not None:
                     tool_calls = []
                     for tool_call in result.tool_calls:
                         tool_calls.append(tool_call.json())
                     span.set_attribute(
-                        SpanAttributes.LLM_TOOL_RESULTS.value, json.dumps(tool_calls)
-                    )
-                    span.set_attribute(
-                        SpanAttributes.LLM_COMPLETIONS.value, json.dumps([])
+                        SpanAttributes.LLM_TOOL_RESULTS, json.dumps(tool_calls)
                     )
+                    span.set_attribute(SpanAttributes.LLM_COMPLETIONS, json.dumps([]))
                 else:
                     responses = []
                     span.set_attribute(
-                        SpanAttributes.LLM_COMPLETIONS.value, json.dumps(responses)
+                        SpanAttributes.LLM_COMPLETIONS, json.dumps(responses)
                     )

                 # Get the usage
@@ -321,16 +309,16 @@ def traced_method(wrapped, instance, args, kwargs):
                         usage = result.meta.billed_units
                         if usage is not None:
                             span.set_attribute(
-                                SpanAttributes.LLM_USAGE_PROMPT_TOKENS.value,
+                                SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
                                 usage.input_tokens or 0,
                             )
                             span.set_attribute(
-                                SpanAttributes.LLM_USAGE_COMPLETION_TOKENS.value,
+                                SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
                                 usage.output_tokens or 0,
                             )

                             span.set_attribute(
-                                SpanAttributes.LLM_USAGE_TOTAL_TOKENS.value,
+                                SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
                                 (usage.input_tokens or 0) + (usage.output_tokens or 0),
                             )

@@ -390,9 +378,9 @@ def traced_method(wrapped, instance, args, kwargs):
             **get_langtrace_attributes(version, service_provider),
             **get_llm_request_attributes(kwargs, prompts=prompts),
             **get_llm_url(instance),
-            SpanAttributes.LLM_IS_STREAMING.value: True,
-            SpanAttributes.LLM_URL.value: APIS["CHAT_STREAM"]["URL"],
-            SpanAttributes.LLM_PATH.value: APIS["CHAT_STREAM"]["ENDPOINT"],
+            SpanAttributes.LLM_IS_STREAMING: True,
+            SpanAttributes.LLM_URL: APIS["CHAT_STREAM"]["URL"],
+            SpanAttributes.LLM_PATH: APIS["CHAT_STREAM"]["ENDPOINT"],
             **get_extra_attributes(),
         }

@@ -426,11 +414,7 @@ def traced_method(wrapped, instance, args, kwargs):
                             content = ""
                         span.add_event(
                             Event.STREAM_OUTPUT.value,
-                            {
-                                SpanAttributes.LLM_CONTENT_COMPLETION_CHUNK.value: "".join(
-                                    content
-                                )
-                            },
+                            {SpanAttributes.LLM_CONTENT_COMPLETION_CHUNK: "".join(content)},
                         )

                         if (
@@ -442,21 +426,21 @@ def traced_method(wrapped, instance, args, kwargs):
                                 response.generation_id is not None
                             ):
                                 span.set_attribute(
-                                    SpanAttributes.LLM_GENERATION_ID.value,
+                                    SpanAttributes.LLM_GENERATION_ID,
                                     response.generation_id,
                                 )
                             if (hasattr(response, "response_id")) and (
                                 response.response_id is not None
                             ):
                                 span.set_attribute(
-                                    SpanAttributes.LLM_RESPONSE_ID.value,
+                                    SpanAttributes.LLM_RESPONSE_ID,
                                     response.response_id,
                                 )
                             if (hasattr(response, "is_search_required")) and (
                                 response.is_search_required is not None
                             ):
                                 span.set_attribute(
-                                    SpanAttributes.LLM_REQUEST_SEARCH_REQUIRED.value,
+                                    SpanAttributes.LLM_REQUEST_SEARCH_REQUIRED,
                                     response.is_search_required,
                                 )

@@ -484,15 +468,15 @@ def traced_method(wrapped, instance, args, kwargs):
                                         for item in response.chat_history
                                     ]
                                     span.set_attribute(
-                                        SpanAttributes.LLM_COMPLETIONS.value,
+                                        SpanAttributes.LLM_COMPLETIONS,
                                         json.dumps(responses),
                                     )
                                 else:
                                     responses = [
                                         {"role": "CHATBOT", "content": response.text}
                                     ]
                                     span.set_attribute(
-                                        SpanAttributes.LLM_COMPLETIONS.value,
+                                        SpanAttributes.LLM_COMPLETIONS,
                                         json.dumps(responses),
                                     )

@@ -505,16 +489,16 @@ def traced_method(wrapped, instance, args, kwargs):
                                     usage = response.meta.billed_units
                                     if usage is not None:
                                         span.set_attribute(
-                                            SpanAttributes.LLM_USAGE_PROMPT_TOKENS.value,
+                                            SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
                                             usage.input_tokens or 0,
                                         )
                                         span.set_attribute(
-                                            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS.value,
+                                            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
                                             usage.output_tokens or 0,
                                         )

                                         span.set_attribute(
-                                            SpanAttributes.LLM_USAGE_TOTAL_TOKENS.value,
+                                            SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
                                             (usage.input_tokens or 0)
                                             + (usage.output_tokens or 0),
                                         )