@@ -33,7 +33,8 @@ def traced_method(wrapped, instance, args, kwargs):
3333 "llm.model" : kwargs .get ("model" ),
3434 "llm.stream" : kwargs .get ("stream" ),
3535 "url.full" : base_url ,
36- "llm.api" : api ["METHOD" ],
36+ "llm.api" : api ["ENDPOINT" ],
37+ "llm.response_format" : kwargs .get ("format" ),
3738 ** (extra_attributes if extra_attributes is not None else {}),
3839 }
3940
@@ -87,6 +88,10 @@ async def traced_method(wrapped, instance, args, kwargs):
8788 "langtrace.version" : v (LANGTRACE_SDK_NAME ),
8889 "llm.model" : kwargs .get ("model" ),
8990 "llm.stream" : kwargs .get ("stream" ),
91+ "llm.response_format" : kwargs .get ("format" ),
92+ "http.timeout" : (
93+ kwargs .get ("keep_alive" ) if "keep_alive" in kwargs else None
94+ ),
9095 ** (extra_attributes if extra_attributes is not None else {}),
9196 }
9297
@@ -134,32 +139,43 @@ def _set_response_attributes(span, response):
134139 if total_tokens > 0 :
135140 set_span_attribute (span , "llm.token.counts" , json .dumps (usage_dict ))
136141 set_span_attribute (span , "llm.finish_reason" , response .get ("done_reason" ))
137-
138142 if "message" in response :
139143 set_span_attribute (span , "llm.responses" , json .dumps ([response .get ("message" )]))
140144
141145 if "response" in response :
142146 set_span_attribute (
143- span , "llm.responses" , json .dumps ([response .get ("response" )])
147+ span ,
148+ "llm.responses" ,
149+ json .dumps ([{"role" : "assistant" , "content" : response .get ("response" )}]),
144150 )
145151
146152
@silently_fail
def _set_input_attributes(span, kwargs, attributes):
    """Record the LLM request inputs on *span*.

    Sets one span attribute per field of *attributes* (a pydantic-style
    model, dumped by alias), plus the prompt/messages payload and any
    sampling options the caller supplied.

    Args:
        span: the OpenTelemetry span to annotate.
        kwargs: the keyword arguments of the instrumented client call;
            may contain ``messages``, ``prompt``, and ``options``.
        attributes: model of the base span attributes (``model_dump``
            is assumed to yield attribute-name -> value pairs).

    Decorated with ``@silently_fail``: any exception here is swallowed
    so instrumentation can never break the traced call.
    """
    for field, value in attributes.model_dump(by_alias=True).items():
        set_span_attribute(span, field, value)

    # Chat-style calls carry a list of role/content messages.
    if "messages" in kwargs:
        set_span_attribute(
            span,
            "llm.prompts",
            json.dumps(kwargs.get("messages", [])),
        )
    # Completion-style calls carry a bare prompt string; normalize it to
    # the same role/content shape used for chat messages.
    if "prompt" in kwargs:
        set_span_attribute(
            span,
            "llm.prompts",
            json.dumps([{"role": "user", "content": kwargs.get("prompt", "")}]),
        )

    # BUG FIX: the original guarded on `"options" in kwargs` but read
    # `kwargs.get("options")` — an explicit `options=None` passed the
    # guard and raised AttributeError on `.get`, which @silently_fail
    # swallowed, silently dropping every option attribute. Guard on the
    # value instead.
    options = kwargs.get("options") or {}
    if options:
        for opt_key, attr_name in (
            ("temperature", "llm.temperature"),
            ("top_p", "llm.top_p"),
            ("frequency_penalty", "llm.frequency_penalty"),
            ("presence_penalty", "llm.presence_penalty"),
        ):
            set_span_attribute(span, attr_name, options.get(opt_key))
164180
165181
@@ -169,7 +185,6 @@ def _handle_streaming_response(span, response, api):
169185 accumulated_tokens = {"message" : {"content" : "" , "role" : "" }}
170186 if api == "completion" :
171187 accumulated_tokens = {"response" : "" }
172-
173188 span .add_event (Event .STREAM_START .value )
174189 try :
175190 for chunk in response :
0 commit comments