@@ -109,43 +109,6 @@ async def get_project_id_async(self, project_name: str) -> str:
         except httpx.HTTPStatusError as e:
             raise Exception(f"Failed to register project: {e.response.text}")
 
-    @staticmethod
-    def add_metadata_from_header(litellm_params: dict, metadata: dict) -> dict:
-        """
-        Adds metadata from proxy request headers to Braintrust logging if keys start with "braintrust_"
-        and overwrites litellm_params.metadata if already included.
-
-        For example if you want to append your trace to an existing `trace_id` via header, send
-        `headers: { ..., langfuse_existing_trace_id: your-existing-trace-id }` via proxy request.
-        """
-        if litellm_params is None:
-            return metadata
-
-        if litellm_params.get("proxy_server_request") is None:
-            return metadata
-
-        if metadata is None:
-            metadata = {}
-
-        proxy_headers = (
-            litellm_params.get("proxy_server_request", {}).get("headers", {}) or {}
-        )
-
-        for metadata_param_key in proxy_headers:
-            if metadata_param_key.startswith("braintrust"):
-                trace_param_key = metadata_param_key.replace("braintrust", "", 1)
-                if trace_param_key in metadata:
-                    verbose_logger.warning(
-                        f"Overwriting Braintrust `{trace_param_key}` from request header"
-                    )
-                else:
-                    verbose_logger.debug(
-                        f"Found Braintrust `{trace_param_key}` in request header"
-                    )
-                metadata[trace_param_key] = proxy_headers.get(metadata_param_key)
-
-        return metadata
-
     async def create_default_project_and_experiment(self):
         project = await self.global_braintrust_http_handler.post(
             f"{self.api_base}/project", headers=self.headers, json={"name": "litellm"}
@@ -172,6 +135,7 @@ def log_success_event( # noqa: PLR0915
             litellm_call_id = kwargs.get("litellm_call_id")
             standard_logging_object = kwargs.get("standard_logging_object", {})
             prompt = {"messages": kwargs.get("messages")}
+
             output = None
             choices = []
             if response_obj is not None and (
@@ -276,6 +240,7 @@ async def async_log_success_event( # noqa: PLR0915
         verbose_logger.debug("REACHES BRAINTRUST SUCCESS")
         try:
             litellm_call_id = kwargs.get("litellm_call_id")
+            standard_logging_object = kwargs.get("standard_logging_object", {})
             prompt = {"messages": kwargs.get("messages")}
             output = None
             choices = []
@@ -300,32 +265,14 @@ async def async_log_success_event( # noqa: PLR0915
                     output = response_obj["data"]
 
             litellm_params = kwargs.get("litellm_params", {})
-            metadata = (
-                litellm_params.get("metadata", {}) or {}
-            )  # if litellm_params['metadata'] == None
-            metadata = self.add_metadata_from_header(litellm_params, metadata)
+            dynamic_metadata = litellm_params.get("dynamic_metadata", {}) or {}
+
             clean_metadata = {}
-            new_metadata = {}
-            for key, value in metadata.items():
-                if (
-                    isinstance(value, list)
-                    or isinstance(value, str)
-                    or isinstance(value, int)
-                    or isinstance(value, float)
-                ):
-                    new_metadata[key] = value
-                elif isinstance(value, BaseModel):
-                    new_metadata[key] = value.model_dump_json()
-                elif isinstance(value, dict):
-                    for k, v in value.items():
-                        if isinstance(v, datetime):
-                            value[k] = v.isoformat()
-                    new_metadata[key] = value
 
             # Get project_id from metadata or create default if needed
-            project_id = metadata.get("project_id")
+            project_id = dynamic_metadata.get("project_id")
             if project_id is None:
-                project_name = metadata.get("project_name")
+                project_name = dynamic_metadata.get("project_name")
                 project_id = (
                     await self.get_project_id_async(project_name)
                     if project_name
@@ -338,8 +285,8 @@ async def async_log_success_event( # noqa: PLR0915
                 project_id = self.default_project_id
 
             tags = []
-            if isinstance(metadata, dict):
-                for key, value in metadata.items():
+            if isinstance(dynamic_metadata, dict):
+                for key, value in dynamic_metadata.items():
                     # generate langfuse tags - Default Tags sent to Langfuse from LiteLLM Proxy
                     if (
                         litellm.langfuse_default_tags is not None
@@ -348,25 +295,7 @@ async def async_log_success_event( # noqa: PLR0915
                     ):
                         tags.append(f"{key}:{value}")
 
-                    # clean litellm metadata before logging
-                    if key in [
-                        "headers",
-                        "endpoint",
-                        "caching_groups",
-                        "previous_models",
-                    ]:
-                        continue
-                    else:
-                        clean_metadata[key] = value
-
             cost = kwargs.get("response_cost", None)
-            if cost is not None:
-                clean_metadata["litellm_response_cost"] = cost
-
-            # metadata.model is required for braintrust to calculate the "Estimated cost" metric
-            litellm_model = kwargs.get("model", None)
-            if litellm_model is not None:
-                clean_metadata["model"] = litellm_model
 
             metrics: Optional[dict] = None
             usage_obj = getattr(response_obj, "usage", None)
@@ -394,13 +323,13 @@ async def async_log_success_event( # noqa: PLR0915
                 )
 
             # Allow metadata override for span name
-            span_name = metadata.get("span_name", "Chat Completion")
+            span_name = dynamic_metadata.get("span_name", "Chat Completion")
 
             request_data = {
                 "id": litellm_call_id,
                 "input": prompt["messages"],
                 "output": output,
-                "metadata": filter_json_serializable(clean_metadata),
+                "metadata": standard_logging_object,
                 "tags": tags,
                 "span_attributes": {"name": span_name, "type": "llm"},
             }
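
Taken together, these hunks drop the header-derived metadata path (add_metadata_from_header and the clean_metadata filtering) in favor of two inputs: dynamic_metadata, read from litellm_params and used only for routing values such as project_id, project_name, span_name, and tag generation, and standard_logging_object, read from kwargs and forwarded to Braintrust unchanged as the span's metadata. The sketch below is illustrative only, not the file's code: it condenses the project/span selection that the updated async_log_success_event performs, with the default-project fallback collapsed into one branch; the resolve_braintrust_target name and the bare `logger` parameter are placeholders for the BraintrustLogger instance.

from typing import Optional, Tuple


async def resolve_braintrust_target(logger, litellm_params: dict) -> Tuple[Optional[str], str]:
    """Condensed sketch of the updated lookup: project and span name now come
    from litellm_params["dynamic_metadata"]; `logger` stands in for the
    BraintrustLogger instance (get_project_id_async / default_project_id)."""
    dynamic_metadata = litellm_params.get("dynamic_metadata", {}) or {}

    project_id = dynamic_metadata.get("project_id")
    if project_id is None:
        project_name = dynamic_metadata.get("project_name")
        if project_name:
            # explicit project name: resolve (or register) it via the API
            project_id = await logger.get_project_id_async(project_name)
        else:
            # otherwise fall back to the lazily created "litellm" default project
            project_id = logger.default_project_id

    span_name = dynamic_metadata.get("span_name", "Chat Completion")
    return project_id, span_name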