Commit 5990713

fix(braintrust_logging.py): refactor to consistently use standard logging payload
1 parent 9cffabb commit 5990713

File tree

1 file changed: 10 additions, 81 deletions

litellm/integrations/braintrust_logging.py

Lines changed: 10 additions & 81 deletions
@@ -109,43 +109,6 @@ async def get_project_id_async(self, project_name: str) -> str:
         except httpx.HTTPStatusError as e:
             raise Exception(f"Failed to register project: {e.response.text}")
 
-    @staticmethod
-    def add_metadata_from_header(litellm_params: dict, metadata: dict) -> dict:
-        """
-        Adds metadata from proxy request headers to Braintrust logging if keys start with "braintrust_"
-        and overwrites litellm_params.metadata if already included.
-
-        For example if you want to append your trace to an existing `trace_id` via header, send
-        `headers: { ..., langfuse_existing_trace_id: your-existing-trace-id }` via proxy request.
-        """
-        if litellm_params is None:
-            return metadata
-
-        if litellm_params.get("proxy_server_request") is None:
-            return metadata
-
-        if metadata is None:
-            metadata = {}
-
-        proxy_headers = (
-            litellm_params.get("proxy_server_request", {}).get("headers", {}) or {}
-        )
-
-        for metadata_param_key in proxy_headers:
-            if metadata_param_key.startswith("braintrust"):
-                trace_param_key = metadata_param_key.replace("braintrust", "", 1)
-                if trace_param_key in metadata:
-                    verbose_logger.warning(
-                        f"Overwriting Braintrust `{trace_param_key}` from request header"
-                    )
-                else:
-                    verbose_logger.debug(
-                        f"Found Braintrust `{trace_param_key}` in request header"
-                    )
-                metadata[trace_param_key] = proxy_headers.get(metadata_param_key)
-
-        return metadata
-
     async def create_default_project_and_experiment(self):
         project = await self.global_braintrust_http_handler.post(
             f"{self.api_base}/project", headers=self.headers, json={"name": "litellm"}
@@ -172,6 +135,7 @@ def log_success_event(  # noqa: PLR0915
             litellm_call_id = kwargs.get("litellm_call_id")
             standard_logging_object = kwargs.get("standard_logging_object", {})
             prompt = {"messages": kwargs.get("messages")}
+
             output = None
             choices = []
             if response_obj is not None and (
@@ -276,6 +240,7 @@ async def async_log_success_event(  # noqa: PLR0915
         verbose_logger.debug("REACHES BRAINTRUST SUCCESS")
         try:
             litellm_call_id = kwargs.get("litellm_call_id")
+            standard_logging_object = kwargs.get("standard_logging_object", {})
             prompt = {"messages": kwargs.get("messages")}
             output = None
             choices = []
@@ -300,32 +265,14 @@ async def async_log_success_event(  # noqa: PLR0915
                 output = response_obj["data"]
 
             litellm_params = kwargs.get("litellm_params", {})
-            metadata = (
-                litellm_params.get("metadata", {}) or {}
-            )  # if litellm_params['metadata'] == None
-            metadata = self.add_metadata_from_header(litellm_params, metadata)
+            dynamic_metadata = litellm_params.get("dynamic_metadata", {}) or {}
+
             clean_metadata = {}
-            new_metadata = {}
-            for key, value in metadata.items():
-                if (
-                    isinstance(value, list)
-                    or isinstance(value, str)
-                    or isinstance(value, int)
-                    or isinstance(value, float)
-                ):
-                    new_metadata[key] = value
-                elif isinstance(value, BaseModel):
-                    new_metadata[key] = value.model_dump_json()
-                elif isinstance(value, dict):
-                    for k, v in value.items():
-                        if isinstance(v, datetime):
-                            value[k] = v.isoformat()
-                    new_metadata[key] = value
 
             # Get project_id from metadata or create default if needed
-            project_id = metadata.get("project_id")
+            project_id = dynamic_metadata.get("project_id")
             if project_id is None:
-                project_name = metadata.get("project_name")
+                project_name = dynamic_metadata.get("project_name")
                 project_id = (
                     await self.get_project_id_async(project_name)
                     if project_name
@@ -338,8 +285,8 @@ async def async_log_success_event(  # noqa: PLR0915
                 project_id = self.default_project_id
 
             tags = []
-            if isinstance(metadata, dict):
-                for key, value in metadata.items():
+            if isinstance(dynamic_metadata, dict):
+                for key, value in dynamic_metadata.items():
                     # generate langfuse tags - Default Tags sent to Langfuse from LiteLLM Proxy
                     if (
                         litellm.langfuse_default_tags is not None
@@ -348,25 +295,7 @@ async def async_log_success_event(  # noqa: PLR0915
                     ):
                         tags.append(f"{key}:{value}")
 
-                    # clean litellm metadata before logging
-                    if key in [
-                        "headers",
-                        "endpoint",
-                        "caching_groups",
-                        "previous_models",
-                    ]:
-                        continue
-                    else:
-                        clean_metadata[key] = value
-
             cost = kwargs.get("response_cost", None)
-            if cost is not None:
-                clean_metadata["litellm_response_cost"] = cost
-
-            # metadata.model is required for braintrust to calculate the "Estimated cost" metric
-            litellm_model = kwargs.get("model", None)
-            if litellm_model is not None:
-                clean_metadata["model"] = litellm_model
 
             metrics: Optional[dict] = None
             usage_obj = getattr(response_obj, "usage", None)
@@ -394,13 +323,13 @@ async def async_log_success_event(  # noqa: PLR0915
             )
 
             # Allow metadata override for span name
-            span_name = metadata.get("span_name", "Chat Completion")
+            span_name = dynamic_metadata.get("span_name", "Chat Completion")
 
             request_data = {
                 "id": litellm_call_id,
                 "input": prompt["messages"],
                 "output": output,
-                "metadata": filter_json_serializable(clean_metadata),
+                "metadata": standard_logging_object,
                 "tags": tags,
                 "span_attributes": {"name": span_name, "type": "llm"},
             }
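For context, a minimal sketch of what the async success handler assembles after this refactor: the Braintrust span metadata is now the pre-built standard_logging_object (no manual cleaning or serialization), and per-call overrides such as project_id, project_name, and span_name are read from litellm_params["dynamic_metadata"]. The helper name and the kwargs/output/tags harness below are assumptions for illustration; only the key names and lookups come from the diff above.

# Illustrative sketch (not the library's actual method) of the post-refactor
# payload assembly in async_log_success_event. The kwargs shape and the
# build_braintrust_request_data helper name are assumed; the key names and
# lookups mirror the diff above.
def build_braintrust_request_data(kwargs: dict, output, tags: list) -> dict:
    litellm_call_id = kwargs.get("litellm_call_id")
    # The standardized logging payload is forwarded as-is.
    standard_logging_object = kwargs.get("standard_logging_object", {})
    prompt = {"messages": kwargs.get("messages")}

    litellm_params = kwargs.get("litellm_params", {}) or {}
    # Per-call overrides (project, span name) now come from dynamic_metadata.
    dynamic_metadata = litellm_params.get("dynamic_metadata", {}) or {}
    span_name = dynamic_metadata.get("span_name", "Chat Completion")

    return {
        "id": litellm_call_id,
        "input": prompt["messages"],
        "output": output,
        "metadata": standard_logging_object,  # was filter_json_serializable(clean_metadata)
        "tags": tags,
        "span_attributes": {"name": span_name, "type": "llm"},
    }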
