
Commit 4a422ca

fix: support logging dynamic metadata values to braintrust

1 parent: b6f6dc5

2 files changed: +103 −71 lines

litellm/integrations/braintrust_logging.py

Lines changed: 14 additions & 4 deletions
@@ -159,7 +159,7 @@ def log_success_event(  # noqa: PLR0915
             output = response_obj["data"]
 
         litellm_params = kwargs.get("litellm_params", {}) or {}
-        dynamic_metadata = litellm_params.get("dynamic_metadata", {}) or {}
+        dynamic_metadata = litellm_params.get("metadata", {}) or {}
 
         # Get project_id from metadata or create default if needed
         project_id = dynamic_metadata.get("project_id")
@@ -175,6 +175,7 @@ def log_success_event(  # noqa: PLR0915
             project_id = self.default_project_id
 
         tags = []
+
         if isinstance(dynamic_metadata, dict):
             for key, value in dynamic_metadata.items():
                 # generate langfuse tags - Default Tags sent to Langfuse from LiteLLM Proxy
@@ -185,6 +186,11 @@ def log_success_event(  # noqa: PLR0915
                 ):
                     tags.append(f"{key}:{value}")
 
+                if (
+                    isinstance(value, str) and key not in standard_logging_object
+                ):  # support logging dynamic metadata to braintrust
+                    standard_logging_object[key] = value
+
         cost = kwargs.get("response_cost", None)
 
         metrics: Optional[dict] = None
@@ -265,9 +271,7 @@ async def async_log_success_event(  # noqa: PLR0915
             output = response_obj["data"]
 
         litellm_params = kwargs.get("litellm_params", {})
-        dynamic_metadata = litellm_params.get("dynamic_metadata", {}) or {}
-
-        clean_metadata = {}
+        dynamic_metadata = litellm_params.get("metadata", {}) or {}
 
         # Get project_id from metadata or create default if needed
         project_id = dynamic_metadata.get("project_id")
@@ -285,6 +289,7 @@ async def async_log_success_event(  # noqa: PLR0915
             project_id = self.default_project_id
 
         tags = []
+
         if isinstance(dynamic_metadata, dict):
             for key, value in dynamic_metadata.items():
                 # generate langfuse tags - Default Tags sent to Langfuse from LiteLLM Proxy
@@ -295,6 +300,11 @@ async def async_log_success_event(  # noqa: PLR0915
                 ):
                     tags.append(f"{key}:{value}")
 
+                if (
+                    isinstance(value, str) and key not in standard_logging_object
+                ):  # support logging dynamic metadata to braintrust
+                    standard_logging_object[key] = value
+
         cost = kwargs.get("response_cost", None)
 
         metrics: Optional[dict] = None
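
In effect, the fix reads dynamic metadata from litellm_params["metadata"] (the previous "dynamic_metadata" key was never populated), and any string value under it that is not already a field of the standard logging object gets copied onto it, so it reaches the Braintrust event. A minimal sketch of exercising this from the SDK — the callback registration follows LiteLLM's Braintrust integration; the metadata keys below are illustrative, not part of this commit:

    import litellm

    # Register the Braintrust logger (assumes BRAINTRUST_API_KEY is set in the env).
    litellm.callbacks = ["braintrust"]

    # String-valued keys under `metadata` that are not already standard logging
    # fields should now show up on the logged Braintrust event.
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hello"}],
        metadata={
            "project_id": "my-braintrust-project",  # hypothetical project
            "environment": "staging",               # illustrative custom key
            "run_label": "experiment-42",           # illustrative custom key
        },
    )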

tests/test_litellm/integrations/test_braintrust_span_name.py

Lines changed: 89 additions & 67 deletions
@@ -11,7 +11,7 @@
 class TestBraintrustSpanName(unittest.TestCase):
     """Test custom span_name functionality in Braintrust logging."""
 
-    @patch('litellm.integrations.braintrust_logging.HTTPHandler')
+    @patch("litellm.integrations.braintrust_logging.HTTPHandler")
     def test_default_span_name(self, MockHTTPHandler):
         """Test that default span name is 'Chat Completion' when not provided."""
         # Mock HTTP response
@@ -22,39 +22,43 @@ def test_default_span_name(self, MockHTTPHandler):
         # Setup
         logger = BraintrustLogger(api_key="test-key")
         logger.default_project_id = "test-project-id"
-
+
         # Create a properly structured mock response
         response_obj = litellm.ModelResponse(
             id="test-id",
             object="chat.completion",
             created=1234567890,
             model="gpt-3.5-turbo",
-            choices=[{
-                "index": 0,
-                "message": {"role": "assistant", "content": "test response"},
-                "finish_reason": "stop"
-            }],
-            usage={"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30}
+            choices=[
+                {
+                    "index": 0,
+                    "message": {"role": "assistant", "content": "test response"},
+                    "finish_reason": "stop",
+                }
+            ],
+            usage={"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30},
         )
-
+
         kwargs = {
             "litellm_call_id": "test-call-id",
             "messages": [{"role": "user", "content": "test"}],
             "litellm_params": {"metadata": {}},
             "model": "gpt-3.5-turbo",
-            "response_cost": 0.001
+            "response_cost": 0.001,
         }
-
+
         # Execute
         logger.log_success_event(kwargs, response_obj, datetime.now(), datetime.now())
-
+
         # Verify
         call_args = mock_http_handler.post.call_args
         self.assertIsNotNone(call_args)
-        json_data = call_args.kwargs['json']
-        self.assertEqual(json_data['events'][0]['span_attributes']['name'], 'Chat Completion')
+        json_data = call_args.kwargs["json"]
+        self.assertEqual(
+            json_data["events"][0]["span_attributes"]["name"], "Chat Completion"
+        )
 
-    @patch('litellm.integrations.braintrust_logging.HTTPHandler')
+    @patch("litellm.integrations.braintrust_logging.HTTPHandler")
     def test_custom_span_name(self, MockHTTPHandler):
         """Test that custom span name is used when provided in metadata."""
         # Mock HTTP response
@@ -65,39 +69,43 @@ def test_custom_span_name(self, MockHTTPHandler):
         # Setup
         logger = BraintrustLogger(api_key="test-key")
         logger.default_project_id = "test-project-id"
-
+
         # Create a properly structured mock response
         response_obj = litellm.ModelResponse(
             id="test-id",
             object="chat.completion",
             created=1234567890,
             model="gpt-3.5-turbo",
-            choices=[{
-                "index": 0,
-                "message": {"role": "assistant", "content": "test response"},
-                "finish_reason": "stop"
-            }],
-            usage={"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30}
+            choices=[
+                {
+                    "index": 0,
+                    "message": {"role": "assistant", "content": "test response"},
+                    "finish_reason": "stop",
+                }
+            ],
+            usage={"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30},
        )
-
+
         kwargs = {
             "litellm_call_id": "test-call-id",
             "messages": [{"role": "user", "content": "test"}],
             "litellm_params": {"metadata": {"span_name": "Custom Operation"}},
             "model": "gpt-3.5-turbo",
-            "response_cost": 0.001
+            "response_cost": 0.001,
         }
-
+
         # Execute
         logger.log_success_event(kwargs, response_obj, datetime.now(), datetime.now())
-
+
         # Verify
         call_args = mock_http_handler.post.call_args
         self.assertIsNotNone(call_args)
-        json_data = call_args.kwargs['json']
-        self.assertEqual(json_data['events'][0]['span_attributes']['name'], 'Custom Operation')
+        json_data = call_args.kwargs["json"]
+        self.assertEqual(
+            json_data["events"][0]["span_attributes"]["name"], "Custom Operation"
+        )
 
-    @patch('litellm.integrations.braintrust_logging.HTTPHandler')
+    @patch("litellm.integrations.braintrust_logging.HTTPHandler")
     def test_span_name_with_other_metadata(self, MockHTTPHandler):
         """Test that span_name works alongside other metadata fields."""
         # Mock HTTP response
@@ -108,21 +116,23 @@ def test_span_name_with_other_metadata(self, MockHTTPHandler):
         # Setup
         logger = BraintrustLogger(api_key="test-key")
         logger.default_project_id = "test-project-id"
-
+
         # Create a properly structured mock response
         response_obj = litellm.ModelResponse(
             id="test-id",
             object="chat.completion",
             created=1234567890,
             model="gpt-3.5-turbo",
-            choices=[{
-                "index": 0,
-                "message": {"role": "assistant", "content": "test response"},
-                "finish_reason": "stop"
-            }],
-            usage={"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30}
+            choices=[
+                {
+                    "index": 0,
+                    "message": {"role": "assistant", "content": "test response"},
+                    "finish_reason": "stop",
+                }
+            ],
+            usage={"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30},
         )
-
+
         kwargs = {
             "litellm_call_id": "test-call-id",
             "messages": [{"role": "user", "content": "test"}],
@@ -132,34 +142,40 @@ def test_span_name_with_other_metadata(self, MockHTTPHandler):
                     "project_id": "custom-project",
                     "user_id": "user123",
                     "session_id": "session456",
-                    "environment": "production"
+                    "environment": "production",
                 }
             },
             "model": "gpt-3.5-turbo",
-            "response_cost": 0.001
+            "response_cost": 0.001,
+            "standard_logging_object": {
+                "user_id": "user123",
+            },
         }
-
+
         # Execute
         logger.log_success_event(kwargs, response_obj, datetime.now(), datetime.now())
-
+
         # Verify
         call_args = mock_http_handler.post.call_args
         self.assertIsNotNone(call_args)
-        json_data = call_args.kwargs['json']
-
+        json_data = call_args.kwargs["json"]
+
         # Check span name
-        self.assertEqual(json_data['events'][0]['span_attributes']['name'], 'Multi Metadata Test')
-
+        self.assertEqual(
+            json_data["events"][0]["span_attributes"]["name"], "Multi Metadata Test"
+        )
+
         # Check that other metadata is preserved (except for filtered keys)
-        event_metadata = json_data['events'][0]['metadata']
-        self.assertEqual(event_metadata['user_id'], 'user123')
-        self.assertEqual(event_metadata['session_id'], 'session456')
-        self.assertEqual(event_metadata['environment'], 'production')
-
+        event_metadata = json_data["events"][0]["metadata"]
+        print(event_metadata)
+        self.assertEqual(event_metadata["user_id"], "user123")
+        self.assertEqual(event_metadata["session_id"], "session456")
+        self.assertEqual(event_metadata["environment"], "production")
+
         # Span name should be in span_attributes, not in metadata
-        self.assertIn('span_name', event_metadata)  # span_name is also kept in metadata
+        self.assertIn("span_name", event_metadata)  # span_name is also kept in metadata
 
-    @patch('litellm.integrations.braintrust_logging.get_async_httpx_client')
+    @patch("litellm.integrations.braintrust_logging.get_async_httpx_client")
     async def test_async_custom_span_name(self, mock_get_http_handler):
         """Test async logging with custom span name."""
         # Mock async HTTP response
@@ -170,38 +186,44 @@ async def test_async_custom_span_name(self, mock_get_http_handler):
         # Setup
         logger = BraintrustLogger(api_key="test-key")
         logger.default_project_id = "test-project-id"
-
+
         # Create a properly structured mock response
         response_obj = litellm.ModelResponse(
             id="test-id",
             object="chat.completion",
             created=1234567890,
             model="gpt-3.5-turbo",
-            choices=[{
-                "index": 0,
-                "message": {"role": "assistant", "content": "test response"},
-                "finish_reason": "stop"
-            }],
-            usage={"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30}
+            choices=[
+                {
+                    "index": 0,
+                    "message": {"role": "assistant", "content": "test response"},
+                    "finish_reason": "stop",
+                }
+            ],
+            usage={"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30},
         )
-
+
         kwargs = {
             "litellm_call_id": "test-call-id",
             "messages": [{"role": "user", "content": "test"}],
             "litellm_params": {"metadata": {"span_name": "Async Custom Operation"}},
             "model": "gpt-3.5-turbo",
-            "response_cost": 0.001
+            "response_cost": 0.001,
         }
-
+
         # Execute
-        await logger.async_log_success_event(kwargs, response_obj, datetime.now(), datetime.now())
-
+        await logger.async_log_success_event(
+            kwargs, response_obj, datetime.now(), datetime.now()
+        )
+
         # Verify
         call_args = mock_http_handler.post.call_args
         self.assertIsNotNone(call_args)
-        json_data = call_args.kwargs['json']
-        self.assertEqual(json_data['events'][0]['span_attributes']['name'], 'Async Custom Operation')
+        json_data = call_args.kwargs["json"]
+        self.assertEqual(
+            json_data["events"][0]["span_attributes"]["name"], "Async Custom Operation"
+        )
 
 
 if __name__ == "__main__":
-    unittest.main()
+    unittest.main()
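
Note that the updated test seeds kwargs with a "standard_logging_object", since the fix writes dynamic string metadata into that dict. A hypothetical companion test covering the new branch directly might look like the sketch below; the test name and the "environment" key are inventions for illustration, not part of this commit, and it assumes the same imports and ModelResponse setup as the tests above:

    @patch("litellm.integrations.braintrust_logging.HTTPHandler")
    def test_dynamic_metadata_copied(self, MockHTTPHandler):
        """Hypothetical sketch: string metadata keys land in standard_logging_object."""
        logger = BraintrustLogger(api_key="test-key")
        logger.default_project_id = "test-project-id"

        response_obj = litellm.ModelResponse(
            id="test-id",
            object="chat.completion",
            created=1234567890,
            model="gpt-3.5-turbo",
            choices=[
                {
                    "index": 0,
                    "message": {"role": "assistant", "content": "test response"},
                    "finish_reason": "stop",
                }
            ],
            usage={"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30},
        )
        standard_logging_object = {"user_id": "user123"}
        kwargs = {
            "litellm_call_id": "test-call-id",
            "messages": [{"role": "user", "content": "test"}],
            "litellm_params": {"metadata": {"environment": "staging"}},
            "model": "gpt-3.5-turbo",
            "response_cost": 0.001,
            "standard_logging_object": standard_logging_object,
        }

        logger.log_success_event(kwargs, response_obj, datetime.now(), datetime.now())

        # "environment" is a string and not already a standard logging field,
        # so the new branch should have copied it into standard_logging_object.
        self.assertEqual(standard_logging_object.get("environment"), "staging")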
