
Commit 4da94db

style: Format files with ruff
1 parent 7f68214 commit 4da94db

File tree: 4 files changed (+121, -42 lines)

posthog/ai/otel/exporter.py

Lines changed: 26 additions & 9 deletions
@@ -212,9 +212,7 @@ def _span_to_event(self, span: ReadableSpan) -> Optional[Dict[str, Any]]:
         trace_id = self._format_trace_id_as_uuid(span.context.trace_id)
         # Span IDs remain as hex (no dashes needed)
         span_id = format(span.context.span_id, "016x")
-        parent_span_id = (
-            format(span.parent.span_id, "016x") if span.parent else None
-        )
+        parent_span_id = format(span.parent.span_id, "016x") if span.parent else None

         # Check for error status
         is_error = span.status.status_code == StatusCode.ERROR if span.status else False
@@ -223,7 +221,14 @@ def _span_to_event(self, span: ReadableSpan) -> Optional[Dict[str, Any]]:
         # Model request span → $ai_generation
         if self._is_generation_span(span_name, attrs):
             return self._create_generation_event(
-                span, attrs, trace_id, span_id, parent_span_id, latency, is_error, error_message
+                span,
+                attrs,
+                trace_id,
+                span_id,
+                parent_span_id,
+                latency,
+                is_error,
+                error_message,
             )

         # Agent run span → skip (PostHog UI auto-creates trace wrapper from generation events)
@@ -234,13 +239,27 @@ def _span_to_event(self, span: ReadableSpan) -> Optional[Dict[str, Any]]:
         # Tool execution span → $ai_span
         if self._is_tool_span(span_name, attrs):
             return self._create_tool_span_event(
-                span, attrs, trace_id, span_id, parent_span_id, latency, is_error, error_message
+                span,
+                attrs,
+                trace_id,
+                span_id,
+                parent_span_id,
+                latency,
+                is_error,
+                error_message,
             )

         # Generic span that might be part of AI workflow
         if self._is_ai_related_span(span_name, attrs):
             return self._create_span_event(
-                span, attrs, trace_id, span_id, parent_span_id, latency, is_error, error_message
+                span,
+                attrs,
+                trace_id,
+                span_id,
+                parent_span_id,
+                latency,
+                is_error,
+                error_message,
             )

         return None
@@ -507,9 +526,7 @@ def _create_span_event(

         return {"name": "$ai_span", "properties": properties}

-    def _parse_json_attr(
-        self, value: Optional[Union[str, Any]]
-    ) -> Optional[Any]:
+    def _parse_json_attr(self, value: Optional[Union[str, Any]]) -> Optional[Any]:
         """Parse a JSON string attribute, returning the value as-is if already parsed."""
         if value is None:
             return None
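
For orientation (not part of this commit): an exporter like the one above is normally registered with an OpenTelemetry TracerProvider. The sketch below assumes the import path posthog.ai.otel, a standard PostHog client, and a BatchSpanProcessor; the constructor arguments (client, distinct_id, privacy_mode) are the ones exercised by the tests in this commit.

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

from posthog import Posthog
from posthog.ai.otel import PostHogSpanExporter  # import path is an assumption

posthog_client = Posthog("<project_api_key>", host="https://us.i.posthog.com")

# Constructor arguments mirror the tests in this commit.
exporter = PostHogSpanExporter(
    posthog_client, distinct_id="user_123", privacy_mode=False
)

provider = TracerProvider()
provider.add_span_processor(BatchSpanProcessor(exporter))
trace.set_tracer_provider(provider)

# GenAI spans recorded under this provider flow through _span_to_event and are
# captured as $ai_generation / $ai_span events on the PostHog client.
tracer = trace.get_tracer("my-app")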

posthog/ai/pydantic_ai/exporter.py

Lines changed: 18 additions & 10 deletions
@@ -93,7 +93,9 @@ def _transform_span(self, span: ReadableSpan) -> ReadableSpan:
             normalized = self._normalize_messages(input_msgs)
             if normalized != input_msgs:
                 attrs["gen_ai.input.messages"] = (
-                    json.dumps(normalized) if isinstance(normalized, list) else normalized
+                    json.dumps(normalized)
+                    if isinstance(normalized, list)
+                    else normalized
                 )
                 modified = True

@@ -103,7 +105,9 @@ def _transform_span(self, span: ReadableSpan) -> ReadableSpan:
             normalized = self._normalize_messages(output_msgs)
             if normalized != output_msgs:
                 attrs["gen_ai.output.messages"] = (
-                    json.dumps(normalized) if isinstance(normalized, list) else normalized
+                    json.dumps(normalized)
+                    if isinstance(normalized, list)
+                    else normalized
                 )
                 modified = True

@@ -176,20 +180,24 @@ def _normalize_pydantic_message(self, msg: Dict[str, Any]) -> Dict[str, Any]:
             if part_type == "text" and "content" in part:
                 text_parts.append(str(part["content"]))
             elif part_type == "tool_call":
-                tool_calls.append({
-                    "id": part.get("id", ""),
-                    "type": "function",
-                    "function": {
-                        "name": part.get("name", ""),
-                        "arguments": part.get("arguments", "{}"),
+                tool_calls.append(
+                    {
+                        "id": part.get("id", ""),
+                        "type": "function",
+                        "function": {
+                            "name": part.get("name", ""),
+                            "arguments": part.get("arguments", "{}"),
+                        },
                     }
-                })
+                )

         # Build normalized message
         normalized: Dict[str, Any] = {"role": role}

         if text_parts:
-            normalized["content"] = "\n".join(text_parts) if len(text_parts) > 1 else text_parts[0]
+            normalized["content"] = (
+                "\n".join(text_parts) if len(text_parts) > 1 else text_parts[0]
+            )
         elif not tool_calls:
             normalized["content"] = ""

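
To make the reformatted _normalize_pydantic_message hunk above easier to read, here is a standalone sketch of the same part-to-message mapping. The input and output shapes are taken from this commit's diff and tests; the default role and the final tool_calls placement are assumptions, not the exporter's verbatim code.

from typing import Any, Dict, List


def normalize_pydantic_message(msg: Dict[str, Any]) -> Dict[str, Any]:
    """Simplified sketch of the pydantic-ai part -> OpenAI-style message mapping."""
    role = msg.get("role", "user")  # default role is an assumption
    text_parts: List[str] = []
    tool_calls: List[Dict[str, Any]] = []

    for part in msg.get("parts", []):
        part_type = part.get("type")
        if part_type == "text" and "content" in part:
            text_parts.append(str(part["content"]))
        elif part_type == "tool_call":
            tool_calls.append(
                {
                    "id": part.get("id", ""),
                    "type": "function",
                    "function": {
                        "name": part.get("name", ""),
                        "arguments": part.get("arguments", "{}"),
                    },
                }
            )

    normalized: Dict[str, Any] = {"role": role}
    if text_parts:
        normalized["content"] = (
            "\n".join(text_parts) if len(text_parts) > 1 else text_parts[0]
        )
    elif not tool_calls:
        normalized["content"] = ""
    if tool_calls:  # placement of tool_calls on the message is an assumption
        normalized["tool_calls"] = tool_calls
    return normalized


# Matches the shape asserted in the tests below:
example = {
    "parts": [{"content": "Hello, how are you?", "type": "text"}],
    "role": "user",
}
print(normalize_pydantic_message(example))
# -> {'role': 'user', 'content': 'Hello, how are you?'}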

posthog/test/ai/otel/test_exporter.py

Lines changed: 57 additions & 18 deletions
@@ -111,14 +111,19 @@ def test_is_agent_span(self, mock_client):
         exporter = PostHogSpanExporter(mock_client)
         assert exporter._is_agent_span("agent run", {}) is True
         assert exporter._is_agent_span("invoke_agent", {}) is True
-        assert exporter._is_agent_span("some_span", {"gen_ai.agent.name": "test"})  # truthy
+        assert exporter._is_agent_span(
+            "some_span", {"gen_ai.agent.name": "test"}
+        )  # truthy
         assert not exporter._is_agent_span("some_span", {})

     def test_is_tool_span(self, mock_client):
         exporter = PostHogSpanExporter(mock_client)
         assert exporter._is_tool_span("execute_tool get_weather", {}) is True
         assert exporter._is_tool_span("running tools", {}) is True
-        assert exporter._is_tool_span("some_span", {"gen_ai.tool.name": "get_weather"}) is True
+        assert (
+            exporter._is_tool_span("some_span", {"gen_ai.tool.name": "get_weather"})
+            is True
+        )
         assert exporter._is_tool_span("model call", {}) is False


@@ -135,8 +140,12 @@ def test_basic_generation_event(self, mock_client):
                 "gen_ai.system": "openai",
                 "gen_ai.usage.input_tokens": 100,
                 "gen_ai.usage.output_tokens": 50,
-                "gen_ai.input.messages": json.dumps([{"role": "user", "content": "Hello"}]),
-                "gen_ai.output.messages": json.dumps([{"role": "assistant", "content": "Hi!"}]),
+                "gen_ai.input.messages": json.dumps(
+                    [{"role": "user", "content": "Hello"}]
+                ),
+                "gen_ai.output.messages": json.dumps(
+                    [{"role": "assistant", "content": "Hi!"}]
+                ),
             },
         )

@@ -178,14 +187,20 @@ def test_generation_event_with_error(self, mock_client):
         assert props["$ai_error"] == "Rate limit exceeded"

     def test_generation_event_privacy_mode(self, mock_client):
-        exporter = PostHogSpanExporter(mock_client, distinct_id="user_123", privacy_mode=True)
+        exporter = PostHogSpanExporter(
+            mock_client, distinct_id="user_123", privacy_mode=True
+        )

         span = create_mock_span(
             name="chat openai",
             attributes={
                 "gen_ai.request.model": "gpt-4",
-                "gen_ai.input.messages": json.dumps([{"role": "user", "content": "Secret data"}]),
-                "gen_ai.output.messages": json.dumps([{"role": "assistant", "content": "Response"}]),
+                "gen_ai.input.messages": json.dumps(
+                    [{"role": "user", "content": "Secret data"}]
+                ),
+                "gen_ai.output.messages": json.dumps(
+                    [{"role": "assistant", "content": "Response"}]
+                ),
             },
         )

@@ -224,7 +239,9 @@ class TestAgentSpanHandling:
     def test_agent_span_is_skipped(self, mock_client):
         exporter = PostHogSpanExporter(mock_client, distinct_id="user_123")

-        span = create_mock_span(name="agent run", attributes={"gen_ai.agent.name": "TestAgent"})
+        span = create_mock_span(
+            name="agent run", attributes={"gen_ai.agent.name": "TestAgent"}
+        )

         exporter.export([span])

@@ -250,7 +267,9 @@ def test_basic_tool_span_event(self, mock_client):
             name="execute_tool get_weather",
             attributes={
                 "gen_ai.tool.name": "get_weather",
-                "gen_ai.tool.call.arguments": json.dumps({"latitude": 37.7749, "longitude": -122.4194}),
+                "gen_ai.tool.call.arguments": json.dumps(
+                    {"latitude": 37.7749, "longitude": -122.4194}
+                ),
                 "gen_ai.tool.call.result": "Sunny, 72°F",
             },
             parent_span_id=0xABCDEF1234567890,
@@ -266,11 +285,16 @@ def test_basic_tool_span_event(self, mock_client):
         assert "$ai_trace_id" in props
         assert "$ai_span_id" in props
         assert "$ai_parent_id" in props
-        assert props["$ai_tool_arguments"] == {"latitude": 37.7749, "longitude": -122.4194}
+        assert props["$ai_tool_arguments"] == {
+            "latitude": 37.7749,
+            "longitude": -122.4194,
+        }
         assert props["$ai_tool_result"] == "Sunny, 72°F"

     def test_tool_span_privacy_mode(self, mock_client):
-        exporter = PostHogSpanExporter(mock_client, distinct_id="user_123", privacy_mode=True)
+        exporter = PostHogSpanExporter(
+            mock_client, distinct_id="user_123", privacy_mode=True
+        )

         span = create_mock_span(
             name="execute_tool get_weather",
@@ -294,7 +318,9 @@ class TestDistinctIdHandling:
     def test_distinct_id_from_constructor(self, mock_client):
         exporter = PostHogSpanExporter(mock_client, distinct_id="configured_user")

-        span = create_mock_span(name="chat openai", attributes={"gen_ai.request.model": "gpt-4"})
+        span = create_mock_span(
+            name="chat openai", attributes={"gen_ai.request.model": "gpt-4"}
+        )

         exporter.export([span])

@@ -326,12 +352,17 @@ def test_distinct_id_fallback_to_trace_id(self, mock_client):

         exporter.export([span])

-        assert mock_client.capture.call_args[1]["distinct_id"] == "abcdef1234567890abcdef1234567890"
+        assert (
+            mock_client.capture.call_args[1]["distinct_id"]
+            == "abcdef1234567890abcdef1234567890"
+        )

     def test_process_person_profile_false_when_no_distinct_id(self, mock_client):
         exporter = PostHogSpanExporter(mock_client)

-        span = create_mock_span(name="chat openai", attributes={"gen_ai.request.model": "gpt-4"})
+        span = create_mock_span(
+            name="chat openai", attributes={"gen_ai.request.model": "gpt-4"}
+        )

         exporter.export([span])

@@ -349,7 +380,9 @@ def test_additional_properties_included(self, mock_client):
             properties={"$ai_session_id": "session_abc", "custom_prop": "value"},
         )

-        span = create_mock_span(name="chat openai", attributes={"gen_ai.request.model": "gpt-4"})
+        span = create_mock_span(
+            name="chat openai", attributes={"gen_ai.request.model": "gpt-4"}
+        )

         exporter.export([span])

@@ -364,7 +397,9 @@ def test_groups_included(self, mock_client):
             groups={"company": "posthog", "team": "product"},
         )

-        span = create_mock_span(name="chat openai", attributes={"gen_ai.request.model": "gpt-4"})
+        span = create_mock_span(
+            name="chat openai", attributes={"gen_ai.request.model": "gpt-4"}
+        )

         exporter.export([span])

@@ -380,7 +415,9 @@ class TestExportResult:
     def test_export_returns_success(self, mock_client):
         exporter = PostHogSpanExporter(mock_client, distinct_id="user_123")

-        span = create_mock_span(name="chat openai", attributes={"gen_ai.request.model": "gpt-4"})
+        span = create_mock_span(
+            name="chat openai", attributes={"gen_ai.request.model": "gpt-4"}
+        )

         result = exporter.export([span])

@@ -390,7 +427,9 @@ def test_export_handles_exceptions_gracefully(self, mock_client):
         mock_client.capture.side_effect = Exception("Network error")
         exporter = PostHogSpanExporter(mock_client, distinct_id="user_123")

-        span = create_mock_span(name="chat openai", attributes={"gen_ai.request.model": "gpt-4"})
+        span = create_mock_span(
+            name="chat openai", attributes={"gen_ai.request.model": "gpt-4"}
+        )

         result = exporter.export([span])

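
The tests above build spans with a create_mock_span helper that sits outside this diff. Below is a minimal sketch of what such a helper could look like, assuming a MagicMock stand-in for ReadableSpan, the span fields read by _span_to_event (context.trace_id, context.span_id, parent, status, attributes), and a default trace id matching the one asserted in test_distinct_id_fallback_to_trace_id; the timestamp fields and defaults are assumptions.

from unittest.mock import MagicMock

from opentelemetry.trace import StatusCode


def create_mock_span(name, attributes=None, parent_span_id=None):
    """Hypothetical reconstruction of the test helper; field choices are assumptions."""
    span = MagicMock()
    span.name = name
    span.attributes = attributes or {}

    # Integer IDs that the exporter formats as hex / UUID strings.
    span.context.trace_id = 0xABCDEF1234567890ABCDEF1234567890
    span.context.span_id = 0x1234567890ABCDEF

    if parent_span_id is not None:
        span.parent.span_id = parent_span_id
    else:
        span.parent = None

    # Default to an OK status so is_error evaluates to False.
    span.status.status_code = StatusCode.OK

    # Nanosecond timestamps, in case latency is derived from them (assumption).
    span.start_time = 1_700_000_000_000_000_000
    span.end_time = 1_700_000_000_500_000_000
    return span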

posthog/test/ai/pydantic_ai/test_exporter.py

Lines changed: 20 additions & 5 deletions
@@ -67,7 +67,10 @@ def test_normalize_simple_text_message(self, mock_client):
         exporter = PydanticAISpanExporter(mock_client, distinct_id="user_123")

         pydantic_format = [
-            {"parts": [{"content": "Hello, how are you?", "type": "text"}], "role": "user"}
+            {
+                "parts": [{"content": "Hello, how are you?", "type": "text"}],
+                "role": "user",
+            }
         ]
         result = exporter._normalize_messages(pydantic_format)

@@ -168,7 +171,9 @@ def test_normalize_preserves_finish_reason(self, mock_client):
         ]
         result = exporter._normalize_messages(pydantic_format)

-        assert result == [{"content": "Done!", "role": "assistant", "finish_reason": "stop"}]
+        assert result == [
+            {"content": "Done!", "role": "assistant", "finish_reason": "stop"}
+        ]

     def test_normalize_already_openai_format(self, mock_client):
         exporter = PydanticAISpanExporter(mock_client, distinct_id="user_123")
@@ -301,7 +306,12 @@ def test_export_normalizes_and_captures(self, mock_client):
         exporter = PydanticAISpanExporter(mock_client, distinct_id="user_123")

         pydantic_input = json.dumps(
-            [{"parts": [{"content": "What's the weather?", "type": "text"}], "role": "user"}]
+            [
+                {
+                    "parts": [{"content": "What's the weather?", "type": "text"}],
+                    "role": "user",
+                }
+            ]
         )
         pydantic_output = json.dumps(
             [
@@ -329,7 +339,9 @@ def test_export_normalizes_and_captures(self, mock_client):
         mock_client.capture.assert_called_once()

         props = mock_client.capture.call_args[1]["properties"]
-        assert props["$ai_input"] == [{"content": "What's the weather?", "role": "user"}]
+        assert props["$ai_input"] == [
+            {"content": "What's the weather?", "role": "user"}
+        ]
         assert props["$ai_output_choices"] == [
             {"content": "It's sunny!", "role": "assistant", "finish_reason": "stop"}
         ]
@@ -440,7 +452,10 @@ def test_does_not_overwrite_existing_genai_attributes(self, mock_client):
         transformed = exporter._transform_span(span)

         # Should preserve existing GenAI attribute, not overwrite
-        assert transformed.attributes["gen_ai.tool.call.arguments"] == '{"existing": "value"}'
+        assert (
+            transformed.attributes["gen_ai.tool.call.arguments"]
+            == '{"existing": "value"}'
+        )

     def test_tool_span_export_with_mapped_attributes(self, mock_client):
         exporter = PydanticAISpanExporter(mock_client, distinct_id="user_123")
