Commit 36d637a

chore: isort black

1 parent 9680a5e

6 files changed: +83 −41 lines
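
The commit message names the two formatters but not how they were invoked. A minimal sketch of an equivalent run through the tools' Python APIs (a hypothetical reconstruction: isort and black are the real libraries, but line_length=120 is inferred from where the diffs below wrap, not from any config visible in this commit):

import pathlib

import black
import isort

# Re-apply import sorting and code formatting to every Python file,
# roughly what a "chore: isort black" commit does.
for path in pathlib.Path(".").rglob("*.py"):
    source = path.read_text()
    source = isort.code(source)  # sort and group imports
    source = black.format_str(source, mode=black.Mode(line_length=120))  # line length inferred, not confirmed
    path.write_text(source)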

llm_observability_examples.py

Lines changed: 25 additions & 9 deletions
@@ -219,14 +219,27 @@ def tool_call_openai_call(distinct_id, trace_id, properties, groups):
     response = openai_client.chat.completions.create(
         model="gpt-4o-mini",
         messages=[{"role": "user", "content": "What's the weather in San Francisco?"}],
-        tools=[{"type": "function", "function": {"name": "get_weather", "description": "Get weather", "parameters": {
-            "type": "object",
-            "properties": {
-                "location": {"type": "string", "description": "The location to get the weather for"},
-                "unit": {"type": "string", "description": "The unit of temperature to return the weather in", "enum": ["celsius", "fahrenheit"]}
-            },
-            "required": ["location", "unit"]
-        }}}],
+        tools=[
+            {
+                "type": "function",
+                "function": {
+                    "name": "get_weather",
+                    "description": "Get weather",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {
+                            "location": {"type": "string", "description": "The location to get the weather for"},
+                            "unit": {
+                                "type": "string",
+                                "description": "The unit of temperature to return the weather in",
+                                "enum": ["celsius", "fahrenheit"],
+                            },
+                        },
+                        "required": ["location", "unit"],
+                    },
+                },
+            }
+        ],
         posthog_distinct_id=distinct_id,
         posthog_trace_id=trace_id,
         posthog_properties=properties,
@@ -240,7 +253,9 @@ def streaming_tool_call_openai_call(distinct_id, trace_id, properties, groups):
     response = openai_client.chat.completions.create(
         model="gpt-4o-mini",
         messages=[{"role": "user", "content": "What's the weather in San Francisco?"}],
-        tools=[{"type": "function", "function": {"name": "get_weather", "description": "Get weather", "parameters": {}}}],
+        tools=[
+            {"type": "function", "function": {"name": "get_weather", "description": "Get weather", "parameters": {}}}
+        ],
         stream=True,
         posthog_distinct_id=distinct_id,
         posthog_trace_id=trace_id,
@@ -254,6 +269,7 @@ def streaming_tool_call_openai_call(distinct_id, trace_id, properties, groups):
 
     return response
 
+
 # HOW TO RUN:
 # comment out one of these to run the other
 
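Both hunks above are standard black behavior: once the one-line tools=[...] argument exceeds the line-length limit, black explodes the collection and leaves trailing commas, and that "magic trailing comma" keeps the structure exploded on later runs. A small illustration (hypothetical snippet, not code from this repo; assumes black is installed):

import black

short = 'tools = [{"type": "function"}]\n'
exploded = 'tools = [\n    {"type": "function"},\n]\n'

# Under the limit and without a trailing comma, black keeps one line.
assert black.format_str(short, mode=black.Mode()) == short

# The trailing comma tells black to keep the list exploded.
assert black.format_str(exploded, mode=black.Mode()) == exploded
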
posthog/ai/openai/openai.py

Lines changed: 4 additions & 2 deletions
@@ -114,9 +114,11 @@ def generator():
                         "total_tokens",
                     ]
                 }
-
+
                 # Add support for cached tokens
-                if hasattr(chunk.usage, "prompt_tokens_details") and hasattr(chunk.usage.prompt_tokens_details, "cached_tokens"):
+                if hasattr(chunk.usage, "prompt_tokens_details") and hasattr(
+                    chunk.usage.prompt_tokens_details, "cached_tokens"
+                ):
                     usage_stats["cache_read_input_tokens"] = chunk.usage.prompt_tokens_details.cached_tokens
 
             if hasattr(chunk, "choices") and chunk.choices and len(chunk.choices) > 0:

posthog/ai/openai/openai_async.py

Lines changed: 3 additions & 1 deletion
@@ -114,7 +114,9 @@ async def async_generator():
                 }
 
                 # Add support for cached tokens
-                if hasattr(chunk.usage, "prompt_tokens_details") and hasattr(chunk.usage.prompt_tokens_details, "cached_tokens"):
+                if hasattr(chunk.usage, "prompt_tokens_details") and hasattr(
+                    chunk.usage.prompt_tokens_details, "cached_tokens"
+                ):
                     usage_stats["cache_read_input_tokens"] = chunk.usage.prompt_tokens_details.cached_tokens
 
             if hasattr(chunk, "choices") and chunk.choices and len(chunk.choices) > 0:
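
The same wrap lands in both the sync and async generators; only line width changes, not behavior. As a reading aid, here is the guard's logic as a standalone function (a hypothetical helper of mine, not part of the SDK):

from typing import Any


def cached_tokens(usage: Any) -> int:
    # Read cached_tokens only when the provider reports prompt_tokens_details;
    # getattr defaults express the same check as the hasattr chain above.
    details = getattr(usage, "prompt_tokens_details", None)
    return getattr(details, "cached_tokens", 0) or 0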

posthog/ai/utils.py

Lines changed: 9 additions & 2 deletions
@@ -39,7 +39,9 @@ def get_usage(response, provider: str) -> Dict[str, Any]:
         }
     elif provider == "openai":
         cached_tokens = 0
-        if hasattr(response.usage, "prompt_tokens_details") and hasattr(response.usage.prompt_tokens_details, "cached_tokens"):
+        if hasattr(response.usage, "prompt_tokens_details") and hasattr(
+            response.usage.prompt_tokens_details, "cached_tokens"
+        ):
             cached_tokens = response.usage.prompt_tokens_details.cached_tokens
         return {
             "input_tokens": response.usage.prompt_tokens,
@@ -99,7 +101,12 @@ def format_tool_calls(response, provider: str):
         if hasattr(response, "tools") and response.tools and len(response.tools) > 0:
             return response.tools
     elif provider == "openai":
-        if (hasattr(response, "choices") and response.choices and hasattr(response.choices[0].message, "tool_calls") and response.choices[0].message.tool_calls):
+        if (
+            hasattr(response, "choices")
+            and response.choices
+            and hasattr(response.choices[0].message, "tool_calls")
+            and response.choices[0].message.tool_calls
+        ):
             return response.choices[0].message.tool_calls
     return None
 
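black splits the parenthesized boolean chain in format_tool_calls one clause per line once it crosses the limit; the clauses themselves are untouched. For comparison, the same guard written with early returns (my restructuring, purely illustrative, not the repo's code):

def first_choice_tool_calls(response):
    # Each step guards the next attribute access, mirroring the hasattr chain.
    if not getattr(response, "choices", None):
        return None
    message = response.choices[0].message
    tool_calls = getattr(message, "tool_calls", None)
    return tool_calls if tool_calls else None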

posthog/test/ai/anthropic/test_anthropic.py

Lines changed: 3 additions & 2 deletions
@@ -54,6 +54,7 @@ def stream_generator():
 
     return stream_generator()
 
+
 @pytest.fixture
 def mock_anthropic_response_with_cached_tokens():
     # Create a mock Usage object with cached_tokens in input_tokens_details
@@ -63,7 +64,7 @@ def mock_anthropic_response_with_cached_tokens():
         cache_read_input_tokens=15,
         cache_creation_input_tokens=2,
     )
-
+
     return Message(
         id="msg_123",
         type="message",
@@ -390,4 +391,4 @@ def test_cached_tokens(mock_client, mock_anthropic_response_with_cached_tokens):
     assert props["$ai_cache_creation_input_tokens"] == 2
     assert props["$ai_http_status"] == 200
     assert props["foo"] == "bar"
-    assert isinstance(props["$ai_latency"], float)
+    assert isinstance(props["$ai_latency"], float)

posthog/test/ai/openai/test_openai.py

Lines changed: 39 additions & 25 deletions
@@ -1,16 +1,16 @@
-import time
 import json
-from unittest.mock import patch, MagicMock
+import time
+from unittest.mock import MagicMock, patch
 
 import pytest
 from openai.types.chat import ChatCompletion, ChatCompletionMessage, ToolCall, ToolCallFunction
 from openai.types.chat.chat_completion import Choice
-from openai.types.completion_usage import CompletionUsage
-from openai.types.create_embedding_response import CreateEmbeddingResponse, Usage
-from openai.types.embedding import Embedding
 from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta
 from openai.types.chat.chat_completion_chunk import ToolCall as ChunkToolCall
 from openai.types.chat.chat_completion_chunk import ToolCallFunction as ChunkToolCallFunction
+from openai.types.completion_usage import CompletionUsage
+from openai.types.create_embedding_response import CreateEmbeddingResponse, Usage
+from openai.types.embedding import Embedding
 
 from posthog.ai.openai import OpenAI
 
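This first hunk is pure isort: straight imports sort alphabetically (json before time), the openai.types.* imports regroup, and names within a from-import are ordered case-insensitively. The hunk can be reproduced with isort's public API (default settings assumed; the repo's actual isort config isn't visible in this diff):

import isort

before = (
    "import time\n"
    "import json\n"
    "from unittest.mock import patch, MagicMock\n"
)

print(isort.code(before))
# import json
# import time
# from unittest.mock import MagicMock, patch
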
@@ -255,7 +255,9 @@ def test_error(mock_client, mock_openai_response):
 
 
 def test_cached_tokens(mock_client, mock_openai_response_with_cached_tokens):
-    with patch("openai.resources.chat.completions.Completions.create", return_value=mock_openai_response_with_cached_tokens):
+    with patch(
+        "openai.resources.chat.completions.Completions.create", return_value=mock_openai_response_with_cached_tokens
+    ):
         client = OpenAI(api_key="test-key", posthog_client=mock_client)
         response = client.chat.completions.create(
             model="gpt-4",
@@ -285,12 +287,19 @@ def test_cached_tokens(mock_client, mock_openai_response_with_cached_tokens):
 
 
 def test_tool_calls(mock_client, mock_openai_response_with_tool_calls):
-    with patch("openai.resources.chat.completions.Completions.create", return_value=mock_openai_response_with_tool_calls):
+    with patch(
+        "openai.resources.chat.completions.Completions.create", return_value=mock_openai_response_with_tool_calls
+    ):
         client = OpenAI(api_key="test-key", posthog_client=mock_client)
         response = client.chat.completions.create(
             model="gpt-4",
             messages=[{"role": "user", "content": "What's the weather in San Francisco?"}],
-            tools=[{"type": "function", "function": {"name": "get_weather", "description": "Get weather", "parameters": {}}}],
+            tools=[
+                {
+                    "type": "function",
+                    "function": {"name": "get_weather", "description": "Get weather", "parameters": {}},
+                }
+            ],
             posthog_distinct_id="test-id",
         )
 
@@ -306,23 +315,23 @@ def test_tool_calls(mock_client, mock_openai_response_with_tool_calls):
     assert props["$ai_model"] == "gpt-4"
     assert props["$ai_input"] == [{"role": "user", "content": "What's the weather in San Francisco?"}]
     assert props["$ai_output_choices"] == [{"role": "assistant", "content": "I'll check the weather for you."}]
-
+
     # Check that tool calls are properly captured
     assert "$ai_tools" in props
     tool_calls = props["$ai_tools"]
     assert len(tool_calls) == 1
-
+
     # Verify the tool call details
     tool_call = tool_calls[0]
     assert tool_call.id == "call_abc123"
     assert tool_call.type == "function"
     assert tool_call.function.name == "get_weather"
-
+
     # Verify the arguments
     arguments = tool_call.function.arguments
     parsed_args = json.loads(arguments)
    assert parsed_args == {"location": "San Francisco", "unit": "celsius"}
-
+
     # Check token usage
     assert props["$ai_input_tokens"] == 20
     assert props["$ai_output_tokens"] == 15
@@ -434,53 +443,58 @@ def test_streaming_with_tool_calls(mock_client):
         mock_create.return_value = tool_call_chunks
 
         client = OpenAI(api_key="test-key", posthog_client=mock_client)
-
+
         # Call the streaming method
         response_generator = client.chat.completions.create(
             model="gpt-4",
             messages=[{"role": "user", "content": "What's the weather in San Francisco?"}],
-            tools=[{"type": "function", "function": {"name": "get_weather", "description": "Get weather", "parameters": {}}}],
+            tools=[
+                {
+                    "type": "function",
+                    "function": {"name": "get_weather", "description": "Get weather", "parameters": {}},
+                }
+            ],
             stream=True,
             posthog_distinct_id="test-id",
         )
-
+
         # Consume the generator to trigger the event capture
         chunks = list(response_generator)
-
+
         # Verify the chunks were returned correctly
         assert len(chunks) == 4
         assert chunks == tool_call_chunks
-
+
         # Verify the capture was called with the right arguments
         assert mock_client.capture.call_count == 1
-
+
         call_args = mock_client.capture.call_args[1]
         props = call_args["properties"]
-
+
         assert call_args["distinct_id"] == "test-id"
         assert call_args["event"] == "$ai_generation"
         assert props["$ai_provider"] == "openai"
         assert props["$ai_model"] == "gpt-4"
-
+
         # Check that the tool calls were properly accumulated
         assert "$ai_tools" in props
         tool_calls = props["$ai_tools"]
         assert len(tool_calls) == 1
-
+
         # Verify the complete tool call was properly assembled
         tool_call = tool_calls[0]
         assert tool_call.id == "call_abc123"
         assert tool_call.type == "function"
         assert tool_call.function.name == "get_weather"
-
+
         # Verify the arguments were concatenated correctly
         arguments = tool_call.function.arguments
         parsed_args = json.loads(arguments)
         assert parsed_args == {"location": "San Francisco", "unit": "celsius"}
-
+
         # Check that the content was also accumulated
         assert props["$ai_output_choices"][0]["content"] == "The weather in San Francisco is 15°C."
-
+
         # Check token usage
         assert props["$ai_input_tokens"] == 20
-        assert props["$ai_output_tokens"] == 15
+        assert props["$ai_output_tokens"] == 15
