
Commit 79b4fe8

feat: anthropic

1 parent 5a9b19e

File tree: 5 files changed, +72 / -46 lines changed

CHANGELOG.md

Lines changed: 5 additions & 0 deletions

```diff
@@ -1,3 +1,8 @@
+## 3.8.4 - 2025-01-17
+
+1. Add Anthropic support for LLM Observability.
+2. Update LLM Observability to use output_choices.
+
 ## 3.8.3 - 2025-01-14
 
 1. Fix setuptools to include the `posthog.ai.openai` and `posthog.ai.langchain` packages for the `posthoganalytics` package.
```
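For orientation, here is a minimal usage sketch of the new Anthropic support. The `Anthropic` import path and the `posthog_*` keyword names are taken from this commit; the `posthog_client` constructor argument and the `messages.create` call shape are assumptions based on the test fixtures and the upstream Anthropic SDK, so treat this as illustrative rather than definitive.

```python
# Illustrative sketch only; `posthog_client=` and the messages.create shape
# are assumptions, not confirmed by this commit.
from posthog import Posthog
from posthog.ai.anthropic import Anthropic  # import path confirmed by the tests below

posthog = Posthog("<ph_project_api_key>", host="https://us.i.posthog.com")
client = Anthropic(posthog_client=posthog)  # also needs ANTHROPIC_API_KEY in the env

response = client.messages.create(
    model="claude-3-opus-20240229",
    max_tokens=64,
    messages=[{"role": "user", "content": "Hello"}],
    posthog_distinct_id="user_123",     # keyword confirmed by the stream() signature below
    posthog_properties={"foo": "bar"},  # surfaces as extra properties on the $ai event
)
```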

posthog/ai/anthropic/anthropic.py

Lines changed: 15 additions & 8 deletions

```diff
@@ -80,13 +80,16 @@ def create(
 
     def stream(
         self,
-        posthog_distinct_id: Optional[str],
-        posthog_trace_id: Optional[str],
-        posthog_properties: Optional[Dict[str, Any]],
-        posthog_privacy_mode: bool,
-        posthog_groups: Optional[Dict[str, Any]],
+        posthog_distinct_id: Optional[str] = None,
+        posthog_trace_id: Optional[str] = None,
+        posthog_properties: Optional[Dict[str, Any]] = None,
+        posthog_privacy_mode: bool = False,
+        posthog_groups: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ):
+        if posthog_trace_id is None:
+            posthog_trace_id = uuid.uuid4()
+
         return self._create_streaming(
             posthog_distinct_id,
             posthog_trace_id,
@@ -106,7 +109,7 @@ def _create_streaming(
         **kwargs: Any,
     ):
         start_time = time.time()
-        usage_stats: Dict[str, int] = {}
+        usage_stats: Dict[str, int] = {"input_tokens": 0, "output_tokens": 0}
         accumulated_content = []
         response = super().create(**kwargs)
 
@@ -117,8 +120,11 @@ def generator():
             for event in response:
                 if hasattr(event, "usage") and event.usage:
                     usage_stats = {
-                        "input_tokens": getattr(event.usage, "input_tokens", 0),
-                        "output_tokens": getattr(event.usage, "output_tokens", 0),
+                        k: getattr(event.usage, k, 0)
+                        for k in [
+                            "input_tokens",
+                            "output_tokens",
+                        ]
                     }
 
                 if hasattr(event, "content") and event.content:
@@ -130,6 +136,7 @@ def generator():
             end_time = time.time()
             latency = end_time - start_time
             output = "".join(accumulated_content)
+
             self._capture_streaming_event(
                 posthog_distinct_id,
                 posthog_trace_id,
```
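Two behavioral points in these hunks are worth calling out: `stream()` now falls back to a generated `uuid.uuid4()` trace id when none is passed, and `usage_stats` starts from zeroed counters instead of an empty dict, so the capture call never sees missing keys. Below is a self-contained sketch of the accumulation pattern the `generator()` hunk uses, with `SimpleNamespace` standing in for a real stream event:

```python
from types import SimpleNamespace

# Stand-in for a stream event; only .usage is read here, via getattr.
event = SimpleNamespace(usage=SimpleNamespace(input_tokens=20, output_tokens=10))

# Zeroed defaults (the new initial value) so both keys always exist.
usage_stats = {"input_tokens": 0, "output_tokens": 0}

if hasattr(event, "usage") and event.usage:
    # Same dict comprehension as the diff: one place to list the tracked keys.
    usage_stats = {k: getattr(event.usage, k, 0) for k in ["input_tokens", "output_tokens"]}

assert usage_stats == {"input_tokens": 20, "output_tokens": 10}
```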

posthog/ai/utils.py

Lines changed: 23 additions & 10 deletions

```diff
@@ -28,6 +28,23 @@ def get_model_params(kwargs: Dict[str, Any]) -> Dict[str, Any]:
     return model_params
 
 
+def get_usage(response, provider: str) -> Dict[str, Any]:
+    if provider == "anthropic":
+        return {
+            "input_tokens": response.usage.input_tokens,
+            "output_tokens": response.usage.output_tokens,
+        }
+    elif provider == "openai":
+        return {
+            "input_tokens": response.usage.prompt_tokens,
+            "output_tokens": response.usage.completion_tokens,
+        }
+    return {
+        "input_tokens": 0,
+        "output_tokens": 0,
+    }
+
+
 def format_response(response, provider: str):
     """
     Format a regular (non-streaming) response.
@@ -103,19 +120,17 @@ def call_llm_and_track_usage(
         posthog_trace_id = uuid.uuid4()
 
     if response and hasattr(response, "usage"):
-        usage = response.usage.model_dump()
+        usage = get_usage(response, provider)
 
-    input_tokens = usage.get("prompt_tokens", 0)
-    output_tokens = usage.get("completion_tokens", 0)
     event_properties = {
         "$ai_provider": provider,
         "$ai_model": kwargs.get("model"),
         "$ai_model_parameters": get_model_params(kwargs),
         "$ai_input": with_privacy_mode(ph_client, posthog_privacy_mode, kwargs.get("messages")),
         "$ai_output_choices": with_privacy_mode(ph_client, posthog_privacy_mode, format_response(response, provider)),
         "$ai_http_status": http_status,
-        "$ai_input_tokens": input_tokens,
-        "$ai_output_tokens": output_tokens,
+        "$ai_input_tokens": usage.get("input_tokens", 0),
+        "$ai_output_tokens": usage.get("output_tokens", 0),
         "$ai_latency": latency,
         "$ai_trace_id": posthog_trace_id,
         "$ai_base_url": str(base_url),
@@ -171,19 +186,17 @@ async def call_llm_and_track_usage_async(
         posthog_trace_id = uuid.uuid4()
 
     if response and hasattr(response, "usage"):
-        usage = response.usage.model_dump()
+        usage = get_usage(response, provider)
 
-    input_tokens = usage.get("prompt_tokens", 0)
-    output_tokens = usage.get("completion_tokens", 0)
     event_properties = {
         "$ai_provider": provider,
         "$ai_model": kwargs.get("model"),
         "$ai_model_parameters": get_model_params(kwargs),
         "$ai_input": with_privacy_mode(ph_client, posthog_privacy_mode, kwargs.get("messages")),
         "$ai_output_choices": with_privacy_mode(ph_client, posthog_privacy_mode, format_response(response, provider)),
         "$ai_http_status": http_status,
-        "$ai_input_tokens": input_tokens,
-        "$ai_output_tokens": output_tokens,
+        "$ai_input_tokens": usage.get("input_tokens", 0),
+        "$ai_output_tokens": usage.get("output_tokens", 0),
         "$ai_latency": latency,
         "$ai_trace_id": posthog_trace_id,
         "$ai_base_url": str(base_url),
```

posthog/test/ai/anthropic/test_anthropic.py

Lines changed: 28 additions & 27 deletions

```diff
@@ -2,7 +2,7 @@
 from unittest.mock import patch
 
 import pytest
-from anthropic.types import Message, Usage, MessageStreamEvent
+from anthropic.types import Message, Usage
 
 from posthog.ai.anthropic import Anthropic
 
@@ -33,26 +33,23 @@ def mock_anthropic_response():
 
 @pytest.fixture
 def mock_anthropic_stream():
-    yield from [
-        MessageStreamEvent(
-            id="msg_123",
-            type="message",
-            role="assistant",
-            content="A",
-        ),
-        MessageStreamEvent(
-            id="msg_123",
-            type="message",
-            role="assistant",
-            content="B",
-        ),
-        MessageStreamEvent(
-            id="msg_123",
-            type="message",
-            role="assistant",
-            content="C",
-        ),
-    ]
+    class MockStreamEvent:
+        def __init__(self, content, usage=None):
+            self.content = content
+            self.usage = usage
+
+    def stream_generator():
+        yield MockStreamEvent("A")
+        yield MockStreamEvent("B")
+        yield MockStreamEvent(
+            "C",
+            usage=Usage(
+                input_tokens=20,
+                output_tokens=10,
+            ),
+        )
+
+    return stream_generator()
 
 
 def test_basic_completion(mock_client, mock_anthropic_response):
@@ -101,6 +98,9 @@ def test_streaming(mock_client, mock_anthropic_stream):
     assert chunks[0].content == "A"
     assert chunks[1].content == "B"
     assert chunks[2].content == "C"
+
+    # Wait a bit to ensure the capture is called
+    time.sleep(0.1)
     assert mock_client.capture.call_count == 1
 
     call_args = mock_client.capture.call_args[1]
@@ -111,12 +111,11 @@ def test_streaming(mock_client, mock_anthropic_stream):
     assert props["$ai_provider"] == "anthropic"
     assert props["$ai_model"] == "claude-3-opus-20240229"
     assert props["$ai_input"] == [{"role": "user", "content": "Hello"}]
-    assert props["$ai_output_choices"] == [{"role": "assistant", "content": "Test response"}]
+    assert props["$ai_output_choices"] == [{"role": "assistant", "content": "ABC"}]
     assert props["$ai_input_tokens"] == 20
     assert props["$ai_output_tokens"] == 10
-    assert props["$ai_http_status"] == 200
-    assert props["foo"] == "bar"
     assert isinstance(props["$ai_latency"], float)
+    assert props["foo"] == "bar"
 
 
 def test_streaming_with_stream_endpoint(mock_client, mock_anthropic_stream):
@@ -135,6 +134,9 @@ def test_streaming_with_stream_endpoint(mock_client, mock_anthropic_stream):
     assert chunks[0].content == "A"
     assert chunks[1].content == "B"
     assert chunks[2].content == "C"
+
+    # Wait a bit to ensure the capture is called
+    time.sleep(0.1)
     assert mock_client.capture.call_count == 1
 
     call_args = mock_client.capture.call_args[1]
@@ -145,12 +147,11 @@ def test_streaming_with_stream_endpoint(mock_client, mock_anthropic_stream):
     assert props["$ai_provider"] == "anthropic"
     assert props["$ai_model"] == "claude-3-opus-20240229"
     assert props["$ai_input"] == [{"role": "user", "content": "Hello"}]
-    assert props["$ai_output_choices"] == [{"role": "assistant", "content": "Test response"}]
+    assert props["$ai_output_choices"] == [{"role": "assistant", "content": "ABC"}]
     assert props["$ai_input_tokens"] == 20
     assert props["$ai_output_tokens"] == 10
-    assert props["$ai_http_status"] == 200
-    assert props["foo"] == "bar"
     assert isinstance(props["$ai_latency"], float)
+    assert props["foo"] == "bar"
 
 
 def test_groups(mock_client, mock_anthropic_response):
```
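The fixture rewrite replaces real `MessageStreamEvent` construction with a plain local class, which works because the streaming code only probes events defensively via `hasattr`/`getattr`. A self-contained sketch of why lightweight objects suffice:

```python
# Plain objects are enough for the stream tests: the wrapper only reads
# .content and .usage, guarded by hasattr checks, as shown in anthropic.py above.
class MockStreamEvent:
    def __init__(self, content, usage=None):
        self.content = content
        self.usage = usage


events = [MockStreamEvent("A"), MockStreamEvent("B"), MockStreamEvent("C")]
accumulated = [e.content for e in events if hasattr(e, "content") and e.content]
assert "".join(accumulated) == "ABC"  # matches the new $ai_output_choices assertion
```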

posthog/version.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -1,4 +1,4 @@
-VERSION = "3.8.3"
+VERSION = "3.8.4"
 
 if __name__ == "__main__":
     print(VERSION, end="")  # noqa: T201
```
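To confirm which release is installed after upgrading, standard `importlib.metadata` works; nothing here is specific to this commit:

```python
from importlib.metadata import version

print(version("posthog"))  # expect "3.8.4" once this release is installed
```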
