
Commit 09b9b5d

bug(llmo): fix anthropic tool call response (#297)
* bug(llmo): fix anthropic's tool call response
* bug(llmo): fix tool calls response handling for anthropic
* bug(llmo): run formatter
* bug(llmo): bump version
* bug(llmo): add date to changelog
1 parent 5a52af6 commit 09b9b5d


4 files changed: +98 additions, −4 deletions


CHANGELOG.md

Lines changed: 4 additions & 0 deletions
@@ -1,3 +1,7 @@
+# 6.3.2 - 2025-07-31
+
+- fix: Anthropic's tool calls are now handled properly
+
 # 6.3.0 - 2025-07-22
 
 - feat: Enhanced `send_feature_flags` parameter to accept `SendFeatureFlagsOptions` object for declarative control over local/remote evaluation and custom properties

posthog/ai/utils.py

Lines changed: 22 additions & 3 deletions
@@ -118,7 +118,12 @@ def format_response(response, provider: str):
 def format_response_anthropic(response):
     output = []
     for choice in response.content:
-        if choice.text:
+        if (
+            hasattr(choice, "type")
+            and choice.type == "text"
+            and hasattr(choice, "text")
+            and choice.text
+        ):
             output.append(
                 {
                     "role": "assistant",
@@ -225,8 +230,21 @@ def format_response_gemini(response):
 
 def format_tool_calls(response, provider: str):
     if provider == "anthropic":
-        if hasattr(response, "tools") and response.tools and len(response.tools) > 0:
-            return response.tools
+        if hasattr(response, "content") and response.content:
+            tool_calls = []
+
+            for content_item in response.content:
+                if hasattr(content_item, "type") and content_item.type == "tool_use":
+                    tool_calls.append(
+                        {
+                            "type": content_item.type,
+                            "id": content_item.id,
+                            "name": content_item.name,
+                            "input": content_item.input,
+                        }
+                    )
+
+            return tool_calls if tool_calls else None
     elif provider == "openai":
         # Handle both Chat Completions and Responses API
         if hasattr(response, "choices") and response.choices:
@@ -378,6 +396,7 @@ def call_llm_and_track_usage(
     }
 
     tool_calls = format_tool_calls(response, provider)
+
     if tool_calls:
         event_properties["$ai_tools"] = with_privacy_mode(
             ph_client, posthog_privacy_mode, tool_calls
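To make the change concrete, here is a minimal sketch of what the two helpers now return for a response whose content mixes text and tool_use blocks. The SimpleNamespace objects stand in for Anthropic's content-block types, and the import path is assumed from the file path above; treat this as an illustration, not part of the commit.

# Minimal sketch: SimpleNamespace objects stand in for anthropic.types
# content blocks; the import path is assumed from posthog/ai/utils.py.
from types import SimpleNamespace

from posthog.ai.utils import format_response_anthropic, format_tool_calls

response = SimpleNamespace(
    content=[
        SimpleNamespace(type="text", text="I'll help you with that."),
        SimpleNamespace(
            type="tool_use",
            id="tool_1",
            name="get_weather",
            input={"location": "New York"},
        ),
    ]
)

# Text blocks still end up in $ai_output_choices ...
print(format_response_anthropic(response))
# -> [{'role': 'assistant', 'content': "I'll help you with that."}]

# ... while tool_use blocks are now collected separately for $ai_tools.
print(format_tool_calls(response, "anthropic"))
# -> [{'type': 'tool_use', 'id': 'tool_1', 'name': 'get_weather',
#     'input': {'location': 'New York'}}]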

posthog/test/ai/anthropic/test_anthropic.py

Lines changed: 71 additions & 0 deletions
@@ -88,6 +88,31 @@ def mock_anthropic_response_with_cached_tokens():
     )
 
 
+@pytest.fixture
+def mock_anthropic_response_with_tool_use():
+    return Message(
+        id="msg_123",
+        type="message",
+        role="assistant",
+        content=[
+            {"type": "text", "text": "I'll help you with that."},
+            {
+                "type": "tool_use",
+                "id": "tool_1",
+                "name": "get_weather",
+                "input": {"location": "New York"},
+            },
+        ],
+        model="claude-3-opus-20240229",
+        usage=Usage(
+            input_tokens=20,
+            output_tokens=10,
+        ),
+        stop_reason="end_turn",
+        stop_sequence=None,
+    )
+
+
 def test_basic_completion(mock_client, mock_anthropic_response):
     with patch(
         "anthropic.resources.Messages.create", return_value=mock_anthropic_response
@@ -434,3 +459,49 @@ def test_cached_tokens(mock_client, mock_anthropic_response_with_cached_tokens):
         assert props["$ai_http_status"] == 200
         assert props["foo"] == "bar"
         assert isinstance(props["$ai_latency"], float)
+
+
+def test_tool_use_response(mock_client, mock_anthropic_response_with_tool_use):
+    with patch(
+        "anthropic.resources.Messages.create",
+        return_value=mock_anthropic_response_with_tool_use,
+    ):
+        client = Anthropic(api_key="test-key", posthog_client=mock_client)
+        response = client.messages.create(
+            model="claude-3-opus-20240229",
+            messages=[{"role": "user", "content": "What's the weather like?"}],
+            posthog_distinct_id="test-id",
+            posthog_properties={"foo": "bar"},
+        )
+
+        assert response == mock_anthropic_response_with_tool_use
+        assert mock_client.capture.call_count == 1
+
+        call_args = mock_client.capture.call_args[1]
+        props = call_args["properties"]
+
+        assert call_args["distinct_id"] == "test-id"
+        assert call_args["event"] == "$ai_generation"
+        assert props["$ai_provider"] == "anthropic"
+        assert props["$ai_model"] == "claude-3-opus-20240229"
+        assert props["$ai_input"] == [
+            {"role": "user", "content": "What's the weather like?"}
+        ]
+        # Should only include text content, not tool_use content
+        assert props["$ai_output_choices"] == [
+            {"role": "assistant", "content": "I'll help you with that."}
+        ]
+        assert props["$ai_input_tokens"] == 20
+        assert props["$ai_output_tokens"] == 10
+        assert props["$ai_http_status"] == 200
+        assert props["foo"] == "bar"
+        assert isinstance(props["$ai_latency"], float)
+        # Verify that tools are captured separately
+        assert props["$ai_tools"] == [
+            {
+                "type": "tool_use",
+                "id": "tool_1",
+                "name": "get_weather",
+                "input": {"location": "New York"},
+            }
+        ]
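The new test mirrors real usage of the wrapped client. As a hedged sketch of that usage (API keys, host, max_tokens, and the tool schema below are placeholders, and the import paths are assumed from this repo's package layout), the flow looks roughly like this:

# Illustrative only: keys, host, and the tool definition are placeholders.
from posthog import Posthog
from posthog.ai.anthropic import Anthropic

posthog_client = Posthog("<ph_project_api_key>", host="https://us.i.posthog.com")
client = Anthropic(api_key="<anthropic_api_key>", posthog_client=posthog_client)

response = client.messages.create(
    model="claude-3-opus-20240229",
    max_tokens=256,
    messages=[{"role": "user", "content": "What's the weather like?"}],
    tools=[
        {
            "name": "get_weather",
            "description": "Get the current weather for a location",
            "input_schema": {
                "type": "object",
                "properties": {"location": {"type": "string"}},
                "required": ["location"],
            },
        }
    ],
    posthog_distinct_id="test-id",
)
# With this fix, any tool_use blocks in response.content are captured on the
# $ai_generation event under $ai_tools, alongside $ai_output_choices and usage.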

posthog/version.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-VERSION = "6.3.1"
+VERSION = "6.3.2"
 
 if __name__ == "__main__":
     print(VERSION, end="")  # noqa: T201
