Commit c2c1697

fix(anthropic): serialize assistant message pydantic models (#3041)
1 parent 45f4346 commit c2c1697
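
The bug this commit fixes surfaces when a previous response's content blocks (pydantic models returned by the Anthropic SDK) are passed back as an assistant turn in the message history. A minimal sketch of that call pattern, mirroring the new test added below (model name and prompt text are illustrative):

from anthropic import Anthropic

client = Anthropic()

first = client.messages.create(
    model="claude-3-5-haiku-latest",
    max_tokens=1024,
    messages=[{"role": "user", "content": "Are you capable of describing an image?"}],
)

# first.content is a list of SDK content-block pydantic models, not dicts.
# Passing it back verbatim as assistant history previously broke the
# instrumentation, which called item.get(...) on each block while
# serializing the prompt attributes.
client.messages.create(
    model="claude-3-5-haiku-latest",
    max_tokens=1024,
    messages=[
        {"role": "user", "content": "Are you capable of describing an image?"},
        {"role": "assistant", "content": first.content},
        {"role": "user", "content": "What do you see?"},
    ],
)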

4 files changed: +344 additions, -32 deletions


packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/__init__.py

Lines changed: 7 additions & 6 deletions
@@ -17,9 +17,10 @@
 from opentelemetry.instrumentation.anthropic.utils import (
     JSONEncoder,
     acount_prompt_tokens_from_request,
+    count_prompt_tokens_from_request,
     dont_throw,
     error_metrics_attributes,
-    count_prompt_tokens_from_request,
+    model_as_dict,
     run_async,
     set_span_attribute,
     shared_metrics_attributes,
@@ -134,16 +135,16 @@ async def _dump_content(message_index, content, span):
     elif isinstance(content, list):
         # If the content is a list of text blocks, concatenate them.
         # This is more commonly used in prompt caching.
-        if all([item.get("type") == "text" for item in content]):
-            return "".join([item.get("text") for item in content])
+        if all([model_as_dict(item).get("type") == "text" for item in content]):
+            return "".join([model_as_dict(item).get("text") for item in content])

         content = [
             (
                 await _process_image_item(
-                    item, span.context.trace_id, span.context.span_id, message_index, j
+                    model_as_dict(item), span.context.trace_id, span.context.span_id, message_index, j
                 )
-                if _is_base64_image(item)
-                else item
+                if _is_base64_image(model_as_dict(item))
+                else model_as_dict(item)
             )
             for j, item in enumerate(content)
         ]
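
With every item routed through model_as_dict, the text-block concatenation in _dump_content works whether the history contains plain dicts or SDK pydantic blocks. A rough sketch of the normalization (the TextBlock class below is an illustrative stand-in for the SDK's block type, assuming pydantic v2):

from pydantic import BaseModel

from opentelemetry.instrumentation.anthropic.utils import model_as_dict


class TextBlock(BaseModel):  # stand-in for the SDK's pydantic text block
    type: str = "text"
    text: str


content = [TextBlock(text="Hello, "), {"type": "text", "text": "world"}]

# Before the fix, item.get(...) raised AttributeError on the pydantic block;
# normalizing each item first makes the mixed list behave like a list of dicts.
assert all(model_as_dict(item).get("type") == "text" for item in content)
assert "".join(model_as_dict(item).get("text") for item in content) == "Hello, world"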

packages/opentelemetry-instrumentation-anthropic/opentelemetry/instrumentation/anthropic/utils.py

Lines changed: 16 additions & 0 deletions
@@ -4,12 +4,14 @@
 import logging
 import threading
 import traceback
+from importlib.metadata import version
 from opentelemetry import context as context_api
 from opentelemetry.instrumentation.anthropic.config import Config
 from opentelemetry.semconv_ai import SpanAttributes

 GEN_AI_SYSTEM = "gen_ai.system"
 GEN_AI_SYSTEM_ANTHROPIC = "anthropic"
+_PYDANTIC_VERSION = version("pydantic")


 def set_span_attribute(span, name, value):
@@ -150,3 +152,17 @@ def default(self, o):
             logger = logging.getLogger(__name__)
             logger.debug("Failed to serialize object of type: %s", type(o).__name__)
             return ""
+
+
+def model_as_dict(model):
+    if isinstance(model, dict):
+        return model
+    if _PYDANTIC_VERSION < "2.0.0" and hasattr(model, "dict"):
+        return model.dict()
+    if hasattr(model, "model_dump"):
+        return model.model_dump()
+    else:
+        try:
+            return dict(model)
+        except Exception:
+            return model
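
A quick usage sketch of the new helper: dicts pass through untouched, pydantic v1 models go through .dict(), pydantic v2 models through .model_dump(), and anything else falls back to dict() or, failing that, the original object. The Block model below is illustrative, assuming pydantic v2 is installed:

from pydantic import BaseModel

from opentelemetry.instrumentation.anthropic.utils import model_as_dict


class Block(BaseModel):  # illustrative model, not an SDK type
    type: str = "text"
    text: str


assert model_as_dict({"type": "text", "text": "hi"}) == {"type": "text", "text": "hi"}
assert model_as_dict(Block(text="hi")) == {"type": "text", "text": "hi"}
assert model_as_dict(42) == 42  # non-mapping, non-model objects are returned as-is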

packages/opentelemetry-instrumentation-anthropic/tests/cassettes/test_messages/test_anthropic_image_with_history.yaml

Lines changed: 224 additions & 0 deletions
Large diffs are not rendered by default.

packages/opentelemetry-instrumentation-anthropic/tests/test_messages.py

Lines changed: 97 additions & 26 deletions
@@ -9,6 +9,20 @@

 from .utils import verify_metrics

+image_content_block = {
+    "type": "image",
+    "source": {
+        "type": "base64",
+        "media_type": "image/jpeg",
+        "data": base64.b64encode(
+            open(
+                Path(__file__).parent.joinpath("data/logo.jpg"),
+                "rb",
+            ).read()
+        ).decode("utf-8"),
+    },
+}
+

 @pytest.mark.vcr
 def test_anthropic_message_create(exporter, reader):
@@ -76,19 +90,7 @@ def test_anthropic_multi_modal(exporter):
                     "type": "text",
                     "text": "What do you see?",
                 },
-                {
-                    "type": "image",
-                    "source": {
-                        "type": "base64",
-                        "media_type": "image/jpeg",
-                        "data": base64.b64encode(
-                            open(
-                                Path(__file__).parent.joinpath("data/logo.jpg"),
-                                "rb",
-                            ).read()
-                        ).decode("utf-8"),
-                    },
-                },
+                image_content_block,
             ],
         },
     ],
@@ -129,6 +131,87 @@ def test_anthropic_multi_modal(exporter):
     )


+@pytest.mark.vcr
+def test_anthropic_image_with_history(exporter):
+    client = Anthropic()
+    system_message = "You are a helpful assistant. Be concise and to the point."
+    user_message1 = {
+        "role": "user",
+        "content": "Are you capable of describing an image?"
+    }
+    user_message2 = {
+        "role": "user",
+        "content": [
+            {"type": "text", "text": "What do you see?"},
+            image_content_block,
+        ]
+    }
+
+    response1 = client.messages.create(
+        max_tokens=1024,
+        model="claude-3-5-haiku-latest",
+        system=system_message,
+        messages=[
+            user_message1,
+        ],
+    )
+
+    response2 = client.messages.create(
+        max_tokens=1024,
+        model="claude-3-5-haiku-latest",
+        system=system_message,
+        messages=[
+            user_message1,
+            {"role": "assistant", "content": response1.content},
+            user_message2,
+        ],
+    )
+
+    spans = exporter.get_finished_spans()
+    assert all(span.name == "anthropic.chat" for span in spans)
+    assert (
+        spans[0].attributes[f"{SpanAttributes.LLM_PROMPTS}.0.content"] ==
+        system_message
+    )
+    assert spans[0].attributes[f"{SpanAttributes.LLM_PROMPTS}.0.role"] == "system"
+    assert (
+        spans[0].attributes[f"{SpanAttributes.LLM_PROMPTS}.1.content"]
+        == "Are you capable of describing an image?"
+    )
+    assert spans[0].attributes[f"{SpanAttributes.LLM_PROMPTS}.1.role"] == "user"
+    assert spans[0].attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] == response1.content[0].text
+    assert spans[0].attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.role"] == "assistant"
+    assert (
+        spans[0].attributes.get("gen_ai.response.id")
+        == "msg_01Ctc62hUPvikvYASXZqTo9q"
+    )
+
+    assert (
+        spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.0.content"] ==
+        system_message
+    )
+    assert spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.0.role"] == "system"
+    assert (
+        spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.1.content"]
+        == "Are you capable of describing an image?"
+    )
+    assert spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.1.role"] == "user"
+    assert spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.2.content"] == response1.content[0].text
+    assert spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.2.role"] == "assistant"
+    assert json.loads(spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.3.content"]) == [
+        {"type": "text", "text": "What do you see?"},
+        {"type": "image_url", "image_url": {"url": "/some/url"}},
+    ]
+    assert spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.3.role"] == "user"
+
+    assert spans[1].attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] == response2.content[0].text
+    assert spans[1].attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.role"] == "assistant"
+    assert (
+        spans[1].attributes.get("gen_ai.response.id")
+        == "msg_01EtAvxHCWn5jjdUCnG4wEAd"
+    )
+
+
 @pytest.mark.vcr
 @pytest.mark.asyncio
 async def test_anthropic_async_multi_modal(exporter):
@@ -143,19 +226,7 @@ async def test_anthropic_async_multi_modal(exporter):
                     "type": "text",
                     "text": "What do you see?",
                 },
-                {
-                    "type": "image",
-                    "source": {
-                        "type": "base64",
-                        "media_type": "image/jpeg",
-                        "data": base64.b64encode(
-                            open(
-                                Path(__file__).parent.joinpath("data/logo.jpg"),
-                                "rb",
-                            ).read()
-                        ).decode("utf-8"),
-                    },
-                },
+                image_content_block,
             ],
         },
     ],
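
For reference, the assertions on the second span of the new test boil down to the following prompt/completion attribute shape. Keys are shown with the semconv constants resolved to their usual string values, and the placeholders stand in for the recorded response text:

# Sketch of the attributes test_anthropic_image_with_history expects on spans[1],
# assuming SpanAttributes.LLM_PROMPTS == "gen_ai.prompt" and
# SpanAttributes.LLM_COMPLETIONS == "gen_ai.completion" in semconv_ai.
expected_second_span = {
    "gen_ai.prompt.0.role": "system",
    "gen_ai.prompt.0.content": "You are a helpful assistant. Be concise and to the point.",
    "gen_ai.prompt.1.role": "user",
    "gen_ai.prompt.1.content": "Are you capable of describing an image?",
    "gen_ai.prompt.2.role": "assistant",  # response1.content, now serialized via model_as_dict
    "gen_ai.prompt.2.content": "<text of response1.content[0]>",
    "gen_ai.prompt.3.role": "user",
    "gen_ai.prompt.3.content": '[{"type": "text", ...}, {"type": "image_url", ...}]',
    "gen_ai.completion.0.role": "assistant",
    "gen_ai.completion.0.content": "<text of response2.content[0]>",
}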
