 
 from .utils import verify_metrics
 
+image_content_block = {
+    "type": "image",
+    "source": {
+        "type": "base64",
+        "media_type": "image/jpeg",
+        "data": base64.b64encode(
+            open(
+                Path(__file__).parent.joinpath("data/logo.jpg"),
+                "rb",
+            ).read()
+        ).decode("utf-8"),
+    },
+}
+
 
 @pytest.mark.vcr
 def test_anthropic_message_create(exporter, reader):
@@ -76,19 +90,7 @@ def test_anthropic_multi_modal(exporter):
7690 "type" : "text" ,
7791 "text" : "What do you see?" ,
7892 },
79- {
80- "type" : "image" ,
81- "source" : {
82- "type" : "base64" ,
83- "media_type" : "image/jpeg" ,
84- "data" : base64 .b64encode (
85- open (
86- Path (__file__ ).parent .joinpath ("data/logo.jpg" ),
87- "rb" ,
88- ).read ()
89- ).decode ("utf-8" ),
90- },
91- },
93+ image_content_block ,
9294 ],
9395 },
9496 ],
@@ -129,6 +131,87 @@ def test_anthropic_multi_modal(exporter):
     )
 
 
+@pytest.mark.vcr
+def test_anthropic_image_with_history(exporter):
+    client = Anthropic()
+    system_message = "You are a helpful assistant. Be concise and to the point."
+    user_message1 = {
+        "role": "user",
+        "content": "Are you capable of describing an image?"
+    }
+    user_message2 = {
+        "role": "user",
+        "content": [
+            {"type": "text", "text": "What do you see?"},
+            image_content_block,
+        ]
+    }
+
+    response1 = client.messages.create(
+        max_tokens=1024,
+        model="claude-3-5-haiku-latest",
+        system=system_message,
+        messages=[
+            user_message1,
+        ],
+    )
+
+    response2 = client.messages.create(
+        max_tokens=1024,
+        model="claude-3-5-haiku-latest",
+        system=system_message,
+        messages=[
+            user_message1,
+            {"role": "assistant", "content": response1.content},
+            user_message2,
+        ],
+    )
+
+    spans = exporter.get_finished_spans()
+    assert all(span.name == "anthropic.chat" for span in spans)
+    assert (
+        spans[0].attributes[f"{SpanAttributes.LLM_PROMPTS}.0.content"] ==
+        system_message
+    )
+    assert spans[0].attributes[f"{SpanAttributes.LLM_PROMPTS}.0.role"] == "system"
+    assert (
+        spans[0].attributes[f"{SpanAttributes.LLM_PROMPTS}.1.content"]
+        == "Are you capable of describing an image?"
+    )
+    assert spans[0].attributes[f"{SpanAttributes.LLM_PROMPTS}.1.role"] == "user"
+    assert spans[0].attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] == response1.content[0].text
+    assert spans[0].attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.role"] == "assistant"
+    assert (
+        spans[0].attributes.get("gen_ai.response.id")
+        == "msg_01Ctc62hUPvikvYASXZqTo9q"
+    )
+
+    assert (
+        spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.0.content"] ==
+        system_message
+    )
+    assert spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.0.role"] == "system"
+    assert (
+        spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.1.content"]
+        == "Are you capable of describing an image?"
+    )
+    assert spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.1.role"] == "user"
+    assert spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.2.content"] == response1.content[0].text
+    assert spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.2.role"] == "assistant"
+    assert json.loads(spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.3.content"]) == [
+        {"type": "text", "text": "What do you see?"},
+        {"type": "image_url", "image_url": {"url": "/some/url"}},
+    ]
+    assert spans[1].attributes[f"{SpanAttributes.LLM_PROMPTS}.3.role"] == "user"
+
+    assert spans[1].attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"] == response2.content[0].text
+    assert spans[1].attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.role"] == "assistant"
+    assert (
+        spans[1].attributes.get("gen_ai.response.id")
+        == "msg_01EtAvxHCWn5jjdUCnG4wEAd"
+    )
+
+
 @pytest.mark.vcr
 @pytest.mark.asyncio
 async def test_anthropic_async_multi_modal(exporter):
@@ -143,19 +226,7 @@ async def test_anthropic_async_multi_modal(exporter):
143226 "type" : "text" ,
144227 "text" : "What do you see?" ,
145228 },
146- {
147- "type" : "image" ,
148- "source" : {
149- "type" : "base64" ,
150- "media_type" : "image/jpeg" ,
151- "data" : base64 .b64encode (
152- open (
153- Path (__file__ ).parent .joinpath ("data/logo.jpg" ),
154- "rb" ,
155- ).read ()
156- ).decode ("utf-8" ),
157- },
158- },
229+ image_content_block ,
159230 ],
160231 },
161232 ],