
Commit 572a238

fix(langchain): add tool call ids to tool message in history (#3033)
1 parent cef9bc9 commit 572a238

3 files changed, +161 -0 lines changed

packages/opentelemetry-instrumentation-langchain/opentelemetry/instrumentation/langchain/callback_handler.py

Lines changed: 8 additions & 0 deletions
@@ -193,6 +193,14 @@ def _set_chat_request(
                 f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
                 content,
             )
+
+            if msg.type == "tool" and hasattr(msg, "tool_call_id"):
+                _set_span_attribute(
+                    span,
+                    f"{SpanAttributes.LLM_PROMPTS}.{i}.tool_call_id",
+                    msg.tool_call_id,
+                )
+
             i += 1

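The new branch only touches the prompt-serialization path: whenever the chat history contains a LangChain ToolMessage, the handler now also records which tool call that message answers, alongside the existing role/content attributes for the same prompt index. A minimal sketch of the message-side behaviour the guard relies on (the ToolMessage below is sample data mirroring the new test; _set_span_attribute and SpanAttributes belong to the surrounding module):

from langchain_core.messages import ToolMessage

# Sample message, mirroring the fixture used in the new test further down.
msg = ToolMessage(content="Tool executed successfully", tool_call_id="call_12345")

# ToolMessage always reports type "tool" and carries the id of the call it answers,
# so the new guard passes and one extra attribute is written for this prompt index:
assert msg.type == "tool" and hasattr(msg, "tool_call_id")
# e.g. f"{SpanAttributes.LLM_PROMPTS}.{i}.tool_call_id"  ->  "call_12345"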
Lines changed: 114 additions & 0 deletions
@@ -0,0 +1,114 @@
interactions:
- request:
    body: '{"messages": [{"content": "Use the tool", "role": "user"}, {"content":
      null, "role": "assistant", "tool_calls": [{"type": "function", "id": "call_12345",
      "function": {"name": "sample_tool", "arguments": "{\"query\": \"test\"}"}}]},
      {"content": "Tool executed successfully", "role": "tool", "tool_call_id": "call_12345"}],
      "model": "gpt-4.1-nano", "stream": false, "temperature": 0.0, "tools": [{"type":
      "function", "function": {"name": "sample_tool", "description": "", "parameters":
      {"properties": {"query": {"type": "string"}}, "required": ["query"], "type":
      "object"}}}]}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '574'
      content-type:
      - application/json
      host:
      - api.openai.com
      traceparent:
      - 00-31a4e7bb03dc5fd318421e146e321b4a-9b8e6d172d9024b8-01
      user-agent:
      - OpenAI/Python 1.59.9
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.59.9
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.9.6
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAAwAAAP//jFJNj9MwEL3nV4x8bqqkH2y3FyRORQtCQkhIRavIsSeJWcdj/AEtq/53
        lKRtUmClvfgwb97zvDfznAAwJdkWmGh4EK3V6bunz793mdzX5QPeu48/NtWHuNs/HL7Sp3LPZh2D
        yu8owoU1F9RajUGRGWDhkAfsVPO7dbbKs+Vd3gMtSdQdrbYhXc3z1HBD6SJbrNNslearM70hJdCz
        LXxLAACe+7cb1Eg8sC1ks0ulRe95jWx7bQJgjnRXYdx75QM3gc1GUJAJaPrZvzQIgUhDwz2UiAbw
        gCIGlOCjEOh9FbU+zmFHv0BwA+9hkIQjRaiiCw26t1Nxh1X0vHNootYTgBtDgXcJ9bYez8jpakRT
        bR2V/i8qq5RRvikcck+mG9oHsqxHTwnAYx9YvMmAWUetDUWgJ+y/e7Me5Ni4pxHML2CgwPVY35xD
        vlUrJAautJ8EzgQXDcqROW6HR6loAiQTz/8O8z/twbcy9WvkR0AItAFlYR1KJW4Nj20Ouyt+qe2a
        cT8w8+h+KoFFUOi6PUiseNTDaTF/9AHbolKmRmedGu6rssVys1wt+aLaVCw5JX8AAAD//wMAd3Ex
        i24DAAA=
    headers:
      CF-RAY:
      - 952a1bd56803a387-LHR
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Fri, 20 Jun 2025 09:06:12 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=P6kTHNfsO0joHmAyW2DnN38mcTa5nOCl_Dp9WXbe5PQ-1750410372-1.0.1.1-kbHVJLkSG2HwTAEpKEZN4WQ.FdCtrsPFS9JbEXq6caWGD6sR_5DYzT5rLeqIDu1_eZCYowYHjz9hcdB4GP3CxSQU02RNgWpaGW4QosBZqL0;
        path=/; expires=Fri, 20-Jun-25 09:36:12 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=dIn5v7bqTmA8PJ8cgzwxPaS94RkxzCmwFDAFVjpexbc-1750410372155-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - user-xzaeeoqlanzncr8pomspsknu
      openai-processing-ms:
      - '290'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-envoy-upstream-service-time:
      - '294'
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999985'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_d38d63814dfeef272d7bcb6e7d8a4d89
    status:
      code: 200
      message: OK
version: 1
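The response body in this cassette is stored as a YAML !!binary scalar holding the gzip-compressed JSON completion (note the Content-Encoding: gzip header). To inspect the recorded reply locally, a small sketch along these lines should work, assuming PyYAML is installed; the cassette filename here is hypothetical:

import gzip
import json

import yaml  # PyYAML's safe loader decodes the !!binary scalar to bytes

# Hypothetical path; point this at the cassette file shown above.
with open("test_tool_message_with_tool_call_id.yaml") as f:
    cassette = yaml.safe_load(f)

raw = cassette["interactions"][0]["response"]["body"]["string"]
completion = json.loads(gzip.decompress(raw))  # body is gzip-compressed JSON
print(completion["choices"][0]["message"])     # the recorded assistant reply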

packages/opentelemetry-instrumentation-langchain/tests/test_tool_calls.py

Lines changed: 39 additions & 0 deletions
@@ -114,6 +114,7 @@ def get_weather(location: str) -> str:
 
     assert chat_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.3.content"] == messages[3].content
     assert chat_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.3.role"] == "tool"
+    assert chat_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.3.tool_call_id"] == messages[3].tool_call_id
 
     assert chat_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.4.content"] == messages[4].content
     assert chat_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.4.role"] == "user"
@@ -271,6 +272,7 @@ def get_news(location: str) -> str:
 
     assert chat_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.2.role"] == "tool"
     assert chat_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.2.content"] == messages[2].content
+    assert chat_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.2.tool_call_id"] == messages[2].tool_call_id
 
     assert chat_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.role"] == "assistant"
     # Test that we write both the content and the tool calls
@@ -292,6 +294,43 @@ def get_news(location: str) -> str:
     )
 
 
+@pytest.mark.vcr
+def test_tool_message_with_tool_call_id(exporter):
+    """Test that tool_call_id is properly set in span attributes for ToolMessage."""
+    def sample_tool(query: str) -> str:
+        return "Tool response"
+
+    messages: list[BaseMessage] = [
+        HumanMessage(content="Use the tool"),
+        AIMessage(
+            content="",
+            tool_calls=[
+                {
+                    "name": "sample_tool",
+                    "args": {"query": "test"},
+                    "id": "call_12345",
+                    "type": "tool_call",
+                }
+            ],
+        ),
+        ToolMessage(content="Tool executed successfully", tool_call_id="call_12345"),
+    ]
+
+    model = ChatOpenAI(model="gpt-4.1-nano", temperature=0)
+    model_with_tools = model.bind_tools([sample_tool])
+    model_with_tools.invoke(messages)
+    spans = exporter.get_finished_spans()
+
+    assert len(spans) == 1
+    chat_span = spans[0]
+    assert chat_span.name == "ChatOpenAI.chat"
+
+    # Verify that the tool_call_id is properly set for the ToolMessage
+    assert chat_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.2.role"] == "tool"
+    assert chat_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.2.content"] == "Tool executed successfully"
+    assert chat_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.2.tool_call_id"] == "call_12345"
+
+
 @pytest.mark.vcr
 def test_parallel_tool_calls(exporter):
     def get_weather(location: str) -> str:
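Taken together, the commit makes the instrumentation flatten a user → assistant(tool_calls) → tool history into indexed span attributes that now also carry the tool call id of the tool message. A rough sketch of the resulting prompt-attribute subset for the new test's history, assuming SpanAttributes.LLM_PROMPTS resolves to the "gen_ai.prompt" prefix (as in opentelemetry-semantic-conventions-ai); only the index-2 entries are asserted directly by the new test:

# Illustrative layout only; keys are hypothetical renderings of the flattened attributes.
expected_subset = {
    "gen_ai.prompt.0.role": "user",
    "gen_ai.prompt.0.content": "Use the tool",
    "gen_ai.prompt.1.role": "assistant",
    "gen_ai.prompt.2.role": "tool",
    "gen_ai.prompt.2.content": "Tool executed successfully",
    "gen_ai.prompt.2.tool_call_id": "call_12345",  # the attribute added by this commit
}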
