Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .size-limit.js
Original file line number Diff line number Diff line change
Expand Up @@ -261,7 +261,7 @@ module.exports = [
import: createImport('init'),
ignore: [...builtinModules, ...nodePrefixedBuiltinModules],
gzip: true,
limit: '162 KB',
limit: '163 KB',
},
{
name: '@sentry/node - without tracing',
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ export default Sentry.withSentry(
callbacks: [callbackHandler],
});

// Test 2: Chain invocation
// Test 2: Chain invocation (without tool calls)
const chain = new MockChain('my_test_chain');
await chain.invoke(
{ input: 'test input' },
Expand All @@ -44,6 +44,15 @@ export default Sentry.withSentry(
callbacks: [callbackHandler],
});

// Test 4: Chain invocation with tool calls (tool_calls captured regardless of recordOutputs)
const chainWithToolCalls = new MockChain('chain_with_tool_calls', { includeToolCalls: true });
await chainWithToolCalls.invoke(
{ input: 'test input for tool calls' },
{
callbacks: [callbackHandler],
},
);

return new Response(JSON.stringify({ success: true }));
},
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -132,9 +132,11 @@ export class MockChatModel {
// Mock LangChain Chain
export class MockChain {
private _name: string;
private _includeToolCalls: boolean;

public constructor(name: string) {
public constructor(name: string, options?: { includeToolCalls?: boolean }) {
this._name = name;
this._includeToolCalls = options?.includeToolCalls ?? false;
}

public async invoke(
Expand All @@ -151,7 +153,16 @@ export class MockChain {
}
}

const outputs = { result: 'Chain execution completed!' };
const outputs = this._includeToolCalls
? {
result: 'Chain execution completed!',
messages: [
{
tool_calls: [{ name: 'search_tool', args: { query: 'test query' } }],
},
],
}
: { result: 'Chain execution completed!' };

// Call handleChainEnd
for (const callback of callbacks) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,18 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
op: 'gen_ai.execute_tool',
origin: 'auto.ai.langchain',
}),
// Chain span with tool calls (captured regardless of recordOutputs)
expect.objectContaining({
data: expect.objectContaining({
'sentry.origin': 'auto.ai.langchain',
'sentry.op': 'gen_ai.invoke_agent',
'langchain.chain.name': 'chain_with_tool_calls',
'gen_ai.response.tool_calls': expect.stringContaining('search_tool'),
}),
description: 'chain chain_with_tool_calls',
op: 'gen_ai.invoke_agent',
origin: 'auto.ai.langchain',
}),
]),
);
})
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
import { RunnableLambda } from '@langchain/core/runnables';
import * as Sentry from '@sentry/node';

async function run() {
  // Output recording is disabled on purpose: tool_calls metadata (names, ids)
  // should still be captured on chain spans regardless of recordOutputs.
  const handler = Sentry.createLangChainCallbackHandler({
    recordInputs: false,
    recordOutputs: false,
  });

  // Invoke a runnable with the Sentry callback handler attached.
  const invokeTraced = (runnable, input) => runnable.invoke(input, { callbacks: [handler] });

  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    // Test 1: Chain without tool calls
    const simpleChain = new RunnableLambda({
      func: input => ({ result: `Processed: ${input.query}` }),
    }).withConfig({ runName: 'simple_chain' });

    await invokeTraced(simpleChain, { query: 'Hello world' });

    // Test 2: Chain whose output embeds tool calls inside a messages array
    const messagesChain = new RunnableLambda({
      func: input => ({
        result: `Processed with tools: ${input.query}`,
        messages: [
          {
            role: 'assistant',
            content: 'I will use the search tool',
            tool_calls: [
              { name: 'search', args: { query: input.query }, id: 'tool_call_123' },
              { name: 'calculator', args: { expression: '2+2' }, id: 'tool_call_456' },
            ],
          },
        ],
      }),
    }).withConfig({ runName: 'chain_with_tool_calls' });

    await invokeTraced(messagesChain, { query: 'Search for something' });

    // Test 3: Chain whose output carries tool_calls directly (alternative format)
    const directChain = new RunnableLambda({
      func: input => ({
        result: `Direct tool calls: ${input.query}`,
        tool_calls: [{ name: 'weather', args: { location: 'San Francisco' }, id: 'tool_call_789' }],
      }),
    }).withConfig({ runName: 'chain_with_direct_tool_calls' });

    await invokeTraced(directChain, { query: 'Get weather' });
  });

  await Sentry.flush(2000);
}

run();
Original file line number Diff line number Diff line change
Expand Up @@ -246,6 +246,54 @@ describe('LangChain integration', () => {
},
);

// Builds the matcher for a chain span in the tool-calls scenario; `extraData`
// pins scenario-specific attributes on top of the common ones.
const chainToolCallsSpan = (extraData: Record<string, unknown> = {}) =>
  expect.objectContaining({
    data: expect.objectContaining({
      'sentry.origin': 'auto.ai.langchain',
      'sentry.op': 'gen_ai.invoke_agent',
      ...extraData,
    }),
    op: 'gen_ai.invoke_agent',
    origin: 'auto.ai.langchain',
    status: 'ok',
  });

const EXPECTED_TRANSACTION_CHAIN_TOOL_CALLS = {
  transaction: 'main',
  spans: expect.arrayContaining([
    // Simple chain without tool calls
    chainToolCallsSpan(),
    // Chain with tool calls in messages format (captured regardless of recordOutputs)
    chainToolCallsSpan({ 'gen_ai.response.tool_calls': expect.stringContaining('search') }),
    // Chain with direct tool_calls on output (captured regardless of recordOutputs)
    chainToolCallsSpan({ 'gen_ai.response.tool_calls': expect.stringContaining('weather') }),
  ]),
};

createEsmAndCjsTests(__dirname, 'scenario-chain-tool-calls.mjs', 'instrument.mjs', (createRunner, test) => {
  test('creates langchain chain spans with tool calls', async () => {
    await createRunner()
      .ignore('event')
      .expect({ transaction: EXPECTED_TRANSACTION_CHAIN_TOOL_CALLS })
      .start()
      .completed();
  });
});

createEsmAndCjsTests(
__dirname,
'scenario-openai-before-langchain.mjs',
Expand Down
14 changes: 13 additions & 1 deletion packages/core/src/tracing/langchain/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,11 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '
import { SPAN_STATUS_ERROR } from '../../tracing';
import { startSpanManual } from '../../tracing/trace';
import type { Span, SpanAttributeValue } from '../../types-hoist/span';
import { GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE } from '../ai/gen-ai-attributes';
import {
GEN_AI_OPERATION_NAME_ATTRIBUTE,
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
} from '../ai/gen-ai-attributes';
import { LANGCHAIN_ORIGIN } from './constants';
import type {
LangChainCallbackHandler,
Expand All @@ -16,6 +20,7 @@ import {
extractChatModelRequestAttributes,
extractLLMRequestAttributes,
extractLlmResponseAttributes,
extractToolCallsFromChainOutput,
getInvocationParams,
} from './utils';

Expand Down Expand Up @@ -216,6 +221,13 @@ export function createLangChainCallbackHandler(options: LangChainOptions = {}):
'langchain.chain.outputs': JSON.stringify(outputs),
});
}

// Tool calls metadata (names, IDs) are not PII, so capture them regardless of recordOutputs
const toolCalls = extractToolCallsFromChainOutput(outputs);
if (toolCalls && toolCalls.length > 0) {
span.setAttribute(GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, JSON.stringify(toolCalls));
}

exitSpan(runId);
}
},
Expand Down
22 changes: 22 additions & 0 deletions packages/core/src/tracing/langchain/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -318,6 +318,28 @@ function addToolCallsAttributes(generations: LangChainMessage[][], attrs: Record
}
}

/**
 * Extracts tool calls from chain outputs.
 *
 * Supports the three output shapes produced by LangChain chains:
 * `{ messages: [{ tool_calls }] }`, `{ output: { messages } }`, and a direct
 * `{ tool_calls }` on the output object.
 *
 * @param outputs - The raw chain output (unknown shape).
 * @returns The collected tool calls, or `null` when none are present.
 */
export function extractToolCallsFromChainOutput(outputs: unknown): unknown[] | null {
  if (!outputs || typeof outputs !== 'object') {
    return null;
  }

  const record = outputs as Record<string, unknown>;
  // Prefer top-level `messages`; fall back to a nested `output.messages`.
  const nested = record.output as Record<string, unknown> | undefined;
  const messageList = record.messages ?? nested?.messages;

  const collected: unknown[] = [];

  if (Array.isArray(messageList)) {
    for (const message of messageList) {
      const messageToolCalls = (message as Record<string, unknown> | null)?.tool_calls;
      if (Array.isArray(messageToolCalls)) {
        collected.push(...messageToolCalls);
      }
    }
  }

  // Alternative format: tool_calls attached directly to the output object.
  const directToolCalls = record.tool_calls;
  if (Array.isArray(directToolCalls)) {
    collected.push(...directToolCalls);
  }

  return collected.length > 0 ? collected : null;
}

/**
* Adds token usage attributes, supporting both OpenAI (`tokenUsage`) and Anthropic (`usage`) formats.
* - Preserve zero values (0 tokens) by avoiding truthy checks.
Expand Down
Loading