diff --git a/dev-packages/node-integration-tests/package.json b/dev-packages/node-integration-tests/package.json
index 79d933a3c525..0bc5b8b83b1c 100644
--- a/dev-packages/node-integration-tests/package.json
+++ b/dev-packages/node-integration-tests/package.json
@@ -31,6 +31,7 @@
     "@hono/node-server": "^1.19.4",
     "@langchain/anthropic": "^0.3.10",
     "@langchain/core": "^0.3.28",
+    "@langchain/langgraph": "^0.2.32",
     "@nestjs/common": "^11",
     "@nestjs/core": "^11",
     "@nestjs/platform-express": "^11",
diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-with-pii.mjs
new file mode 100644
index 000000000000..be512ed2f773
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-with-pii.mjs
@@ -0,0 +1,10 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  sendDefaultPii: true,
+  transport: loggingTransport,
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument.mjs
new file mode 100644
index 000000000000..06cc1a32e93e
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument.mjs
@@ -0,0 +1,10 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  sendDefaultPii: false,
+  transport: loggingTransport,
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/scenario-tools.mjs b/dev-packages/node-integration-tests/suites/tracing/langgraph/scenario-tools.mjs
new file mode 100644
index 000000000000..21110c337755
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/scenario-tools.mjs
@@ -0,0 +1,164 @@
+import { tool } from '@langchain/core/tools';
+import { END, MessagesAnnotation, START, StateGraph } from '@langchain/langgraph';
+import { ToolNode } from '@langchain/langgraph/prebuilt';
+import * as Sentry from '@sentry/node';
+import { z } from 'zod';
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'langgraph-tools-test' }, async () => {
+    // Define tools
+    const getWeatherTool = tool(
+      async ({ city }) => {
+        return JSON.stringify({ city, temperature: 72, condition: 'sunny' });
+      },
+      {
+        name: 'get_weather',
+        description: 'Get the current weather for a given city',
+        schema: z.object({
+          city: z.string().describe('The city to get weather for'),
+        }),
+      },
+    );
+
+    const getTimeTool = tool(
+      async () => {
+        return new Date().toISOString();
+      },
+      {
+        name: 'get_time',
+        description: 'Get the current time',
+        schema: z.object({}),
+      },
+    );
+
+    const tools = [getWeatherTool, getTimeTool];
+    const toolNode = new ToolNode(tools);
+
+    // Define mock LLM function that returns without tool calls
+    const mockLlm = () => {
+      return {
+        messages: [
+          {
+            role: 'assistant',
+            content: 'Response without calling tools',
+            response_metadata: {
+              model_name: 'gpt-4-0613',
+              finish_reason: 'stop',
+              tokenUsage: {
+                promptTokens: 25,
+                completionTokens: 15,
+                totalTokens: 40,
+              },
+            },
+            tool_calls: [],
+          },
+        ],
+      };
+    };
+
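+    // The routing function below drives addConditionalEdges: LangGraph calls it after each
+    // 'agent' step and maps its return value ('tools' or END) to the next node to run.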
+    // Routing function - check if there are tool calls
+    const shouldContinue = state => {
+      const messages = state.messages;
+      const lastMessage = messages[messages.length - 1];
+
+      // If the last message has tool_calls, route to tools, otherwise end
+      if (lastMessage.tool_calls && lastMessage.tool_calls.length > 0) {
+        return 'tools';
+      }
+      return END;
+    };
+
+    // Create graph with conditional edge to tools
+    const graph = new StateGraph(MessagesAnnotation)
+      .addNode('agent', mockLlm)
+      .addNode('tools', toolNode)
+      .addEdge(START, 'agent')
+      .addConditionalEdges('agent', shouldContinue, {
+        tools: 'tools',
+        [END]: END,
+      })
+      .addEdge('tools', 'agent')
+      .compile({ name: 'tool_agent' });
+
+    // Simple invocation - won't call tools since mockLlm returns empty tool_calls
+    await graph.invoke({
+      messages: [{ role: 'user', content: 'What is the weather?' }],
+    });
+
+    // Define mock LLM function that returns with tool calls
+    let callCount = 0;
+    const mockLlmWithTools = () => {
+      callCount++;
+
+      // First call - return tool calls
+      if (callCount === 1) {
+        return {
+          messages: [
+            {
+              role: 'assistant',
+              content: '',
+              response_metadata: {
+                model_name: 'gpt-4-0613',
+                finish_reason: 'tool_calls',
+                tokenUsage: {
+                  promptTokens: 30,
+                  completionTokens: 20,
+                  totalTokens: 50,
+                },
+              },
+              tool_calls: [
+                {
+                  name: 'get_weather',
+                  args: { city: 'San Francisco' },
+                  id: 'call_123',
+                  type: 'tool_call',
+                },
+              ],
+            },
+          ],
+        };
+      }
+
+      // Second call - return final response after tool execution
+      return {
+        messages: [
+          {
+            role: 'assistant',
+            content: 'Based on the weather data, it is sunny and 72 degrees in San Francisco.',
+            response_metadata: {
+              model_name: 'gpt-4-0613',
+              finish_reason: 'stop',
+              tokenUsage: {
+                promptTokens: 50,
+                completionTokens: 20,
+                totalTokens: 70,
+              },
+            },
+            tool_calls: [],
+          },
+        ],
+      };
+    };
+
+    // Create graph with tool calls enabled
+    const graphWithTools = new StateGraph(MessagesAnnotation)
+      .addNode('agent', mockLlmWithTools)
+      .addNode('tools', toolNode)
+      .addEdge(START, 'agent')
+      .addConditionalEdges('agent', shouldContinue, {
+        tools: 'tools',
+        [END]: END,
+      })
+      .addEdge('tools', 'agent')
+      .compile({ name: 'tool_calling_agent' });
+
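+    // The graph loops agent -> tools -> agent, so this single invoke() covers two mock LLM
+    // turns; the instrumentation is expected to sum their token usage
+    // (30 + 50 input, 20 + 20 output) onto one invoke_agent span.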
+    // Invocation that actually calls tools
+    await graphWithTools.invoke({
+      messages: [{ role: 'user', content: 'What is the weather in San Francisco?' }],
+    });
+  });
+
+  await Sentry.flush(2000);
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/langgraph/scenario.mjs
new file mode 100644
index 000000000000..d93c4b5491c7
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/scenario.mjs
@@ -0,0 +1,52 @@
+import { END, MessagesAnnotation, START, StateGraph } from '@langchain/langgraph';
+import * as Sentry from '@sentry/node';
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'langgraph-test' }, async () => {
+    // Define a simple mock LLM function
+    const mockLlm = () => {
+      return {
+        messages: [
+          {
+            role: 'assistant',
+            content: 'Mock LLM response',
+            response_metadata: {
+              model_name: 'mock-model',
+              finish_reason: 'stop',
+              tokenUsage: {
+                promptTokens: 20,
+                completionTokens: 10,
+                totalTokens: 30,
+              },
+            },
+          },
+        ],
+      };
+    };
+
+    // Create and compile the graph
+    const graph = new StateGraph(MessagesAnnotation)
+      .addNode('agent', mockLlm)
+      .addEdge(START, 'agent')
+      .addEdge('agent', END)
+      .compile({ name: 'weather_assistant' });
+
+    // Test: basic invocation
+    await graph.invoke({
+      messages: [{ role: 'user', content: 'What is the weather today?' }],
+    });
+
+    // Test: invocation with multiple messages
+    await graph.invoke({
+      messages: [
+        { role: 'user', content: 'Hello' },
+        { role: 'assistant', content: 'Hi there!' },
+        { role: 'user', content: 'Tell me about the weather' },
+      ],
+    });
+  });
+
+  await Sentry.flush(2000);
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts
new file mode 100644
index 000000000000..6a67b5cd1e86
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts
@@ -0,0 +1,208 @@
+import { afterAll, describe, expect } from 'vitest';
+import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';
+
+describe('LangGraph integration', () => {
+  afterAll(() => {
+    cleanupChildProcesses();
+  });
+
+  const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
+    transaction: 'langgraph-test',
+    spans: expect.arrayContaining([
+      // create_agent span
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'create_agent',
+          'sentry.op': 'gen_ai.create_agent',
+          'sentry.origin': 'auto.ai.langgraph',
+          'gen_ai.agent.name': 'weather_assistant',
+        },
+        description: 'create_agent weather_assistant',
+        op: 'gen_ai.create_agent',
+        origin: 'auto.ai.langgraph',
+        status: 'ok',
+      }),
+      // First invoke_agent span
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'gen_ai.operation.name': 'invoke_agent',
+          'sentry.op': 'gen_ai.invoke_agent',
+          'sentry.origin': 'auto.ai.langgraph',
+          'gen_ai.agent.name': 'weather_assistant',
+          'gen_ai.pipeline.name': 'weather_assistant',
+        }),
+        description: 'invoke_agent weather_assistant',
+        op: 'gen_ai.invoke_agent',
+        origin: 'auto.ai.langgraph',
+        status: 'ok',
+      }),
+      // Second invoke_agent span
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'gen_ai.operation.name': 'invoke_agent',
+          'sentry.op': 'gen_ai.invoke_agent',
+          'sentry.origin': 'auto.ai.langgraph',
+          'gen_ai.agent.name': 'weather_assistant',
+          'gen_ai.pipeline.name': 'weather_assistant',
+        }),
+        description: 'invoke_agent weather_assistant',
+        op: 'gen_ai.invoke_agent',
+        origin: 'auto.ai.langgraph',
+        status: 'ok',
+      }),
+    ]),
+  };
+
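+  // Same scenario run with sendDefaultPii: true - the invoke_agent spans should now also
+  // carry gen_ai.request.messages with the prompt text.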
+  const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
+    transaction: 'langgraph-test',
+    spans: expect.arrayContaining([
+      // create_agent span (PII enabled doesn't affect this span)
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'create_agent',
+          'sentry.op': 'gen_ai.create_agent',
+          'sentry.origin': 'auto.ai.langgraph',
+          'gen_ai.agent.name': 'weather_assistant',
+        },
+        description: 'create_agent weather_assistant',
+        op: 'gen_ai.create_agent',
+        origin: 'auto.ai.langgraph',
+        status: 'ok',
+      }),
+      // First invoke_agent span with PII
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'gen_ai.operation.name': 'invoke_agent',
+          'sentry.op': 'gen_ai.invoke_agent',
+          'sentry.origin': 'auto.ai.langgraph',
+          'gen_ai.agent.name': 'weather_assistant',
+          'gen_ai.pipeline.name': 'weather_assistant',
+          'gen_ai.request.messages': expect.stringContaining('What is the weather today?'),
+        }),
+        description: 'invoke_agent weather_assistant',
+        op: 'gen_ai.invoke_agent',
+        origin: 'auto.ai.langgraph',
+        status: 'ok',
+      }),
+      // Second invoke_agent span with PII and multiple messages
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'gen_ai.operation.name': 'invoke_agent',
+          'sentry.op': 'gen_ai.invoke_agent',
+          'sentry.origin': 'auto.ai.langgraph',
+          'gen_ai.agent.name': 'weather_assistant',
+          'gen_ai.pipeline.name': 'weather_assistant',
+          'gen_ai.request.messages': expect.stringContaining('Tell me about the weather'),
+        }),
+        description: 'invoke_agent weather_assistant',
+        op: 'gen_ai.invoke_agent',
+        origin: 'auto.ai.langgraph',
+        status: 'ok',
+      }),
+    ]),
+  };
+
+  const EXPECTED_TRANSACTION_WITH_TOOLS = {
+    transaction: 'langgraph-tools-test',
+    spans: expect.arrayContaining([
+      // create_agent span for first graph (no tool calls)
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'create_agent',
+          'sentry.op': 'gen_ai.create_agent',
+          'sentry.origin': 'auto.ai.langgraph',
+          'gen_ai.agent.name': 'tool_agent',
+        },
+        description: 'create_agent tool_agent',
+        op: 'gen_ai.create_agent',
+        origin: 'auto.ai.langgraph',
+        status: 'ok',
+      }),
+      // invoke_agent span with tools available but not called
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'gen_ai.operation.name': 'invoke_agent',
+          'sentry.op': 'gen_ai.invoke_agent',
+          'sentry.origin': 'auto.ai.langgraph',
+          'gen_ai.agent.name': 'tool_agent',
+          'gen_ai.pipeline.name': 'tool_agent',
+          'gen_ai.request.available_tools': expect.stringContaining('get_weather'),
+          'gen_ai.request.messages': expect.stringContaining('What is the weather?'),
+          'gen_ai.response.model': 'gpt-4-0613',
+          'gen_ai.response.finish_reasons': ['stop'],
+          'gen_ai.response.text': expect.stringContaining('Response without calling tools'),
+          'gen_ai.usage.input_tokens': 25,
+          'gen_ai.usage.output_tokens': 15,
+          'gen_ai.usage.total_tokens': 40,
+        }),
+        description: 'invoke_agent tool_agent',
+        op: 'gen_ai.invoke_agent',
+        origin: 'auto.ai.langgraph',
+        status: 'ok',
+      }),
+      // create_agent span for second graph (with tool calls)
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'create_agent',
+          'sentry.op': 'gen_ai.create_agent',
+          'sentry.origin': 'auto.ai.langgraph',
+          'gen_ai.agent.name': 'tool_calling_agent',
+        },
+        description: 'create_agent tool_calling_agent',
+        op: 'gen_ai.create_agent',
+        origin: 'auto.ai.langgraph',
+        status: 'ok',
+      }),
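+      // The tool-calling run loops agent -> tools -> agent, so the usage asserted below is
+      // the sum of both mock LLM turns (30 + 50 input tokens, 20 + 20 output tokens).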
+      // invoke_agent span with tool calls and execution
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'gen_ai.operation.name': 'invoke_agent',
+          'sentry.op': 'gen_ai.invoke_agent',
+          'sentry.origin': 'auto.ai.langgraph',
+          'gen_ai.agent.name': 'tool_calling_agent',
+          'gen_ai.pipeline.name': 'tool_calling_agent',
+          'gen_ai.request.available_tools': expect.stringContaining('get_weather'),
+          'gen_ai.request.messages': expect.stringContaining('San Francisco'),
+          'gen_ai.response.model': 'gpt-4-0613',
+          'gen_ai.response.finish_reasons': ['stop'],
+          'gen_ai.response.text': expect.stringMatching(/"role":"tool"/),
+          // Verify tool_calls are captured
+          'gen_ai.response.tool_calls': expect.stringContaining('get_weather'),
+          'gen_ai.usage.input_tokens': 80,
+          'gen_ai.usage.output_tokens': 40,
+          'gen_ai.usage.total_tokens': 120,
+        }),
+        description: 'invoke_agent tool_calling_agent',
+        op: 'gen_ai.invoke_agent',
+        origin: 'auto.ai.langgraph',
+        status: 'ok',
+      }),
+    ]),
+  };
+
+  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
+    test('should instrument LangGraph with default PII settings', async () => {
+      await createRunner()
+        .ignore('event')
+        .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE })
+        .start()
+        .completed();
+    });
+  });
+
+  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
+    test('should instrument LangGraph with sendDefaultPii: true', async () => {
+      await createRunner()
+        .ignore('event')
+        .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE })
+        .start()
+        .completed();
+    });
+  });
+
+  createEsmAndCjsTests(__dirname, 'scenario-tools.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
+    test('should capture tools from LangGraph agent', { timeout: 30000 }, async () => {
+      await createRunner().ignore('event').expect({ transaction: EXPECTED_TRANSACTION_WITH_TOOLS }).start().completed();
+    });
+  });
+});
diff --git a/packages/astro/src/index.server.ts b/packages/astro/src/index.server.ts
index 1774f597af43..2913022c816b 100644
--- a/packages/astro/src/index.server.ts
+++ b/packages/astro/src/index.server.ts
@@ -95,6 +95,7 @@ export {
   onUnhandledRejectionIntegration,
   openAIIntegration,
   langChainIntegration,
+  langGraphIntegration,
   parameterize,
   pinoIntegration,
   postgresIntegration,
diff --git a/packages/aws-serverless/src/index.ts b/packages/aws-serverless/src/index.ts
index 586babab40ee..6b36930265ca 100644
--- a/packages/aws-serverless/src/index.ts
+++ b/packages/aws-serverless/src/index.ts
@@ -58,6 +58,7 @@ export {
   onUnhandledRejectionIntegration,
   openAIIntegration,
   langChainIntegration,
+  langGraphIntegration,
   modulesIntegration,
   contextLinesIntegration,
   nodeContextIntegration,
diff --git a/packages/bun/src/index.ts b/packages/bun/src/index.ts
index 813e087dc2d2..d3d266b4dfc1 100644
--- a/packages/bun/src/index.ts
+++ b/packages/bun/src/index.ts
@@ -79,6 +79,7 @@ export {
   onUnhandledRejectionIntegration,
   openAIIntegration,
   langChainIntegration,
+  langGraphIntegration,
   modulesIntegration,
   contextLinesIntegration,
   nodeContextIntegration,
diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts
index 504368cfb873..ed3dbe4750d7 100644
--- a/packages/core/src/index.ts
+++ b/packages/core/src/index.ts
@@ -152,6 +152,9 @@ export type { GoogleGenAIResponse } from './tracing/google-genai/types';
 export { createLangChainCallbackHandler } from './tracing/langchain';
 export { LANGCHAIN_INTEGRATION_NAME } from './tracing/langchain/constants';
 export type { LangChainOptions, LangChainIntegration } from './tracing/langchain/types';
+export { instrumentStateGraphCompile } from './tracing/langgraph';
+export { LANGGRAPH_INTEGRATION_NAME } from './tracing/langgraph/constants';
+export type { LangGraphOptions, LangGraphIntegration, CompiledGraph } from './tracing/langgraph/types';
 export type { OpenAiClient, OpenAiOptions, InstrumentedMethod } from './tracing/openai/types';
 export type {
   AnthropicAiClient,
diff --git a/packages/core/src/tracing/ai/gen-ai-attributes.ts b/packages/core/src/tracing/ai/gen-ai-attributes.ts
index 84efb21c1822..b07aa63d306f 100644
--- a/packages/core/src/tracing/ai/gen-ai-attributes.ts
+++ b/packages/core/src/tracing/ai/gen-ai-attributes.ts
@@ -134,6 +134,16 @@ export const GEN_AI_RESPONSE_STREAMING_ATTRIBUTE = 'gen_ai.response.streaming';
  */
 export const GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE = 'gen_ai.response.tool_calls';
 
+/**
+ * The agent name
+ */
+export const GEN_AI_AGENT_NAME_ATTRIBUTE = 'gen_ai.agent.name';
+
+/**
+ * The pipeline name
+ */
+export const GEN_AI_PIPELINE_NAME_ATTRIBUTE = 'gen_ai.pipeline.name';
+
 /**
  * The number of cache creation input tokens used
  */
@@ -154,6 +164,11 @@ export const GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE_ATTRIBUTE = 'gen_ai.usage.inp
  */
 export const GEN_AI_USAGE_INPUT_TOKENS_CACHED_ATTRIBUTE = 'gen_ai.usage.input_tokens.cached';
 
+/**
+ * The span operation name for invoking an agent
+ */
+export const GEN_AI_INVOKE_AGENT_OPERATION_ATTRIBUTE = 'gen_ai.invoke_agent';
+
 // =============================================================================
 // OPENAI-SPECIFIC ATTRIBUTES
 // =============================================================================
diff --git a/packages/core/src/tracing/langgraph/constants.ts b/packages/core/src/tracing/langgraph/constants.ts
new file mode 100644
index 000000000000..add875f7b655
--- /dev/null
+++ b/packages/core/src/tracing/langgraph/constants.ts
@@ -0,0 +1,2 @@
+export const LANGGRAPH_INTEGRATION_NAME = 'LangGraph';
+export const LANGGRAPH_ORIGIN = 'auto.ai.langgraph';
diff --git a/packages/core/src/tracing/langgraph/index.ts b/packages/core/src/tracing/langgraph/index.ts
new file mode 100644
index 000000000000..65d315bf3f63
--- /dev/null
+++ b/packages/core/src/tracing/langgraph/index.ts
@@ -0,0 +1,157 @@
+import { captureException } from '../../exports';
+import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes';
+import { SPAN_STATUS_ERROR } from '../../tracing';
+import {
+  GEN_AI_AGENT_NAME_ATTRIBUTE,
+  GEN_AI_INVOKE_AGENT_OPERATION_ATTRIBUTE,
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_PIPELINE_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
+  GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
+} from '../ai/gen-ai-attributes';
+import { truncateGenAiMessages } from '../ai/messageTruncation';
+import type { LangChainMessage } from '../langchain/types';
+import { normalizeLangChainMessages } from '../langchain/utils';
+import { startSpan } from '../trace';
+import { LANGGRAPH_ORIGIN } from './constants';
+import type { CompiledGraph, LangGraphOptions } from './types';
+import { extractToolsFromCompiledGraph, setResponseAttributes } from './utils';
+
+/**
+ * Instruments StateGraph's compile method to create spans for agent creation and invocation
+ *
+ * Wraps the compile() method to:
+ * - Create a `gen_ai.create_agent` span when compile() is called
+ * - Automatically wrap the invoke() method on the returned compiled graph with a `gen_ai.invoke_agent` span
+ *
+ */
+export function instrumentStateGraphCompile(
+  originalCompile: (...args: unknown[]) => CompiledGraph,
+  options: LangGraphOptions,
+): (...args: unknown[]) => CompiledGraph {
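+  // A Proxy (rather than a plain wrapper function) keeps the patched compile()
+  // indistinguishable from the original: `this` binding, arity, and own properties are all
+  // preserved while only the call behavior is intercepted via the apply trap.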
+  return new Proxy(originalCompile, {
+    apply(target, thisArg, args: unknown[]): CompiledGraph {
+      return startSpan(
+        {
+          op: 'gen_ai.create_agent',
+          name: 'create_agent',
+          attributes: {
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGGRAPH_ORIGIN,
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent',
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent',
+          },
+        },
+        span => {
+          try {
+            const compiledGraph = Reflect.apply(target, thisArg, args);
+            const compileOptions = args.length > 0 ? (args[0] as Record<string, unknown>) : {};
+
+            // Extract graph name
+            if (compileOptions?.name && typeof compileOptions.name === 'string') {
+              span.setAttribute(GEN_AI_AGENT_NAME_ATTRIBUTE, compileOptions.name);
+              span.updateName(`create_agent ${compileOptions.name}`);
+            }
+
+            // Instrument agent invoke method on the compiled graph
+            const originalInvoke = compiledGraph.invoke;
+            if (originalInvoke && typeof originalInvoke === 'function') {
+              compiledGraph.invoke = instrumentCompiledGraphInvoke(
+                originalInvoke.bind(compiledGraph) as (...args: unknown[]) => Promise<unknown>,
+                compiledGraph,
+                compileOptions,
+                options,
+              ) as typeof originalInvoke;
+            }
+
+            return compiledGraph;
+          } catch (error) {
+            span.setStatus({ code: SPAN_STATUS_ERROR, message: 'internal_error' });
+            captureException(error, {
+              mechanism: {
+                handled: false,
+                type: 'auto.ai.langgraph.error',
+              },
+            });
+            throw error;
+          }
+        },
+      );
+    },
+  }) as (...args: unknown[]) => CompiledGraph;
+}
+
+/**
+ * Instruments CompiledGraph's invoke method to create spans for agent invocation
+ *
+ * Creates a `gen_ai.invoke_agent` span when invoke() is called
+ */
+function instrumentCompiledGraphInvoke(
+  originalInvoke: (...args: unknown[]) => Promise<unknown>,
+  graphInstance: CompiledGraph,
+  compileOptions: Record<string, unknown>,
+  options: LangGraphOptions,
+): (...args: unknown[]) => Promise<unknown> {
+  return new Proxy(originalInvoke, {
+    apply(target, thisArg, args: unknown[]): Promise<unknown> {
+      return startSpan(
+        {
+          op: 'gen_ai.invoke_agent',
+          name: 'invoke_agent',
+          attributes: {
+            [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: LANGGRAPH_ORIGIN,
+            [SEMANTIC_ATTRIBUTE_SENTRY_OP]: GEN_AI_INVOKE_AGENT_OPERATION_ATTRIBUTE,
+            [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
+          },
+        },
+        async span => {
+          try {
+            const graphName = compileOptions?.name;
+
+            if (graphName && typeof graphName === 'string') {
+              span.setAttribute(GEN_AI_PIPELINE_NAME_ATTRIBUTE, graphName);
+              span.setAttribute(GEN_AI_AGENT_NAME_ATTRIBUTE, graphName);
+              span.updateName(`invoke_agent ${graphName}`);
+            }
+
+            // Extract available tools from the graph instance
+            const tools = extractToolsFromCompiledGraph(graphInstance);
+            if (tools) {
+              span.setAttribute(GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, JSON.stringify(tools));
+            }
+
+            // Parse input messages
+            const recordInputs = options.recordInputs;
+            const recordOutputs = options.recordOutputs;
+            const inputMessages =
+              args.length > 0 ? ((args[0] as { messages?: LangChainMessage[] }).messages ?? []) : [];
+
+            if (inputMessages && recordInputs) {
+              const normalizedMessages = normalizeLangChainMessages(inputMessages);
+              const truncatedMessages = truncateGenAiMessages(normalizedMessages);
+              span.setAttribute(GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, JSON.stringify(truncatedMessages));
+            }
+
+            // Call original invoke
+            const result = await Reflect.apply(target, thisArg, args);
+
+            // Set response attributes
+            if (recordOutputs) {
+              setResponseAttributes(span, inputMessages ?? null, result);
+            }
+
+            return result;
+          } catch (error) {
+            span.setStatus({ code: SPAN_STATUS_ERROR, message: 'internal_error' });
+            captureException(error, {
+              mechanism: {
+                handled: false,
+                type: 'auto.ai.langgraph.error',
+              },
+            });
+            throw error;
+          }
+        },
+      );
+    },
+  }) as (...args: unknown[]) => Promise<unknown>;
+}
diff --git a/packages/core/src/tracing/langgraph/types.ts b/packages/core/src/tracing/langgraph/types.ts
new file mode 100644
index 000000000000..b16f9718c69e
--- /dev/null
+++ b/packages/core/src/tracing/langgraph/types.ts
@@ -0,0 +1,85 @@
+export interface LangGraphOptions {
+  /**
+   * Enable or disable input recording.
+   */
+  recordInputs?: boolean;
+  /**
+   * Enable or disable output recording.
+   */
+  recordOutputs?: boolean;
+}
+
+/**
+ * LangGraph Tool definition from lc_kwargs
+ */
+export interface LangGraphToolDefinition {
+  name?: string;
+  description?: string;
+  schema?: unknown;
+  func?: (...args: unknown[]) => unknown;
+}
+
+/**
+ * LangGraph Tool object (DynamicTool, DynamicStructuredTool, etc.)
+ */
+export interface LangGraphTool {
+  [key: string]: unknown;
+  lc_kwargs?: LangGraphToolDefinition;
+  name?: string;
+  description?: string;
+}
+
+/**
+ * LangGraph ToolNode with tools array
+ */
+export interface ToolNode {
+  [key: string]: unknown;
+  tools?: LangGraphTool[];
+}
+
+/**
+ * LangGraph PregelNode containing a ToolNode
+ */
+export interface PregelNode {
+  [key: string]: unknown;
+  runnable?: ToolNode;
+}
+
+/**
+ * LangGraph StateGraph builder nodes
+ */
+export interface StateGraphNodes {
+  [key: string]: unknown;
+  tools?: PregelNode;
+}
+
+/**
+ * LangGraph StateGraph builder
+ */
+export interface StateGraphBuilder {
+  [key: string]: unknown;
+  nodes?: StateGraphNodes;
+}
+
+/**
+ * Basic interface for compiled graph
+ */
+export interface CompiledGraph {
+  [key: string]: unknown;
+  invoke?: (...args: unknown[]) => Promise<unknown>;
+  name?: string;
+  graph_name?: string;
+  lc_kwargs?: {
+    [key: string]: unknown;
+    name?: string;
+  };
+  builder?: StateGraphBuilder;
+}
+
+/**
+ * LangGraph Integration interface for type safety
+ */
+export interface LangGraphIntegration {
+  name: string;
+  options: LangGraphOptions;
+}
diff --git a/packages/core/src/tracing/langgraph/utils.ts b/packages/core/src/tracing/langgraph/utils.ts
new file mode 100644
index 000000000000..4b1990058924
--- /dev/null
+++ b/packages/core/src/tracing/langgraph/utils.ts
@@ -0,0 +1,187 @@
+import type { Span } from '../../types-hoist/span';
+import {
+  GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
+  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
+  GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
+  GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../ai/gen-ai-attributes';
+import type { LangChainMessage } from '../langchain/types';
+import { normalizeLangChainMessages } from '../langchain/utils';
+import type { CompiledGraph, LangGraphTool } from './types';
+
+/**
+ * Extract tool calls from messages
+ */
+export function extractToolCalls(messages: Array<Record<string, unknown>> | null): unknown[] | null {
+  if (!messages || messages.length === 0) {
+    return null;
+  }
+
+  const toolCalls: unknown[] = [];
+
+  for (const message of messages) {
+    if (message && typeof message === 'object') {
+      const msgToolCalls = message.tool_calls;
+      if (msgToolCalls && Array.isArray(msgToolCalls)) {
+        toolCalls.push(...msgToolCalls);
+      }
+    }
+  }
+
+  return toolCalls.length > 0 ? toolCalls : null;
+}
+
+/**
+ * Extract token usage from a message's usage_metadata or response_metadata
+ * Returns token counts without setting span attributes
+ */
+export function extractTokenUsageFromMessage(message: LangChainMessage): {
+  inputTokens: number;
+  outputTokens: number;
+  totalTokens: number;
+} {
+  const msg = message as Record<string, unknown>;
+  let inputTokens = 0;
+  let outputTokens = 0;
+  let totalTokens = 0;
+
+  // Extract from usage_metadata (newer format)
+  if (msg.usage_metadata && typeof msg.usage_metadata === 'object') {
+    const usage = msg.usage_metadata as Record<string, unknown>;
+    if (typeof usage.input_tokens === 'number') {
+      inputTokens = usage.input_tokens;
+    }
+    if (typeof usage.output_tokens === 'number') {
+      outputTokens = usage.output_tokens;
+    }
+    if (typeof usage.total_tokens === 'number') {
+      totalTokens = usage.total_tokens;
+    }
+    return { inputTokens, outputTokens, totalTokens };
+  }
+
+  // Fallback: Extract from response_metadata.tokenUsage
+  if (msg.response_metadata && typeof msg.response_metadata === 'object') {
+    const metadata = msg.response_metadata as Record<string, unknown>;
+    if (metadata.tokenUsage && typeof metadata.tokenUsage === 'object') {
+      const tokenUsage = metadata.tokenUsage as Record<string, unknown>;
+      if (typeof tokenUsage.promptTokens === 'number') {
+        inputTokens = tokenUsage.promptTokens;
+      }
+      if (typeof tokenUsage.completionTokens === 'number') {
+        outputTokens = tokenUsage.completionTokens;
+      }
+      if (typeof tokenUsage.totalTokens === 'number') {
+        totalTokens = tokenUsage.totalTokens;
+      }
+    }
+  }
+
+  return { inputTokens, outputTokens, totalTokens };
+}
+
+/**
+ * Extract model and finish reason from a message's response_metadata
+ */
+export function extractModelMetadata(span: Span, message: LangChainMessage): void {
+  const msg = message as Record<string, unknown>;
+
+  if (msg.response_metadata && typeof msg.response_metadata === 'object') {
+    const metadata = msg.response_metadata as Record<string, unknown>;
+
+    if (metadata.model_name && typeof metadata.model_name === 'string') {
+      span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, metadata.model_name);
+    }
+
+    if (metadata.finish_reason && typeof metadata.finish_reason === 'string') {
+      span.setAttribute(GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, [metadata.finish_reason]);
+    }
+  }
+}
+
+/**
+ * Extract tools from compiled graph structure
+ *
+ * Tools are stored in: compiledGraph.builder.nodes.tools.runnable.tools
+ */
+export function extractToolsFromCompiledGraph(compiledGraph: CompiledGraph): unknown[] | null {
+  if (!compiledGraph.builder?.nodes?.tools?.runnable?.tools) {
+    return null;
+  }
+
+  const tools = compiledGraph.builder?.nodes?.tools?.runnable?.tools;
+
+  if (!tools || !Array.isArray(tools) || tools.length === 0) {
+    return null;
+  }
+
+  // Extract name, description, and schema from each tool's lc_kwargs
+  return tools.map((tool: LangGraphTool) => ({
+    name: tool.lc_kwargs?.name,
+    description: tool.lc_kwargs?.description,
+    schema: tool.lc_kwargs?.schema,
+  }));
+}
+
+/**
+ * Set response attributes on the span
+ */
+export function setResponseAttributes(span: Span, inputMessages: LangChainMessage[] | null, result: unknown): void {
+  // Extract messages from result
+  const resultObj = result as { messages?: LangChainMessage[] } | undefined;
+  const outputMessages = resultObj?.messages;
+
+  if (!outputMessages || !Array.isArray(outputMessages)) {
+    return;
+  }
+
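+  // MessagesAnnotation state accumulates messages, so invoke() resolves with the full
+  // history (inputs first); everything past the input count is what this run appended.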
+  // Get new messages (delta between input and output)
+  const inputCount = inputMessages?.length ?? 0;
+  const newMessages = outputMessages.length > inputCount ? outputMessages.slice(inputCount) : [];
+
+  if (newMessages.length === 0) {
+    return;
+  }
+
+  // Extract and set tool calls from new messages BEFORE normalization
+  // (normalization strips tool_calls, so we need to extract them first)
+  const toolCalls = extractToolCalls(newMessages as Array<Record<string, unknown>>);
+  if (toolCalls) {
+    span.setAttribute(GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, JSON.stringify(toolCalls));
+  }
+
+  // Normalize the new messages
+  const normalizedNewMessages = normalizeLangChainMessages(newMessages);
+  span.setAttribute(GEN_AI_RESPONSE_TEXT_ATTRIBUTE, JSON.stringify(normalizedNewMessages));
+
+  // Accumulate token usage across all messages
+  let totalInputTokens = 0;
+  let totalOutputTokens = 0;
+  let totalTokens = 0;
+
+  // Extract metadata from messages
+  for (const message of newMessages) {
+    // Accumulate token usage
+    const tokens = extractTokenUsageFromMessage(message);
+    totalInputTokens += tokens.inputTokens;
+    totalOutputTokens += tokens.outputTokens;
+    totalTokens += tokens.totalTokens;
+
+    // Extract model metadata (last message's metadata wins for model/finish_reason)
+    extractModelMetadata(span, message);
+  }
+
+  // Set accumulated token usage on span
+  if (totalInputTokens > 0) {
+    span.setAttribute(GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, totalInputTokens);
+  }
+  if (totalOutputTokens > 0) {
+    span.setAttribute(GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, totalOutputTokens);
+  }
+  if (totalTokens > 0) {
+    span.setAttribute(GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, totalTokens);
+  }
+}
diff --git a/packages/google-cloud-serverless/src/index.ts b/packages/google-cloud-serverless/src/index.ts
index 3aa7da1cbab9..d7cd08e5b14e 100644
--- a/packages/google-cloud-serverless/src/index.ts
+++ b/packages/google-cloud-serverless/src/index.ts
@@ -58,6 +58,7 @@ export {
   onUnhandledRejectionIntegration,
   openAIIntegration,
   langChainIntegration,
+  langGraphIntegration,
   modulesIntegration,
   contextLinesIntegration,
   nodeContextIntegration,
diff --git a/packages/node/src/index.ts b/packages/node/src/index.ts
index e469fd75d2d2..a03b31619472 100644
--- a/packages/node/src/index.ts
+++ b/packages/node/src/index.ts
@@ -28,6 +28,7 @@ export { openAIIntegration } from './integrations/tracing/openai';
 export { anthropicAIIntegration } from './integrations/tracing/anthropic-ai';
 export { googleGenAIIntegration } from './integrations/tracing/google-genai';
 export { langChainIntegration } from './integrations/tracing/langchain';
+export { langGraphIntegration } from './integrations/tracing/langgraph';
 export {
   launchDarklyIntegration,
   buildLaunchDarklyFlagUsedHandler,
diff --git a/packages/node/src/integrations/tracing/index.ts b/packages/node/src/integrations/tracing/index.ts
index b586941d6530..dcd2efa5595c 100644
--- a/packages/node/src/integrations/tracing/index.ts
+++ b/packages/node/src/integrations/tracing/index.ts
@@ -14,6 +14,7 @@ import { honoIntegration, instrumentHono } from './hono';
 import { instrumentKafka, kafkaIntegration } from './kafka';
 import { instrumentKoa, koaIntegration } from './koa';
 import { instrumentLangChain, langChainIntegration } from './langchain';
+import { instrumentLangGraph, langGraphIntegration } from './langgraph';
 import { instrumentLruMemoizer, lruMemoizerIntegration } from './lrumemoizer';
 import { instrumentMongo, mongoIntegration } from './mongo';
 import { instrumentMongoose, mongooseIntegration } from './mongoose';
@@ -54,6 +55,7 @@ export function getAutoPerformanceIntegrations(): Integration[] {
     // AI providers
     // LangChain must come first to disable AI provider integrations before they instrument
     langChainIntegration(),
+    langGraphIntegration(),
     vercelAIIntegration(),
     openAIIntegration(),
     anthropicAIIntegration(),
@@ -98,5 +100,6 @@ export function getOpenTelemetryInstrumentationToPreload(): (((options?: any) =>
     instrumentFirebase,
     instrumentAnthropicAi,
     instrumentGoogleGenAI,
+    instrumentLangGraph,
   ];
 }
diff --git a/packages/node/src/integrations/tracing/langgraph/index.ts b/packages/node/src/integrations/tracing/langgraph/index.ts
new file mode 100644
index 000000000000..c302582e5908
--- /dev/null
+++ b/packages/node/src/integrations/tracing/langgraph/index.ts
@@ -0,0 +1,88 @@
+import type { IntegrationFn, LangGraphOptions } from '@sentry/core';
+import { defineIntegration, LANGGRAPH_INTEGRATION_NAME } from '@sentry/core';
+import { generateInstrumentOnce } from '@sentry/node-core';
+import { SentryLangGraphInstrumentation } from './instrumentation';
+
+export const instrumentLangGraph = generateInstrumentOnce<LangGraphOptions>(
+  LANGGRAPH_INTEGRATION_NAME,
+  options => new SentryLangGraphInstrumentation(options),
+);
+
+const _langGraphIntegration = ((options: LangGraphOptions = {}) => {
+  return {
+    name: LANGGRAPH_INTEGRATION_NAME,
+    setupOnce() {
+      instrumentLangGraph(options);
+    },
+  };
+}) satisfies IntegrationFn;
+
+/**
+ * Adds Sentry tracing instrumentation for LangGraph.
+ *
+ * This integration is enabled by default.
+ *
+ * When configured, this integration automatically instruments LangGraph StateGraph and compiled graph instances
+ * to capture telemetry data following OpenTelemetry Semantic Conventions for Generative AI.
+ *
+ * @example
+ * ```javascript
+ * import * as Sentry from '@sentry/node';
+ *
+ * Sentry.init({
+ *   integrations: [Sentry.langGraphIntegration()],
+ * });
+ * ```
+ *
+ * ## Options
+ *
+ * - `recordInputs`: Whether to record input messages (default: respects `sendDefaultPii` client option)
+ * - `recordOutputs`: Whether to record response text (default: respects `sendDefaultPii` client option)
+ *
+ * ### Default Behavior
+ *
+ * By default, the integration will:
+ * - Record inputs and outputs ONLY if `sendDefaultPii` is set to `true` in your Sentry client options
+ * - Otherwise, inputs and outputs are NOT recorded unless explicitly enabled
+ *
+ * @example
+ * ```javascript
+ * // Record inputs and outputs when sendDefaultPii is false
+ * Sentry.init({
+ *   integrations: [
+ *     Sentry.langGraphIntegration({
+ *       recordInputs: true,
+ *       recordOutputs: true
+ *     })
+ *   ],
+ * });
+ *
+ * // Never record inputs/outputs regardless of sendDefaultPii
+ * Sentry.init({
+ *   sendDefaultPii: true,
+ *   integrations: [
+ *     Sentry.langGraphIntegration({
+ *       recordInputs: false,
+ *       recordOutputs: false
+ *     })
+ *   ],
+ * });
+ * ```
+ *
+ * ## Captured Operations
+ *
+ * The integration captures the following LangGraph operations:
+ * - **Agent Creation** (`StateGraph.compile()`) - Creates a `gen_ai.create_agent` span
+ * - **Agent Invocation** (`CompiledGraph.invoke()`) - Creates a `gen_ai.invoke_agent` span
+ *
+ * ## Captured Data
+ *
+ * When `recordInputs` and `recordOutputs` are enabled, the integration captures:
+ * - Input messages from the graph state
+ * - Output messages and LLM responses
+ * - Tool calls made during agent execution
+ * - Agent and graph names
+ * - Available tools configured in the graph
+ *
+ */
+export const langGraphIntegration = defineIntegration(_langGraphIntegration);
diff --git a/packages/node/src/integrations/tracing/langgraph/instrumentation.ts b/packages/node/src/integrations/tracing/langgraph/instrumentation.ts
new file mode 100644
index 000000000000..ca1406e3e493
--- /dev/null
+++ b/packages/node/src/integrations/tracing/langgraph/instrumentation.ts
@@ -0,0 +1,90 @@
+import {
+  type InstrumentationConfig,
+  type InstrumentationModuleDefinition,
+  InstrumentationBase,
+  InstrumentationNodeModuleDefinition,
+  InstrumentationNodeModuleFile,
+} from '@opentelemetry/instrumentation';
+import type { CompiledGraph, LangGraphOptions } from '@sentry/core';
+import { getClient, instrumentStateGraphCompile, SDK_VERSION } from '@sentry/core';
+
+const supportedVersions = ['>=0.0.0 <2.0.0'];
+
+type LangGraphInstrumentationOptions = InstrumentationConfig & LangGraphOptions;
+
+/**
+ * Represents the patched shape of the LangGraph module export.
+ */
+interface PatchedModuleExports {
+  [key: string]: unknown;
+  StateGraph?: abstract new (...args: unknown[]) => unknown;
+}
+
+/**
+ * Sentry LangGraph instrumentation using OpenTelemetry.
+ */
+export class SentryLangGraphInstrumentation extends InstrumentationBase<LangGraphInstrumentationOptions> {
+  public constructor(config: LangGraphInstrumentationOptions = {}) {
+    super('@sentry/instrumentation-langgraph', SDK_VERSION, config);
+  }
+
+  /**
+   * Initializes the instrumentation by defining the modules to be patched.
+   */
+  public init(): InstrumentationModuleDefinition {
+    const module = new InstrumentationNodeModuleDefinition(
+      '@langchain/langgraph',
+      supportedVersions,
+      this._patch.bind(this),
+      exports => exports,
+      [
+        new InstrumentationNodeModuleFile(
+          /**
+           * In CJS, LangGraph packages re-export from dist/index.cjs files.
+           * Patching only the root module sometimes misses the real implementation or
+           * gets overwritten when that file is loaded. We add a file-level patch so that
+           * _patch runs again on the concrete implementation.
+           */
+          '@langchain/langgraph/dist/index.cjs',
+          supportedVersions,
+          this._patch.bind(this),
+          exports => exports,
+        ),
+      ],
+    );
+    return module;
+  }
+
+  /**
+   * Core patch logic applying instrumentation to the LangGraph module.
+   */
+  private _patch(exports: PatchedModuleExports): PatchedModuleExports | void {
+    const client = getClient();
+    const defaultPii = Boolean(client?.getOptions().sendDefaultPii);
+
+    const config = this.getConfig();
+    // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
+    const recordInputs = config.recordInputs ?? defaultPii;
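+    // `??` keeps an explicit `recordInputs: false` / `recordOutputs: false` in effect even
+    // when sendDefaultPii is true; the PII default only fills in unset options.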
+    // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
+    const recordOutputs = config.recordOutputs ?? defaultPii;
+
+    const options: LangGraphOptions = {
+      recordInputs,
+      recordOutputs,
+    };
+
+    // Patch StateGraph.compile to instrument both compile() and invoke()
+    if (exports.StateGraph && typeof exports.StateGraph === 'function') {
+      const StateGraph = exports.StateGraph as {
+        prototype: Record<string, unknown>;
+      };
+
+      StateGraph.prototype.compile = instrumentStateGraphCompile(
+        StateGraph.prototype.compile as (...args: unknown[]) => CompiledGraph,
+        options,
+      );
+    }
+
+    return exports;
+  }
+}
diff --git a/yarn.lock b/yarn.lock
index 99e434f5efff..0b2a0f0f31dc 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -4931,6 +4931,33 @@
     zod "^3.25.32"
     zod-to-json-schema "^3.22.3"
 
+"@langchain/langgraph-checkpoint@~0.0.17":
+  version "0.0.18"
+  resolved "https://registry.npmjs.org/@langchain/langgraph-checkpoint/-/langgraph-checkpoint-0.0.18.tgz#2f7a9cdeda948ccc8d312ba9463810709d71d0b8"
+  integrity sha512-IS7zJj36VgY+4pf8ZjsVuUWef7oTwt1y9ylvwu0aLuOn1d0fg05Om9DLm3v2GZ2Df6bhLV1kfWAM0IAl9O5rQQ==
+  dependencies:
+    uuid "^10.0.0"
+
+"@langchain/langgraph-sdk@~0.0.32":
+  version "0.0.112"
+  resolved "https://registry.npmjs.org/@langchain/langgraph-sdk/-/langgraph-sdk-0.0.112.tgz#3186919b60e3381aa8aa32ea9b9c39df1f02a9fd"
+  integrity sha512-/9W5HSWCqYgwma6EoOspL4BGYxGxeJP6lIquPSF4FA0JlKopaUv58ucZC3vAgdJyCgg6sorCIV/qg7SGpEcCLw==
+  dependencies:
+    "@types/json-schema" "^7.0.15"
+    p-queue "^6.6.2"
+    p-retry "4"
+    uuid "^9.0.0"
+
+"@langchain/langgraph@^0.2.32":
+  version "0.2.74"
+  resolved "https://registry.npmjs.org/@langchain/langgraph/-/langgraph-0.2.74.tgz#37367a1e8bafda3548037a91449a69a84f285def"
+  integrity sha512-oHpEi5sTZTPaeZX1UnzfM2OAJ21QGQrwReTV6+QnX7h8nDCBzhtipAw1cK616S+X8zpcVOjgOtJuaJhXa4mN8w==
+  dependencies:
+    "@langchain/langgraph-checkpoint" "~0.0.17"
+    "@langchain/langgraph-sdk" "~0.0.32"
+    uuid "^10.0.0"
+    zod "^3.23.8"
+
 "@leichtgewicht/ip-codec@^2.0.1":
   version "2.0.4"
   resolved "https://registry.yarnpkg.com/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz#b2ac626d6cb9c8718ab459166d4bb405b8ffa78b"