Commit ab61ac8

docs: Update AI SDK tracing docs for Next.js (#933)
1 parent 614162a commit ab61ac8


docs/observability/how_to_guides/trace_with_vercel_ai_sdk.mdx

Lines changed: 52 additions & 52 deletions
@@ -234,75 +234,75 @@ The resulting trace will look [like this](https://smith.langchain.com/public/296
 
 ### Next.js
 
-The [default OTEL setup](https://nextjs.org/docs/app/api-reference/file-conventions/instrumentation) for Next.js will trace all routes, including
-those that do not contain LLM traces. We instead suggest manually instrumenting specific routes by creating and passing a tracer as shown below:
+First, install the [`@vercel/otel`](https://www.npmjs.com/package/@vercel/otel) package:
 
-```ts
-import { generateText } from "ai";
-import { openai } from "@ai-sdk/openai";
-import { traceable } from "langsmith/traceable";
+<CodeTabs
+  groupId="client-language"
+  tabs={[
+    {
+      value: "npm",
+      label: "npm",
+      language: "bash",
+      content: `npm install @vercel/otel`,
+    },
+    {
+      value: "yarn",
+      label: "yarn",
+      language: "bash",
+      content: `yarn add @vercel/otel`,
+    },
+    {
+      value: "pnpm",
+      label: "pnpm",
+      language: "bash",
+      content: `pnpm add @vercel/otel`,
+    },
+  ]}
+/>
+
+Then, set up an [`instrumentation.ts`](https://nextjs.org/docs/app/guides/instrumentation) file in your root directory.
+Call `initializeOTEL` and pass the resulting `DEFAULT_LANGSMITH_SPAN_PROCESSOR` into the `spanProcessors` field of your `registerOTel(...)` call.
+It should look something like this:
 
+```ts
+import { registerOTel } from "@vercel/otel";
 import { initializeOTEL } from "langsmith/experimental/otel/setup";
-import { LangSmithOTLPTraceExporter } from "langsmith/experimental/otel/exporter";
-import {
-  BatchSpanProcessor,
-  BasicTracerProvider,
-} from "@opentelemetry/sdk-trace-base";
-import { AsyncHooksContextManager } from "@opentelemetry/context-async-hooks";
-import { context } from "@opentelemetry/api";
 
-import { after } from "next/server";
+const { DEFAULT_LANGSMITH_SPAN_PROCESSOR } = initializeOTEL({});
 
-const exporter = new LangSmithOTLPTraceExporter();
-const processor = new BatchSpanProcessor(exporter);
-const contextManager = new AsyncHooksContextManager();
+export function register() {
+  registerOTel({
+    serviceName: "your-project-name",
+    spanProcessors: [DEFAULT_LANGSMITH_SPAN_PROCESSOR],
+  });
+}
+```
 
-contextManager.enable();
-context.setGlobalContextManager(contextManager);
+And finally, in your API routes, call `initializeOTEL` as well and add an `experimental_telemetry` field to your AI SDK calls:
 
-const provider = new BasicTracerProvider({
-  spanProcessors: [processor],
-});
+```ts
+import { generateText } from "ai";
+import { openai } from "@ai-sdk/openai";
 
-// Pass instantiated provider and context manager to LangSmith
-initializeOTEL({
-  globalTracerProvider: provider,
-  globalContextManager: contextManager,
-});
+import { initializeOTEL } from "langsmith/experimental/otel/setup";
 
-// highlight-next-line
-const tracer = provider.getTracer("ai-sdk-telemetry");
+initializeOTEL();
 
 export async function GET() {
-  after(async () => {
-    await provider.shutdown();
-  });
-
-  const wrappedText = traceable(
-    async (content: string) => {
-      const { text } = await generateText({
-        model: openai("gpt-4.1-nano"),
-        messages: [{ role: "user", content }],
-        experimental_telemetry: {
-          isEnabled: true,
-          // highlight-next-line
-          tracer,
-        },
-        maxSteps: 10,
-      });
-
-      return { text };
+  const { text } = await generateText({
+    model: openai("gpt-4.1-nano"),
+    messages: [{ role: "user", content: "Why is the sky blue?" }],
+    experimental_telemetry: {
+      isEnabled: true,
     },
-    // highlight-next-line
-    { name: "parentTraceable", tracer }
-  );
-
-  const { text } = await wrappedText("Why is the sky blue?");
+  });
 
   return new Response(text);
 }
 ```
 
+You can also wrap parts of your code in `traceables` for more granularity.
+
 ### Sentry
 
 If you're using Sentry, you can attach the LangSmith trace exporter to Sentry's default OpenTelemetry instrumentation as shown in the example below.

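For the note at the end of the diff about wrapping parts of your code in traceables, here is a minimal sketch of what that might look like in an API route under the new setup. It is not part of the commit; the `answerQuestion` name and the route shape are illustrative, and it simply combines the `traceable` pattern from the removed code with the new `initializeOTEL()` call:

```ts
// Hypothetical route handler: wrapping the LLM call in a traceable groups
// everything inside it under a named parent run in LangSmith.
// Assumes the instrumentation.ts setup from the diff above is in place.
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

import { initializeOTEL } from "langsmith/experimental/otel/setup";
import { traceable } from "langsmith/traceable";

initializeOTEL();

// "answerQuestion" is an illustrative name for the parent run.
const answerQuestion = traceable(
  async (content: string) => {
    const { text } = await generateText({
      model: openai("gpt-4.1-nano"),
      messages: [{ role: "user", content }],
      experimental_telemetry: {
        isEnabled: true,
      },
    });
    return text;
  },
  { name: "answerQuestion" }
);

export async function GET() {
  const text = await answerQuestion("Why is the sky blue?");
  return new Response(text);
}
```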