Skip to content

Commit 5e6575a

Browse files
committed
descriptions
1 parent 190575c commit 5e6575a

File tree

2 files changed

+46
-50
lines changed

2 files changed

+46
-50
lines changed

docs/platforms/python/tracing/instrumentation/custom-instrumentation/agents-module.mdx

Lines changed: 13 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -85,11 +85,11 @@ This span wraps the execution of a tool.
8585
- `gen_ai.request.presence_penalty`: Model configuration
8686
- `gen_ai.request.temperature`: Model configuration
8787
- `gen_ai.request.top_p`: Model configuration
88-
- `gen_ai.tool.description`:
89-
- `gen_ai.tool.input`: \{"max":10\}
90-
- `gen_ai.tool.name:`: "random_number"
91-
- `gen_ai.tool.output`:
92-
- `gen_ai.tool.type`:
88+
- `gen_ai.tool.description`: Description of the tool executed
89+
- `gen_ai.tool.input`: Input that was given to the executed tool. (Example: \{"max":10\})
90+
- `gen_ai.tool.name`: Name of the tool executed. (Example: "random_number")
91+
- `gen_ai.tool.output`: The output from the tool.
92+
- `gen_ai.tool.type`: The type of the tool. Can be `function`, `extension`, or `datastore`.
9393

9494
### AI Client Span
9595

@@ -98,20 +98,16 @@ This span wraps the request to an LLM.
9898
- `span.op` = `"gen_ai.{gen_ai.operation.name}"` (Example: `"gen_ai.chat"`)
9999
- `span.name` = `"{gen_ai.operation.name} {model.name}"` (Example: `"chat gpt-4o-mini"`)
100100
- Span attributes:
101-
- `gen_ai.request.available_tools`
101+
- `gen_ai.request.available_tools`: Array of objects describing the available tools.
102+
- `gen_ai.request.messages`: Array of objects describing the messages sent to the LLM (Each object has the format `{ role:"", content:""}` where role can be `user`, `assistant`, or `system` and `content` can either be a string or an array of objects.)
103+
- `gen_ai.response.tool_calls`: Array of objects returned from the LLM with information about what tools need to be called.
102104
- `gen_ai.request.frequency_penalty`
103105
- `gen_ai.request.max_tokens`
104-
- `gen_ai.request.messages`
105-
- `gen_ai.request.model`
106106
- `gen_ai.request.presence_penalty`
107107
- `gen_ai.request.temperature`
108108
- `gen_ai.request.top_p`
109-
- `gen_ai.response.tool_calls`
110-
- `gen_ai.system`
111-
- `gen_ai.system.message`
112-
- `gen_ai.usage.input_tokens`
113-
- `gen_ai.usage.input_tokens.cached`
114-
- `gen_ai.usage.output_tokens`
115-
- `gen_ai.usage.output_tokens.reasoning`
116-
- `gen_ai.usage.total_tokens`
117-
- `gen_ai.user.message`
109+
- `gen_ai.usage.input_tokens`: Input tokens used for the request (excluding cached tokens).
110+
- `gen_ai.usage.input_tokens.cached`: Cached input tokens used for the request.
111+
- `gen_ai.usage.output_tokens`: Output tokens used for the request.
112+
- `gen_ai.usage.output_tokens.reasoning`: Tokens used for reasoning.
113+
- `gen_ai.usage.total_tokens`: Total number of tokens used in the request.

src/instrumentation.ts

Lines changed: 33 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1,37 +1,37 @@
11
import * as Sentry from '@sentry/nextjs';
22

3-
export function register() {
4-
if (process.env.NEXT_RUNTIME === 'nodejs') {
5-
Sentry.init({
6-
dsn: process.env.SENTRY_DSN,
7-
tracesSampleRate: 1,
8-
debug: false,
9-
environment: process.env.NODE_ENV === 'development' ? 'development' : undefined,
10-
spotlight: process.env.NODE_ENV === 'development',
11-
});
12-
}
3+
// export function register() {
4+
// if (process.env.NEXT_RUNTIME === 'nodejs') {
5+
// Sentry.init({
6+
// dsn: process.env.SENTRY_DSN,
7+
// tracesSampleRate: 1,
8+
// debug: false,
9+
// environment: process.env.NODE_ENV === 'development' ? 'development' : undefined,
10+
// spotlight: process.env.NODE_ENV === 'development',
11+
// });
12+
// }
1313

14-
if (process.env.NEXT_RUNTIME === 'edge') {
15-
Sentry.init({
16-
dsn: process.env.SENTRY_DSN,
17-
tracesSampleRate: 1,
18-
debug: false,
19-
environment: process.env.NODE_ENV === 'development' ? 'development' : undefined,
20-
// temporary change for investigating edge middleware tx names
21-
beforeSendTransaction(event) {
22-
if (
23-
event.transaction?.includes('middleware GET') &&
24-
event.contexts?.trace?.data
25-
) {
26-
event.contexts.trace.data = {
27-
...event.contexts.trace.data,
28-
'sentry.source': 'custom',
29-
};
30-
}
31-
return event;
32-
},
33-
});
34-
}
35-
}
14+
// if (process.env.NEXT_RUNTIME === 'edge') {
15+
// Sentry.init({
16+
// dsn: process.env.SENTRY_DSN,
17+
// tracesSampleRate: 1,
18+
// debug: false,
19+
// environment: process.env.NODE_ENV === 'development' ? 'development' : undefined,
20+
// // temporary change for investigating edge middleware tx names
21+
// beforeSendTransaction(event) {
22+
// if (
23+
// event.transaction?.includes('middleware GET') &&
24+
// event.contexts?.trace?.data
25+
// ) {
26+
// event.contexts.trace.data = {
27+
// ...event.contexts.trace.data,
28+
// 'sentry.source': 'custom',
29+
// };
30+
// }
31+
// return event;
32+
// },
33+
// });
34+
// }
35+
// }
3636

37-
export const onRequestError = Sentry.captureRequestError;
37+
// export const onRequestError = Sentry.captureRequestError;

0 commit comments

Comments
 (0)