Commit 8e05349

fix(ai-sdk): Add conversion to opentelemetry semantic convention (#845)
1 parent 0d3f3d8 commit 8e05349

File tree

77 files changed: +1968 −1168 lines changed


packages/ai-semantic-conventions/package.json

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@
   },
   "dependencies": {
     "@opentelemetry/api": "^1.9.0",
-    "@opentelemetry/semantic-conventions": "^1.36.0"
+    "@opentelemetry/semantic-conventions": "^1.38.0"
   },
   "homepage": "https://github.com/traceloop/openllmetry-js/tree/main/packages/ai-semantic-conventions",
   "gitHead": "ef1e70d6037f7b5c061056ef2be16e3f55f02ed5"

packages/ai-semantic-conventions/src/SemanticAttributes.ts

Lines changed: 1 addition & 22 deletions
@@ -14,34 +14,13 @@
  * limitations under the License.
  */
 
-import {
-  ATTR_GEN_AI_USAGE_INPUT_TOKENS,
-  ATTR_GEN_AI_USAGE_OUTPUT_TOKENS,
-  // @ts-expect-error - Using exports path that TypeScript doesn't recognize but works at runtime
-} from "@opentelemetry/semantic-conventions/incubating";
-
 export const SpanAttributes = {
-  LLM_SYSTEM: "gen_ai.system",
-  LLM_REQUEST_MODEL: "gen_ai.request.model",
-  LLM_REQUEST_MAX_TOKENS: "gen_ai.request.max_tokens",
-  LLM_REQUEST_TEMPERATURE: "gen_ai.request.temperature",
-  LLM_REQUEST_TOP_P: "gen_ai.request.top_p",
-  LLM_PROMPTS: "gen_ai.prompt",
-  LLM_COMPLETIONS: "gen_ai.completion",
-  LLM_INPUT_MESSAGES: "gen_ai.input.messages",
-  LLM_OUTPUT_MESSAGES: "gen_ai.output.messages",
-  LLM_RESPONSE_MODEL: "gen_ai.response.model",
-  LLM_USAGE_PROMPT_TOKENS: "gen_ai.usage.prompt_tokens",
-  LLM_USAGE_COMPLETION_TOKENS: "gen_ai.usage.completion_tokens",
-  LLM_USAGE_INPUT_TOKENS: ATTR_GEN_AI_USAGE_INPUT_TOKENS,
-  LLM_USAGE_OUTPUT_TOKENS: ATTR_GEN_AI_USAGE_OUTPUT_TOKENS,
+  // Attributes not yet in @opentelemetry/semantic-conventions
   GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS:
     "gen_ai.usage.cache_creation_input_tokens",
   GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS: "gen_ai.usage.cache_read_input_tokens",
   GEN_AI_USAGE_REASONING_TOKENS: "gen_ai.usage.reasoning_tokens",
 
-  GEN_AI_AGENT_NAME: "gen_ai.agent.name",
-
   // LLM
   LLM_REQUEST_TYPE: "llm.request.type",
   LLM_USAGE_TOTAL_TOKENS: "llm.usage.total_tokens",
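To make the split concrete: after this change, spec-standardized GenAI attribute names come straight from the upstream package's incubating entry point, and SpanAttributes keeps only names the OpenTelemetry spec has not adopted yet. A minimal consumer-side sketch (the console.log demo is illustrative; the string values follow from the mappings removed above):

import {
  ATTR_GEN_AI_USAGE_INPUT_TOKENS,
  ATTR_GEN_AI_USAGE_OUTPUT_TOKENS,
} from "@opentelemetry/semantic-conventions/incubating";
import { SpanAttributes } from "@traceloop/ai-semantic-conventions";

// Upstream constants resolve to the spec-defined attribute names:
console.log(ATTR_GEN_AI_USAGE_INPUT_TOKENS); // "gen_ai.usage.input_tokens"
console.log(ATTR_GEN_AI_USAGE_OUTPUT_TOKENS); // "gen_ai.usage.output_tokens"

// Vendor-specific names the spec lacks stay on SpanAttributes:
console.log(SpanAttributes.GEN_AI_USAGE_REASONING_TOKENS); // "gen_ai.usage.reasoning_tokens"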

packages/ai-semantic-conventions/tsconfig.json

Lines changed: 3 additions & 1 deletion
@@ -2,7 +2,9 @@
   "extends": "../../tsconfig.base.json",
   "compilerOptions": {
     "outDir": "dist",
-    "rootDir": "."
+    "rootDir": ".",
+    "moduleResolution": "node16",
+    "module": "node16"
   },
   "files": [],
   "include": ["src/**/*.ts", "test/**/*.ts"],

packages/instrumentation-anthropic/package.json

Lines changed: 2 additions & 2 deletions
@@ -10,7 +10,7 @@
     "build": "rollup -c",
     "lint": "eslint .",
     "lint:fix": "eslint . --fix",
-    "test": "ts-mocha -p tsconfig.json 'test/**/*.test.ts'"
+    "test": "ts-mocha -p tsconfig.test.json 'test/**/*.test.ts'"
   },
   "keywords": [
     "opentelemetry",
@@ -41,7 +41,7 @@
     "@opentelemetry/api": "^1.9.0",
     "@opentelemetry/core": "^2.0.1",
     "@opentelemetry/instrumentation": "^0.203.0",
-    "@opentelemetry/semantic-conventions": "^1.36.0",
+    "@opentelemetry/semantic-conventions": "^1.38.0",
     "@traceloop/ai-semantic-conventions": "workspace:*",
     "tslib": "^2.8.1"
   },

packages/instrumentation-anthropic/src/instrumentation.ts

Lines changed: 35 additions & 31 deletions
@@ -31,6 +31,18 @@ import {
   CONTEXT_KEY_ALLOW_TRACE_CONTENT,
   SpanAttributes,
 } from "@traceloop/ai-semantic-conventions";
+import {
+  ATTR_GEN_AI_COMPLETION,
+  ATTR_GEN_AI_PROMPT,
+  ATTR_GEN_AI_REQUEST_MAX_TOKENS,
+  ATTR_GEN_AI_REQUEST_MODEL,
+  ATTR_GEN_AI_REQUEST_TEMPERATURE,
+  ATTR_GEN_AI_REQUEST_TOP_P,
+  ATTR_GEN_AI_RESPONSE_MODEL,
+  ATTR_GEN_AI_SYSTEM,
+  ATTR_GEN_AI_USAGE_COMPLETION_TOKENS,
+  ATTR_GEN_AI_USAGE_PROMPT_TOKENS,
+} from "@opentelemetry/semantic-conventions/incubating";
 import { AnthropicInstrumentationConfig } from "./types";
 import { version } from "../package.json";
 import type * as anthropic from "@anthropic-ai/sdk";
@@ -204,14 +216,14 @@ export class AnthropicInstrumentation extends InstrumentationBase {
     };
   }): Span {
     const attributes: Attributes = {
-      [SpanAttributes.LLM_SYSTEM]: "Anthropic",
+      [ATTR_GEN_AI_SYSTEM]: "Anthropic",
       [SpanAttributes.LLM_REQUEST_TYPE]: type,
     };
 
     try {
-      attributes[SpanAttributes.LLM_REQUEST_MODEL] = params.model;
-      attributes[SpanAttributes.LLM_REQUEST_TEMPERATURE] = params.temperature;
-      attributes[SpanAttributes.LLM_REQUEST_TOP_P] = params.top_p;
+      attributes[ATTR_GEN_AI_REQUEST_MODEL] = params.model;
+      attributes[ATTR_GEN_AI_REQUEST_TEMPERATURE] = params.temperature;
+      attributes[ATTR_GEN_AI_REQUEST_TOP_P] = params.top_p;
       attributes[SpanAttributes.LLM_TOP_K] = params.top_k;
 
       // Handle thinking parameters (for beta messages)
@@ -223,10 +235,10 @@ export class AnthropicInstrumentation extends InstrumentationBase {
       }
 
       if (type === "completion") {
-        attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] =
+        attributes[ATTR_GEN_AI_REQUEST_MAX_TOKENS] =
           params.max_tokens_to_sample;
       } else {
-        attributes[SpanAttributes.LLM_REQUEST_MAX_TOKENS] = params.max_tokens;
+        attributes[ATTR_GEN_AI_REQUEST_MAX_TOKENS] = params.max_tokens;
       }
 
       if (
@@ -244,8 +256,8 @@ export class AnthropicInstrumentation extends InstrumentationBase {
 
         // If a system prompt is provided, it should always be first
         if ("system" in params && params.system !== undefined) {
-          attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`] = "system";
-          attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`] =
+          attributes[`${ATTR_GEN_AI_PROMPT}.0.role`] = "system";
+          attributes[`${ATTR_GEN_AI_PROMPT}.0.content`] =
             typeof params.system === "string"
               ? params.system
               : JSON.stringify(params.system);
@@ -254,21 +266,19 @@ export class AnthropicInstrumentation extends InstrumentationBase {
 
         params.messages.forEach((message, index) => {
           const currentIndex = index + promptIndex;
-          attributes[`${SpanAttributes.LLM_PROMPTS}.${currentIndex}.role`] =
+          attributes[`${ATTR_GEN_AI_PROMPT}.${currentIndex}.role`] =
             message.role;
           if (typeof message.content === "string") {
-            attributes[
-              `${SpanAttributes.LLM_PROMPTS}.${currentIndex}.content`
-            ] = (message.content as string) || "";
+            attributes[`${ATTR_GEN_AI_PROMPT}.${currentIndex}.content`] =
+              (message.content as string) || "";
           } else {
-            attributes[
-              `${SpanAttributes.LLM_PROMPTS}.${currentIndex}.content`
-            ] = JSON.stringify(message.content);
+            attributes[`${ATTR_GEN_AI_PROMPT}.${currentIndex}.content`] =
+              JSON.stringify(message.content);
           }
         });
       } else {
-        attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`] = "user";
-        attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`] = params.prompt;
+        attributes[`${ATTR_GEN_AI_PROMPT}.0.role`] = "user";
+        attributes[`${ATTR_GEN_AI_PROMPT}.0.content`] = params.prompt;
       }
     }
   } catch (e) {
@@ -477,46 +487,40 @@ export class AnthropicInstrumentation extends InstrumentationBase {
     result: Completion;
   }) {
     try {
-      span.setAttribute(SpanAttributes.LLM_RESPONSE_MODEL, result.model);
+      span.setAttribute(ATTR_GEN_AI_RESPONSE_MODEL, result.model);
       if (type === "chat" && result.usage) {
         span.setAttribute(
           SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
           result.usage?.input_tokens + result.usage?.output_tokens,
         );
         span.setAttribute(
-          SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
+          ATTR_GEN_AI_USAGE_COMPLETION_TOKENS,
          result.usage?.output_tokens,
         );
         span.setAttribute(
-          SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
+          ATTR_GEN_AI_USAGE_PROMPT_TOKENS,
          result.usage?.input_tokens,
        );
       }
 
       if (result.stop_reason) {
         span.setAttribute(
-          `${SpanAttributes.LLM_COMPLETIONS}.0.finish_reason`,
+          `${ATTR_GEN_AI_COMPLETION}.0.finish_reason`,
           result.stop_reason,
         );
       }
 
       if (this._shouldSendPrompts()) {
         if (type === "chat") {
+          span.setAttribute(`${ATTR_GEN_AI_COMPLETION}.0.role`, "assistant");
           span.setAttribute(
-            `${SpanAttributes.LLM_COMPLETIONS}.0.role`,
-            "assistant",
-          );
-          span.setAttribute(
-            `${SpanAttributes.LLM_COMPLETIONS}.0.content`,
+            `${ATTR_GEN_AI_COMPLETION}.0.content`,
             JSON.stringify(result.content),
           );
         } else {
+          span.setAttribute(`${ATTR_GEN_AI_COMPLETION}.0.role`, "assistant");
           span.setAttribute(
-            `${SpanAttributes.LLM_COMPLETIONS}.0.role`,
-            "assistant",
-          );
-          span.setAttribute(
-            `${SpanAttributes.LLM_COMPLETIONS}.0.content`,
+            `${ATTR_GEN_AI_COMPLETION}.0.content`,
             result.completion,
           );
         }
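Since each ATTR_GEN_AI_* constant resolves to the same gen_ai.* string the old SpanAttributes entries hard-coded, the attribute keys emitted on spans are unchanged; only their source of truth moves upstream. A minimal sketch of the resulting span shape (the tracer name, model string, and token count are made-up example values, not the instrumentation's actual code path):

import { trace } from "@opentelemetry/api";
import {
  ATTR_GEN_AI_REQUEST_MODEL,
  ATTR_GEN_AI_SYSTEM,
  ATTR_GEN_AI_USAGE_PROMPT_TOKENS,
} from "@opentelemetry/semantic-conventions/incubating";

const span = trace.getTracer("demo").startSpan("anthropic.chat");
span.setAttribute(ATTR_GEN_AI_SYSTEM, "Anthropic"); // key: "gen_ai.system"
span.setAttribute(ATTR_GEN_AI_REQUEST_MODEL, "claude-sonnet-4"); // key: "gen_ai.request.model"
span.setAttribute(ATTR_GEN_AI_USAGE_PROMPT_TOKENS, 42); // key: "gen_ai.usage.prompt_tokens"
span.end();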
