Skip to content

Commit a69be29

Browse files
committed
Regenerate semconv
1 parent c6ac4ac commit a69be29

File tree

2 files changed

+200
-46
lines changed

2 files changed

+200
-46
lines changed

packages/instrumentation-openai/src/instrumentation.ts

Lines changed: 10 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -61,11 +61,6 @@ import {
6161
ATTR_GEN_AI_TOKEN_TYPE,
6262
ATTR_GEN_AI_USAGE_INPUT_TOKENS,
6363
ATTR_GEN_AI_USAGE_OUTPUT_TOKENS,
64-
EVENT_GEN_AI_ASSISTANT_MESSAGE,
65-
EVENT_GEN_AI_CHOICE,
66-
EVENT_GEN_AI_SYSTEM_MESSAGE,
67-
EVENT_GEN_AI_TOOL_MESSAGE,
68-
EVENT_GEN_AI_USER_MESSAGE,
6964
METRIC_GEN_AI_CLIENT_OPERATION_DURATION,
7065
METRIC_GEN_AI_CLIENT_TOKEN_USAGE,
7166
} from './semconv';
@@ -82,6 +77,16 @@ import {
8277
GenAIToolMessageEventBody,
8378
} from './internal-types';
8479

80+
81+
82+
// The JS semconv package doesn't yet emit constants for event names.
83+
// TODO: otel-js issue for semconv pkg not including event names
84+
export const EVENT_GEN_AI_SYSTEM_MESSAGE = 'gen_ai.system.message';
85+
export const EVENT_GEN_AI_USER_MESSAGE = 'gen_ai.user.message';
86+
export const EVENT_GEN_AI_ASSISTANT_MESSAGE = 'gen_ai.assistant.message';
87+
export const EVENT_GEN_AI_TOOL_MESSAGE = 'gen_ai.tool.message';
88+
export const EVENT_GEN_AI_CHOICE = 'gen_ai.choice';
89+
8590
export class OpenAIInstrumentation extends InstrumentationBase<OpenAIInstrumentationConfig> {
8691
private _genaiClientOperationDuration!: Histogram;
8792
private _genaiClientTokenUsage!: Histogram;

packages/instrumentation-openai/src/semconv.ts

Lines changed: 190 additions & 41 deletions
Original file line number · Diff line number · Diff line change
@@ -15,56 +15,205 @@
1515
*/
1616

1717
/*
18-
* Copyright The OpenTelemetry Authors
18+
* This file contains a copy of unstable semantic convention definitions
19+
* used by this package.
20+
* @see https://github.com/open-telemetry/opentelemetry-js/tree/main/semantic-conventions#unstable-semconv
21+
*/
22+
23+
/**
24+
* Identifies the class / type of event.
1925
*
20-
* Licensed under the Apache License, Version 2.0 (the "License");
21-
* you may not use this file except in compliance with the License.
22-
* You may obtain a copy of the License at
26+
* @example browser.mouse.click
27+
* @example device.app.lifecycle
2328
*
24-
* https://www.apache.org/licenses/LICENSE-2.0
29+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
2530
*
26-
* Unless required by applicable law or agreed to in writing, software
27-
* distributed under the License is distributed on an "AS IS" BASIS,
28-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
29-
* See the License for the specific language governing permissions and
30-
* limitations under the License.
31+
* @deprecated Replaced by EventName top-level field on the LogRecord.
3132
*/
32-
export const ATTR_SERVER_ADDRESS = 'server.address';
33-
export const ATTR_SERVER_PORT = 'server.port';
33+
export const ATTR_EVENT_NAME = 'event.name' as const;
3434

35-
// -- Unstable semconv
35+
/**
36+
* The name of the operation being performed.
37+
*
38+
* @note If one of the predefined values applies, but specific system uses a different name it's **RECOMMENDED** to document it in the semantic conventions for specific GenAI system and use system-specific name in the instrumentation. If a different name is not documented, instrumentation libraries **SHOULD** use applicable predefined value.
39+
*
40+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
41+
*/
42+
export const ATTR_GEN_AI_OPERATION_NAME = 'gen_ai.operation.name' as const;
3643

37-
export const ATTR_EVENT_NAME = 'event.name';
38-
export const ATTR_GEN_AI_OPERATION_NAME = 'gen_ai.operation.name';
44+
/**
45+
* The encoding formats requested in an embeddings operation, if specified.
46+
*
47+
* @example ["base64"]
48+
* @example ["float", "binary"]
49+
*
50+
* @note In some GenAI systems the encoding formats are called embedding types. Also, some GenAI systems only accept a single format per request.
51+
*
52+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
53+
*/
54+
export const ATTR_GEN_AI_REQUEST_ENCODING_FORMATS =
55+
'gen_ai.request.encoding_formats' as const;
56+
57+
/**
58+
* The frequency penalty setting for the GenAI request.
59+
*
60+
* @example 0.1
61+
*
62+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
63+
*/
3964
export const ATTR_GEN_AI_REQUEST_FREQUENCY_PENALTY =
40-
'gen_ai.request.frequency_penalty';
41-
export const ATTR_GEN_AI_REQUEST_MAX_TOKENS = 'gen_ai.request.max_tokens';
42-
export const ATTR_GEN_AI_REQUEST_MODEL = 'gen_ai.request.model';
65+
'gen_ai.request.frequency_penalty' as const;
66+
67+
/**
68+
* The maximum number of tokens the model generates for a request.
69+
*
70+
* @example 100
71+
*
72+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
73+
*/
74+
export const ATTR_GEN_AI_REQUEST_MAX_TOKENS =
75+
'gen_ai.request.max_tokens' as const;
76+
77+
/**
78+
* The name of the GenAI model a request is being made to.
79+
*
80+
* @example "gpt-4"
81+
*
82+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
83+
*/
84+
export const ATTR_GEN_AI_REQUEST_MODEL = 'gen_ai.request.model' as const;
85+
86+
/**
87+
* The presence penalty setting for the GenAI request.
88+
*
89+
* @example 0.1
90+
*
91+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
92+
*/
4393
export const ATTR_GEN_AI_REQUEST_PRESENCE_PENALTY =
44-
'gen_ai.request.presence_penalty';
45-
export const ATTR_GEN_AI_REQUEST_TEMPERATURE = 'gen_ai.request.temperature';
94+
'gen_ai.request.presence_penalty' as const;
95+
96+
/**
97+
* List of sequences that the model will use to stop generating further tokens.
98+
*
99+
* @example ["forest", "lived"]
100+
*
101+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
102+
*/
46103
export const ATTR_GEN_AI_REQUEST_STOP_SEQUENCES =
47-
'gen_ai.request.stop_sequences';
48-
export const ATTR_GEN_AI_REQUEST_TOP_P = 'gen_ai.request.top_p';
104+
'gen_ai.request.stop_sequences' as const;
105+
106+
/**
107+
* The temperature setting for the GenAI request.
108+
*
109+
* @example 0.0
110+
*
111+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
112+
*/
113+
export const ATTR_GEN_AI_REQUEST_TEMPERATURE =
114+
'gen_ai.request.temperature' as const;
115+
116+
/**
117+
* The top_p sampling setting for the GenAI request.
118+
*
119+
* @example 1.0
120+
*
121+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
122+
*/
123+
export const ATTR_GEN_AI_REQUEST_TOP_P = 'gen_ai.request.top_p' as const;
124+
125+
/**
126+
* Array of reasons the model stopped generating tokens, corresponding to each generation received.
127+
*
128+
* @example ["stop"]
129+
* @example ["stop", "length"]
130+
*
131+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
132+
*/
49133
export const ATTR_GEN_AI_RESPONSE_FINISH_REASONS =
50-
'gen_ai.response.finish_reasons';
51-
export const ATTR_GEN_AI_RESPONSE_ID = 'gen_ai.response.id';
52-
export const ATTR_GEN_AI_RESPONSE_MODEL = 'gen_ai.response.model';
53-
export const ATTR_GEN_AI_SYSTEM = 'gen_ai.system';
54-
export const ATTR_GEN_AI_TOKEN_TYPE = 'gen_ai.token.type';
55-
export const ATTR_GEN_AI_USAGE_INPUT_TOKENS = 'gen_ai.usage.input_tokens';
56-
export const ATTR_GEN_AI_USAGE_OUTPUT_TOKENS = 'gen_ai.usage.output_tokens';
134+
'gen_ai.response.finish_reasons' as const;
135+
136+
/**
137+
* The unique identifier for the completion.
138+
*
139+
* @example chatcmpl-123
140+
*
141+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
142+
*/
143+
export const ATTR_GEN_AI_RESPONSE_ID = 'gen_ai.response.id' as const;
144+
145+
/**
146+
* The name of the model that generated the response.
147+
*
148+
* @example gpt-4-0613
149+
*
150+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
151+
*/
152+
export const ATTR_GEN_AI_RESPONSE_MODEL = 'gen_ai.response.model' as const;
153+
154+
/**
155+
* The Generative AI product as identified by the client or server instrumentation.
156+
*
157+
* @example "openai"
158+
*
159+
* @note The `gen_ai.system` describes a family of GenAI models with specific model identified
160+
* by `gen_ai.request.model` and `gen_ai.response.model` attributes.
161+
*
162+
* The actual GenAI product may differ from the one identified by the client.
163+
* Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI client
164+
* libraries. In such cases, the `gen_ai.system` is set to `openai` based on the
165+
* instrumentation's best knowledge, instead of the actual system. The `server.address`
166+
* attribute may help identify the actual system in use for `openai`.
167+
*
168+
* For custom model, a custom friendly name **SHOULD** be used.
169+
* If none of these options apply, the `gen_ai.system` **SHOULD** be set to `_OTHER`.
170+
*
171+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
172+
*/
173+
export const ATTR_GEN_AI_SYSTEM = 'gen_ai.system' as const;
174+
175+
/**
176+
* The type of token being counted.
177+
*
178+
* @example input
179+
* @example output
180+
*
181+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
182+
*/
183+
export const ATTR_GEN_AI_TOKEN_TYPE = 'gen_ai.token.type' as const;
184+
185+
/**
186+
* The number of tokens used in the GenAI input (prompt).
187+
*
188+
* @example 100
189+
*
190+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
191+
*/
192+
export const ATTR_GEN_AI_USAGE_INPUT_TOKENS =
193+
'gen_ai.usage.input_tokens' as const;
194+
195+
/**
196+
* The number of tokens used in the GenAI response (completion).
197+
*
198+
* @example 180
199+
*
200+
* @experimental This attribute is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
201+
*/
202+
export const ATTR_GEN_AI_USAGE_OUTPUT_TOKENS =
203+
'gen_ai.usage.output_tokens' as const;
204+
205+
/**
206+
* GenAI operation duration
207+
*
208+
* @experimental This metric is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
209+
*/
57210
export const METRIC_GEN_AI_CLIENT_OPERATION_DURATION =
58-
'gen_ai.client.operation.duration';
59-
export const METRIC_GEN_AI_CLIENT_TOKEN_USAGE = 'gen_ai.client.token.usage';
211+
'gen_ai.client.operation.duration' as const;
60212

61-
export const ATTR_GEN_AI_REQUEST_ENCODING_FORMATS =
62-
'gen_ai.request.encoding_formats';
63-
64-
// The JS semconv package doesn't yet emit constants for event names.
65-
// TODO: otel-js issue for semconv pkg not including event names
66-
export const EVENT_GEN_AI_SYSTEM_MESSAGE = 'gen_ai.system.message';
67-
export const EVENT_GEN_AI_USER_MESSAGE = 'gen_ai.user.message';
68-
export const EVENT_GEN_AI_ASSISTANT_MESSAGE = 'gen_ai.assistant.message';
69-
export const EVENT_GEN_AI_TOOL_MESSAGE = 'gen_ai.tool.message';
70-
export const EVENT_GEN_AI_CHOICE = 'gen_ai.choice';
213+
/**
214+
* Measures number of input and output tokens used
215+
*
216+
* @experimental This metric is experimental and is subject to breaking changes in minor releases of `@opentelemetry/semantic-conventions`.
217+
*/
218+
export const METRIC_GEN_AI_CLIENT_TOKEN_USAGE =
219+
'gen_ai.client.token.usage' as const;

0 commit comments

Comments (0)