|
1 | 1 | # generated by datamodel-codegen: |
2 | 2 | # filename: llm_span_attributes.json |
3 | | -# timestamp: 2024-07-24T15:56:12+00:00 |
| 3 | +# timestamp: 2025-01-28T14:02:08+00:00 |
4 | 4 |
|
5 | 5 | from __future__ import annotations |
6 | 6 |
|
7 | | -from typing import List, Optional |
| 7 | +from typing import List, Optional, Union |
8 | 8 |
|
9 | 9 | from pydantic import BaseModel, ConfigDict, Field |
10 | 10 |
|
11 | 11 |
|
# NOTE: this model is generated by datamodel-codegen from llm_span_attributes.json.
# Do not hand-edit field definitions — regenerate from the schema instead.
class LLMSpanAttributes(BaseModel):
    """Attributes recorded on an LLM span by langtrace.

    Each field maps (via its ``alias``) to a dotted OpenTelemetry-style
    attribute key such as ``gen_ai.request.model``. ``extra='allow'`` lets
    callers attach attributes beyond those declared here, so the model
    accepts forward-compatible span payloads.
    """

    model_config = ConfigDict(
        extra='allow',
    )
    langtrace_span_name: Optional[str] = Field(
        None, alias='langtrace.span.name', description='Name of the span'
    )
    langtrace_service_name: str = Field(
        ...,
        alias='langtrace.service.name',
        description='Name of the service. Includes all supported service providers by langtrace',
    )
    langtrace_service_type: str = Field(
        ...,
        alias='langtrace.service.type',
        description='Type of the service. Allowed values: [llm, vectordb, framework]',
    )
    langtrace_service_version: Optional[str] = Field(
        None,
        alias='langtrace.service.version',
        description='Version of the service provider client',
    )
    langtrace_version: str = Field(..., alias='langtrace.version')
    langtrace_sdk_name: str = Field(..., alias='langtrace.sdk.name')
    url_full: str = Field(..., alias='url.full', description='Full URL of the request')
    url_path: str = Field(..., alias='url.path', description='Path of the request')
    gen_ai_operation_name: str = Field(
        ...,
        alias='gen_ai.operation.name',
        description='The name of the operation being performed.',
    )
    gen_ai_request_model: str = Field(
        ...,
        alias='gen_ai.request.model',
        description='Model name from the input request',
    )
    gen_ai_response_model: Optional[str] = Field(
        None, alias='gen_ai.response.model', description='Model name from the response'
    )
    gen_ai_request_temperature: Optional[float] = Field(
        None,
        alias='gen_ai.request.temperature',
        description='Temperature value from the input request',
    )
    gen_ai_request_logit_bias: Optional[str] = Field(
        None,
        alias='gen_ai.request.logit_bias',
        description='Likelihood bias of the specified tokens the input request.',
    )
    gen_ai_request_logprobs: Optional[bool] = Field(
        None,
        alias='gen_ai.request.logprobs',
        description='Logprobs flag returns log probabilities.',
    )
    gen_ai_request_top_logprobs: Optional[float] = Field(
        None,
        alias='gen_ai.request.top_logprobs',
        description='Integer between 0 and 5 specifying the number of most likely tokens to return.',
    )
    gen_ai_request_top_p: Optional[float] = Field(
        None,
        alias='gen_ai.request.top_p',
        description='Top P value from the input request',
    )
    gen_ai_request_top_k: Optional[float] = Field(
        None,
        alias='gen_ai.request.top_k',
        description='Top K results to return from the input request',
    )
    gen_ai_user: Optional[str] = Field(
        None, alias='gen_ai.user', description='User ID from the input request'
    )
    gen_ai_prompt: Optional[str] = Field(
        None, alias='gen_ai.prompt', description='Prompt text from the input request'
    )
    gen_ai_completion: Optional[str] = Field(
        None,
        alias='gen_ai.completion',
        description='Completion text from the response. This will be an array of json objects with the following format {"role": "", "content": ""}. Role can be one of the following values: [system, user, assistant, tool]',
    )
    gen_ai_request_stream: Optional[bool] = Field(
        None,
        alias='gen_ai.request.stream',
        description='Stream flag from the input request',
    )
    gen_ai_request_encoding_formats: Optional[List[str]] = Field(
        None,
        alias='gen_ai.request.encoding_formats',
        description="Encoding formats from the input request. Allowed values: ['float', 'int8','uint8', 'binary', 'ubinary', 'base64']",
    )
    gen_ai_completion_chunk: Optional[str] = Field(
        None,
        alias='gen_ai.completion.chunk',
        description='Chunk text from the response',
    )
    gen_ai_request_dimensions: Optional[float] = Field(
        None,
        alias='gen_ai.request.dimensions',
        description='Dimensions from the input request',
    )
    # NOTE(review): alias is 'gen_ai.response_id' (underscore), unlike the dotted
    # convention used elsewhere — this matches the schema as generated; confirm
    # before "fixing", since consumers key on the alias string.
    gen_ai_response_id: Optional[str] = Field(
        None,
        alias='gen_ai.response_id',
        description='Response ID from the output response',
    )
    gen_ai_response_finish_reasons: Optional[List[str]] = Field(
        None,
        alias='gen_ai.response.finish_reasons',
        description='Array of reasons the model stopped generating tokens, corresponding to each generation received',
    )
    gen_ai_system_fingerprint: Optional[str] = Field(
        None,
        alias='gen_ai.system_fingerprint',
        description='System fingerprint of the system that generated the response',
    )
    gen_ai_request_documents: Optional[str] = Field(
        None,
        alias='gen_ai.request.documents',
        description='Array of documents from the input request json stringified',
    )
    gen_ai_request_is_search_required: Optional[bool] = Field(
        None,
        alias='gen_ai.request.is_search_required',
        description='Search flag from the input request',
    )
    gen_ai_request_tool_choice: Optional[str] = Field(
        None,
        alias='gen_ai.request.tool_choice',
        description='Tool choice from the input request',
    )
    gen_ai_response_tool_calls: Optional[str] = Field(
        None,
        alias='gen_ai.response.tool_calls',
        description='Array of tool calls from the response json stringified',
    )
    gen_ai_request_max_tokens: Optional[float] = Field(
        None,
        alias='gen_ai.request.max_tokens',
        description='The maximum number of tokens the LLM generates for a request.',
    )
    gen_ai_usage_input_tokens: Optional[float] = Field(
        None,
        alias='gen_ai.usage.input_tokens',
        description='The number of tokens used in the llm prompt.',
    )
    gen_ai_usage_total_tokens: Optional[float] = Field(
        None,
        alias='gen_ai.usage.total_tokens',
        description='The total number of tokens used in the llm request.',
    )
    gen_ai_usage_output_tokens: Optional[float] = Field(
        None,
        alias='gen_ai.usage.output_tokens',
        description='The number of tokens in the llm response.',
    )
    gen_ai_usage_search_units: Optional[float] = Field(
        None,
        alias='gen_ai.usage.search_units',
        description='The number of search units used in the request.',
    )
    # Union[str, int] (not `str | int`) keeps the annotation evaluatable by
    # pydantic on Python < 3.10.
    gen_ai_request_seed: Optional[Union[str, int]] = Field(
        None, alias='gen_ai.request.seed', description='Seed from the input request'
    )
    gen_ai_request_frequency_penalty: Optional[float] = Field(
        None,
        alias='gen_ai.request.frequency_penalty',
        description='Frequency penalty from the input request',
    )
    gen_ai_request_presence_penalty: Optional[float] = Field(
        None,
        alias='gen_ai.request.presence_penalty',
        description='Presence penalty from the input request',
    )
    gen_ai_request_connectors: Optional[str] = Field(
        None,
        alias='gen_ai.request.connectors',
        description='An array of connectors from the input request json stringified',
    )
    gen_ai_request_tools: Optional[str] = Field(
        None,
        alias='gen_ai.request.tools',
        description='An array of tools from the input request json stringified',
    )
    gen_ai_request_tool_results: Optional[str] = Field(
        None,
        alias='gen_ai.request.tool_results',
        description='An array of tool results from the input request json stringified',
    )
    gen_ai_request_embedding_inputs: Optional[str] = Field(
        None,
        alias='gen_ai.request.embedding_inputs',
        description='An array of embedding inputs from the input request json stringified',
    )
    gen_ai_request_embedding_dataset_id: Optional[str] = Field(
        None,
        alias='gen_ai.request.embedding_dataset_id',
        description='Embedding dataset ID from the input request',
    )
    gen_ai_request_embedding_input_type: Optional[str] = Field(
        None,
        alias='gen_ai.request.embedding_input_type',
        description="Embedding input type from the input request. Allowed values: [ 'search_document', 'search_query', 'classification', 'clustering']",
    )
    gen_ai_request_embedding_job_name: Optional[str] = Field(
        None,
        alias='gen_ai.request.embedding_job_name',
        description='Embedding job name from the input request',
    )
    gen_ai_image_size: Optional[str] = Field(
        None,
        alias='gen_ai.image.size',
        description="Image size from the input request. Allowed values: ['256x256', '512x512', '1024x1024']",
    )
    gen_ai_request_response_format: Optional[str] = Field(
        None,
        alias='gen_ai.request.response_format',
        description="Response format from the input request. Allowed values: ['url', 'b64_json']",
    )
    http_max_retries: Optional[int] = Field(None, alias='http.max.retries')
    http_timeout: Optional[int] = Field(None, alias='http.timeout')
    gen_ai_cohere_rerank_query: Optional[str] = Field(
        None,
        alias='gen_ai.cohere.rerank.query',
        description='Query from the input request for the rerank api',
    )
    gen_ai_cohere_rerank_results: Optional[str] = Field(
        None,
        alias='gen_ai.cohere.rerank.results',
        description='Results from the rerank api',
    )
0 commit comments