@@ -17,7 +17,10 @@
 )
 
 # up top to prevent circular import due to integration import
-DEFAULT_MAX_VALUE_LENGTH = 1024
+# This is more or less an arbitrary large-ish value for now, so that we allow
+# pretty long strings (like LLM prompts), but still have *some* upper limit
+# until we verify that removing the trimming completely is safe.
+DEFAULT_MAX_VALUE_LENGTH = 100_000
 
 DEFAULT_MAX_STACK_FRAMES = 100
 DEFAULT_ADD_FULL_STACK = False
@@ -167,6 +170,7 @@ class SPANDATA:
     AI_PIPELINE_NAME = "ai.pipeline.name"
     """
     Name of the AI pipeline or chain being executed.
+    DEPRECATED: Use GEN_AI_PIPELINE_NAME instead.
     Example: "qa-pipeline"
     """
 
@@ -233,6 +237,7 @@ class SPANDATA:
     AI_STREAMING = "ai.streaming"
     """
     Whether or not the AI model call's response was streamed back asynchronously
+    DEPRECATED: Use GEN_AI_RESPONSE_STREAMING instead.
     Example: true
     """
 
@@ -382,6 +387,24 @@ class SPANDATA:
     Example: "chat"
     """
 
+    GEN_AI_PIPELINE_NAME = "gen_ai.pipeline.name"
+    """
+    Name of the AI pipeline or chain being executed.
+    Example: "qa-pipeline"
+    """
+
+    GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
+    """
+    Exact model identifier used to generate the response
+    Example: gpt-4o-mini-2024-07-18
+    """
+
+    GEN_AI_RESPONSE_STREAMING = "gen_ai.response.streaming"
+    """
+    Whether or not the AI model call's response was streamed back asynchronously
+    Example: true
+    """
+
     GEN_AI_RESPONSE_TEXT = "gen_ai.response.text"
     """
     The model's response text messages.
@@ -421,7 +444,7 @@ class SPANDATA:
     GEN_AI_REQUEST_MODEL = "gen_ai.request.model"
     """
     The model identifier being used for the request.
-    Example: "gpt-4-turbo-preview "
+    Example: "gpt-4-turbo"
     """
 
     GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty"
@@ -659,9 +682,11 @@ class OP:
     FUNCTION_AWS = "function.aws"
     FUNCTION_GCP = "function.gcp"
     GEN_AI_CHAT = "gen_ai.chat"
+    GEN_AI_EMBEDDINGS = "gen_ai.embeddings"
     GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
     GEN_AI_HANDOFF = "gen_ai.handoff"
     GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent"
+    GEN_AI_RESPONSES = "gen_ai.responses"
     GRAPHQL_EXECUTE = "graphql.execute"
     GRAPHQL_MUTATION = "graphql.mutation"
     GRAPHQL_PARSE = "graphql.parse"
@@ -686,8 +711,6 @@ class OP:
     MIDDLEWARE_STARLITE = "middleware.starlite"
     MIDDLEWARE_STARLITE_RECEIVE = "middleware.starlite.receive"
     MIDDLEWARE_STARLITE_SEND = "middleware.starlite.send"
-    OPENAI_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.openai"
-    OPENAI_EMBEDDINGS_CREATE = "ai.embeddings.create.openai"
     HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE = (
         "ai.chat_completions.create.huggingface_hub"
     )
0 commit comments