@@ -19,7 +19,6 @@
 from ddtrace.llmobs._constants import INPUT_MESSAGES
 from ddtrace.llmobs._constants import INPUT_TOKENS_METRIC_KEY
 from ddtrace.llmobs._constants import INPUT_VALUE
-from ddtrace.llmobs._constants import LITELLM_ROUTER_INSTANCE_KEY
 from ddtrace.llmobs._constants import METADATA
 from ddtrace.llmobs._constants import OAI_HANDOFF_TOOL_ARG
 from ddtrace.llmobs._constants import OUTPUT_MESSAGES
@@ -39,31 +38,71 @@

 logger = get_logger(__name__)

-OPENAI_SKIPPED_COMPLETION_TAGS = (
-    "model",
-    "prompt",
-    "api_key",
-    "user_api_key",
-    "user_api_key_hash",
-    LITELLM_ROUTER_INSTANCE_KEY,
+COMMON_METADATA_KEYS = (
+    "stream",
+    "temperature",
+    "top_p",
+    "user",
 )
-OPENAI_SKIPPED_CHAT_TAGS = (
-    "model",
-    "messages",
+OPENAI_METADATA_RESPONSE_KEYS = (
+    "background",
+    "include",
+    "max_output_tokens",
+    "max_tool_calls",
+    "parallel_tool_calls",
+    "previous_response_id",
+    "prompt",
+    "reasoning",
+    "service_tier",
+    "store",
+    "text",
+    "tool_choice",
     "tools",
-    "functions",
-    "api_key",
-    "user_api_key",
-    "user_api_key_hash",
-    LITELLM_ROUTER_INSTANCE_KEY,
+    "top_logprobs",
+    "truncation",
+)
+OPENAI_METADATA_CHAT_KEYS = (
+    "audio",
+    "frequency_penalty",
+    "function_call",
+    "logit_bias",
+    "logprobs",
+    "max_completion_tokens",
+    "max_tokens",
+    "modalities",
+    "n",
+    "parallel_tool_calls",
+    "prediction",
+    "presence_penalty",
+    "reasoning_effort",
+    "response_format",
+    "seed",
+    "service_tier",
+    "stop",
+    "store",
+    "stream_options",
+    "tool_choice",
+    "top_logprobs",
+    "web_search_options",
+)
+OPENAI_METADATA_COMPLETION_KEYS = (
+    "best_of",
+    "echo",
+    "frequency_penalty",
+    "logit_bias",
+    "logprobs",
+    "max_tokens",
+    "n",
+    "presence_penalty",
+    "seed",
+    "stop",
+    "stream_options",
+    "suffix",
 )

 LITELLM_METADATA_CHAT_KEYS = (
     "timeout",
-    "temperature",
-    "top_p",
     "n",
-    "stream",
     "stream_options",
     "stop",
     "max_completion_tokens",
@@ -73,7 +112,6 @@
     "presence_penalty",
     "frequency_penalty",
     "logit_bias",
-    "user",
     "response_format",
     "seed",
     "tool_choice",
@@ -97,12 +135,8 @@
     "n",
     "presence_penalty",
     "stop",
-    "stream",
     "stream_options",
     "suffix",
-    "temperature",
-    "top_p",
-    "user",
     "api_base",
     "api_version",
     "model_list",
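
Note: taken together, these constants flip metadata extraction from a deny-list (`OPENAI_SKIPPED_*_TAGS`) to an allow-list: the shared `COMMON_METADATA_KEYS` tuple is concatenated with a per-operation tuple, and only kwargs named in the combined tuple survive. A minimal sketch of why that matters, with abbreviated tuples; `extra_headers` here is a hypothetical example of a kwarg the old deny-list would have let through:

```python
from typing import Any, Dict

kwargs: Dict[str, Any] = {
    "temperature": 0.5,
    "api_key": "sk-…",
    "extra_headers": {"Authorization": "…"},  # hypothetical unlisted kwarg
}

# Old approach: deny-list -- anything not explicitly skipped leaks into metadata.
skipped = ("model", "prompt", "api_key")
print({k: v for k, v in kwargs.items() if k not in skipped})
# {'temperature': 0.5, 'extra_headers': {...}}  <- leaks the unlisted kwarg

# New approach: allow-list -- only known-safe keys survive.
allowed = ("stream", "temperature", "top_p", "user")  # COMMON_METADATA_KEYS, abbreviated
print({k: v for k, v in kwargs.items() if k in allowed})
# {'temperature': 0.5}
```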
@@ -471,12 +505,12 @@ def get_metadata_from_kwargs(
     kwargs: Dict[str, Any], integration_name: str = "openai", operation: str = "chat"
 ) -> Dict[str, Any]:
     metadata = {}
+    keys_to_include: Tuple[str, ...] = COMMON_METADATA_KEYS
     if integration_name == "openai":
-        keys_to_skip = OPENAI_SKIPPED_CHAT_TAGS if operation == "chat" else OPENAI_SKIPPED_COMPLETION_TAGS
-        metadata = {k: v for k, v in kwargs.items() if k not in keys_to_skip}
+        keys_to_include += OPENAI_METADATA_CHAT_KEYS if operation == "chat" else OPENAI_METADATA_COMPLETION_KEYS
     elif integration_name == "litellm":
-        keys_to_include = LITELLM_METADATA_CHAT_KEYS if operation == "chat" else LITELLM_METADATA_COMPLETION_KEYS
-        metadata = {k: v for k, v in kwargs.items() if k in keys_to_include}
+        keys_to_include += LITELLM_METADATA_CHAT_KEYS if operation == "chat" else LITELLM_METADATA_COMPLETION_KEYS
+    metadata = {k: v for k, v in kwargs.items() if k in keys_to_include}
     return metadata


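Note: the rewritten function now starts from `COMMON_METADATA_KEYS` and extends it per integration, so both branches share one filtering pass. A self-contained replica of the new body for illustration (the allow-list tuples are abbreviated stand-ins for the full definitions above):

```python
from typing import Any, Dict, Tuple

# Abbreviated allow-lists; the real module defines the full tuples shown in the diff.
COMMON_METADATA_KEYS: Tuple[str, ...] = ("stream", "temperature", "top_p", "user")
OPENAI_METADATA_CHAT_KEYS: Tuple[str, ...] = ("max_tokens", "seed")
OPENAI_METADATA_COMPLETION_KEYS: Tuple[str, ...] = ("best_of", "echo")
LITELLM_METADATA_CHAT_KEYS: Tuple[str, ...] = ("timeout", "n")
LITELLM_METADATA_COMPLETION_KEYS: Tuple[str, ...] = ("api_base",)


def get_metadata_from_kwargs(
    kwargs: Dict[str, Any], integration_name: str = "openai", operation: str = "chat"
) -> Dict[str, Any]:
    metadata = {}
    # Tuple concatenation builds the combined allow-list for this integration/operation.
    keys_to_include: Tuple[str, ...] = COMMON_METADATA_KEYS
    if integration_name == "openai":
        keys_to_include += OPENAI_METADATA_CHAT_KEYS if operation == "chat" else OPENAI_METADATA_COMPLETION_KEYS
    elif integration_name == "litellm":
        keys_to_include += LITELLM_METADATA_CHAT_KEYS if operation == "chat" else LITELLM_METADATA_COMPLETION_KEYS
    metadata = {k: v for k, v in kwargs.items() if k in keys_to_include}
    return metadata


print(get_metadata_from_kwargs({"temperature": 0.1, "timeout": 30}, "litellm"))
# {'temperature': 0.1, 'timeout': 30}
print(get_metadata_from_kwargs({"temperature": 0.1, "timeout": 30}, "other"))
# {'temperature': 0.1}
```

One behavioral consequence worth noting: an unrecognized `integration_name` previously returned an empty dict, whereas the filtering now happens unconditionally, so unknown integrations fall through to the common keys alone.
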
@@ -621,7 +655,7 @@ def openai_get_metadata_from_response(
     metadata = {}

     if kwargs:
-        metadata.update({k: v for k, v in kwargs.items() if k not in ("model", "input", "instructions")})
+        metadata.update({k: v for k, v in kwargs.items() if k in OPENAI_METADATA_RESPONSE_KEYS + COMMON_METADATA_KEYS})

     if not response:
         return metadata
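
Note: the Responses-API path gets the same treatment; instead of skipping `("model", "input", "instructions")`, it keeps only keys named in `OPENAI_METADATA_RESPONSE_KEYS + COMMON_METADATA_KEYS`. A minimal sketch of just that filtering step, under the assumption that the rest of `openai_get_metadata_from_response()` (merging metadata off the response object, not shown in this hunk) is unchanged; `response_kwargs_metadata` is a hypothetical helper name and the tuples are abbreviated:

```python
from typing import Any, Dict, Optional, Tuple

OPENAI_METADATA_RESPONSE_KEYS: Tuple[str, ...] = ("reasoning", "truncation")  # abbreviated
COMMON_METADATA_KEYS: Tuple[str, ...] = ("stream", "temperature", "top_p", "user")


def response_kwargs_metadata(kwargs: Optional[Dict[str, Any]]) -> Dict[str, Any]:
    # Stand-in for the kwargs-filtering step: the allow-list replaces the old
    # deny-list of ("model", "input", "instructions"), so request inputs and
    # any unlisted kwargs are dropped rather than tagged as metadata.
    metadata: Dict[str, Any] = {}
    if kwargs:
        metadata.update(
            {k: v for k, v in kwargs.items() if k in OPENAI_METADATA_RESPONSE_KEYS + COMMON_METADATA_KEYS}
        )
    return metadata


print(response_kwargs_metadata({"truncation": "auto", "input": "hi", "model": "gpt-4o"}))
# {'truncation': 'auto'}
```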
|
0 commit comments