39 changes: 25 additions & 14 deletions agentops/instrumentation/openai/wrappers/chat.py
@@ -83,13 +83,14 @@
                 # Tool calls
                 if "tool_calls" in msg:
                     tool_calls = msg["tool_calls"]
-                    for j, tool_call in enumerate(tool_calls):
-                        if is_openai_v1() and hasattr(tool_call, "__dict__"):
-                            tool_call = model_as_dict(tool_call)
-                        function = tool_call.get("function", {})
-                        attributes[f"{prefix}.tool_calls.{j}.id"] = tool_call.get("id")
-                        attributes[f"{prefix}.tool_calls.{j}.name"] = function.get("name")
-                        attributes[f"{prefix}.tool_calls.{j}.arguments"] = function.get("arguments")
+                    if tool_calls:  # Check if tool_calls is not None
+                        for j, tool_call in enumerate(tool_calls):
+                            if is_openai_v1() and hasattr(tool_call, "__dict__"):
+                                tool_call = model_as_dict(tool_call)
+                            function = tool_call.get("function", {})
+                            attributes[f"{prefix}.tool_calls.{j}.id"] = tool_call.get("id")
+                            attributes[f"{prefix}.tool_calls.{j}.name"] = function.get("name")
+                            attributes[f"{prefix}.tool_calls.{j}.arguments"] = function.get("arguments")

Codecov / codecov/patch warning: added lines #L86-L93 in chat.py were not covered by tests.

             # Functions
             if "functions" in kwargs:
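
The guard matters because, under the v1 OpenAI SDK, an assistant message's tool_calls field is optional: a message serialized back into a dict can carry "tool_calls": None rather than omitting the key, and enumerate(None) raises a TypeError. A minimal sketch of the failure mode the added check avoids (the msg dict below is hypothetical):

# Hypothetical assistant message as it may arrive after serialization:
msg = {"role": "assistant", "content": "Hello!", "tool_calls": None}

# The old code did roughly `for j, tool_call in enumerate(msg["tool_calls"])`,
# which fails here with: TypeError: 'NoneType' object is not iterable.

# With the added truthiness check, the tool-call attributes are simply skipped:
tool_calls = msg["tool_calls"]
if tool_calls:
    for j, tool_call in enumerate(tool_calls):
        print(j, tool_call)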
@@ -121,6 +122,14 @@
             response_dict = model_as_dict(return_value)
         elif isinstance(return_value, dict):
             response_dict = return_value
+        elif hasattr(return_value, "model_dump"):
+            # Handle Pydantic models directly
+            response_dict = return_value.model_dump()
+        elif hasattr(return_value, "__dict__"):
+            # Try to use model_as_dict even if it has __iter__(fallback)
+            response_dict = model_as_dict(return_value)
+
+        logger.debug(f"[OPENAI DEBUG] response_dict keys: {list(response_dict.keys()) if response_dict else 'empty'}")

Codecov / codecov/patch warning: added lines #L125, #L127-L128, #L130, and #L132 in chat.py were not covered by tests.

         # Basic response attributes
         if "id" in response_dict:
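
The new model_dump() branch targets Pydantic v2 models, which is what the v1 OpenAI SDK uses for its response objects; any such object can be flattened to a plain dict without going through model_as_dict. A quick illustration with a hypothetical stand-in model (not an actual OpenAI type):

from pydantic import BaseModel

# Hypothetical stand-in for a response object; real openai>=1.0 responses are
# also Pydantic models and expose the same model_dump() method.
class FakeCompletion(BaseModel):
    id: str
    model: str

resp = FakeCompletion(id="chatcmpl-123", model="gpt-4o")
print(hasattr(resp, "model_dump"))  # True
print(resp.model_dump())            # {'id': 'chatcmpl-123', 'model': 'gpt-4o'}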
Expand Down Expand Up @@ -174,17 +183,19 @@
# Function call
if "function_call" in message:
function_call = message["function_call"]
attributes[f"{prefix}.tool_calls.0.name"] = function_call.get("name")
attributes[f"{prefix}.tool_calls.0.arguments"] = function_call.get("arguments")
if function_call: # Check if function_call is not None
attributes[f"{prefix}.tool_calls.0.name"] = function_call.get("name")
attributes[f"{prefix}.tool_calls.0.arguments"] = function_call.get("arguments")

Check warning on line 188 in agentops/instrumentation/openai/wrappers/chat.py

View check run for this annotation

Codecov / codecov/patch

agentops/instrumentation/openai/wrappers/chat.py#L186-L188

Added lines #L186 - L188 were not covered by tests

# Tool calls
if "tool_calls" in message:
tool_calls = message["tool_calls"]
for i, tool_call in enumerate(tool_calls):
function = tool_call.get("function", {})
attributes[f"{prefix}.tool_calls.{i}.id"] = tool_call.get("id")
attributes[f"{prefix}.tool_calls.{i}.name"] = function.get("name")
attributes[f"{prefix}.tool_calls.{i}.arguments"] = function.get("arguments")
if tool_calls: # Check if tool_calls is not None
for i, tool_call in enumerate(tool_calls):
function = tool_call.get("function", {})
attributes[f"{prefix}.tool_calls.{i}.id"] = tool_call.get("id")
attributes[f"{prefix}.tool_calls.{i}.name"] = function.get("name")
attributes[f"{prefix}.tool_calls.{i}.arguments"] = function.get("arguments")

Check warning on line 198 in agentops/instrumentation/openai/wrappers/chat.py

View check run for this annotation

Codecov / codecov/patch

agentops/instrumentation/openai/wrappers/chat.py#L193-L198

Added lines #L193 - L198 were not covered by tests

# Prompt filter results
if "prompt_filter_results" in response_dict:
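
Note that the existing "function_call" in message and "tool_calls" in message checks pass even when the stored value is an explicit None, which is exactly why the extra truthiness guards are needed; a tiny illustration with a hypothetical choice message:

# Hypothetical choice message serialized from a completion that used no tools:
message = {"role": "assistant", "content": "Sure!", "tool_calls": None, "function_call": None}

print("tool_calls" in message)     # True, even though the value is None
print("function_call" in message)  # True
# Hence the added `if tool_calls:` / `if function_call:` guards before
# calling .get(...) or enumerate(...).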
7 changes: 6 additions & 1 deletion agentops/instrumentation/openai/wrappers/embeddings.py
@@ -62,7 +62,12 @@
         response_dict = model_as_dict(return_value)
     elif isinstance(return_value, dict):
         response_dict = return_value
-
+    elif hasattr(return_value, "model_dump"):
+        # Handle Pydantic models directly
+        response_dict = return_value.model_dump()
+    elif hasattr(return_value, "__dict__"):
+        # Try to use model_as_dict even if it has __iter__
+        response_dict = model_as_dict(return_value)

Codecov / codecov/patch warning: added lines #L65, #L67-L68, and #L70 in embeddings.py were not covered by tests.

     # Basic response attributes
     if "model" in response_dict:
         attributes[SpanAttributes.LLM_RESPONSE_MODEL] = response_dict["model"]
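
The trailing __dict__ branch is a catch-all for return values that are neither dicts nor Pydantic models; it assumes the instrumentation's existing model_as_dict helper can work from a plain object's attributes. A rough sketch with a hypothetical legacy-style response object:

# Hypothetical plain object standing in for a non-Pydantic response:
class LegacyEmbeddingResponse:
    def __init__(self):
        self.model = "text-embedding-3-small"
        self.usage = {"prompt_tokens": 8, "total_tokens": 8}

obj = LegacyEmbeddingResponse()
print(hasattr(obj, "model_dump"))  # False -> skips the Pydantic branch
print(hasattr(obj, "__dict__"))    # True  -> falls through to model_as_dict(obj)
print(vars(obj))                   # the raw attribute dict the helper has to work with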