
Commit 174198d

comment clean up
1 parent bbcb2a0 commit 174198d

File tree

2 files changed (+2, -19 lines)


sentry_sdk/integrations/google_genai/streaming.py

Lines changed: 0 additions & 7 deletions
@@ -75,7 +75,6 @@ def accumulate_streaming_response(chunks):
         total_reasoning_tokens += extracted_usage_data["output_tokens_reasoning"]
         total_tokens += extracted_usage_data["total_tokens"]
 
-    # Create a synthetic response object with accumulated data
     accumulated_response = AccumulatedResponse(
         text="".join(accumulated_text),
         finish_reasons=finish_reasons,
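
Aside: the hunk above only removes a comment; the accumulation logic itself is unchanged. For readers skimming the diff, a minimal sketch of that summing pattern, using made-up per-chunk usage dicts rather than the SDK's real chunk objects:

# Hypothetical per-chunk usage data, mirroring the fields summed above.
chunks_usage = [
    {"output_tokens_reasoning": 10, "total_tokens": 120},
    {"output_tokens_reasoning": 5, "total_tokens": 80},
]

total_reasoning_tokens = 0
total_tokens = 0
for extracted_usage_data in chunks_usage:
    total_reasoning_tokens += extracted_usage_data["output_tokens_reasoning"]
    total_tokens += extracted_usage_data["total_tokens"]

assert total_reasoning_tokens == 15
assert total_tokens == 200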
@@ -97,7 +96,6 @@ def accumulate_streaming_response(chunks):
 def set_span_data_for_streaming_response(span, integration, accumulated_response):
     # type: (Span, Any, AccumulatedResponse) -> None
     """Set span data for accumulated streaming response."""
-    # Set response text
     if (
         should_send_default_pii()
         and integration.include_prompts
@@ -108,22 +106,19 @@ def set_span_data_for_streaming_response(span, integration, accumulated_response
             safe_serialize([accumulated_response["text"]]),
         )
 
-    # Set finish reasons
     if accumulated_response.get("finish_reasons"):
         set_data_normalized(
             span,
             SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS,
             accumulated_response["finish_reasons"],
         )
 
-    # Set tool calls
     if accumulated_response.get("tool_calls"):
         span.set_data(
             SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
             safe_serialize(accumulated_response["tool_calls"]),
         )
 
-    # Set response ID and model
     if accumulated_response.get("id"):
         span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, accumulated_response["id"])
     if accumulated_response.get("model"):
@@ -141,7 +136,6 @@ def set_span_data_for_streaming_response(span, integration, accumulated_response
             accumulated_response["usage_metadata"]["input_tokens_cached"],
         )
 
-    # Output tokens already include reasoning tokens from extract_usage_data
     if accumulated_response["usage_metadata"]["output_tokens"]:
         span.set_data(
             SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS,
@@ -154,7 +148,6 @@ def set_span_data_for_streaming_response(span, integration, accumulated_response
             accumulated_response["usage_metadata"]["output_tokens_reasoning"],
         )
 
-    # Set total token count if available
     if accumulated_response["usage_metadata"]["total_tokens"]:
         span.set_data(
             SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS,

sentry_sdk/integrations/google_genai/utils.py

Lines changed: 2 additions & 12 deletions
@@ -82,14 +82,14 @@ def extract_usage_data(response):
 
         candidates_tokens = usage.get("candidates_token_count", 0) or 0
         # python-genai reports output and reasoning tokens separately
+        # reasoning should be sub-category of output tokens
         usage_data["output_tokens"] = candidates_tokens + reasoning_tokens
 
         total_tokens = usage.get("total_token_count", 0) or 0
        usage_data["total_tokens"] = total_tokens
 
         return usage_data
 
-    # Handle response object
     if not hasattr(response, "usage_metadata"):
         return usage_data
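
Aside: the comment added above documents the accounting convention: python-genai reports candidate (visible output) tokens and reasoning tokens separately, and the integration folds reasoning into the output-token count rather than tracking it as an independent bucket. A small worked example of that arithmetic with made-up numbers:

# Assumed example counts, not real API output.
candidates_tokens = 100  # visible completion tokens
reasoning_tokens = 40    # "thinking" tokens

# Reasoning is a sub-category of output, so it is added into output_tokens.
output_tokens = candidates_tokens + reasoning_tokens
assert output_tokens == 140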

@@ -209,7 +209,7 @@ def _format_tools_for_span(tools):
         # Check for predefined tool attributes - each of these tools
         # is an attribute of the tool object, by default set to None
         for attr_name, description in TOOL_ATTRIBUTES_MAP.items():
-            if hasattr(tool, attr_name) and getattr(tool, attr_name) is not None:
+            if getattr(tool, attr_name, None):
                 formatted_tools.append(
                     {
                         "name": attr_name,
@@ -434,11 +434,9 @@ def set_span_data_for_request(span, integration, model, contents, kwargs):
     span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
     span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model)
 
-    # Set streaming flag
     if kwargs.get("stream", False):
         span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
 
-    # Set model configuration parameters
     config = kwargs.get("config")
 
     if config is None:
@@ -506,35 +504,29 @@ def set_span_data_for_response(span, integration, response):
     if not response:
         return
 
-    # Extract and set response text
     if should_send_default_pii() and integration.include_prompts:
         response_texts = _extract_response_text(response)
         if response_texts:
             # Format as JSON string array as per documentation
             span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(response_texts))
 
-    # Extract and set tool calls
     tool_calls = extract_tool_calls(response)
     if tool_calls:
         # Tool calls should be JSON serialized
         span.set_data(SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(tool_calls))
 
-    # Extract and set finish reasons
     finish_reasons = extract_finish_reasons(response)
     if finish_reasons:
         set_data_normalized(
             span, SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons
         )
 
-    # Set response ID if available
     if getattr(response, "response_id", None):
         span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, response.response_id)
 
-    # Set response model if available
     if getattr(response, "model_version", None):
         span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response.model_version)
 
-    # Set token usage if available
     usage_data = extract_usage_data(response)
 
     if usage_data["input_tokens"]:
@@ -555,7 +547,6 @@ def set_span_data_for_response(span, integration, response):
             usage_data["output_tokens_reasoning"],
         )
 
-    # Set total token count if available
     if usage_data["total_tokens"]:
         span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage_data["total_tokens"])
 

@@ -567,7 +558,6 @@ def prepare_generate_content_args(args, kwargs):
     contents = args[1] if len(args) > 1 else kwargs.get("contents")
     model_name = get_model_name(model)
 
-    # Wrap config with tools
     config = kwargs.get("config")
     wrapped_config = wrapped_config_with_tools(config)
     if wrapped_config is not config:
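
Aside: judging from the identity comparison above, wrapped_config_with_tools can return the caller's config object unchanged when there is nothing to wrap, and only a different returned object needs to be swapped back into kwargs. A minimal sketch of that wrap-and-compare pattern with a stand-in wrapper (wrap_if_needed is hypothetical, not the SDK's helper):

def wrap_if_needed(config):
    # Return the same object when no wrapping is required,
    # otherwise build and return a new wrapper object.
    if not config:
        return config
    return {"original": config, "wrapped": True}

untouched = None
assert wrap_if_needed(untouched) is untouched      # same object, nothing to do

original = {"tools": ["my_tool"]}
assert wrap_if_needed(original) is not original    # new object, caller swaps it in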
