Commit 9e7db44

delete redundant comments
1 parent 4bcffb8 commit 9e7db44

File tree

2 files changed: +36 -54 lines


packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/v1/responses_wrappers.py

Lines changed: 0 additions & 17 deletions
@@ -435,14 +435,12 @@ def responses_get_or_create_wrapper(tracer: Tracer, wrapped, instance, args, kwargs):
     try:
         response = wrapped(*args, **kwargs)
         if isinstance(response, Stream):
-            # Create a span for the streaming response
             span = tracer.start_span(
                 SPAN_NAME,
                 kind=SpanKind.CLIENT,
                 start_time=start_time,
             )
 
-            # Wrap the stream with ResponseStream to capture telemetry
             return ResponseStream(
                 span=span,
                 response=response,
@@ -799,11 +797,9 @@ def __init__(
             response_reasoning_effort=None,
         )
 
-        # Accumulated response data
        self._complete_response_data = None
        self._output_text = ""
 
-        # Cleanup state tracking to prevent duplicate operations
        self._cleanup_completed = False
        self._cleanup_lock = threading.Lock()
 
@@ -862,22 +858,17 @@ async def __anext__(self):
 
     def _process_chunk(self, chunk):
         """Process a streaming chunk"""
-        # Handle response events based on type
         if hasattr(chunk, "type"):
-            # Handle text delta events
             if chunk.type == "response.output_text.delta":
                 if hasattr(chunk, "delta") and chunk.delta:
                     self._output_text += chunk.delta
-            # Handle completion event
             elif chunk.type == "response.completed" and hasattr(chunk, "response"):
                 self._complete_response_data = chunk.response
 
-        # Fallback: Extract text delta from chunk if it has a delta attribute with text
         if hasattr(chunk, "delta"):
             if hasattr(chunk.delta, "text") and chunk.delta.text:
                 self._output_text += chunk.delta.text
 
-        # Store the complete response when we get it
         if hasattr(chunk, "response") and chunk.response:
             self._complete_response_data = chunk.response
 
@@ -892,32 +883,26 @@ def _process_complete_response(self):
             if self._complete_response_data:
                 parsed_response = parse_response(self._complete_response_data)
 
-                # Update traced data with response information
                 self._traced_data.response_id = parsed_response.id
                 self._traced_data.response_model = parsed_response.model
                 self._traced_data.output_text = self._output_text
 
-                # Update usage if available
                 if parsed_response.usage:
                     self._traced_data.usage = parsed_response.usage
 
-                # Update output blocks
                 if parsed_response.output:
                     self._traced_data.output_blocks = {
                         block.id: block for block in parsed_response.output
                     }
 
-                # Store in global responses dict
                 responses[parsed_response.id] = self._traced_data
 
-            # Set span attributes
             set_data_attributes(self._traced_data, self._span)
             self._span.set_status(StatusCode.OK)
             self._span.end()
             self._cleanup_completed = True
 
         except Exception as e:
-            # Log the error but don't fail
             if self._span and self._span.is_recording():
                 self._span.set_attribute(ERROR_TYPE, e.__class__.__name__)
                 self._span.set_status(StatusCode.ERROR, str(e))
@@ -947,7 +932,6 @@ def _ensure_cleanup(self):
                 return
 
             try:
-                # Set whatever data we have so far
                 if self._span and self._span.is_recording():
                     set_data_attributes(self._traced_data, self._span)
                     self._span.set_status(StatusCode.OK)
@@ -956,5 +940,4 @@ def _ensure_cleanup(self):
                 self._cleanup_completed = True
 
             except Exception:
-                # Final fallback - just mark as completed
                 self._cleanup_completed = True
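The comments removed above restated what the code already says; the flow itself is unchanged: accumulate output-text deltas chunk by chunk, capture the full response on response.completed, and end the span exactly once. A minimal standalone reduction of that pattern follows; the class and the _NoopSpan stub are hypothetical, simplified names, not the instrumentation's actual ResponseStream API.

import threading


class StreamTelemetrySketch:
    """Illustrative sketch of the accumulate-then-cleanup pattern; names are hypothetical."""

    def __init__(self, span):
        self._span = span                      # span opened when streaming began
        self._output_text = ""                 # accumulated text deltas
        self._complete_response_data = None    # final response object, once seen
        self._cleanup_completed = False
        self._cleanup_lock = threading.Lock()  # prevents ending the span twice

    def process_chunk(self, chunk):
        # Text deltas accumulate; the completed event carries the full response.
        chunk_type = getattr(chunk, "type", None)
        if chunk_type == "response.output_text.delta" and getattr(chunk, "delta", None):
            self._output_text += chunk.delta
        elif chunk_type == "response.completed" and getattr(chunk, "response", None):
            self._complete_response_data = chunk.response

    def ensure_cleanup(self):
        # Idempotent: whichever path gets here first (normal completion,
        # an error path, or garbage collection) ends the span; later
        # callers return immediately.
        with self._cleanup_lock:
            if self._cleanup_completed:
                return
            self._cleanup_completed = True
            self._span.end()


class _NoopSpan:
    def end(self):
        pass


state = StreamTelemetrySketch(_NoopSpan())
state.ensure_cleanup()
state.ensure_cleanup()  # no-op on the second call

The lock-plus-flag combination is what lets the real class expose cleanup from several call sites without double-ending the span.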

packages/opentelemetry-instrumentation-openai/tests/traces/test_responses.py

Lines changed: 36 additions & 37 deletions
@@ -1,4 +1,3 @@
-
 import pytest
 
 from openai import OpenAI
@@ -7,7 +6,9 @@
 
 
 @pytest.mark.vcr
-def test_responses(instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI):
+def test_responses(
+    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
+):
     _ = openai_client.responses.create(
         model="gpt-4.1-nano",
         input="What is the capital of France?",
@@ -26,7 +27,9 @@ def test_responses(instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI):
 
 
 @pytest.mark.vcr
-def test_responses_with_input_history(instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI):
+def test_responses_with_input_history(
+    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
+):
     user_message = "Come up with an adjective in English. Respond with just one word."
     first_response = openai_client.responses.create(
         model="gpt-4.1-nano",
@@ -79,7 +82,9 @@ def test_responses_with_input_history(instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI):
 
 
 @pytest.mark.vcr
-def test_responses_tool_calls(instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI):
+def test_responses_tool_calls(
+    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
+):
     tools = [
         {
             "type": "function",
@@ -90,11 +95,11 @@ def test_responses_tool_calls(instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI):
                 "properties": {
                     "location": {
                         "type": "string",
-                        "description": "The city and state, e.g. San Francisco, CA"
+                        "description": "The city and state, e.g. San Francisco, CA",
                     }
                 },
-                "required": ["location"]
-            }
+                "required": ["location"],
+            },
         }
     ]
     openai_client.responses.create(
@@ -103,11 +108,11 @@ def test_responses_tool_calls(instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI):
             {
                 "type": "message",
                 "role": "user",
-                "content": "What's the weather in London?"
+                "content": "What's the weather in London?",
             }
         ],
         tools=tools,
-        tool_choice="auto"
+        tool_choice="auto",
     )
 
     spans = span_exporter.get_finished_spans()
@@ -145,16 +150,17 @@ def test_responses_tool_calls(instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI):
 
 
 @pytest.mark.vcr
-@pytest.mark.skipif(not is_reasoning_supported(),
-                    reason="Reasoning is not supported in older OpenAI library versions")
-def test_responses_reasoning(instrument_legacy, span_exporter: InMemorySpanExporter,
-                             openai_client: OpenAI):
+@pytest.mark.skipif(
+    not is_reasoning_supported(),
+    reason="Reasoning is not supported in older OpenAI library versions",
+)
+def test_responses_reasoning(
+    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
+):
     openai_client.responses.create(
         model="gpt-5-nano",
         input="Count r's in strawberry",
-        reasoning={
-            "effort": "low", "summary": None
-        },
+        reasoning={"effort": "low", "summary": None},
     )
 
     spans = span_exporter.get_finished_spans()
@@ -172,18 +178,18 @@ def test_responses_reasoning(instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI):
 
 
 @pytest.mark.vcr
-@pytest.mark.skipif(not is_reasoning_supported(),
-                    reason="Reasoning is not supported in older OpenAI library versions")
-def test_responses_reasoning_dict_issue(instrument_legacy, span_exporter: InMemorySpanExporter,
-                                        openai_client: OpenAI):
+@pytest.mark.skipif(
+    not is_reasoning_supported(),
+    reason="Reasoning is not supported in older OpenAI library versions",
+)
+def test_responses_reasoning_dict_issue(
+    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
+):
     """Test for issue #3350 - reasoning dict causing invalid type warning"""
     openai_client.responses.create(
         model="gpt-5-nano",
         input="Explain why the sky is blue",
-        reasoning={
-            "effort": "medium",
-            "summary": "auto"
-        },
+        reasoning={"effort": "medium", "summary": "auto"},
     )
 
     spans = span_exporter.get_finished_spans()
@@ -205,7 +211,9 @@ def test_responses_reasoning_dict_issue(instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI):
 
 
 @pytest.mark.vcr
-def test_responses_streaming(instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI):
+def test_responses_streaming(
+    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
+):
     """Test for streaming responses.create() - reproduces customer issue"""
     stream = openai_client.responses.create(
         model="gpt-4.1-nano",
@@ -216,19 +224,13 @@ def test_responses_streaming(instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI):
     # Consume the stream
     full_text = ""
     for item in stream:
-        # Debug: print the structure of the item
-        # print(f"Item type: {type(item)}, Item: {item}")
-        # The response API streaming events have a different structure
-        # They have type="response.output_text.delta" with a "delta" field
         if hasattr(item, "type") and item.type == "response.output_text.delta":
             if hasattr(item, "delta") and item.delta:
                 full_text += item.delta
-        # Also handle if there's a delta attribute with text
         elif hasattr(item, "delta") and item.delta:
             if hasattr(item.delta, "text") and item.delta.text:
                 full_text += item.delta.text
 
-    # Check that spans were created
     spans = span_exporter.get_finished_spans()
     assert len(spans) == 1, f"Expected 1 span but got {len(spans)}"
 
@@ -241,28 +243,25 @@ def test_responses_streaming(instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI):
 
 @pytest.mark.vcr
 @pytest.mark.asyncio
-async def test_responses_streaming_async(instrument_legacy, span_exporter: InMemorySpanExporter, async_openai_client):
+async def test_responses_streaming_async(
+    instrument_legacy, span_exporter: InMemorySpanExporter, async_openai_client
+):
     """Test for async streaming responses.create() - reproduces customer issue"""
     stream = await async_openai_client.responses.create(
         model="gpt-4.1-nano",
         input="Tell me a three sentence bedtime story about a unicorn.",
         stream=True,
     )
 
-    # Consume the stream
     full_text = ""
     async for item in stream:
-        # The response API streaming events have a different structure
-        # They have type="response.output_text.delta" with a "delta" field
         if hasattr(item, "type") and item.type == "response.output_text.delta":
             if hasattr(item, "delta") and item.delta:
                 full_text += item.delta
-        # Also handle if there's a delta attribute with text
         elif hasattr(item, "delta") and item.delta:
             if hasattr(item.delta, "text") and item.delta.text:
                 full_text += item.delta.text
 
-    # Check that spans were created
     spans = span_exporter.get_finished_spans()
     assert len(spans) == 1, f"Expected 1 span but got {len(spans)}"
 
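Both streaming tests repeat the same two-branch extraction: typed response.output_text.delta events carry the text directly in item.delta, and a fallback branch reads item.delta.text when a chunk exposes a delta object instead. If that pattern spreads further, one option would be a small shared helper along the lines of the sketch below; extract_text_delta is a hypothetical name, not something the package provides.

def extract_text_delta(item) -> str:
    """Return the text carried by a streaming event, or "" if it has none.

    Hypothetical helper mirroring the branching used in the tests above.
    """
    # Typed event: the text rides directly on .delta.
    if getattr(item, "type", None) == "response.output_text.delta":
        delta = getattr(item, "delta", None)
        if delta:
            return delta
    # Fallback: some chunks expose a delta object with a .text attribute.
    delta = getattr(item, "delta", None)
    if delta is not None and getattr(delta, "text", None):
        return delta.text
    return ""

With a helper like this, the sync consuming loop collapses to full_text = "".join(extract_text_delta(item) for item in stream), and the async test would use the equivalent async comprehension.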
