Skip to content

Commit 01d1617

Browse files
committed
openai: Remove redundant unit tests for raw responses and add explicit parsing to existing tests
1 parent 2cbf2ae commit 01d1617

File tree

2 files changed

+4
-241
lines changed

2 files changed

+4
-241
lines changed

instrumentation/elastic-opentelemetry-instrumentation-openai/tests/cassettes/test_chat_stream_with_raw_response_parsed.yaml

Lines changed: 0 additions & 116 deletions
This file was deleted.

instrumentation/elastic-opentelemetry-instrumentation-openai/tests/test_chat_completions.py

Lines changed: 4 additions & 125 deletions
Original file line numberDiff line numberDiff line change
@@ -1171,65 +1171,6 @@ def test_chat_stream_with_raw_response(default_openai_env, trace_exporter, metri
11711171
}
11721172
]
11731173

1174-
chat_completion = client.chat.completions.with_raw_response.create(
1175-
model=TEST_CHAT_MODEL, messages=messages, stream=True
1176-
)
1177-
1178-
chunks = [chunk.choices[0].delta.content or "" for chunk in chat_completion if chunk.choices]
1179-
assert "".join(chunks) == "Atlantic Ocean"
1180-
1181-
spans = trace_exporter.get_finished_spans()
1182-
assert len(spans) == 1
1183-
1184-
span = spans[0]
1185-
assert span.name == f"chat {TEST_CHAT_MODEL}"
1186-
assert span.kind == SpanKind.CLIENT
1187-
assert span.status.status_code == StatusCode.UNSET
1188-
1189-
address, port = address_and_port(client)
1190-
assert dict(span.attributes) == {
1191-
GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
1192-
GEN_AI_OPERATION_NAME: "chat",
1193-
GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
1194-
GEN_AI_SYSTEM: "openai",
1195-
GEN_AI_RESPONSE_ID: "chatcmpl-BDDnEHqYLBd36X8hHNTQfPKx4KMJT",
1196-
GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
1197-
GEN_AI_RESPONSE_FINISH_REASONS: ("stop",),
1198-
SERVER_ADDRESS: address,
1199-
SERVER_PORT: port,
1200-
}
1201-
1202-
logs = logs_exporter.get_finished_logs()
1203-
assert len(logs) == 2
1204-
log_records = logrecords_from_logs(logs)
1205-
user_message, choice = log_records
1206-
assert dict(user_message.attributes) == {"gen_ai.system": "openai", "event.name": "gen_ai.user.message"}
1207-
assert dict(user_message.body) == {}
1208-
1209-
assert_stop_log_record(choice)
1210-
1211-
(operation_duration_metric,) = get_sorted_metrics(metrics_reader)
1212-
attributes = {
1213-
GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
1214-
GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
1215-
}
1216-
assert_operation_duration_metric(
1217-
client, "chat", operation_duration_metric, attributes=attributes, min_data_point=0.006761051714420319
1218-
)
1219-
1220-
1221-
@pytest.mark.skipif(OPENAI_VERSION < (1, 8, 0), reason="LegacyAPIResponse available")
1222-
@pytest.mark.vcr()
1223-
def test_chat_stream_with_raw_response_parsed(default_openai_env, trace_exporter, metrics_reader, logs_exporter):
1224-
client = openai.OpenAI()
1225-
1226-
messages = [
1227-
{
1228-
"role": "user",
1229-
"content": TEST_CHAT_INPUT,
1230-
}
1231-
]
1232-
12331174
raw_response = client.chat.completions.with_raw_response.create(
12341175
model=TEST_CHAT_MODEL, messages=messages, stream=True
12351176
)
@@ -1238,7 +1179,7 @@ def test_chat_stream_with_raw_response_parsed(default_openai_env, trace_exporter
12381179
chat_completion = raw_response.parse()
12391180

12401181
chunks = [chunk.choices[0].delta.content or "" for chunk in chat_completion if chunk.choices]
1241-
assert "".join(chunks) == "South Atlantic Ocean."
1182+
assert "".join(chunks) == "Atlantic Ocean"
12421183

12431184
spans = trace_exporter.get_finished_spans()
12441185
assert len(spans) == 1
@@ -1254,7 +1195,7 @@ def test_chat_stream_with_raw_response_parsed(default_openai_env, trace_exporter
12541195
GEN_AI_OPERATION_NAME: "chat",
12551196
GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
12561197
GEN_AI_SYSTEM: "openai",
1257-
GEN_AI_RESPONSE_ID: "chatcmpl-BRzdBETW1h4E9Vy0Se8CSvrYEXMtC",
1198+
GEN_AI_RESPONSE_ID: "chatcmpl-BDDnEHqYLBd36X8hHNTQfPKx4KMJT",
12581199
GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
12591200
GEN_AI_RESPONSE_FINISH_REASONS: ("stop",),
12601201
SERVER_ADDRESS: address,
@@ -2288,68 +2229,6 @@ async def test_chat_async_stream_with_raw_response(default_openai_env, trace_exp
22882229
}
22892230
]
22902231

2291-
chat_completion = await client.chat.completions.with_raw_response.create(
2292-
model=TEST_CHAT_MODEL, messages=messages, stream=True
2293-
)
2294-
2295-
chunks = [chunk.choices[0].delta.content or "" async for chunk in chat_completion if chunk.choices]
2296-
assert "".join(chunks) == "Atlantic Ocean"
2297-
2298-
spans = trace_exporter.get_finished_spans()
2299-
assert len(spans) == 1
2300-
2301-
span = spans[0]
2302-
assert span.name == f"chat {TEST_CHAT_MODEL}"
2303-
assert span.kind == SpanKind.CLIENT
2304-
assert span.status.status_code == StatusCode.UNSET
2305-
2306-
address, port = address_and_port(client)
2307-
assert dict(span.attributes) == {
2308-
GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
2309-
GEN_AI_OPERATION_NAME: "chat",
2310-
GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
2311-
GEN_AI_SYSTEM: "openai",
2312-
GEN_AI_RESPONSE_ID: "chatcmpl-BDDnEHqYLBd36X8hHNTQfPKx4KMJT",
2313-
GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
2314-
GEN_AI_RESPONSE_FINISH_REASONS: ("stop",),
2315-
SERVER_ADDRESS: address,
2316-
SERVER_PORT: port,
2317-
}
2318-
2319-
logs = logs_exporter.get_finished_logs()
2320-
assert len(logs) == 2
2321-
log_records = logrecords_from_logs(logs)
2322-
user_message, choice = log_records
2323-
assert dict(user_message.attributes) == {"gen_ai.system": "openai", "event.name": "gen_ai.user.message"}
2324-
assert dict(user_message.body) == {}
2325-
2326-
assert_stop_log_record(choice)
2327-
2328-
(operation_duration_metric,) = get_sorted_metrics(metrics_reader)
2329-
attributes = {
2330-
GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
2331-
GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
2332-
}
2333-
assert_operation_duration_metric(
2334-
client, "chat", operation_duration_metric, attributes=attributes, min_data_point=0.006761051714420319
2335-
)
2336-
2337-
2338-
@pytest.mark.skipif(OPENAI_VERSION < (1, 8, 0), reason="LegacyAPIResponse available")
2339-
@pytest.mark.vcr()
2340-
@pytest.mark.asyncio
2341-
async def test_chat_async_stream_with_raw_response_parsed(
2342-
default_openai_env, trace_exporter, metrics_reader, logs_exporter
2343-
):
2344-
client = openai.AsyncOpenAI()
2345-
2346-
messages = [
2347-
{
2348-
"role": "user",
2349-
"content": TEST_CHAT_INPUT,
2350-
}
2351-
]
2352-
23532232
raw_response = await client.chat.completions.with_raw_response.create(
23542233
model=TEST_CHAT_MODEL, messages=messages, stream=True
23552234
)
@@ -2358,7 +2237,7 @@ async def test_chat_async_stream_with_raw_response_parsed(
23582237
chat_completion = raw_response.parse()
23592238

23602239
chunks = [chunk.choices[0].delta.content or "" async for chunk in chat_completion if chunk.choices]
2361-
assert "".join(chunks) == "South Atlantic Ocean."
2240+
assert "".join(chunks) == "Atlantic Ocean"
23622241

23632242
spans = trace_exporter.get_finished_spans()
23642243
assert len(spans) == 1
@@ -2374,7 +2253,7 @@ async def test_chat_async_stream_with_raw_response_parsed(
23742253
GEN_AI_OPERATION_NAME: "chat",
23752254
GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
23762255
GEN_AI_SYSTEM: "openai",
2377-
GEN_AI_RESPONSE_ID: "chatcmpl-BRzdBETW1h4E9Vy0Se8CSvrYEXMtC",
2256+
GEN_AI_RESPONSE_ID: "chatcmpl-BDDnEHqYLBd36X8hHNTQfPKx4KMJT",
23782257
GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
23792258
GEN_AI_RESPONSE_FINISH_REASONS: ("stop",),
23802259
SERVER_ADDRESS: address,

0 commit comments

Comments
 (0)