@@ -1218,6 +1218,68 @@ def test_chat_stream_with_raw_response(default_openai_env, trace_exporter, metri
12181218 )
12191219
12201220
@pytest.mark.skipif(OPENAI_VERSION < (1, 8, 0), reason="LegacyAPIResponse available")
@pytest.mark.vcr()
def test_chat_stream_with_raw_response_parsed(default_openai_env, trace_exporter, metrics_reader, logs_exporter):
    """Streaming via with_raw_response followed by an explicit parse() is still instrumented."""
    client = openai.OpenAI()

    messages = [{"role": "user", "content": TEST_CHAT_INPUT}]

    raw_response = client.chat.completions.with_raw_response.create(
        model=TEST_CHAT_MODEL, messages=messages, stream=True
    )

    # parse() unwraps the raw (LegacyAPIResponse) object into the stream we iterate.
    chat_completion = raw_response.parse()

    # Drain the stream, collecting the delta text of every chunk that carries a choice.
    received = []
    for chunk in chat_completion:
        if chunk.choices:
            received.append(chunk.choices[0].delta.content or "")
    assert "".join(received) == "South Atlantic Ocean."

    spans = trace_exporter.get_finished_spans()
    assert len(spans) == 1

    span = spans[0]
    assert span.name == f"chat {TEST_CHAT_MODEL}"
    assert span.kind == SpanKind.CLIENT
    assert span.status.status_code == StatusCode.UNSET

    address, port = address_and_port(client)
    expected_attributes = {
        GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
        GEN_AI_OPERATION_NAME: "chat",
        GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
        GEN_AI_SYSTEM: "openai",
        GEN_AI_RESPONSE_ID: "chatcmpl-BRzdBETW1h4E9Vy0Se8CSvrYEXMtC",
        GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
        GEN_AI_RESPONSE_FINISH_REASONS: ("stop",),
        SERVER_ADDRESS: address,
        SERVER_PORT: port,
    }
    assert dict(span.attributes) == expected_attributes

    # One event for the user message, one for the model's choice.
    logs = logs_exporter.get_finished_logs()
    assert len(logs) == 2
    user_message, choice = logrecords_from_logs(logs)
    assert dict(user_message.attributes) == {"gen_ai.system": "openai", "event.name": "gen_ai.user.message"}
    assert dict(user_message.body) == {}

    assert_stop_log_record(choice)

    (operation_duration_metric,) = get_sorted_metrics(metrics_reader)
    attributes = {
        GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
        GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
    }
    assert_operation_duration_metric(
        client, "chat", operation_duration_metric, attributes=attributes, min_data_point=0.006761051714420319
    )
1281+
1282+
12211283@pytest .mark .skipif (OPENAI_VERSION < (1 , 35 , 0 ), reason = "service tier added in 1.35.0" )
12221284@pytest .mark .vcr ()
12231285def test_chat_stream_all_the_client_options (default_openai_env , trace_exporter , metrics_reader , logs_exporter ):
@@ -2273,6 +2335,71 @@ async def test_chat_async_stream_with_raw_response(default_openai_env, trace_exp
22732335 )
22742336
22752337
@pytest.mark.skipif(OPENAI_VERSION < (1, 8, 0), reason="LegacyAPIResponse available")
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_chat_async_stream_with_raw_response_parsed(
    default_openai_env, trace_exporter, metrics_reader, logs_exporter
):
    """Async streaming via with_raw_response followed by an explicit parse() is still instrumented."""
    client = openai.AsyncOpenAI()

    messages = [{"role": "user", "content": TEST_CHAT_INPUT}]

    raw_response = await client.chat.completions.with_raw_response.create(
        model=TEST_CHAT_MODEL, messages=messages, stream=True
    )

    # parse() unwraps the raw (LegacyAPIResponse) object into the async stream we iterate.
    chat_completion = raw_response.parse()

    # Drain the stream, collecting the delta text of every chunk that carries a choice.
    received = []
    async for chunk in chat_completion:
        if chunk.choices:
            received.append(chunk.choices[0].delta.content or "")
    assert "".join(received) == "South Atlantic Ocean."

    spans = trace_exporter.get_finished_spans()
    assert len(spans) == 1

    span = spans[0]
    assert span.name == f"chat {TEST_CHAT_MODEL}"
    assert span.kind == SpanKind.CLIENT
    assert span.status.status_code == StatusCode.UNSET

    address, port = address_and_port(client)
    expected_attributes = {
        GEN_AI_OPENAI_RESPONSE_SERVICE_TIER: "default",
        GEN_AI_OPERATION_NAME: "chat",
        GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
        GEN_AI_SYSTEM: "openai",
        GEN_AI_RESPONSE_ID: "chatcmpl-BRzdBETW1h4E9Vy0Se8CSvrYEXMtC",
        GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
        GEN_AI_RESPONSE_FINISH_REASONS: ("stop",),
        SERVER_ADDRESS: address,
        SERVER_PORT: port,
    }
    assert dict(span.attributes) == expected_attributes

    # One event for the user message, one for the model's choice.
    logs = logs_exporter.get_finished_logs()
    assert len(logs) == 2
    user_message, choice = logrecords_from_logs(logs)
    assert dict(user_message.attributes) == {"gen_ai.system": "openai", "event.name": "gen_ai.user.message"}
    assert dict(user_message.body) == {}

    assert_stop_log_record(choice)

    (operation_duration_metric,) = get_sorted_metrics(metrics_reader)
    attributes = {
        GEN_AI_REQUEST_MODEL: TEST_CHAT_MODEL,
        GEN_AI_RESPONSE_MODEL: TEST_CHAT_RESPONSE_MODEL,
    }
    assert_operation_duration_metric(
        client, "chat", operation_duration_metric, attributes=attributes, min_data_point=0.006761051714420319
    )
2401+
2402+
22762403@pytest .mark .vcr ()
22772404@pytest .mark .asyncio
22782405async def test_chat_async_stream_with_capture_message_content (
0 commit comments