-from unittest import mock
+import re
 import pytest
 from unittest.mock import MagicMock, patch
 import os

 from sentry_sdk.integrations.openai_agents import OpenAIAgentsIntegration
+from sentry_sdk.integrations.openai_agents.utils import safe_serialize

 import agents
 from agents import (
@@ -360,22 +361,24 @@ def simple_test_tool(message: str) -> str:
         ai_client_span2,
     ) = spans

-    available_tools = [
-        {
-            "name": "simple_test_tool",
-            "description": "A simple tool",
-            "params_json_schema": {
-                "properties": {"message": {"title": "Message", "type": "string"}},
-                "required": ["message"],
-                "title": "simple_test_tool_args",
-                "type": "object",
-                "additionalProperties": False,
-            },
-            "on_invoke_tool": mock.ANY,
-            "strict_json_schema": True,
-            "is_enabled": True,
-        }
-    ]
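+    # Expected value is now the string produced by safe_serialize; the tool's
+    # on_invoke_tool callback shows up as its function repr instead of mock.ANY.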
+    available_tools = safe_serialize(
+        [
+            {
+                "name": "simple_test_tool",
+                "description": "A simple tool",
+                "params_json_schema": {
+                    "properties": {"message": {"title": "Message", "type": "string"}},
+                    "required": ["message"],
+                    "title": "simple_test_tool_args",
+                    "type": "object",
+                    "additionalProperties": False,
+                },
+                "on_invoke_tool": "<function agents.tool.function_tool.<locals>._create_function_tool.<locals>._on_invoke_tool>",
+                "strict_json_schema": True,
+                "is_enabled": True,
+            }
+        ]
+    )

     assert transaction["transaction"] == "test_agent workflow"
     assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents"
@@ -397,16 +400,22 @@ def simple_test_tool(message: str) -> str:
     assert ai_client_span1["data"]["gen_ai.agent.name"] == "test_agent"
     assert ai_client_span1["data"]["gen_ai.request.available_tools"] == available_tools
     assert ai_client_span1["data"]["gen_ai.request.max_tokens"] == 100
-    assert ai_client_span1["data"]["gen_ai.request.messages"] == [
-        {
-            "role": "system",
-            "content": [{"type": "text", "text": "You are a helpful test assistant."}],
-        },
-        {
-            "role": "user",
-            "content": [{"type": "text", "text": "Please use the simple test tool"}],
-        },
-    ]
+    assert ai_client_span1["data"]["gen_ai.request.messages"] == safe_serialize(
+        [
+            {
+                "role": "system",
+                "content": [
+                    {"type": "text", "text": "You are a helpful test assistant."}
+                ],
+            },
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "Please use the simple test tool"}
+                ],
+            },
+        ]
+    )
     assert ai_client_span1["data"]["gen_ai.request.model"] == "gpt-4"
     assert ai_client_span1["data"]["gen_ai.request.temperature"] == 0.7
     assert ai_client_span1["data"]["gen_ai.request.top_p"] == 1.0
@@ -415,22 +424,35 @@ def simple_test_tool(message: str) -> str:
     assert ai_client_span1["data"]["gen_ai.usage.output_tokens"] == 5
     assert ai_client_span1["data"]["gen_ai.usage.output_tokens.reasoning"] == 0
     assert ai_client_span1["data"]["gen_ai.usage.total_tokens"] == 15
-    assert ai_client_span1["data"]["gen_ai.response.tool_calls"] == [
-        {
-            "arguments": '{"message": "hello"}',
-            "call_id": "call_123",
-            "name": "simple_test_tool",
-            "type": "function_call",
-            "id": "call_123",
-            "status": None,
-            "function": mock.ANY,
-        }
-    ]
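+    # The serialized tool call embeds a non-deterministic SerializationIterator
+    # repr, so it is masked with a regex before comparing to the expected string.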
+    assert re.sub(
+        r"SerializationIterator\(.*\)",
+        "NOT_CHECKED",
+        ai_client_span1["data"]["gen_ai.response.tool_calls"],
+    ) == safe_serialize(
+        [
+            {
+                "arguments": '{"message": "hello"}',
+                "call_id": "call_123",
+                "name": "simple_test_tool",
+                "type": "function_call",
+                "id": "call_123",
+                "status": None,
+                "function": "NOT_CHECKED",
+            }
+        ]
+    )

     assert tool_span["description"] == "execute_tool simple_test_tool"
     assert tool_span["data"]["gen_ai.agent.name"] == "test_agent"
     assert tool_span["data"]["gen_ai.operation.name"] == "execute_tool"
-    assert tool_span["data"]["gen_ai.request.available_tools"] == available_tools
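+    # The angle-bracketed function repr varies per run, so it is replaced with a
+    # placeholder before comparing against the serialized expectation.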
+    assert (
+        re.sub(
+            "<.*>(,)",
+            r"'NOT_CHECKED'\1",
+            tool_span["data"]["gen_ai.request.available_tools"],
+        )
+        == available_tools
+    )
     assert tool_span["data"]["gen_ai.request.max_tokens"] == 100
     assert tool_span["data"]["gen_ai.request.model"] == "gpt-4"
     assert tool_span["data"]["gen_ai.request.temperature"] == 0.7
@@ -445,47 +467,64 @@ def simple_test_tool(message: str) -> str:
     assert ai_client_span2["description"] == "chat gpt-4"
     assert ai_client_span2["data"]["gen_ai.agent.name"] == "test_agent"
     assert ai_client_span2["data"]["gen_ai.operation.name"] == "chat"
-    assert ai_client_span2["data"]["gen_ai.request.available_tools"] == available_tools
+    assert (
+        re.sub(
+            "<.*>(,)",
+            r"'NOT_CHECKED'\1",
+            ai_client_span2["data"]["gen_ai.request.available_tools"],
+        )
+        == available_tools
+    )
     assert ai_client_span2["data"]["gen_ai.request.max_tokens"] == 100
-    assert ai_client_span2["data"]["gen_ai.request.messages"] == [
-        {
-            "role": "system",
-            "content": [{"type": "text", "text": "You are a helpful test assistant."}],
-        },
-        {
-            "role": "user",
-            "content": [{"type": "text", "text": "Please use the simple test tool"}],
-        },
-        {
-            "role": "assistant",
-            "content": [
-                {
-                    "arguments": '{"message": "hello"}',
-                    "call_id": "call_123",
-                    "name": "simple_test_tool",
-                    "type": "function_call",
-                    "id": "call_123",
-                    "function": mock.ANY,
-                }
-            ],
-        },
-        {
-            "role": "tool",
-            "content": [
-                {
-                    "call_id": "call_123",
-                    "output": "Tool executed with: hello",
-                    "type": "function_call_output",
-                }
-            ],
-        },
-    ]
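+    # As above, the embedded SerializationIterator repr is masked so only the
+    # stable parts of the serialized message list are compared.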
+    assert re.sub(
+        r"SerializationIterator\(.*\)",
+        "NOT_CHECKED",
+        ai_client_span2["data"]["gen_ai.request.messages"],
+    ) == safe_serialize(
+        [
+            {
+                "role": "system",
+                "content": [
+                    {"type": "text", "text": "You are a helpful test assistant."}
+                ],
+            },
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": "Please use the simple test tool"}
+                ],
+            },
+            {
+                "role": "assistant",
+                "content": [
+                    {
+                        "arguments": '{"message": "hello"}',
+                        "call_id": "call_123",
+                        "name": "simple_test_tool",
+                        "type": "function_call",
+                        "id": "call_123",
+                        "function": "NOT_CHECKED",
+                    }
+                ],
+            },
+            {
+                "role": "tool",
+                "content": [
+                    {
+                        "call_id": "call_123",
+                        "output": "Tool executed with: hello",
+                        "type": "function_call_output",
+                    }
+                ],
+            },
+        ]
+    )
     assert ai_client_span2["data"]["gen_ai.request.model"] == "gpt-4"
     assert ai_client_span2["data"]["gen_ai.request.temperature"] == 0.7
     assert ai_client_span2["data"]["gen_ai.request.top_p"] == 1.0
-    assert ai_client_span2["data"]["gen_ai.response.text"] == [
-        "Task completed using the tool"
-    ]
+    assert ai_client_span2["data"]["gen_ai.response.text"] == safe_serialize(
+        ["Task completed using the tool"]
+    )
     assert ai_client_span2["data"]["gen_ai.system"] == "openai"
     assert ai_client_span2["data"]["gen_ai.usage.input_tokens.cached"] == 0
     assert ai_client_span2["data"]["gen_ai.usage.input_tokens"] == 15