12 | 12 | from openai.types.responses import ( |
13 | 13 | ResponseCompletedEvent, |
14 | 14 | ResponseTextDeltaEvent, |
| 15 | + ResponseFunctionToolCall, |
15 | 16 | ResponseFunctionWebSearch, |
16 | 17 | ResponseOutputItemDoneEvent, |
17 | | - ResponseReasoningTextDoneEvent, |
18 | 18 | ResponseCodeInterpreterToolCall, |
19 | | - ResponseReasoningTextDeltaEvent, |
20 | | - ResponseReasoningSummaryTextDoneEvent, |
| 19 | + ResponseReasoningSummaryPartDoneEvent, |
| 20 | + ResponseReasoningSummaryPartAddedEvent, |
21 | 21 | ResponseReasoningSummaryTextDeltaEvent, |
22 | 22 | ) |
23 | 23 |
29 | 29 | from agentex.lib.core.tracing.tracer import AsyncTracer |
30 | 30 | from agentex.types.task_message_delta import ( |
31 | 31 | TextDelta, |
32 | | - ReasoningContentDelta, |
33 | 32 | ReasoningSummaryDelta, |
34 | 33 | ) |
35 | 34 | from agentex.types.task_message_update import ( |
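The import changes summarize the behavioral change in this commit: reasoning-summary streaming is now keyed off the part-level events (ResponseReasoningSummaryPartAddedEvent / ResponseReasoningSummaryPartDoneEvent) plus the text delta, while the reasoning-content delta/done events and the ReasoningContentDelta type are dropped. Below is a minimal sketch of how the newly imported events map onto the lifecycle of one summary part; the helper and phase names are made up for illustration, and only the event classes come from openai.types.responses.

# Minimal sketch (not part of this commit): the lifecycle implied by the
# newly imported event types. The helper and phase names are illustrative.
from openai.types.responses import (
    ResponseReasoningSummaryPartAddedEvent,
    ResponseReasoningSummaryPartDoneEvent,
    ResponseReasoningSummaryTextDeltaEvent,
)


def classify_reasoning_summary_event(data: object) -> str:
    """Map a streaming event to a phase of the reasoning-summary lifecycle."""
    if isinstance(data, ResponseReasoningSummaryPartAddedEvent):
        return "part_added"  # a new summary part starts: open a streaming context
    if isinstance(data, ResponseReasoningSummaryTextDeltaEvent):
        return "text_delta"  # incremental summary text: accumulate it
    if isinstance(data, ResponseReasoningSummaryPartDoneEvent):
        return "part_done"   # the part is complete: flush and close the context
    return "other"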
@@ -691,7 +690,7 @@ async def run_agent_streamed_auto_send( |
691 | 690 | if self.agentex_client is None: |
692 | 691 | raise ValueError("Agentex client must be provided for auto_send methods") |
693 | 692 |
694 | | - tool_call_map: dict[str, Any] = {} |
| 693 | + tool_call_map: dict[str, ResponseFunctionToolCall] = {} |
695 | 694 |
696 | 695 | if self.tracer is None: |
697 | 696 | raise RuntimeError("Tracer not initialized - ensure tracer is provided to OpenAIService") |
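Narrowing tool_call_map from dict[str, Any] to dict[str, ResponseFunctionToolCall] documents what the map actually holds. A tiny hedged sketch of the likely usage follows; keying by call_id so the call can be paired with its later function output is an assumption about code outside this hunk, not something the diff shows.

# Hedged sketch of the pattern the narrowed annotation suggests. Keying by
# call_id is an assumption about how the map is used elsewhere in this method.
from openai.types.responses import ResponseFunctionToolCall

tool_call_map: dict[str, ResponseFunctionToolCall] = {}


def remember_tool_call(item: ResponseFunctionToolCall) -> None:
    # call_id is the identifier a later function_call_output must reference
    tool_call_map[item.call_id] = item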
@@ -756,6 +755,8 @@ async def run_agent_streamed_auto_send( |
756 | 755 |
757 | 756 | item_id_to_streaming_context: dict[str, StreamingTaskMessageContext] = {} |
758 | 757 | unclosed_item_ids: set[str] = set() |
| 758 | + # Simple string to accumulate reasoning summary |
| 759 | + current_reasoning_summary: str = "" |
759 | 760 |
760 | 761 | try: |
761 | 762 | # Process streaming events with TaskMessage creation |
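The new current_reasoning_summary string sits alongside the existing item_id_to_streaming_context map and unclosed_item_ids set. A single plain string is enough only because one summary part streams at a time: it is reset when a part is added, grown on each text delta, and flushed when the part is done. A small illustrative buffer (not part of agentex) that captures the same reset/add/flush lifecycle:

# Illustrative only, assuming one reasoning summary part streams at a time,
# which is what makes a single plain string sufficient in the handler below.
from dataclasses import dataclass


@dataclass
class ReasoningSummaryBuffer:
    text: str = ""

    def reset(self) -> None:
        # ResponseReasoningSummaryPartAddedEvent: a new part begins
        self.text = ""

    def add(self, delta: str) -> None:
        # ResponseReasoningSummaryTextDeltaEvent: accumulate streamed text
        self.text += delta

    def flush(self) -> str:
        # ResponseReasoningSummaryPartDoneEvent: hand back the complete summary
        complete, self.text = self.text, ""
        return complete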
@@ -848,103 +849,75 @@ async def run_agent_streamed_auto_send( |
848 | 849 | type="delta", |
849 | 850 | ), |
850 | 851 | ) |
851 | | - |
852 | | - elif isinstance(event.data, ResponseReasoningSummaryTextDeltaEvent): |
853 | | - # Handle reasoning summary text delta |
| 852 | + # Reasoning step one: new summary part added |
| 853 | + elif isinstance(event.data, ResponseReasoningSummaryPartAddedEvent): |
| 854 | + # We need to create a new streaming context for this reasoning item |
854 | 855 | item_id = event.data.item_id |
855 | | - summary_index = event.data.summary_index |
| 856 | + |
| 857 | + # Reset the reasoning summary string |
| 858 | + current_reasoning_summary = "" |
| 859 | + |
| 860 | + streaming_context = self.streaming_service.streaming_task_message_context( |
| 861 | + task_id=task_id, |
| 862 | + initial_content=ReasoningContent( |
| 863 | + author="agent", |
| 864 | + summary=[], |
| 865 | + content=[], |
| 866 | + type="reasoning", |
| 867 | + style="active", |
| 868 | + ), |
| 869 | + ) |
856 | 870 |
857 | | - # Check if we already have a streaming context for this reasoning item |
858 | | - if item_id not in item_id_to_streaming_context: |
859 | | - # Create a new streaming context for this reasoning item |
860 | | - streaming_context = self.streaming_service.streaming_task_message_context( |
861 | | - task_id=task_id, |
862 | | - initial_content=ReasoningContent( |
863 | | - author="agent", |
864 | | - summary=[], |
865 | | - content=[], |
866 | | - type="reasoning", |
867 | | - style="active", |
868 | | - ), |
869 | | - ) |
870 | | - # Open the streaming context |
871 | | - item_id_to_streaming_context[item_id] = await streaming_context.open() |
872 | | - unclosed_item_ids.add(item_id) |
873 | | - else: |
874 | | - streaming_context = item_id_to_streaming_context[item_id] |
| 871 | + # Replace the existing streaming context (if it exists) |
| 872 | + # Why do we replace? Because all the reasoning summary parts use the same item_id! |
| 873 | + item_id_to_streaming_context[item_id] = await streaming_context.open() |
| 874 | + unclosed_item_ids.add(item_id) |
| 875 | + |
| 876 | + # Reasoning step two: handling summary text delta |
| 877 | + elif isinstance(event.data, ResponseReasoningSummaryTextDeltaEvent): |
| 878 | + # Accumulate the delta into the string |
| 879 | + current_reasoning_summary += event.data.delta |
| 880 | + streaming_context = item_id_to_streaming_context[event.data.item_id] |
875 | 881 |
876 | 882 | # Stream the summary delta through the streaming service |
877 | 883 | await streaming_context.stream_update( |
878 | 884 | update=StreamTaskMessageDelta( |
879 | 885 | parent_task_message=streaming_context.task_message, |
880 | 886 | delta=ReasoningSummaryDelta( |
881 | | - summary_index=summary_index, |
| 887 | + summary_index=event.data.summary_index, |
882 | 888 | summary_delta=event.data.delta, |
883 | 889 | type="reasoning_summary", |
884 | 890 | ), |
885 | 891 | type="delta", |
886 | 892 | ), |
887 | 893 | ) |
888 | 894 |
889 | | - elif isinstance(event.data, ResponseReasoningTextDeltaEvent): |
890 | | - # Handle reasoning content text delta |
891 | | - item_id = event.data.item_id |
892 | | - content_index = event.data.content_index |
893 | | - |
894 | | - # Check if we already have a streaming context for this reasoning item |
895 | | - if item_id not in item_id_to_streaming_context: |
896 | | - # Create a new streaming context for this reasoning item |
897 | | - streaming_context = self.streaming_service.streaming_task_message_context( |
898 | | - task_id=task_id, |
899 | | - initial_content=ReasoningContent( |
900 | | - author="agent", |
901 | | - summary=[], |
902 | | - content=[], |
903 | | - type="reasoning", |
904 | | - style="active", |
905 | | - ), |
906 | | - ) |
907 | | - # Open the streaming context |
908 | | - item_id_to_streaming_context[item_id] = await streaming_context.open() |
909 | | - unclosed_item_ids.add(item_id) |
910 | | - else: |
911 | | - streaming_context = item_id_to_streaming_context[item_id] |
912 | | - |
913 | | - # Stream the content delta through the streaming service |
| 895 | + # Reasoning step three: the summary part is done, so close the streaming context |
| 896 | + elif isinstance(event.data, ResponseReasoningSummaryPartDoneEvent): |
| 897 | + # Handle reasoning summary part completion |
| 898 | + streaming_context = item_id_to_streaming_context[event.data.item_id] |
| 899 | + |
| 900 | + # Create the complete reasoning content with the accumulated summary |
| 901 | + complete_reasoning_content = ReasoningContent( |
| 902 | + author="agent", |
| 903 | + summary=[current_reasoning_summary], |
| 904 | + content=[], |
| 905 | + type="reasoning", |
| 906 | + style="static", |
| 907 | + ) |
| 908 | + |
| 909 | + # Send a full message update with the complete reasoning content |
914 | 910 | await streaming_context.stream_update( |
915 | | - update=StreamTaskMessageDelta( |
| 911 | + update=StreamTaskMessageFull( |
916 | 912 | parent_task_message=streaming_context.task_message, |
917 | | - delta=ReasoningContentDelta( |
918 | | - content_index=content_index, |
919 | | - content_delta=event.data.delta, |
920 | | - type="reasoning_content", |
921 | | - ), |
922 | | - type="delta", |
| 913 | + content=complete_reasoning_content, |
| 914 | + type="full", |
923 | 915 | ), |
924 | 916 | ) |
925 | | - |
926 | | - elif isinstance(event.data, ResponseReasoningSummaryTextDoneEvent): |
927 | | - # Handle reasoning summary text completion |
928 | | - item_id = event.data.item_id |
929 | | - summary_index = event.data.summary_index |
930 | | - |
931 | | - # We do NOT close the streaming context here as there can be multiple |
932 | | - # reasoning summaries. The context will be closed when the entire |
933 | | - # output item is done (ResponseOutputItemDoneEvent) |
934 | | - |
935 | | - # You would think they would use the event ResponseReasoningSummaryPartDoneEvent |
936 | | - # to close the streaming context, but they do!!! |
937 | | - # They output both a ResponseReasoningSummaryTextDoneEvent and a ResponseReasoningSummaryPartDoneEvent |
938 | | - # I have no idea why they do this. |
939 | | - |
940 | | - elif isinstance(event.data, ResponseReasoningTextDoneEvent): |
941 | | - # Handle reasoning content text completion |
942 | | - item_id = event.data.item_id |
943 | | - content_index = event.data.content_index |
944 | | - |
945 | | - # We do NOT close the streaming context here as there can be multiple |
946 | | - # reasoning content texts. The context will be closed when the entire |
947 | | - # output item is done (ResponseOutputItemDoneEvent) |
| 917 | + |
| 918 | + await streaming_context.close() |
| 919 | + unclosed_item_ids.discard(event.data.item_id) |
| 920 | + |
948 | 921 |
949 | 922 | elif isinstance(event.data, ResponseOutputItemDoneEvent): |
950 | 923 | # Handle item completion |
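Taken together, the three new branches replace the old per-delta handling with an open/accumulate/flush pattern: open a fresh streaming context when a summary part is added (replacing any prior context for the same item_id), accumulate text deltas into current_reasoning_summary, then send one full ReasoningContent update and close the context when the part is done. Closing on the part-level done event, rather than the text-level one the old code listened for, gives a single unambiguous end-of-part signal. The skeleton below mirrors that flow with stand-ins for the agentex streaming context; the _Context class and its methods are placeholders, not the real agentex API, and only the openai event classes are real.

# Simplified skeleton of the open/accumulate/flush flow added above.
from openai.types.responses import (
    ResponseReasoningSummaryPartAddedEvent,
    ResponseReasoningSummaryPartDoneEvent,
    ResponseReasoningSummaryTextDeltaEvent,
)


class _Context:
    # Placeholder for the agentex streaming context, not its real API.
    async def send_full(self, summary: str) -> None:
        print(f"reasoning summary: {summary}")

    async def close(self) -> None:
        pass


async def handle_reasoning_summary_event(
    data: object,
    contexts: dict[str, _Context],
    buffer: list[str],
) -> None:
    if isinstance(data, ResponseReasoningSummaryPartAddedEvent):
        buffer.clear()                       # step one: start a fresh summary
        contexts[data.item_id] = _Context()  # replace any context for this item_id
    elif isinstance(data, ResponseReasoningSummaryTextDeltaEvent):
        buffer.append(data.delta)            # step two: accumulate summary text
    elif isinstance(data, ResponseReasoningSummaryPartDoneEvent):
        ctx = contexts[data.item_id]         # step three: flush and close
        await ctx.send_full("".join(buffer))
        await ctx.close()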