@@ -539,12 +539,8 @@ def _chat_completion(self, messages, temperature=1.0, tools=None, stream=True, r
539539 # NEW: Unified protocol dispatch path (Issue #1304)
540540 # Check if unified dispatch is enabled (opt-in for backward compatibility)
541541 if getattr (self , '_use_unified_llm_dispatch' , False ):
542- from .unified_chat_mixin import UnifiedChatMixin
543- if not isinstance (self , UnifiedChatMixin ):
544- # Dynamically add the mixin
545- self .__class__ = type (self .__class__ .__name__ + 'Unified' , (UnifiedChatMixin , self .__class__ ), {})
546-
547- return self ._unified_chat_completion (
542+ # Use composition instead of runtime class mutation for safety
543+ final_response = self ._execute_unified_chat_completion (
548544 messages = messages ,
549545 temperature = temperature ,
550546 tools = formatted_tools ,
@@ -557,8 +553,7 @@ def _chat_completion(self, messages, temperature=1.0, tools=None, stream=True, r
557553 )
558554
559555 # LEGACY: Maintain existing dual execution paths for backward compatibility
560- # Use the custom LLM instance if available
561- if self ._using_custom_llm and hasattr (self , 'llm_instance' ):
556+ elif self ._using_custom_llm and hasattr (self , 'llm_instance' ):
562557 if stream :
563558 # Debug logs for tool info
564559 if formatted_tools :
@@ -774,6 +769,130 @@ def _chat_completion(self, messages, temperature=1.0, tools=None, stream=True, r
774769 _get_display_functions ()['display_error' ](f"Error in chat completion: { e } " )
775770 return None
776771
def _execute_unified_chat_completion(
    self,
    messages,
    temperature=1.0,
    tools=None,
    stream=True,
    reasoning_steps=False,
    task_name=None,
    task_description=None,
    task_id=None,
    response_format=None,
    max_iterations=10
):
    """
    Execute a chat completion through the unified LLM dispatcher.

    Uses composition (a dispatcher object cached on the instance) instead of
    runtime class mutation, providing the same functionality as
    UnifiedChatMixin in a safer, more maintainable form.

    Args:
        messages: Conversation messages to send to the model.
        temperature: Sampling temperature forwarded to the dispatcher.
        tools: Formatted tool definitions, or None.
        stream: Whether to request a streaming response.
        reasoning_steps: Whether to request intermediate reasoning output.
        task_name: Optional task name for event/callback attribution.
        task_description: Optional task description for attribution.
        task_id: Optional task id for attribution.
        response_format: Optional structured-output format specification.
        max_iterations: Upper bound on dispatcher tool-call iterations
            (previously hard-coded to 10; default preserves that behavior).

    Returns:
        The dispatcher's final response object.

    Raises:
        Exception: Re-raises any dispatcher failure after logging it.
    """
    from ..llm import create_llm_dispatcher

    # Lazily build the dispatcher on first use and cache it on the agent.
    dispatcher = getattr(self, '_unified_dispatcher', None)
    if dispatcher is None:
        if self._using_custom_llm and hasattr(self, 'llm_instance'):
            # Route through the caller-supplied custom LLM instance.
            dispatcher = create_llm_dispatcher(llm_instance=self.llm_instance)
        else:
            # Fall back to the OpenAI client, creating it on demand.
            if getattr(self, '_openai_client', None) is None:
                from ..llm import get_openai_client
                self._openai_client = get_openai_client(api_key=getattr(self, 'api_key', None))
            dispatcher = create_llm_dispatcher(openai_client=self._openai_client, model=self.llm)
        self._unified_dispatcher = dispatcher

    # Only forward a stream callback when an emitter is attached to the agent.
    if hasattr(self, 'stream_emitter'):
        stream_callback = getattr(self.stream_emitter, 'emit', None)
    else:
        stream_callback = None

    try:
        return dispatcher.chat_completion(
            messages=messages,
            tools=tools,
            temperature=temperature,
            max_tokens=getattr(self, 'max_tokens', None),
            stream=stream,
            response_format=response_format,
            execute_tool_fn=getattr(self, 'execute_tool', None),
            # Console output is shown when verbose or when streaming live.
            console=self.console if (self.verbose or stream) else None,
            display_fn=self._display_generating if self.verbose else None,
            stream_callback=stream_callback,
            emit_events=True,
            verbose=self.verbose,
            max_iterations=max_iterations,
            reasoning_steps=reasoning_steps,
            task_name=task_name,
            task_description=task_description,
            task_id=task_id
        )
    except Exception as e:
        logging.error(f"Unified chat completion failed: {e}")
        raise
async def _execute_unified_achat_completion(
    self,
    messages,
    temperature=1.0,
    tools=None,
    stream=True,
    reasoning_steps=False,
    task_name=None,
    task_description=None,
    task_id=None,
    response_format=None,
    max_iterations=10
):
    """
    Execute an async chat completion through the unified LLM dispatcher.

    Async counterpart of ``_execute_unified_chat_completion``: uses
    composition (a dispatcher object cached on the instance) instead of
    runtime class mutation, providing the same functionality as
    UnifiedChatMixin in a safer, more maintainable form.

    Args:
        messages: Conversation messages to send to the model.
        temperature: Sampling temperature forwarded to the dispatcher.
        tools: Formatted tool definitions, or None.
        stream: Whether to request a streaming response.
        reasoning_steps: Whether to request intermediate reasoning output.
        task_name: Optional task name for event/callback attribution.
        task_description: Optional task description for attribution.
        task_id: Optional task id for attribution.
        response_format: Optional structured-output format specification.
        max_iterations: Upper bound on dispatcher tool-call iterations
            (previously hard-coded to 10; default preserves that behavior).

    Returns:
        The dispatcher's final response object.

    Raises:
        Exception: Re-raises any dispatcher failure after logging it.
    """
    from ..llm import create_llm_dispatcher

    # Lazily build the dispatcher on first use and cache it on the agent.
    dispatcher = getattr(self, '_unified_dispatcher', None)
    if dispatcher is None:
        if self._using_custom_llm and hasattr(self, 'llm_instance'):
            # Route through the caller-supplied custom LLM instance.
            dispatcher = create_llm_dispatcher(llm_instance=self.llm_instance)
        else:
            # Fall back to the OpenAI client, creating it on demand.
            if getattr(self, '_openai_client', None) is None:
                from ..llm import get_openai_client
                self._openai_client = get_openai_client(api_key=getattr(self, 'api_key', None))
            dispatcher = create_llm_dispatcher(openai_client=self._openai_client, model=self.llm)
        self._unified_dispatcher = dispatcher

    # Only forward a stream callback when an emitter is attached to the agent.
    if hasattr(self, 'stream_emitter'):
        stream_callback = getattr(self.stream_emitter, 'emit', None)
    else:
        stream_callback = None

    try:
        return await dispatcher.achat_completion(
            messages=messages,
            tools=tools,
            temperature=temperature,
            max_tokens=getattr(self, 'max_tokens', None),
            stream=stream,
            response_format=response_format,
            execute_tool_fn=getattr(self, 'execute_tool', None),
            # Console output is shown when verbose or when streaming live.
            console=self.console if (self.verbose or stream) else None,
            display_fn=self._display_generating if self.verbose else None,
            stream_callback=stream_callback,
            emit_events=True,
            verbose=self.verbose,
            max_iterations=max_iterations,
            reasoning_steps=reasoning_steps,
            task_name=task_name,
            task_description=task_description,
            task_id=task_id
        )
    except Exception as e:
        logging.error(f"Unified async chat completion failed: {e}")
        raise
895+
777896 def _execute_callback_and_display (self , prompt : str , response : str , generation_time : float , task_name = None , task_description = None , task_id = None ):
778897 """Helper method to execute callbacks and display interaction.
779898
@@ -1722,13 +1841,8 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda
17221841 # NEW: Unified protocol dispatch path (Issue #1304) - Async version
17231842 # Check if unified dispatch is enabled (opt-in for backward compatibility)
17241843 if getattr (self , '_use_unified_llm_dispatch' , False ):
1725- from .unified_chat_mixin import UnifiedChatMixin
1726- if not isinstance (self , UnifiedChatMixin ):
1727- # Dynamically add the mixin
1728- self .__class__ = type (self .__class__ .__name__ + 'Unified' , (UnifiedChatMixin , self .__class__ ), {})
1729-
1730- # Use unified async dispatch
1731- response = await self ._unified_achat_completion (
1844+ # Use composition instead of runtime class mutation for safety
1845+ response = await self ._execute_unified_achat_completion (
17321846 messages = messages ,
17331847 temperature = temperature ,
17341848 tools = formatted_tools ,
@@ -1737,70 +1851,58 @@ async def _achat_impl(self, prompt, temperature, tools, output_json, output_pyda
17371851 task_name = task_name ,
17381852 task_description = task_description ,
17391853 task_id = task_id ,
1740- response_format = None # Handle response_format in unified path
1854+ response_format = response_format
17411855 )
1742-
1743- # Process the unified response
1856+ # Continue to handle structured outputs and other processing
1857+ # Don't return immediately - fall through to existing logic
1858+ else :
1859+ # LEGACY: Check if OpenAI client is available
1860+ if self ._openai_client is None :
1861+ error_msg = "OpenAI client is not initialized. Please provide OPENAI_API_KEY or use a custom LLM provider."
1862+ _get_display_functions ()['display_error' ](error_msg )
1863+ return None
1864+
1865+ # Make the API call based on the type of request
17441866 if tools :
1867+ effective_tool_choice = tool_choice or getattr (self , '_yaml_tool_choice' , None )
1868+ tool_call_kwargs = dict (
1869+ model = self .llm ,
1870+ messages = messages ,
1871+ temperature = temperature ,
1872+ tools = formatted_tools ,
1873+ )
1874+ if effective_tool_choice :
1875+ tool_call_kwargs ['tool_choice' ] = effective_tool_choice
1876+ response = await self ._openai_client .async_client .chat .completions .create (
1877+ ** tool_call_kwargs
1878+ )
17451879 result = await self ._achat_completion (response , tools )
1880+ if get_logger ().getEffectiveLevel () == logging .DEBUG :
1881+ total_time = time .time () - start_time
1882+ logging .debug (f"Agent.achat completed in { total_time :.2f} seconds" )
1883+ # Execute callback after tool completion
1884+ self ._execute_callback_and_display (original_prompt , result , time .time () - start_time , task_name , task_description , task_id )
1885+ return await self ._atrigger_after_agent_hook (original_prompt , result , start_time )
1886+ elif output_json or output_pydantic :
1887+ response = await self ._openai_client .async_client .chat .completions .create (
1888+ model = self .llm ,
1889+ messages = messages ,
1890+ temperature = temperature ,
1891+ response_format = {"type" : "json_object" }
1892+ )
1893+ response_text = response .choices [0 ].message .content
1894+ if get_logger ().getEffectiveLevel () == logging .DEBUG :
1895+ total_time = time .time () - start_time
1896+ logging .debug (f"Agent.achat completed in { total_time :.2f} seconds" )
1897+ # Execute callback after JSON/Pydantic completion
1898+ self ._execute_callback_and_display (original_prompt , response_text , time .time () - start_time , task_name , task_description , task_id )
1899+ return await self ._atrigger_after_agent_hook (original_prompt , response_text , start_time )
17461900 else :
1747- result = response .choices [0 ].message .content if response and response .choices else ""
1748-
1749- if get_logger ().getEffectiveLevel () == logging .DEBUG :
1750- total_time = time .time () - start_time
1751- logging .debug (f"Agent.achat (unified) completed in { total_time :.2f} seconds" )
1752-
1753- # Execute callback after completion
1754- self ._execute_callback_and_display (original_prompt , result , time .time () - start_time , task_name , task_description , task_id )
1755- return await self ._atrigger_after_agent_hook (original_prompt , result , start_time )
1756-
1757- # LEGACY: Check if OpenAI client is available
1758- if self ._openai_client is None :
1759- error_msg = "OpenAI client is not initialized. Please provide OPENAI_API_KEY or use a custom LLM provider."
1760- _get_display_functions ()['display_error' ](error_msg )
1761- return None
1762-
1763- # Make the API call based on the type of request
1764- if tools :
1765- effective_tool_choice = tool_choice or getattr (self , '_yaml_tool_choice' , None )
1766- tool_call_kwargs = dict (
1767- model = self .llm ,
1768- messages = messages ,
1769- temperature = temperature ,
1770- tools = formatted_tools ,
1771- )
1772- if effective_tool_choice :
1773- tool_call_kwargs ['tool_choice' ] = effective_tool_choice
1774- response = await self ._openai_client .async_client .chat .completions .create (
1775- ** tool_call_kwargs
1776- )
1777- result = await self ._achat_completion (response , tools )
1778- if get_logger ().getEffectiveLevel () == logging .DEBUG :
1779- total_time = time .time () - start_time
1780- logging .debug (f"Agent.achat completed in { total_time :.2f} seconds" )
1781- # Execute callback after tool completion
1782- self ._execute_callback_and_display (original_prompt , result , time .time () - start_time , task_name , task_description , task_id )
1783- return await self ._atrigger_after_agent_hook (original_prompt , result , start_time )
1784- elif output_json or output_pydantic :
1785- response = await self ._openai_client .async_client .chat .completions .create (
1786- model = self .llm ,
1787- messages = messages ,
1788- temperature = temperature ,
1789- response_format = {"type" : "json_object" }
1790- )
1791- response_text = response .choices [0 ].message .content
1792- if get_logger ().getEffectiveLevel () == logging .DEBUG :
1793- total_time = time .time () - start_time
1794- logging .debug (f"Agent.achat completed in { total_time :.2f} seconds" )
1795- # Execute callback after JSON/Pydantic completion
1796- self ._execute_callback_and_display (original_prompt , response_text , time .time () - start_time , task_name , task_description , task_id )
1797- return await self ._atrigger_after_agent_hook (original_prompt , response_text , start_time )
1798- else :
1799- response = await self ._openai_client .async_client .chat .completions .create (
1800- model = self .llm ,
1801- messages = messages ,
1802- temperature = temperature
1803- )
1901+ response = await self ._openai_client .async_client .chat .completions .create (
1902+ model = self .llm ,
1903+ messages = messages ,
1904+ temperature = temperature
1905+ )
18041906
18051907 response_text = response .choices [0 ].message .content
18061908
0 commit comments