@@ -120,12 +120,17 @@ def run(system_prompt: str, initial_query: str, client, model: str,
120120
121121 if not config .get ('providers' ):
122122 logger .warning ("No providers configured, falling back to original client" )
123+ # Strip stream parameter to force complete response
124+ api_config = dict (request_config or {})
125+ api_config .pop ('stream' , None )
126+
123127 response = client .chat .completions .create (
124128 model = model ,
125129 messages = [
126130 {"role" : "system" , "content" : system_prompt },
127131 {"role" : "user" , "content" : initial_query }
128- ]
132+ ],
133+ ** api_config
129134 )
130135 # Return full response dict to preserve all usage information
131136 response_dict = response .model_dump () if hasattr (response , 'model_dump' ) else response
@@ -223,12 +228,17 @@ def run(system_prompt: str, initial_query: str, client, model: str,
223228 logger .error (f"Proxy plugin error: { e } " , exc_info = True )
224229 # Fallback to original client
225230 logger .info ("Falling back to original client" )
231+ # Strip stream parameter to force complete response
232+ api_config = dict (request_config or {})
233+ api_config .pop ('stream' , None )
234+
226235 response = client .chat .completions .create (
227236 model = model ,
228237 messages = [
229238 {"role" : "system" , "content" : system_prompt },
230239 {"role" : "user" , "content" : initial_query }
231- ]
240+ ],
241+ ** api_config
232242 )
233243 # Return full response dict to preserve all usage information
234244 response_dict = response .model_dump () if hasattr (response , 'model_dump' ) else response