@@ -213,7 +213,7 @@ def test_init_with_parameters(self, monkeypatch):
             model="gpt-4o-mini",
             streaming_callback=print_streaming_chunk,
             api_base_url="test-base-url",
-            generation_kwargs={"max_tokens": 10, "some_test_param": "test-params"},
+            generation_kwargs={"max_completion_tokens": 10, "some_test_param": "test-params"},
             timeout=40.0,
             max_retries=1,
             tools=[tool],
@@ -223,7 +223,7 @@ def test_init_with_parameters(self, monkeypatch):
         assert component.client.api_key == "test-api-key"
         assert component.model == "gpt-4o-mini"
         assert component.streaming_callback is print_streaming_chunk
-        assert component.generation_kwargs == {"max_tokens": 10, "some_test_param": "test-params"}
+        assert component.generation_kwargs == {"max_completion_tokens": 10, "some_test_param": "test-params"}
         assert component.client.timeout == 40.0
         assert component.client.max_retries == 1
         assert component.tools == [tool]
@@ -238,12 +238,12 @@ def test_init_with_parameters_and_env_vars(self, monkeypatch):
             model="gpt-4o-mini",
             streaming_callback=print_streaming_chunk,
             api_base_url="test-base-url",
-            generation_kwargs={"max_tokens": 10, "some_test_param": "test-params"},
+            generation_kwargs={"max_completion_tokens": 10, "some_test_param": "test-params"},
         )
         assert component.client.api_key == "test-api-key"
         assert component.model == "gpt-4o-mini"
         assert component.streaming_callback is print_streaming_chunk
-        assert component.generation_kwargs == {"max_tokens": 10, "some_test_param": "test-params"}
+        assert component.generation_kwargs == {"max_completion_tokens": 10, "some_test_param": "test-params"}
         assert component.client.timeout == 100.0
         assert component.client.max_retries == 10
 
@@ -278,7 +278,7 @@ def test_to_dict_with_parameters(self, monkeypatch, calendar_event_model):
             streaming_callback=print_streaming_chunk,
             api_base_url="test-base-url",
             generation_kwargs={
-                "max_tokens": 10,
+                "max_completion_tokens": 10,
                 "some_test_param": "test-params",
                 "response_format": calendar_event_model,
             },
@@ -301,7 +301,7 @@ def test_to_dict_with_parameters(self, monkeypatch, calendar_event_model):
301301 "timeout" : 100.0 ,
302302 "streaming_callback" : "haystack.components.generators.utils.print_streaming_chunk" ,
303303 "generation_kwargs" : {
304- "max_tokens " : 10 ,
304+ "max_completion_tokens " : 10 ,
305305 "some_test_param" : "test-params" ,
306306 "response_format" : {
307307 "type" : "json_schema" ,
@@ -377,7 +377,7 @@ def test_from_dict(self, monkeypatch):
377377 "streaming_callback" : "haystack.components.generators.utils.print_streaming_chunk" ,
378378 "max_retries" : 10 ,
379379 "timeout" : 100.0 ,
380- "generation_kwargs" : {"max_tokens " : 10 , "some_test_param" : "test-params" },
380+ "generation_kwargs" : {"max_completion_tokens " : 10 , "some_test_param" : "test-params" },
381381 "tools" : [
382382 {
383383 "type" : "haystack.tools.tool.Tool" ,
@@ -399,7 +399,7 @@ def test_from_dict(self, monkeypatch):
         assert component.model == "gpt-4o-mini"
         assert component.streaming_callback is print_streaming_chunk
         assert component.api_base_url == "test-base-url"
-        assert component.generation_kwargs == {"max_tokens": 10, "some_test_param": "test-params"}
+        assert component.generation_kwargs == {"max_completion_tokens": 10, "some_test_param": "test-params"}
         assert component.api_key == Secret.from_env_var("OPENAI_API_KEY")
         assert component.tools == [
             Tool(name="name", description="description", parameters={"x": {"type": "string"}}, function=print)
@@ -419,7 +419,7 @@ def test_from_dict_fail_wo_env_var(self, monkeypatch):
419419 "organization" : None ,
420420 "api_base_url" : "test-base-url" ,
421421 "streaming_callback" : "haystack.components.generators.utils.print_streaming_chunk" ,
422- "generation_kwargs" : {"max_tokens " : 10 , "some_test_param" : "test-params" },
422+ "generation_kwargs" : {"max_completion_tokens " : 10 , "some_test_param" : "test-params" },
423423 "tools" : None ,
424424 },
425425 }
@@ -439,13 +439,14 @@ def test_run(self, chat_messages, openai_mock_chat_completion):
 
     def test_run_with_params(self, chat_messages, openai_mock_chat_completion):
         component = OpenAIChatGenerator(
-            api_key=Secret.from_token("test-api-key"), generation_kwargs={"max_tokens": 10, "temperature": 0.5}
+            api_key=Secret.from_token("test-api-key"),
+            generation_kwargs={"max_completion_tokens": 10, "temperature": 0.5},
         )
         response = component.run(chat_messages)
 
         # check that the component calls the OpenAI API with the correct parameters
         _, kwargs = openai_mock_chat_completion.call_args
-        assert kwargs["max_tokens"] == 10
+        assert kwargs["max_completion_tokens"] == 10
         assert kwargs["temperature"] == 0.5
 
         # check that the tools are not passed to the OpenAI API (the generator is initialized without tools)
@@ -573,7 +574,7 @@ def test_check_abnormal_completions(self, caplog):
         # check truncation warning
         message_template = (
             "The completion for index {index} has been truncated before reaching a natural stopping point. "
-            "Increase the max_tokens parameter to allow for longer completions."
+            "Increase the max_completion_tokens parameter to allow for longer completions."
         )
 
         for index in [1, 3]:
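
For reference, a minimal usage sketch of the renamed parameter, mirroring what the updated tests exercise. The import paths and the `run()` return shape follow Haystack 2.x conventions and are assumed here, since only the test file appears in this diff.

```python
# Minimal sketch (not part of this diff): passing the renamed
# "max_completion_tokens" key through generation_kwargs, as the tests above do.
from haystack.components.generators.chat import OpenAIChatGenerator
from haystack.dataclasses import ChatMessage
from haystack.utils import Secret

generator = OpenAIChatGenerator(
    api_key=Secret.from_env_var("OPENAI_API_KEY"),  # read from the environment, as in the tests
    model="gpt-4o-mini",
    generation_kwargs={"max_completion_tokens": 10, "temperature": 0.5},  # previously "max_tokens"
)

# Calls the live OpenAI API; requires OPENAI_API_KEY to be set.
result = generator.run([ChatMessage.from_user("Say hello in one word.")])
print(result["replies"][0])
```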