@@ -753,20 +753,20 @@ def test__convert_tool_message_to_parts__sets_tool_name(
 def test_temperature_range_pydantic_validation() -> None:
     """Test that temperature is in the range [0.0, 2.0]."""
     with pytest.raises(ValidationError):
-        ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=2.1)
+        ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=2.1)
 
     with pytest.raises(ValidationError):
-        ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=-0.1)
+        ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=-0.1)
 
     llm = ChatGoogleGenerativeAI(
-        model="gemini-2.0-flash",
+        model="gemini-2.5-flash",
         google_api_key=SecretStr("..."),
         temperature=1.5,
     )
     ls_params = llm._get_ls_params()
     assert ls_params == {
         "ls_provider": "google_genai",
-        "ls_model_name": "gemini-2.0-flash",
+        "ls_model_name": "gemini-2.5-flash",
         "ls_model_type": "chat",
         "ls_temperature": 1.5,
     }
@@ -775,10 +775,10 @@ def test_temperature_range_pydantic_validation() -> None:
 def test_temperature_range_model_validation() -> None:
     """Test that temperature is in the range [0.0, 2.0]."""
     with pytest.raises(ValueError):
-        ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=2.5)
+        ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=2.5)
 
     with pytest.raises(ValueError):
-        ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=-0.5)
+        ChatGoogleGenerativeAI(model="gemini-2.5-flash", temperature=-0.5)
 
 
 def test_model_kwargs() -> None:
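For context, here is a minimal sketch of the kind of Pydantic range constraint these tests exercise: temperature bounded to [0.0, 2.0], so out-of-range values raise `ValidationError` at construction time. `FakeChatModel` and its fields are illustrative assumptions, not the actual `ChatGoogleGenerativeAI` implementation.

```python
# Minimal sketch of a Pydantic [0.0, 2.0] temperature bound, analogous to what
# the tests above rely on. FakeChatModel is a hypothetical stand-in class.
from pydantic import BaseModel, Field, ValidationError


class FakeChatModel(BaseModel):
    model: str
    temperature: float = Field(default=1.0, ge=0.0, le=2.0)


# In range: constructs without error.
FakeChatModel(model="gemini-2.5-flash", temperature=1.5)

# Out of range: Pydantic raises ValidationError, which is what
# pytest.raises(ValidationError) asserts in the tests above.
try:
    FakeChatModel(model="gemini-2.5-flash", temperature=2.1)
except ValidationError as exc:
    print(exc)
```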