@@ -115,7 +115,7 @@ def setUp(self):
 
     def test_hello(self):
         # Generate text from text prompt
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
 
         self.responses["generate_content"].append(simple_response("world!"))
 
@@ -138,7 +138,7 @@ def test_hello(self):
     )
     def test_image(self, content):
         # Generate text from image
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
 
         cat = "It's a cat"
         self.responses["generate_content"].append(simple_response(cat))
@@ -172,7 +172,7 @@ def test_image(self, content):
     )
     def test_generation_config_overwrite(self, config1, config2):
         # Generation config
-        model = generative_models.GenerativeModel("gemini-pro", generation_config=config1)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", generation_config=config1)
 
         self.responses["generate_content"] = [
             simple_response(" world!"),
@@ -218,7 +218,7 @@ def test_generation_config_overwrite(self, config1, config2):
     )
     def test_safety_overwrite(self, safe1, safe2):
         # Safety
-        model = generative_models.GenerativeModel("gemini-pro", safety_settings=safe1)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", safety_settings=safe1)
 
         self.responses["generate_content"] = [
             simple_response(" world!"),
@@ -253,7 +253,7 @@ def test_stream_basic(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
@@ -267,7 +267,7 @@ def test_stream_lookahead(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
@@ -287,7 +287,7 @@ def test_stream_prompt_feedback_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Bad stuff!", stream=True)
 
         self.assertEqual(
@@ -322,7 +322,7 @@ def test_stream_prompt_feedback_not_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(
@@ -389,7 +389,7 @@ def add(a: int, b: int) -> int:
 
     def test_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["generate_content"] = [
@@ -423,7 +423,7 @@ def test_chat(self):
     def test_chat_roles(self):
         self.responses["generate_content"] = [simple_response("hello!")]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         response = chat.send_message("hello?")
         history = chat.history
@@ -792,7 +792,7 @@ def test_tool_config(self, tool_config, expected_tool_config):
         )
         self.responses["generate_content"] = [simple_response("echo echo")]
 
-        model = generative_models.GenerativeModel("gemini-pro", tools=tools)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
         _ = model.generate_content("Hello", tools=[tools], tool_config=tool_config)
 
         req = self.observed_requests[0]
@@ -811,7 +811,7 @@ def test_tool_config(self, tool_config, expected_tool_config):
     )
     def test_system_instruction(self, instruction, expected_instr):
         self.responses["generate_content"] = [simple_response("echo echo")]
-        model = generative_models.GenerativeModel("gemini-pro", system_instruction=instruction)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", system_instruction=instruction)
 
         _ = model.generate_content("test")
 
@@ -852,7 +852,7 @@ def test_count_tokens_smoke(self, kwargs):
     )
 
     def test_repr_for_unary_non_streamed_response(self):
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
         self.responses["generate_content"].append(simple_response("world!"))
         response = model.generate_content("Hello")
 
@@ -885,7 +885,7 @@ def test_repr_for_streaming_start_to_finish(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
         iterator = iter(response)
 
@@ -980,7 +980,7 @@ def test_repr_error_info_for_stream_prompt_feedback_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Bad stuff!", stream=True)
 
         result = repr(response)
@@ -1096,7 +1096,7 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self):
 
     def test_repr_for_multi_turn_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["generate_content"] = [
@@ -1119,7 +1119,7 @@ def test_repr_for_multi_turn_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,
@@ -1133,7 +1133,7 @@ def test_repr_for_multi_turn_chat(self):
 
     def test_repr_for_incomplete_streaming_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["stream_generate_content"] = [
@@ -1148,7 +1148,7 @@ def test_repr_for_incomplete_streaming_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,
@@ -1162,7 +1162,7 @@ def test_repr_for_incomplete_streaming_chat(self):
 
     def test_repr_for_broken_streaming_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["stream_generate_content"] = [
@@ -1193,7 +1193,7 @@ def test_repr_for_broken_streaming_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,
@@ -1206,7 +1206,7 @@ def test_repr_for_broken_streaming_chat(self):
         self.assertEqual(expected, result)
 
     def test_repr_for_system_instruction(self):
-        model = generative_models.GenerativeModel("gemini-pro", system_instruction="Be excellent.")
+        model = generative_models.GenerativeModel("gemini-1.5-flash", system_instruction="Be excellent.")
         result = repr(model)
         self.assertIn("system_instruction='Be excellent.'", result)
 
@@ -1237,7 +1237,7 @@ def test_chat_with_request_options(self):
         )
         request_options = {"timeout": 120}
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         chat.send_message("hello", request_options=helper_types.RequestOptions(**request_options))
 
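Note: this change only swaps the default test model name from gemini-pro to gemini-1.5-flash; the call patterns the tests exercise are unchanged. For reference, a minimal sketch of those patterns against the public google.generativeai API follows. The API key and prompt strings are placeholders, not taken from this diff; the method names (GenerativeModel, generate_content, start_chat, send_message) all appear in the hunks above.

import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder; supply a real key

# Unary generation, as exercised by test_hello
model = genai.GenerativeModel(model_name="gemini-1.5-flash")
response = model.generate_content("Hello")
print(response.text)

# Streamed generation, as exercised by test_stream_basic; chunks arrive incrementally
for chunk in model.generate_content("Hello", stream=True):
    print(chunk.text)

# Multi-turn chat, as exercised by test_chat; history accumulates user/model turns
chat = model.start_chat()
reply = chat.send_message("hello?")
print(chat.history)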