
Commit 3a592a4

remove references to gemini-pro
Change-Id: Ied2f0b7112dd5d61390da3e84457a2fb3f770665
1 parent 61780f1 commit 3a592a4

8 files changed (+40, -40 lines)


docs/api/google/generativeai/ChatSession.md
Lines changed: 2 additions & 2 deletions

@@ -39,7 +39,7 @@ Contains an ongoing conversation with the model.
 <!-- Placeholder for "Used in" -->
 
 ```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> chat = model.start_chat()
 >>> response = chat.send_message("Hello")
 >>> print(response.text)
@@ -136,7 +136,7 @@ Sends the conversation history with the added message and returns the model's re
 Appends the request and response to the conversation history.
 
 ```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> chat = model.start_chat()
 >>> response = chat.send_message("Hello")
 >>> print(response.text)
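
For anyone updating downstream code to match this rename, a minimal sketch of the new chat usage (the API key is a placeholder, and the `chat.history` loop is an illustration added here, not part of the diffed docs):

```
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder key

model = genai.GenerativeModel("models/gemini-1.5-flash")
chat = model.start_chat()
response = chat.send_message("Hello")
print(response.text)

# send_message appends both the request and the reply to the history.
for content in chat.history:
    print(content.role, "->", content.parts[0].text)
```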

docs/api/google/generativeai/GenerativeModel.md
Lines changed: 4 additions & 4 deletions

@@ -31,7 +31,7 @@ The `genai.GenerativeModel` class wraps default parameters for calls to <a href=
 
 <pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
 <code>google.generativeai.GenerativeModel(
-    model_name: str = &#x27;gemini-pro&#x27;,
+    model_name: str = &#x27;gemini-1.5-flash&#x27;,
     safety_settings: (safety_types.SafetySettingOptions | None) = None,
     generation_config: (generation_types.GenerationConfigType | None) = None,
     tools: (content_types.FunctionLibraryType | None) = None,
@@ -51,7 +51,7 @@ requests. What media-types are supported for input and output is model-dependant
 >>> import google.generativeai as genai
 >>> import PIL.Image
 >>> genai.configure(api_key='YOUR_API_KEY')
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> result = model.generate_content('Tell me a story about a magic backpack')
 >>> result.text
 "In the quaint little town of Lakeside, there lived a young girl named Lily..."
@@ -62,7 +62,7 @@ requests. What media-types are supported for input and output is model-dependant
 
 
 ```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> result = model.generate_content([
 ...     "Give me a recipe for these:", PIL.Image.open('scones.jpeg')])
 >>> result.text
@@ -270,7 +270,7 @@ This <a href="../../google/generativeai/GenerativeModel.md#generate_content"><co
 conversations.
 
 ```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> response = model.generate_content('Tell me a story about a magic backpack')
 >>> response.text
 ```
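
The signature change above moves the constructor default itself, so callers that never pass `model_name` are migrated implicitly. A minimal sketch of both spellings, assuming a configured API key (the key string is a placeholder):

```
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder

# Explicit name, matching the updated docs...
model = genai.GenerativeModel(model_name="gemini-1.5-flash")

# ...or rely on the new default and pass nothing at all.
default_model = genai.GenerativeModel()

result = model.generate_content("Tell me a story about a magic backpack")
print(result.text)
```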

docs/api/google/generativeai/get_model.md
Lines changed: 1 addition & 1 deletion

@@ -38,7 +38,7 @@ Calls the API to fetch a model by name.
 
 ```
 import pprint
-model = genai.get_model('models/gemini-pro')
+model = genai.get_model('models/gemini-1.5-flash')
 pprint.pprint(model)
 ```
 
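
Because model names are retired over time (which is the point of this commit), it can be safer to discover a current name than to hard-code one. A sketch using the library's `list_models`, again with a placeholder key:

```
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder

# Print every model that supports generateContent, so a stale name such as
# 'models/gemini-pro' can be swapped for one the API still serves.
for m in genai.list_models():
    if "generateContent" in m.supported_generation_methods:
        print(m.name)
```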

google/generativeai/generative_models.py
Lines changed: 5 additions & 5 deletions

@@ -36,14 +36,14 @@ class GenerativeModel:
     >>> import google.generativeai as genai
     >>> import PIL.Image
     >>> genai.configure(api_key='YOUR_API_KEY')
-    >>> model = genai.GenerativeModel('models/gemini-pro')
+    >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
     >>> result = model.generate_content('Tell me a story about a magic backpack')
     >>> result.text
     "In the quaint little town of Lakeside, there lived a young girl named Lily..."
 
     Multimodal input:
 
-    >>> model = genai.GenerativeModel('models/gemini-pro')
+    >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
     >>> result = model.generate_content([
     ...     "Give me a recipe for these:", PIL.Image.open('scones.jpeg')])
     >>> result.text
@@ -250,7 +250,7 @@ def generate_content(
         This `GenerativeModel.generate_content` method can handle multimodal input, and multi-turn
         conversations.
 
-        >>> model = genai.GenerativeModel('models/gemini-pro')
+        >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
         >>> response = model.generate_content('Tell me a story about a magic backpack')
         >>> response.text
 
@@ -481,7 +481,7 @@ def start_chat(
 class ChatSession:
     """Contains an ongoing conversation with the model.
 
-    >>> model = genai.GenerativeModel('models/gemini-pro')
+    >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
     >>> chat = model.start_chat()
     >>> response = chat.send_message("Hello")
     >>> print(response.text)
@@ -524,7 +524,7 @@ def send_message(
 
         Appends the request and response to the conversation history.
 
-        >>> model = genai.GenerativeModel('models/gemini-pro')
+        >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
         >>> chat = model.start_chat()
         >>> response = chat.send_message("Hello")
         >>> print(response.text)

google/generativeai/models.py
Lines changed: 1 addition & 1 deletion

@@ -40,7 +40,7 @@ def get_model(
 
     ```
     import pprint
-    model = genai.get_model('models/gemini-pro')
+    model = genai.get_model('models/gemini-1.5-flash')
     pprint.pprint(model)
     ```
 

google/generativeai/notebook/text_model.py
Lines changed: 1 addition & 1 deletion

@@ -20,7 +20,7 @@
 from google.generativeai.types import generation_types
 from google.generativeai.notebook.lib import model as model_lib
 
-_DEFAULT_MODEL = "models/gemini-pro"
+_DEFAULT_MODEL = "models/gemini-1.5-flash"
 
 
 class TextModel(model_lib.AbstractModel):
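
Keeping the default in a single module-level constant is what makes this a one-line change; every notebook call site picks up the new name automatically. A hypothetical sketch of the pattern (`make_text_model` is illustrative, not the repo's API):

```
from typing import Optional

import google.generativeai as genai

_DEFAULT_MODEL = "models/gemini-1.5-flash"


def make_text_model(model_name: Optional[str] = None) -> genai.GenerativeModel:
    # Hypothetical helper: callers that don't pin a model fall back to the
    # module-wide default, so a rename happens in exactly one place.
    return genai.GenerativeModel(model_name or _DEFAULT_MODEL)
```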

tests/test_generative_models.py
Lines changed: 23 additions & 23 deletions

@@ -115,7 +115,7 @@ def setUp(self):
 
     def test_hello(self):
         # Generate text from text prompt
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
 
         self.responses["generate_content"].append(simple_response("world!"))
 
@@ -138,7 +138,7 @@ def test_hello(self):
     )
     def test_image(self, content):
         # Generate text from image
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
 
         cat = "It's a cat"
         self.responses["generate_content"].append(simple_response(cat))
@@ -172,7 +172,7 @@ def test_image(self, content):
     )
     def test_generation_config_overwrite(self, config1, config2):
         # Generation config
-        model = generative_models.GenerativeModel("gemini-pro", generation_config=config1)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", generation_config=config1)
 
         self.responses["generate_content"] = [
             simple_response(" world!"),
@@ -218,7 +218,7 @@ def test_generation_config_overwrite(self, config1, config2):
     )
     def test_safety_overwrite(self, safe1, safe2):
         # Safety
-        model = generative_models.GenerativeModel("gemini-pro", safety_settings=safe1)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", safety_settings=safe1)
 
         self.responses["generate_content"] = [
             simple_response(" world!"),
@@ -253,7 +253,7 @@ def test_stream_basic(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
@@ -267,7 +267,7 @@ def test_stream_lookahead(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
@@ -287,7 +287,7 @@ def test_stream_prompt_feedback_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Bad stuff!", stream=True)
 
         self.assertEqual(
@@ -322,7 +322,7 @@ def test_stream_prompt_feedback_not_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(
@@ -389,7 +389,7 @@ def add(a: int, b: int) -> int:
 
     def test_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["generate_content"] = [
@@ -423,7 +423,7 @@ def test_chat(self):
     def test_chat_roles(self):
         self.responses["generate_content"] = [simple_response("hello!")]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         response = chat.send_message("hello?")
         history = chat.history
@@ -792,7 +792,7 @@ def test_tool_config(self, tool_config, expected_tool_config):
         )
         self.responses["generate_content"] = [simple_response("echo echo")]
 
-        model = generative_models.GenerativeModel("gemini-pro", tools=tools)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
         _ = model.generate_content("Hello", tools=[tools], tool_config=tool_config)
 
         req = self.observed_requests[0]
@@ -811,7 +811,7 @@ def test_tool_config(self, tool_config, expected_tool_config):
     )
     def test_system_instruction(self, instruction, expected_instr):
         self.responses["generate_content"] = [simple_response("echo echo")]
-        model = generative_models.GenerativeModel("gemini-pro", system_instruction=instruction)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", system_instruction=instruction)
 
         _ = model.generate_content("test")
 
@@ -852,7 +852,7 @@ def test_count_tokens_smoke(self, kwargs):
         )
 
     def test_repr_for_unary_non_streamed_response(self):
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
         self.responses["generate_content"].append(simple_response("world!"))
         response = model.generate_content("Hello")
 
@@ -885,7 +885,7 @@ def test_repr_for_streaming_start_to_finish(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
         iterator = iter(response)
 
@@ -980,7 +980,7 @@ def test_repr_error_info_for_stream_prompt_feedback_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Bad stuff!", stream=True)
 
         result = repr(response)
@@ -1096,7 +1096,7 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self):
 
     def test_repr_for_multi_turn_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["generate_content"] = [
@@ -1119,7 +1119,7 @@ def test_repr_for_multi_turn_chat(self):
         """\
 ChatSession(
     model=genai.GenerativeModel(
-        model_name='models/gemini-pro',
+        model_name='models/gemini-1.5-flash',
         generation_config={},
         safety_settings={},
         tools=None,
@@ -1133,7 +1133,7 @@ def test_repr_for_multi_turn_chat(self):
 
     def test_repr_for_incomplete_streaming_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["stream_generate_content"] = [
@@ -1148,7 +1148,7 @@ def test_repr_for_incomplete_streaming_chat(self):
         """\
 ChatSession(
     model=genai.GenerativeModel(
-        model_name='models/gemini-pro',
+        model_name='models/gemini-1.5-flash',
         generation_config={},
         safety_settings={},
         tools=None,
@@ -1162,7 +1162,7 @@ def test_repr_for_incomplete_streaming_chat(self):
 
     def test_repr_for_broken_streaming_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["stream_generate_content"] = [
@@ -1193,7 +1193,7 @@ def test_repr_for_broken_streaming_chat(self):
         """\
 ChatSession(
     model=genai.GenerativeModel(
-        model_name='models/gemini-pro',
+        model_name='models/gemini-1.5-flash',
         generation_config={},
         safety_settings={},
         tools=None,
@@ -1206,7 +1206,7 @@ def test_repr_for_broken_streaming_chat(self):
         self.assertEqual(expected, result)
 
     def test_repr_for_system_instruction(self):
-        model = generative_models.GenerativeModel("gemini-pro", system_instruction="Be excellent.")
+        model = generative_models.GenerativeModel("gemini-1.5-flash", system_instruction="Be excellent.")
         result = repr(model)
         self.assertIn("system_instruction='Be excellent.'", result)
 
@@ -1237,7 +1237,7 @@ def test_chat_with_request_options(self):
         )
         request_options = {"timeout": 120}
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         chat.send_message("hello", request_options=helper_types.RequestOptions(**request_options))
 
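
With 23 occurrences in this file alone, a mechanical sweep like this is worth double-checking. A rough sketch of a check that no "gemini-pro" literals survive the rename (the path and suffix filters are assumptions about the repo layout):

```
import pathlib

# Report any .py or .md file still containing the retired model name.
stale = [
    str(p)
    for p in pathlib.Path(".").rglob("*")
    if p.is_file()
    and p.suffix in {".py", ".md"}
    and "gemini-pro" in p.read_text(errors="ignore")
]
print(stale or "no stale references found")
```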

tests/test_generative_models_async.py
Lines changed: 3 additions & 3 deletions

@@ -80,7 +80,7 @@ async def count_tokens(
 
     async def test_basic(self):
         # Generate text from text prompt
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
 
         self.responses["generate_content"] = [simple_response("world!")]
 
@@ -93,7 +93,7 @@ async def test_basic(self):
 
     async def test_streaming(self):
         # Generate text from text prompt
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
 
         async def responses():
             for c in "world!":
@@ -195,7 +195,7 @@ async def test_tool_config(self, tool_config, expected_tool_config):
         )
         self.responses["generate_content"] = [simple_response("echo echo")]
 
-        model = generative_models.GenerativeModel("gemini-pro", tools=tools)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
         _ = await model.generate_content_async("Hello", tools=[tools], tool_config=tool_config)
 
         req = self.observed_requests[0]
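
For completeness, a minimal sketch of the async entry point these tests exercise, runnable against the real API given a valid key (the key string is a placeholder):

```
import asyncio

import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder


async def main():
    model = genai.GenerativeModel("gemini-1.5-flash")
    # generate_content_async mirrors generate_content, as tested above.
    response = await model.generate_content_async("Hello")
    print(response.text)


asyncio.run(main())
```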
