Skip to content

Commit 695b0fd

Browse files
Update model names (google#434)
1 parent: 1827b09 · commit: 695b0fd

File tree

1 file changed

+13
-22
lines changed

1 file changed

+13
-22
lines changed

site/en/gemini-api/docs/get-started/python.ipynb

Lines changed: 13 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -231,8 +231,8 @@
231231
"\n",
232232
"Now you're ready to call the Gemini API. Use `list_models` to see the available Gemini models:\n",
233233
"\n",
234-
"* `gemini-pro`: optimized for text-only prompts.\n",
235-
"* `gemini-pro-vision`: optimized for text-and-images prompts."
234+
"* `gemini-1.5-pro`: optimized for high intelligence tasks, the most powerful Gemini model\n",
235+
"* `gemini-1.5-flash`: optimized for multi-modal use-cases where speed and cost are important"
236236
]
237237
},
238238
{
@@ -278,7 +278,7 @@
278278
},
279279
"outputs": [],
280280
"source": [
281-
"model = genai.GenerativeModel('gemini-pro')"
281+
"model = genai.GenerativeModel('gemini-1.5-flash')"
282282
]
283283
},
284284
{
@@ -712,7 +712,7 @@
712712
"id": "7r99TN2R8EUD"
713713
},
714714
"source": [
715-
"Use the `gemini-pro-vision` model and pass the image to the model with `generate_content`."
715+
"Use the `gemini-1.5-flash` model and pass the image to the model with `generate_content`."
716716
]
717717
},
718718
{
@@ -723,7 +723,7 @@
723723
},
724724
"outputs": [],
725725
"source": [
726-
"model = genai.GenerativeModel('gemini-pro-vision')"
726+
"model = genai.GenerativeModel('gemini-1.5-flash')"
727727
]
728728
},
729729
{
@@ -839,20 +839,11 @@
839839
}
840840
],
841841
"source": [
842-
"model = genai.GenerativeModel('gemini-pro')\n",
842+
"model = genai.GenerativeModel('gemini-1.5-flash')\n",
843843
"chat = model.start_chat(history=[])\n",
844844
"chat"
845845
]
846846
},
847-
{
848-
"cell_type": "markdown",
849-
"metadata": {
850-
"id": "88Il02N-km9j"
851-
},
852-
"source": [
853-
"Note: The vision model `gemini-pro-vision` is not optimized for multi-turn chat."
854-
]
855-
},
856847
{
857848
"cell_type": "markdown",
858849
"metadata": {
@@ -1139,7 +1130,7 @@
11391130
],
11401131
"source": [
11411132
"result = genai.embed_content(\n",
1142-
" model=\"models/embedding-001\",\n",
1133+
" model=\"models/text-embedding-004\",\n",
11431134
" content=\"What is the meaning of life?\",\n",
11441135
" task_type=\"retrieval_document\",\n",
11451136
" title=\"Embedding of single string\")\n",
@@ -1178,7 +1169,7 @@
11781169
],
11791170
"source": [
11801171
"result = genai.embed_content(\n",
1181-
" model=\"models/embedding-001\",\n",
1172+
" model=\"models/text-embedding-004\",\n",
11821173
" content=[\n",
11831174
" 'What is the meaning of life?',\n",
11841175
" 'How much wood would a woodchuck chuck?',\n",
@@ -1244,7 +1235,7 @@
12441235
],
12451236
"source": [
12461237
"result = genai.embed_content(\n",
1247-
" model = 'models/embedding-001',\n",
1238+
" model = 'models/text-embedding-004',\n",
12481239
" content = response.candidates[0].content)\n",
12491240
"\n",
12501241
"# 1 input > 1 vector output\n",
@@ -1317,7 +1308,7 @@
13171308
],
13181309
"source": [
13191310
"result = genai.embed_content(\n",
1320-
" model = 'models/embedding-001',\n",
1311+
" model = 'models/text-embedding-004',\n",
13211312
" content = chat.history)\n",
13221313
"\n",
13231314
"# 1 input > 1 vector output\n",
@@ -1537,7 +1528,7 @@
15371528
},
15381529
"outputs": [],
15391530
"source": [
1540-
"model = genai.GenerativeModel('gemini-pro-vision')\n",
1531+
"model = genai.GenerativeModel('gemini-1.5-flash')\n",
15411532
"response = model.generate_content(\n",
15421533
" glm.Content(\n",
15431534
" parts = [\n",
@@ -1631,7 +1622,7 @@
16311622
}
16321623
],
16331624
"source": [
1634-
"model = genai.GenerativeModel('gemini-pro')\n",
1625+
"model = genai.GenerativeModel('gemini-1.5-flash')\n",
16351626
"\n",
16361627
"messages = [\n",
16371628
" {'role':'user',\n",
@@ -1754,7 +1745,7 @@
17541745
},
17551746
"outputs": [],
17561747
"source": [
1757-
"model = genai.GenerativeModel('gemini-pro')\n",
1748+
"model = genai.GenerativeModel('gemini-1.5-flash')\n",
17581749
"response = model.generate_content(\n",
17591750
" 'Tell me a story about a magic backpack.',\n",
17601751
" generation_config=genai.types.GenerationConfig(\n",

0 commit comments

Comments (0)