|
231 | 231 | "\n", |
232 | 232 | "Now you're ready to call the Gemini API. Use `list_models` to see the available Gemini models:\n", |
233 | 233 | "\n", |
234 | | - "* `gemini-pro`: optimized for text-only prompts.\n", |
235 | | - "* `gemini-pro-vision`: optimized for text-and-images prompts." |
| 234 | + "* `gemini-1.5-pro`: optimized for high-intelligence tasks; the most powerful Gemini model\n", |
| 235 | + "* `gemini-1.5-flash`: optimized for multimodal use cases where speed and cost are important" |
236 | 236 | ] |
237 | 237 | }, |
238 | 238 | { |
|
278 | 278 | }, |
279 | 279 | "outputs": [], |
280 | 280 | "source": [ |
281 | | - "model = genai.GenerativeModel('gemini-pro')" |
| 281 | + "model = genai.GenerativeModel('gemini-1.5-flash')" |
282 | 282 | ] |
283 | 283 | }, |
284 | 284 | { |
|
712 | 712 | "id": "7r99TN2R8EUD" |
713 | 713 | }, |
714 | 714 | "source": [ |
715 | | - "Use the `gemini-pro-vision` model and pass the image to the model with `generate_content`." |
| 715 | + "Use the `gemini-1.5-flash` model and pass the image to the model with `generate_content`." |
716 | 716 | ] |
717 | 717 | }, |
718 | 718 | { |
|
723 | 723 | }, |
724 | 724 | "outputs": [], |
725 | 725 | "source": [ |
726 | | - "model = genai.GenerativeModel('gemini-pro-vision')" |
| 726 | + "model = genai.GenerativeModel('gemini-1.5-flash')" |
727 | 727 | ] |
728 | 728 | }, |
729 | 729 | { |
|
839 | 839 | } |
840 | 840 | ], |
841 | 841 | "source": [ |
842 | | - "model = genai.GenerativeModel('gemini-pro')\n", |
| 842 | + "model = genai.GenerativeModel('gemini-1.5-flash')\n", |
843 | 843 | "chat = model.start_chat(history=[])\n", |
844 | 844 | "chat" |
845 | 845 | ] |
846 | 846 | }, |
847 | | - { |
848 | | - "cell_type": "markdown", |
849 | | - "metadata": { |
850 | | - "id": "88Il02N-km9j" |
851 | | - }, |
852 | | - "source": [ |
853 | | - "Note: The vision model `gemini-pro-vision` is not optimized for multi-turn chat." |
854 | | - ] |
855 | | - }, |
856 | 847 | { |
857 | 848 | "cell_type": "markdown", |
858 | 849 | "metadata": { |
|
1139 | 1130 | ], |
1140 | 1131 | "source": [ |
1141 | 1132 | "result = genai.embed_content(\n", |
1142 | | - " model=\"models/embedding-001\",\n", |
| 1133 | + " model=\"models/text-embedding-004\",\n", |
1143 | 1134 | " content=\"What is the meaning of life?\",\n", |
1144 | 1135 | " task_type=\"retrieval_document\",\n", |
1145 | 1136 | " title=\"Embedding of single string\")\n", |
|
1178 | 1169 | ], |
1179 | 1170 | "source": [ |
1180 | 1171 | "result = genai.embed_content(\n", |
1181 | | - " model=\"models/embedding-001\",\n", |
| 1172 | + " model=\"models/text-embedding-004\",\n", |
1182 | 1173 | " content=[\n", |
1183 | 1174 | " 'What is the meaning of life?',\n", |
1184 | 1175 | " 'How much wood would a woodchuck chuck?',\n", |
|
1244 | 1235 | ], |
1245 | 1236 | "source": [ |
1246 | 1237 | "result = genai.embed_content(\n", |
1247 | | - " model = 'models/embedding-001',\n", |
| 1238 | + " model = 'models/text-embedding-004',\n", |
1248 | 1239 | " content = response.candidates[0].content)\n", |
1249 | 1240 | "\n", |
1250 | 1241 | "# 1 input > 1 vector output\n", |
|
1317 | 1308 | ], |
1318 | 1309 | "source": [ |
1319 | 1310 | "result = genai.embed_content(\n", |
1320 | | - " model = 'models/embedding-001',\n", |
| 1311 | + " model = 'models/text-embedding-004',\n", |
1321 | 1312 | " content = chat.history)\n", |
1322 | 1313 | "\n", |
1323 | 1314 | "# 1 input > 1 vector output\n", |
|
1537 | 1528 | }, |
1538 | 1529 | "outputs": [], |
1539 | 1530 | "source": [ |
1540 | | - "model = genai.GenerativeModel('gemini-pro-vision')\n", |
| 1531 | + "model = genai.GenerativeModel('gemini-1.5-flash')\n", |
1541 | 1532 | "response = model.generate_content(\n", |
1542 | 1533 | " glm.Content(\n", |
1543 | 1534 | " parts = [\n", |
|
1631 | 1622 | } |
1632 | 1623 | ], |
1633 | 1624 | "source": [ |
1634 | | - "model = genai.GenerativeModel('gemini-pro')\n", |
| 1625 | + "model = genai.GenerativeModel('gemini-1.5-flash')\n", |
1635 | 1626 | "\n", |
1636 | 1627 | "messages = [\n", |
1637 | 1628 | " {'role':'user',\n", |
|
1754 | 1745 | }, |
1755 | 1746 | "outputs": [], |
1756 | 1747 | "source": [ |
1757 | | - "model = genai.GenerativeModel('gemini-pro')\n", |
| 1748 | + "model = genai.GenerativeModel('gemini-1.5-flash')\n", |
1758 | 1749 | "response = model.generate_content(\n", |
1759 | 1750 | " 'Tell me a story about a magic backpack.',\n", |
1760 | 1751 | " generation_config=genai.types.GenerationConfig(\n", |
|
0 commit comments