diff --git a/.gemini/styleguide.md b/.gemini/styleguide.md
index 9033578f6d6..6ec57690962 100644
--- a/.gemini/styleguide.md
+++ b/.gemini/styleguide.md
@@ -53,13 +53,36 @@ LOCATION = "global"
LOCATION = "us-central1"
```
+- Don't restart the kernel or use `!pip`, use `%pip` when installing
+
+**Correct**
+
+```sh
+%pip install
+```
+
+**Incorrect**
+
+```sh
+!pip install
+```
+
+```sh
+!pip3 install
+```
+
+```py
+app = IPython.Application.instance()
+app.kernel.do_shutdown(True)
+```
+
## Golden Rule: Use the Correct and Current SDK
-Always use the **Google GenAI SDK** (`google-genai`), which is the unified
+Always use the **Google Gen AI SDK** (`google-genai`), which is the unified
standard library for all Gemini API requests (AI Studio/Gemini Developer API
-and Vertex AI) as of 2025. Do not use legacy libraries and SDKs.
+and Vertex AI) as of 2026. Do not use legacy libraries and SDKs.
-- **Library Name:** Google GenAI SDK
+- **Library Name:** Google Gen AI SDK
- **Python Package:** `google-genai`
- **Legacy Library**: (`google-generativeai`) is deprecated.
diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt
index d2fb787bcdb..d2294aeda2a 100644
--- a/.github/actions/spelling/allow.txt
+++ b/.github/actions/spelling/allow.txt
@@ -39,6 +39,7 @@ ALDS
Alibaba
alisal
alloydb
+Alok
ALPHAFOLD
AMNOSH
amurensis
@@ -458,6 +459,8 @@ flac
Flahs
Flatform
Flipkart
+Flirble
+Flirbles
floormat
FLX
fmeasure
@@ -1017,6 +1020,7 @@ nunique
nvidia
NVIDIA
NVL
+NYT
oai
objc
ODb
diff --git a/.github/actions/spelling/excludes.txt b/.github/actions/spelling/excludes.txt
index aa53008f07c..8b2584819d9 100644
--- a/.github/actions/spelling/excludes.txt
+++ b/.github/actions/spelling/excludes.txt
@@ -115,3 +115,4 @@ ignore$
py\.typed$
^\Qaudio/speech/use-cases/storytelling/macbeth_the_sitcom.json\E$
.ruff.toml
+^\Q.gemini/styleguide.md\E$
diff --git a/.github/actions/spelling/line_forbidden.patterns b/.github/actions/spelling/line_forbidden.patterns
index 6a277726f07..c7ac283d9e8 100644
--- a/.github/actions/spelling/line_forbidden.patterns
+++ b/.github/actions/spelling/line_forbidden.patterns
@@ -211,10 +211,10 @@
# Should be Gemini
\sgemini\s\w
-# Should be `Gemini Version Size` (e.g. `Gemini 2.0 Flash`)
+# Should be `Gemini Version Size` (e.g. `Gemini 3 Flash`)
\bGemini\s(Pro|Flash|Ultra)\s?\d\.\d\b
-# Gemini Size should be capitalized (e.g. `Gemini 2.0 Flash`)
+# Gemini Size should be capitalized (e.g. `Gemini 3 Flash`)
\bGemini\s?\d\.\d\s(pro|flash|ultra)\b
# Don't say "Google Gemini" or "Google Gemini"
@@ -325,6 +325,3 @@ gemini-1\.[05]
# Use the Google Gen AI SDK `google-genai`
google-generativeai
from google import generativeai
-
-# Don't restart the kernel, use `%pip` when installing
-app\.kernel\.do_shutdown\(True\)
diff --git a/audio/speech/use-cases/storytelling/storytelling.ipynb b/audio/speech/use-cases/storytelling/storytelling.ipynb
index d95ccf91507..d77ae06d0a5 100644
--- a/audio/speech/use-cases/storytelling/storytelling.ipynb
+++ b/audio/speech/use-cases/storytelling/storytelling.ipynb
@@ -156,7 +156,7 @@
},
"outputs": [],
"source": [
- "%pip install --upgrade -q google-genai google-cloud-texttospeech pydub pandas tqdm"
+ "%pip install --upgrade -qqq google-genai google-cloud-texttospeech pydub pandas tqdm"
]
},
{
@@ -170,7 +170,7 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
"metadata": {
"id": "BzCUF4oqciKR"
},
@@ -197,7 +197,7 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": null,
"metadata": {
"id": "GjSsu6cmUdEx"
},
@@ -208,7 +208,9 @@
"\n",
"from google import genai\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -233,7 +235,7 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
"metadata": {
"id": "vbNgv4q1T2Mi"
},
@@ -264,7 +266,7 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": null,
"metadata": {
"id": "PyQmSRbKA8r-"
},
@@ -274,11 +276,11 @@
"import random\n",
"import re\n",
"\n",
+ "import pandas as pd\n",
"from IPython.display import Audio\n",
"from google.api_core.client_options import ClientOptions\n",
"from google.cloud import texttospeech_v1beta1 as texttospeech\n",
"from google.genai.types import GenerateContentConfig\n",
- "import pandas as pd\n",
"from pydantic import BaseModel\n",
"from pydub import AudioSegment\n",
"from tqdm import tqdm"
@@ -295,7 +297,7 @@
},
{
"cell_type": "code",
- "execution_count": 28,
+ "execution_count": null,
"metadata": {
"id": "6Pl3un_YciKS"
},
@@ -318,7 +320,7 @@
"\n",
"SYSTEM_INSTRUCTION = \"\"\"You are a creative and ambitious play writer. Your goal is to write a play for audio performance. Include a narrator character to describe the scenes and actions occurring.\"\"\"\n",
"\n",
- "MODEL_ID = \"gemini-2.5-flash\"\n",
+ "MODEL_ID = \"gemini-3-flash-preview\"\n",
"\n",
"\n",
"class Character(BaseModel):\n",
@@ -353,7 +355,7 @@
},
{
"cell_type": "code",
- "execution_count": 29,
+ "execution_count": null,
"metadata": {
"id": "kYx2wwhjrmD6"
},
@@ -378,7 +380,6 @@
" characters: list[Character], voices: list[dict]\n",
") -> dict[str, str]:\n",
" \"\"\"Maps characters to voices based on gender identified by Gemini.\"\"\"\n",
- "\n",
" if len(characters) > len(voices):\n",
" print(f\"Too many characters {len(characters)}. Max {len(voices)}\")\n",
"\n",
@@ -515,9 +516,6 @@
" contents=PROMPT,\n",
" config=GenerateContentConfig(\n",
" system_instruction=SYSTEM_INSTRUCTION,\n",
- " max_output_tokens=65535,\n",
- " temperature=1.5,\n",
- " top_p=0.95,\n",
" response_mime_type=\"application/json\",\n",
" response_schema=Story,\n",
" ),\n",
@@ -536,7 +534,7 @@
},
{
"cell_type": "code",
- "execution_count": 16,
+ "execution_count": null,
"metadata": {
"id": "c3f5cac8983e"
},
@@ -653,7 +651,7 @@
},
{
"cell_type": "code",
- "execution_count": 35,
+ "execution_count": null,
"metadata": {
"id": "d_ax6LxzciKT"
},
diff --git a/gemini/agent-engine/intro_agent_engine.ipynb b/gemini/agent-engine/intro_agent_engine.ipynb
index 7be3454b31c..7bea9fbc123 100644
--- a/gemini/agent-engine/intro_agent_engine.ipynb
+++ b/gemini/agent-engine/intro_agent_engine.ipynb
@@ -333,7 +333,7 @@
"\n",
"
\n",
"\n",
- "Here you'll use the Gemini 2.0 model:"
+ "Here you'll use the Gemini 3 model:"
]
},
{
@@ -344,7 +344,7 @@
},
"outputs": [],
"source": [
- "model = \"gemini-2.0-flash\""
+ "model = \"gemini-3-flash-preview\""
]
},
{
@@ -729,7 +729,7 @@
"outputs": [],
"source": [
"## Model variant and version\n",
- "model = \"gemini-2.0-flash\"\n",
+ "model = \"gemini-3-flash-preview\"\n",
"\n",
"## Model safety settings\n",
"from langchain_google_vertexai import HarmBlockThreshold, HarmCategory\n",
diff --git a/gemini/batch-prediction/intro_batch_prediction.ipynb b/gemini/batch-prediction/intro_batch_prediction.ipynb
index adaa8445c5f..2f1651d1ea3 100644
--- a/gemini/batch-prediction/intro_batch_prediction.ipynb
+++ b/gemini/batch-prediction/intro_batch_prediction.ipynb
@@ -186,24 +186,6 @@
"### Import libraries\n"
]
},
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "fe00fa0b8bb7"
- },
- "outputs": [],
- "source": [
- "from datetime import datetime\n",
- "import time\n",
- "\n",
- "import fsspec\n",
- "from google import genai\n",
- "from google.cloud import bigquery\n",
- "from google.genai.types import CreateBatchJobConfig\n",
- "import pandas as pd"
- ]
- },
{
"cell_type": "markdown",
"metadata": {
@@ -226,8 +208,18 @@
"outputs": [],
"source": [
"import os\n",
+ "import time\n",
+ "from datetime import datetime\n",
+ "\n",
+ "import fsspec\n",
+ "import pandas as pd\n",
+ "from google import genai\n",
+ "from google.cloud import bigquery\n",
+ "from google.genai.types import CreateBatchJobConfig\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -312,7 +304,9 @@
},
"outputs": [],
"source": [
- "INPUT_DATA = \"gs://cloud-samples-data/generative-ai/batch/batch_requests_for_multimodal_input_2.jsonl\" # @param {type:\"string\"}"
+ "# fmt: off\n",
+ "INPUT_DATA = \"gs://cloud-samples-data/generative-ai/batch/batch_requests_for_multimodal_input_2.jsonl\" # @param {type:\"string\"}\n",
+ "# fmt: on"
]
},
{
@@ -471,7 +465,7 @@
"Example output:\n",
"\n",
"```json\n",
- "{\"status\": \"\", \"processed_time\": \"2024-11-13T14:04:28.376+00:00\", \"request\": {\"contents\": [{\"parts\": [{\"file_data\": null, \"text\": \"List objects in this image.\"}, {\"file_data\": {\"file_uri\": \"gs://cloud-samples-data/generative-ai/image/gardening-tools.jpeg\", \"mime_type\": \"image/jpeg\"}, \"text\": null}], \"role\": \"user\"}], \"generationConfig\": {\"temperature\": 0.4}}, \"response\": {\"candidates\": [{\"avgLogprobs\": -0.10394711927934126, \"content\": {\"parts\": [{\"text\": \"Here's a list of the objects in the image:\\n\\n* **Watering can:** A green plastic watering can with a white rose head.\\n* **Plant:** A small plant (possibly oregano) in a terracotta pot.\\n* **Terracotta pots:** Two terracotta pots, one containing the plant and another empty, stacked on top of each other.\\n* **Gardening gloves:** A pair of striped gardening gloves.\\n* **Gardening tools:** A small trowel and a hand cultivator (hoe). Both are green with black handles.\"}], \"role\": \"model\"}, \"finishReason\": \"STOP\"}], \"modelVersion\": \"gemini-2.0-flash@default\", \"usageMetadata\": {\"candidatesTokenCount\": 110, \"promptTokenCount\": 264, \"totalTokenCount\": 374}}}\n",
+ "{\"status\": \"\", \"processed_time\": \"2024-11-13T14:04:28.376+00:00\", \"request\": {\"contents\": [{\"parts\": [{\"file_data\": null, \"text\": \"List objects in this image.\"}, {\"file_data\": {\"file_uri\": \"gs://cloud-samples-data/generative-ai/image/gardening-tools.jpeg\", \"mime_type\": \"image/jpeg\"}, \"text\": null}], \"role\": \"user\"}], \"generationConfig\": {\"temperature\": 0.4}}, \"response\": {\"candidates\": [{\"avgLogprobs\": -0.10394711927934126, \"content\": {\"parts\": [{\"text\": \"Here's a list of the objects in the image:\\n\\n* **Watering can:** A green plastic watering can with a white rose head.\\n* **Plant:** A small plant (possibly oregano) in a terracotta pot.\\n* **Terracotta pots:** Two terracotta pots, one containing the plant and another empty, stacked on top of each other.\\n* **Gardening gloves:** A pair of striped gardening gloves.\\n* **Gardening tools:** A small trowel and a hand cultivator (hoe). Both are green with black handles.\"}], \"role\": \"model\"}, \"finishReason\": \"STOP\"}], \"modelVersion\": \"gemini-3-flash-preview@default\", \"usageMetadata\": {\"candidatesTokenCount\": 110, \"promptTokenCount\": 264, \"totalTokenCount\": 374}}}\n",
"```\n"
]
},
@@ -550,7 +544,9 @@
},
"outputs": [],
"source": [
- "INPUT_DATA = \"bq://storage-samples.generative_ai.batch_requests_for_multimodal_input_2\" # @param {type:\"string\"}"
+ "# fmt: off\n",
+ "INPUT_DATA = \"bq://storage-samples.generative_ai.batch_requests_for_multimodal_input_2\" # @param {type:\"string\"}\n",
+ "# fmt: on"
]
},
{
diff --git a/gemini/code-execution/intro_code_execution.ipynb b/gemini/code-execution/intro_code_execution.ipynb
index 6d1e8bfe4ae..257cfc7f2b1 100644
--- a/gemini/code-execution/intro_code_execution.ipynb
+++ b/gemini/code-execution/intro_code_execution.ipynb
@@ -29,7 +29,7 @@
"id": "JAPoU8Sm5E6e"
},
"source": [
- "# Intro to Generating and Executing Python Code with Gemini 2.0\n",
+ "# Intro to Generating and Executing Python Code with Gemini 3\n",
"\n",
"
\n",
" \n",
@@ -98,7 +98,7 @@
"source": [
"## Overview\n",
"\n",
- "This notebook introduces the code execution capabilities of the [Gemini 2.0 Flash model](https://cloud.google.com/vertex-ai/generative-ai/docs/gemini-v2), a new multimodal generative AI model from Google [DeepMind](https://deepmind.google/). Gemini 2.0 Flash offers improvements in speed, quality, and advanced reasoning capabilities including enhanced understanding, coding, and instruction following.\n",
+ "This notebook introduces the code execution capabilities of the [Gemini 3 Flash model](https://cloud.google.com/vertex-ai/generative-ai/docs/gemini-v2), a new multimodal generative AI model from Google [DeepMind](https://deepmind.google/). Gemini 3 Flash offers improvements in speed, quality, and advanced reasoning capabilities including enhanced understanding, coding, and instruction following.\n",
"\n",
"## Code Execution\n",
"\n",
@@ -108,7 +108,7 @@
"\n",
"## Objectives\n",
"\n",
- "In this tutorial, you will learn how to generate and execute code using the Gemini API in Vertex AI and the Google Gen AI SDK for Python with the Gemini 2.0 Flash model.\n",
+ "In this tutorial, you will learn how to generate and execute code using the Gemini API in Vertex AI and the Google Gen AI SDK for Python with the Gemini 3 Flash model.\n",
"\n",
"You will complete the following tasks:\n",
"\n",
@@ -235,7 +235,9 @@
},
"outputs": [],
"source": [
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -259,11 +261,11 @@
"id": "x1vpnyk-q-fz"
},
"source": [
- "## Working with code execution in Gemini 2.0\n",
+ "## Working with code execution in Gemini 3\n",
"\n",
"### Load the Gemini model\n",
"\n",
- "The following code loads the Gemini 2.0 Flash model. You can learn about all Gemini models on Vertex AI by visiting the [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models):"
+ "The following code loads the Gemini 3 Flash model. You can learn about all Gemini models on Vertex AI by visiting the [documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models):"
]
},
{
@@ -274,7 +276,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\" # @param {type: \"string\"}"
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type: \"string\"}"
]
},
{
diff --git a/gemini/context-caching/intro_context_caching.ipynb b/gemini/context-caching/intro_context_caching.ipynb
index 1e1eea0c31f..3a8dfe7618e 100644
--- a/gemini/context-caching/intro_context_caching.ipynb
+++ b/gemini/context-caching/intro_context_caching.ipynb
@@ -207,7 +207,9 @@
"source": [
"import os\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"LOCATION = \"us-central1\" # @param {type:\"string\"}\n",
"\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
@@ -291,7 +293,9 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.5-flash\" # @param [\"gemini-2.0-flash-001\", \"gemini-2.5-flash\", \"gemini-2.5-pro\"] {\"allow-input\":true, isTemplate: true}"
+ "# fmt: off\n",
+ "MODEL_ID = \"gemini-2.5-flash\" # @param [\"gemini-3-flash-preview\", \"gemini-2.5-flash\", \"gemini-2.5-pro\"] {\"allow-input\":true, isTemplate: true}\n",
+ "# fmt: on"
]
},
{
@@ -304,7 +308,7 @@
"\n",
"Implicit caching directly passes cache cost savings to developers without the need to create an explicit cache. Now, when you send a request to one of the Gemini 2.5 models, if the request shares a common prefix as one of previous requests, then it's eligible for a cache hit.\n",
"\n",
- "**Note** that implicit caching is enabled by default for all Gemini 2.0 and 2.5 models but cost savings only apply to Gemini 2.5 models. The minimum input token count for implicit caching is 2,048 for 2.5 Flash and 2.5 Pro."
+ "**Note** that implicit caching is enabled by default for all Gemini 3 and 2.5 models but cost savings only apply to Gemini 2.5 models. The minimum input token count for implicit caching is 2,048 for 2.5 Flash and 2.5 Pro."
]
},
{
diff --git a/gemini/function-calling/forced_function_calling.ipynb b/gemini/function-calling/forced_function_calling.ipynb
index 0e4d84eab5c..506cb8b7c11 100644
--- a/gemini/function-calling/forced_function_calling.ipynb
+++ b/gemini/function-calling/forced_function_calling.ipynb
@@ -241,11 +241,13 @@
"source": [
"import os\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
- "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n",
+ "LOCATION = \"global\"\n",
"\n",
"from google import genai\n",
"\n",
@@ -269,8 +271,8 @@
},
"outputs": [],
"source": [
- "from IPython.display import Markdown, display\n",
"import arxiv\n",
+ "from IPython.display import Markdown, display\n",
"from google.genai.types import (\n",
" FunctionCallingConfig,\n",
" FunctionCallingConfigMode,\n",
@@ -305,7 +307,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\" # @param {type: \"string\"}"
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type: \"string\"}"
]
},
{
@@ -599,7 +601,7 @@
" )\n",
"\n",
" results = arxiv_client.results(search)\n",
- " results = str([r for r in results])"
+ " results = str(list(results))"
]
},
{
diff --git a/gemini/function-calling/function_calling_data_structures.ipynb b/gemini/function-calling/function_calling_data_structures.ipynb
index 9e298a6c3a4..2af0c0c2802 100644
--- a/gemini/function-calling/function_calling_data_structures.ipynb
+++ b/gemini/function-calling/function_calling_data_structures.ipynb
@@ -227,11 +227,13 @@
"source": [
"import os\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
- "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n",
+ "LOCATION = \"global\"\n",
"\n",
"from google import genai\n",
"\n",
@@ -277,7 +279,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash\""
+ "MODEL_ID = \"gemini-3-flash-preview\""
]
},
{
diff --git a/gemini/function-calling/intro_function_calling.ipynb b/gemini/function-calling/intro_function_calling.ipynb
index 694f99ba310..df9cd61373e 100644
--- a/gemini/function-calling/intro_function_calling.ipynb
+++ b/gemini/function-calling/intro_function_calling.ipynb
@@ -135,7 +135,7 @@
"source": [
"### Objectives\n",
"\n",
- "In this tutorial, you will learn how to use the Gemini API in Vertex AI with the Vertex AI SDK for Python to make function calls via the Gemini 2.0 Flash (`gemini-2.0-flash`) model.\n",
+ "In this tutorial, you will learn how to use the Gemini API in Vertex AI with the Vertex AI SDK for Python to make function calls via the Gemini 3 Flash (`gemini-3-flash-preview`) model.\n",
"\n",
"You will complete the following tasks:\n",
"\n",
@@ -240,11 +240,13 @@
"source": [
"import os\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
- "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n",
+ "LOCATION = \"global\"\n",
"\n",
"from google import genai\n",
"\n",
@@ -279,7 +281,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\" # @param {type: \"string\"}"
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type: \"string\"}"
]
},
{
@@ -299,9 +301,9 @@
},
"outputs": [],
"source": [
+ "import requests\n",
"from IPython.display import Markdown, display\n",
- "from google.genai.types import FunctionDeclaration, GenerateContentConfig, Part, Tool\n",
- "import requests"
+ "from google.genai.types import FunctionDeclaration, GenerateContentConfig, Part, Tool"
]
},
{
@@ -732,8 +734,7 @@
" country: str | None = None,\n",
" postalcode: str | None = None,\n",
") -> list[dict]:\n",
- " \"\"\"\n",
- " Get latitude and longitude for a given location.\n",
+ " \"\"\"Get latitude and longitude for a given location.\n",
"\n",
" Args:\n",
" amenity (str | None): Amenity or Point of interest.\n",
diff --git a/gemini/function-calling/multimodal_function_calling.ipynb b/gemini/function-calling/multimodal_function_calling.ipynb
index 6d838191f19..28a01529480 100644
--- a/gemini/function-calling/multimodal_function_calling.ipynb
+++ b/gemini/function-calling/multimodal_function_calling.ipynb
@@ -123,7 +123,7 @@
"source": [
"### Objectives\n",
"\n",
- "In this tutorial, you will learn how to use the Gemini API in Vertex AI with the Vertex AI SDK for Python to make function calls with multimodal inputs, using the Gemini 2.0 (`gemini-2.0-flash`) model. You'll explore how Gemini can process and understand various input types — including images, video, audio, and PDFs — to predict and execute functions.\n",
+ "In this tutorial, you will learn how to use the Gemini API in Vertex AI with the Vertex AI SDK for Python to make function calls with multimodal inputs, using the Gemini 3 (`gemini-3-flash-preview`) model. You'll explore how Gemini can process and understand various input types — including images, video, audio, and PDFs — to predict and execute functions.\n",
"\n",
"You will complete the following tasks:\n",
"\n",
@@ -267,11 +267,13 @@
"source": [
"import os\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
- "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n",
+ "LOCATION = \"global\"\n",
"\n",
"from google import genai\n",
"\n",
@@ -304,6 +306,7 @@
},
"outputs": [],
"source": [
+ "import wikipedia\n",
"from IPython.display import Markdown, display\n",
"from google.genai.types import (\n",
" FunctionDeclaration,\n",
@@ -311,8 +314,7 @@
" Part,\n",
" Tool,\n",
" UserContent,\n",
- ")\n",
- "import wikipedia"
+ ")"
]
},
{
@@ -332,7 +334,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash\""
+ "MODEL_ID = \"gemini-3-flash-preview\""
]
},
{
@@ -464,7 +466,7 @@
},
"outputs": [],
"source": [
- "function_args = {key: value for key, value in response.function_calls[0].args.items()}\n",
+ "function_args = dict(response.function_calls[0].args.items())\n",
"function_args"
]
},
@@ -684,7 +686,7 @@
},
"outputs": [],
"source": [
- "function_args = {key: value for key, value in response.function_calls[0].args.items()}\n",
+ "function_args = dict(response.function_calls[0].args.items())\n",
"function_args"
]
},
@@ -794,7 +796,7 @@
"outputs": [],
"source": [
"response = client.models.generate_content(\n",
- " model=\"gemini-2.0-flash-001\",\n",
+ " model=\"gemini-3-flash-preview\",\n",
" contents=[\n",
" Part.from_uri(\n",
" file_uri=\"gs://github-repo/generative-ai/gemini/function-calling/google-cloud-sre-podcast-s2-e8.mp3\",\n",
@@ -845,7 +847,7 @@
},
"outputs": [],
"source": [
- "function_args = {key: value for key, value in response.function_calls[0].args.items()}\n",
+ "function_args = dict(response.function_calls[0].args.items())\n",
"function_args"
]
},
@@ -1013,7 +1015,7 @@
},
"outputs": [],
"source": [
- "function_args = {key: value for key, value in response.function_calls[0].args.items()}\n",
+ "function_args = dict(response.function_calls[0].args.items())\n",
"function_args"
]
},
diff --git a/gemini/function-calling/parallel_function_calling.ipynb b/gemini/function-calling/parallel_function_calling.ipynb
index 1171c10eed7..e30644903cc 100644
--- a/gemini/function-calling/parallel_function_calling.ipynb
+++ b/gemini/function-calling/parallel_function_calling.ipynb
@@ -236,11 +236,13 @@
"source": [
"import os\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
- "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n",
+ "LOCATION = \"global\"\n",
"\n",
"from google import genai\n",
"\n",
@@ -268,6 +270,7 @@
"source": [
"from typing import Any\n",
"\n",
+ "import wikipedia\n",
"from IPython.display import Markdown, display\n",
"from google.genai.types import (\n",
" FunctionDeclaration,\n",
@@ -275,8 +278,7 @@
" GenerateContentResponse,\n",
" Part,\n",
" Tool,\n",
- ")\n",
- "import wikipedia"
+ ")"
]
},
{
@@ -376,7 +378,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash\"\n",
+ "MODEL_ID = \"gemini-3-flash-preview\"\n",
"\n",
"chat = client.chats.create(\n",
" model=MODEL_ID, config=GenerateContentConfig(temperature=0, tools=[wikipedia_tool])\n",
diff --git a/gemini/getting-started/intro_gemini_chat.ipynb b/gemini/getting-started/intro_gemini_chat.ipynb
index 00503f9adc4..ff5e8ec085e 100644
--- a/gemini/getting-started/intro_gemini_chat.ipynb
+++ b/gemini/getting-started/intro_gemini_chat.ipynb
@@ -225,7 +225,9 @@
"\n",
"from google import genai\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -284,7 +286,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash\""
+ "MODEL_ID = \"gemini-3-flash-preview\""
]
},
{
@@ -493,7 +495,7 @@
"source": [
"### Start a chat session\n",
"\n",
- "You can start a chat by sending chat prompts to the Gemini 2.0 model directly. Gemini 2.0 doesn't support `SystemMessage` at the moment, but `SystemMessage` can be added to the first human message by setting the `convert_system_message_to_human` to `True`."
+ "You can start a chat by sending chat prompts to the Gemini 3 model directly. Gemini 3 doesn't support `SystemMessage` at the moment, but `SystemMessage` can be added to the first human message by setting the `convert_system_message_to_human` to `True`."
]
},
{
diff --git a/gemini/getting-started/intro_gemini_curl.ipynb b/gemini/getting-started/intro_gemini_curl.ipynb
index 602b7d041bc..5de546787ec 100644
--- a/gemini/getting-started/intro_gemini_curl.ipynb
+++ b/gemini/getting-started/intro_gemini_curl.ipynb
@@ -110,7 +110,7 @@
" \n",
"\n",
"\n",
- "In this tutorial, you learn how to use the Vertex AI REST API with cURL commands to interact with the Gemini 2.0 Flash model.\n",
+ "In this tutorial, you learn how to use the Vertex AI REST API with cURL commands to interact with the Gemini 3 Flash model.\n",
"\n",
"You will complete the following tasks:\n",
"\n",
@@ -223,7 +223,9 @@
"source": [
"import os\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -239,7 +241,7 @@
"id": "854fbf388e2b"
},
"source": [
- "## Use the Gemini 2.0 Flash model"
+ "## Use the Gemini 3 Flash model"
]
},
{
@@ -250,7 +252,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash\"\n",
+ "MODEL_ID = \"gemini-3-flash-preview\"\n",
"\n",
"api_host = \"aiplatform.googleapis.com\"\n",
"if LOCATION != \"global\":\n",
@@ -803,7 +805,7 @@
"source": [
"## Search as a tool\n",
"\n",
- "Using Grounding with Google Search, you can improve the accuracy and recency of responses from the model. Starting with Gemini 2.0, Google Search is available as a tool. This means that the model can decide when to use Google Search."
+ "Using Grounding with Google Search, you can improve the accuracy and recency of responses from the model. Starting with Gemini 3, Google Search is available as a tool. This means that the model can decide when to use Google Search."
]
},
{
diff --git a/gemini/getting-started/intro_gemini_express.ipynb b/gemini/getting-started/intro_gemini_express.ipynb
index 375388a5f70..4a9a4582589 100644
--- a/gemini/getting-started/intro_gemini_express.ipynb
+++ b/gemini/getting-started/intro_gemini_express.ipynb
@@ -154,7 +154,9 @@
"\n",
"from google import genai\n",
"\n",
+ "# fmt: off\n",
"API_KEY = \"[your-api-key]\" # @param {type: \"string\", placeholder: \"[your-api-key]\", isTemplate: true}\n",
+ "# fmt: on\n",
"\n",
"if not API_KEY or API_KEY == \"[your-api-key]\":\n",
" API_KEY = os.environ.get(\"GOOGLE_API_KEY\")\n",
@@ -213,7 +215,7 @@
"id": "BY1nfXrqRxVX"
},
"source": [
- "### Load the Gemini 2.0 Flash model\n",
+ "### Load the Gemini 3 Flash model\n",
"\n",
"To learn more about all [Gemini API models on Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-models).\n"
]
@@ -226,7 +228,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\" # @param {type: \"string\"}"
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type: \"string\"}"
]
},
{
@@ -850,7 +852,6 @@
"source": [
"from pydantic import BaseModel\n",
"\n",
- "\n",
"class Recipe(BaseModel):\n",
" name: str\n",
" description: str\n",
@@ -1037,7 +1038,7 @@
"\n",
"By grounding model responses in Google Search results, the model can access information at runtime that goes beyond its training data which can produce more accurate, up-to-date, and relevant responses.\n",
"\n",
- "Using Grounding with Google Search, you can improve the accuracy and recency of responses from the model. Starting with Gemini 2.0, Google Search is available as a tool. This means that the model can decide when to use Google Search.\n",
+ "Using Grounding with Google Search, you can improve the accuracy and recency of responses from the model. Starting with Gemini 3, Google Search is available as a tool. This means that the model can decide when to use Google Search.\n",
"\n",
"For more examples of Grounding, refer to [this notebook](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/grounding/intro-grounding-gemini.ipynb)."
]
@@ -1249,9 +1250,9 @@
"source": [
"## Spatial Understanding\n",
"\n",
- "Gemini 2.0 includes improved spatial understanding and object detection capabilities. Check out this notebook for examples:\n",
+ "Gemini 3 includes improved spatial understanding and object detection capabilities. Check out this notebook for examples:\n",
"\n",
- "- [2D spatial understanding with Gemini 2.0](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/use-cases/spatial-understanding/spatial_understanding.ipynb)"
+ "- [2D spatial understanding with Gemini 3](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/use-cases/spatial-understanding/spatial_understanding.ipynb)"
]
},
{
diff --git a/gemini/global-endpoint/intro_global_endpoint.ipynb b/gemini/global-endpoint/intro_global_endpoint.ipynb
deleted file mode 100644
index e53000edc6b..00000000000
--- a/gemini/global-endpoint/intro_global_endpoint.ipynb
+++ /dev/null
@@ -1,379 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "sqi5B7V_Rjim"
- },
- "outputs": [],
- "source": [
- "# Copyright 2025 Google LLC\n",
- "#\n",
- "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
- "# you may not use this file except in compliance with the License.\n",
- "# You may obtain a copy of the License at\n",
- "#\n",
- "# https://www.apache.org/licenses/LICENSE-2.0\n",
- "#\n",
- "# Unless required by applicable law or agreed to in writing, software\n",
- "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
- "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
- "# See the License for the specific language governing permissions and\n",
- "# limitations under the License."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "VyPmicX9RlZX"
- },
- "source": [
- "# Intro to Vertex AI Global Endpoint\n",
- "\n",
- "\n",
- "\n",
- " \n",
- " \n",
- "  Open in Colab\n",
- " \n",
- " | \n",
- " \n",
- " \n",
- "  Open in Colab Enterprise\n",
- " \n",
- " | \n",
- " \n",
- " \n",
- "  Open in Vertex AI Workbench\n",
- " \n",
- " | \n",
- " \n",
- " \n",
- "  View on GitHub\n",
- " \n",
- " | \n",
- " \n",
- "\n",
- "\n",
- "\n",
- "Share to:\n",
- "\n",
- "\n",
- " \n",
- "\n",
- "\n",
- "\n",
- " \n",
- "\n",
- "\n",
- "\n",
- " \n",
- "\n",
- "\n",
- "\n",
- " \n",
- "\n",
- "\n",
- "\n",
- " \n",
- ""
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "8MqT58L6Rm_q"
- },
- "source": [
- "| | |\n",
- "|-|-|\n",
- "| Author(s) | [Eric Dong](https://github.com/gericdong) |"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "nVxnv1D5RoZw"
- },
- "source": [
- "## Overview\n",
- "\n",
- "Vertex AI global endpoint enables developers to leverage Google's highly scalable and global infrastructure for Gemini models. Leveraging the global endpoint can significantly improve overall service availability and throughput, while reducing the likelihood of rate limiting (429 RESOURCE_EXHAUSTED errors).\n",
- "\n",
- "This tutorial demonstrates how to call the Vertex AI global endpoint using both the REST API and the Google Gen AI SDK.\n",
- "\n",
- "**Important**:\n",
- "\n",
- "- Use the global endpoint if you don't have strict regional restriction requirements.\n",
- "\n",
- "Learn more about [Deployments and endpoints](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations)."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "gPiTOAHURvTM"
- },
- "source": [
- "## Getting Started"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "CHRZUpfWSEpp"
- },
- "source": [
- "### Install Google Gen AI SDK\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "sG3_LKsWSD3A"
- },
- "outputs": [],
- "source": [
- "%pip install --upgrade --quiet google-genai"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "HlMVjiAWSMNX"
- },
- "source": [
- "### Authenticate your notebook environment\n",
- "\n",
- "If you are running this notebook on Google Colab, run the cell below to authenticate your environment."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "12fnq4V0SNV3"
- },
- "outputs": [],
- "source": [
- "import sys\n",
- "\n",
- "if \"google.colab\" in sys.modules:\n",
- " from google.colab import auth\n",
- "\n",
- " auth.authenticate_user()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "z6kSWg40PPzd"
- },
- "source": [
- "### Set Google Cloud project ID\n",
- "\n",
- "To start using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n",
- "\n",
- "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "UCgUOv4nSWhc"
- },
- "outputs": [],
- "source": [
- "import os\n",
- "\n",
- "# fmt: off\n",
- "PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\"}\n",
- "# fmt: on\n",
- "\n",
- "if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
- " PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "BhGIYTj0PgsD"
- },
- "source": [
- "### Use a Gemini model\n",
- "\n",
- "Learn more about the [global endpoint supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations#supported_models).\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "R6rCOr8URsYU"
- },
- "outputs": [],
- "source": [
- "# fmt: off\n",
- "MODEL_ID = \"gemini-2.0-flash-001\" # @param {type: \"string\"}\n",
- "# fmt: on"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "IZxwWQocTXSx"
- },
- "source": [
- "## Use the Global Endpoint\n",
- "\n",
- "You set location to `global` to specify a global endpoint."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "cVN2UzZ8Wtap"
- },
- "outputs": [],
- "source": [
- "# fmt: off\n",
- "LOCATION = \"global\" # @param {type: \"string\"}\n",
- "# fmt: on"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Cjnw6Jt6Uqcm"
- },
- "source": [
- "### **Option 1**: Use global endpoint with REST API\n",
- "\n",
- "Next, send a REST API request to the Gemini model using cURL with the global endpoint. When forming the request URL for the global endpoint, ensure the hostname `API_HOST` does not include a region identifier (unlike regional endpoints), and specify `global` within the URL path where the location would normally appear.\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "262jPzPdUz_v"
- },
- "outputs": [],
- "source": [
- "API_HOST = \"aiplatform.googleapis.com\"\n",
- "\n",
- "os.environ[\"API_ENDPOINT\"] = (\n",
- " f\"{API_HOST}/v1/projects/{PROJECT_ID}/locations/{LOCATION}/publishers/google/models/{MODEL_ID}\"\n",
- ")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "XX7zfX4leqVB"
- },
- "source": [
- "The following example shows a text generation that uses a global endpoint."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "Zu1p_H_NVvei"
- },
- "outputs": [],
- "source": [
- "%%bash\n",
- "\n",
- "curl -X POST \\\n",
- " -H \"Authorization: Bearer $(gcloud auth print-access-token)\" \\\n",
- " -H \"Content-Type: application/json\" \\\n",
- " https://${API_ENDPOINT}:generateContent \\\n",
- " -d '{\n",
- " \"contents\": {\n",
- " \"role\": \"USER\",\n",
- " \"parts\": { \"text\": \"Why is the sky blue?\" },\n",
- " },\n",
- " }'"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "zXgFIiDVag2j"
- },
- "source": [
- "### **Option 2**: Use global endpoint with Gen AI SDK\n",
- "\n",
- "When you use the Gen AI SDK, set the location as part of the client options. Here you create a `client` for Vertex AI service, and set location `global` to indicate to use a global endpoint."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "TpMdwQSpbEPt"
- },
- "outputs": [],
- "source": [
- "from google import genai\n",
- "\n",
- "client = genai.Client(vertexai=True, project=PROJECT_ID, location=LOCATION)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "6YModBSDcHXl"
- },
- "source": [
- "The following example shows a text generation that uses a global endpoint."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "PENzMhWQak2w"
- },
- "outputs": [],
- "source": [
- "response = client.models.generate_content(\n",
- " model=MODEL_ID, contents=\"Why is the sky blue?\"\n",
- ")\n",
- "\n",
- "print(response.text)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "eQwiONFdVHw5"
- },
- "source": [
- "## What's next\n",
- "\n",
- "- Learn more about [Generative AI on Vertex AI locations](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/locations).\n",
- "- Learn more about [Model versions and lifecycle](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versions).\n",
- "- Explore other notebooks in the [Google Cloud Generative AI repository](https://github.com/GoogleCloudPlatform/generative-ai)."
- ]
- }
- ],
- "metadata": {
- "colab": {
- "name": "intro_global_endpoint.ipynb",
- "toc_visible": true
- },
- "kernelspec": {
- "display_name": "Python 3",
- "name": "python3"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/gemini/grounding/grounding_with_vais.ipynb b/gemini/grounding/grounding_with_vais.ipynb
index 11821e7b6bb..03798638170 100644
--- a/gemini/grounding/grounding_with_vais.ipynb
+++ b/gemini/grounding/grounding_with_vais.ipynb
@@ -279,7 +279,7 @@
"engine_client = vais.EngineServiceClient(client_options=client_options)\n",
"\n",
"\n",
- "def wait_for_operation_finish(operation):\n",
+ "def wait_for_operation_finish(operation) -> None:\n",
" while not operation.done:\n",
" time.sleep(2) # sleep 2 seconds"
]
@@ -473,7 +473,7 @@
"from google.genai.types import GenerateContentConfig, Retrieval, Tool, VertexAISearch\n",
"\n",
"# fmt: off\n",
- "MODEL_ID = \"gemini-2.0-flash\" # @param {type: \"string\"}\n",
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type: \"string\"}\n",
"# fmt: on\n",
"\n",
"\n",
diff --git a/gemini/grounding/intro-grounding-gemini.ipynb b/gemini/grounding/intro-grounding-gemini.ipynb
index 572c9c4bd53..18729654685 100644
--- a/gemini/grounding/intro-grounding-gemini.ipynb
+++ b/gemini/grounding/intro-grounding-gemini.ipynb
@@ -556,7 +556,7 @@
"\n",
"### Gemini model compatibility\n",
"\n",
- "Enterprise Web Search is compatible with all Gemini 2.0 models which support grounding. Gemini 2.0 Flash supports multimodal input (e.g. images, documents, videos). "
+ "Enterprise Web Search is compatible with all Gemini 3 models that support grounding. Gemini 3 Flash supports multimodal input (e.g. images, documents, videos). "
]
},
{
diff --git a/gemini/logprobs/intro_logprobs.ipynb b/gemini/logprobs/intro_logprobs.ipynb
index 699ea98b2d0..b679667e609 100644
--- a/gemini/logprobs/intro_logprobs.ipynb
+++ b/gemini/logprobs/intro_logprobs.ipynb
@@ -198,7 +198,9 @@
"source": [
"import os\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -224,9 +226,9 @@
"source": [
"import math\n",
"\n",
+ "import pandas as pd\n",
"from google import genai\n",
- "from google.genai.types import GenerateContentConfig\n",
- "import pandas as pd"
+ "from google.genai.types import GenerateContentConfig"
]
},
{
@@ -268,7 +270,9 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash\" # @param [\"gemini-2.0-flash\", \"gemini-2.5-pro\", \"gemini-2.5-flash\"] {\"allow-input\":true, isTemplate: true}"
+ "# fmt: off\n",
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param [\"gemini-3-flash-preview\", \"gemini-2.5-pro\", \"gemini-2.5-flash\"] {\"allow-input\":true, isTemplate: true}\n",
+ "# fmt: on"
]
},
{
@@ -341,10 +345,8 @@
},
"outputs": [],
"source": [
- "def print_logprobs(response):\n",
- " \"\"\"\n",
- " Print log probabilities for each token in the response\n",
- " \"\"\"\n",
+ "def print_logprobs(response) -> None:\n",
+ " \"\"\"Print log probabilities for each token in the response.\"\"\"\n",
" if response.candidates and response.candidates[0].logprobs_result:\n",
" logprobs_result = response.candidates[0].logprobs_result\n",
" for i, chosen_candidate in enumerate(logprobs_result.chosen_candidates):\n",
@@ -449,9 +451,8 @@
},
"outputs": [],
"source": [
- "def check_for_ambiguity(response, ambiguity_margin=1.0):\n",
- " \"\"\"\n",
- " Check if the classification is ambiguous.\n",
+ "def check_for_ambiguity(response, ambiguity_margin=1.0) -> None:\n",
+ " \"\"\"Check if the classification is ambiguous.\n",
" Ambiguity is defined as the log probability of the top choice being\n",
" too close to the log probability of the second choice.\n",
" \"\"\"\n",
@@ -523,9 +524,7 @@
"outputs": [],
"source": [
"def accept_if_confident(response, threshold=0.90):\n",
- " \"\"\"\n",
- " Accepts the classification only if the confidence level is above a threshold.\n",
- " \"\"\"\n",
+ " \"\"\"Accepts the classification only if the confidence level is above a threshold.\"\"\"\n",
" if not (response.candidates and response.candidates[0].logprobs_result):\n",
" return None\n",
"\n",
@@ -541,9 +540,8 @@
" if probability >= threshold:\n",
" print(f\"Confidence is above {threshold:.0%}. Accepting result.\")\n",
" return chosen_candidate.token\n",
- " else:\n",
- " print(f\"Confidence is below {threshold:.0%}. Rejecting result.\")\n",
- " return None"
+ " print(f\"Confidence is below {threshold:.0%}. Rejecting result.\")\n",
+ " return None"
]
},
{
@@ -581,14 +579,12 @@
"outputs": [],
"source": [
"def get_autocomplete_suggestions(prompt: str) -> dict:\n",
- " \"\"\"\n",
- " Gets autocomplete suggestions for a given text prompt.\n",
- " \"\"\"\n",
+ " \"\"\"Gets autocomplete suggestions for a given text prompt.\"\"\"\n",
" system_instruction = (\n",
" \"You are acting as auto-complete. Complete the sentence with only one word.\",\n",
" )\n",
"\n",
- " response = client.models.generate_content(\n",
+ " return client.models.generate_content(\n",
" model=MODEL_ID,\n",
" contents=prompt,\n",
" config=GenerateContentConfig(\n",
@@ -600,13 +596,9 @@
" ),\n",
" )\n",
"\n",
- " return response\n",
- "\n",
"\n",
"def parse_suggestions(response: dict) -> str:\n",
- " \"\"\"\n",
- " Parses the logprobs from a model response and formats them.\n",
- " \"\"\"\n",
+ " \"\"\"Parses the logprobs from a model response and formats them.\"\"\"\n",
" if not (\n",
" response.candidates\n",
" and response.candidates[0].logprobs_result\n",
@@ -756,9 +748,7 @@
"outputs": [],
"source": [
"def get_answer_and_score(question: str, context: str) -> tuple[str, float]:\n",
- " \"\"\"\n",
- " Generates an answer based on a question and context, and calculates a grounding score.\n",
- " \"\"\"\n",
+ " \"\"\"Generates an answer based on a question and context, and calculates a grounding score.\"\"\"\n",
" if context:\n",
" prompt = f\"\"\"\n",
" Context:\n",
@@ -790,7 +780,6 @@
" and response.candidates[0].logprobs_result\n",
" and response.candidates[0].logprobs_result.chosen_candidates\n",
" ):\n",
- "\n",
" logprobs_result = response.candidates[0].logprobs_result\n",
" for chosen_candidate in logprobs_result.chosen_candidates:\n",
" total_logprob += chosen_candidate.log_probability\n",
diff --git a/gemini/long-context/intro_long_context.ipynb b/gemini/long-context/intro_long_context.ipynb
index 02ae90a2d48..7902ba0a57c 100644
--- a/gemini/long-context/intro_long_context.ipynb
+++ b/gemini/long-context/intro_long_context.ipynb
@@ -103,7 +103,7 @@
"source": [
"## Overview\n",
"\n",
- "Gemini 2.0 Flash comes standard with a 1 million token context window, and Gemini 2.0 comes with a 2 million token context window. Historically, large language models (LLMs) were significantly limited by the amount of text (or tokens) that could be passed to the model at one time. The Gemini long context window, with [near-perfect retrieval (>99%)](https://storage.googleapis.com/deepmind-media/gemini/gemini_v1_5_report.pdf), unlocks many new use cases and developer paradigms.\n",
+ "Gemini 3 Flash comes standard with a 1 million token context window, and Gemini 3 comes with a 2 million token context window. Historically, large language models (LLMs) were significantly limited by the amount of text (or tokens) that could be passed to the model at one time. The Gemini long context window, with [near-perfect retrieval (>99%)](https://storage.googleapis.com/deepmind-media/gemini/gemini_v1_5_report.pdf), unlocks many new use cases and developer paradigms.\n",
"\n",
"In practice, 1 million tokens would look like:\n",
"\n",
@@ -244,19 +244,6 @@
"### Import libraries\n"
]
},
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "e206cb5e39e2"
- },
- "outputs": [],
- "source": [
- "from IPython.display import Markdown, display\n",
- "from google import genai\n",
- "from google.genai.types import Part"
- ]
- },
{
"cell_type": "markdown",
"metadata": {
@@ -280,7 +267,13 @@
"source": [
"import os\n",
"\n",
+ "from IPython.display import Markdown, display\n",
+ "from google import genai\n",
+ "from google.genai.types import Part\n",
+ "\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -304,7 +297,7 @@
"id": "BY1nfXrqRxVX"
},
"source": [
- "### Load the Gemini 2.0 Flash model\n",
+ "### Load the Gemini 3 Flash model\n",
"\n",
"To learn more about all [Gemini API models on Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-models).\n"
]
@@ -317,7 +310,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\" # @param {type:\"string\"}"
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type:\"string\"}"
]
},
{
@@ -354,7 +347,7 @@
"source": [
"[War and Peace by Leo Tolstoy](https://en.wikipedia.org/wiki/War_and_Peace) is considered one of the greatest literary works of all time; however, it is over 1,225 pages and the average reader will spend 37 hours and 48 minutes reading this book at 250 WPM (words per minute). 😵💫 The text alone takes up 3.4 MB of storage space. However, the entire novel consists of less than 900,000 tokens, so it will fit within the Gemini context window.\n",
"\n",
- "We are going to pass in the entire text into Gemini 2.0 and get a detailed summary of the plot. For this example, we have the text of the novel from [Project Gutenberg](https://www.gutenberg.org/ebooks/2600) stored in a public Google Cloud Storage bucket.\n",
+ "We are going to pass in the entire text into Gemini 3 and get a detailed summary of the plot. For this example, we have the text of the novel from [Project Gutenberg](https://www.gutenberg.org/ebooks/2600) stored in a public Google Cloud Storage bucket.\n",
"\n",
"First, we will use the `count_tokens()` method to examine the token count of the full prompt, then send the prompt to Gemini."
]
@@ -405,7 +398,7 @@
"The Gemini long context window allows the ability to reason and answer questions about multimodal inputs with\n",
"sustained performance.\n",
"\n",
- "When tested on the needle in a video haystack problem with 1M tokens, Gemini 2.0 obtained >99.8% recall of the video in the context window, and Gemini 2.0 reached state of the art performance on the [Video-MME benchmark](https://video-mme.github.io/home_page.html).\n",
+ "When tested on the needle in a video haystack problem with 1M tokens, Gemini 3 obtained >99.8% recall of the video in the context window, and Gemini 3 reached state-of-the-art performance on the [Video-MME benchmark](https://video-mme.github.io/home_page.html).\n",
"\n",
"Some emerging and standard use cases for video long context include:\n",
"\n",
@@ -477,7 +470,7 @@
"\n",
"The Gemini models were the first natively multimodal large language models that could understand audio.\n",
"\n",
- "On standard audio-haystack evaluations, Gemini 2.0 is able to find the hidden audio in 100% of the tests and Gemini 2.0 is able to find it in 98.7% [of the tests](https://storage.googleapis.com/deepmind-media/gemini/gemini_v1_5_report.pdf). Further, on a test set of 15-minute audio clips, Gemini 2.0 archives a word error rate (WER) of ~5.5%, much lower than even specialized speech-to-text models, without the added complexity of extra input segmentation and pre-processing.\n",
+ "On standard audio-haystack evaluations, Gemini 3 is able to find the hidden audio in 100% of the tests and Gemini 3 is able to find it in 98.7% [of the tests](https://storage.googleapis.com/deepmind-media/gemini/gemini_v1_5_report.pdf). Further, on a test set of 15-minute audio clips, Gemini 3 achieves a word error rate (WER) of ~5.5%, much lower than even specialized speech-to-text models, without the added complexity of extra input segmentation and pre-processing.\n",
"\n",
"The long context window accepts up to 9.5 hours of audio in a single request.\n",
"\n",
diff --git a/gemini/orchestration/intro_langchain_gemini.ipynb b/gemini/orchestration/intro_langchain_gemini.ipynb
index d85d8435e7a..56a910b716e 100644
--- a/gemini/orchestration/intro_langchain_gemini.ipynb
+++ b/gemini/orchestration/intro_langchain_gemini.ipynb
@@ -204,9 +204,8 @@
},
"outputs": [],
"source": [
- "# Install Vertex AI SDK, LangChain and dependencies\n",
- "%pip install --upgrade --quiet google-cloud-aiplatform langchain langchain-core langchain-text-splitters langchain-google-vertexai langchain-community faiss-cpu langchain-chroma pypdf\n",
- "%pip install --upgrade --quiet langchain-classic"
+ "# Install LangChain and dependencies\n",
+ "%pip install --upgrade --quiet langchain langchain-core langchain-text-splitters langchain-google-genai langchain-community faiss-cpu langchain-chroma pypdf langchain-classic beautifulsoup4"
]
},
{
@@ -286,12 +285,7 @@
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
- "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n",
- "\n",
- "import vertexai\n",
- "\n",
- "# Initialize Vertex AI SDK\n",
- "vertexai.init(project=PROJECT_ID, location=LOCATION)"
+ "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"global\")"
]
},
{
@@ -319,7 +313,11 @@
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.prompts.few_shot import FewShotPromptTemplate\n",
- "from langchain_google_vertexai import ChatVertexAI, VertexAI, VertexAIEmbeddings\n",
+ "from langchain_google_genai import (\n",
+ " ChatGoogleGenerativeAI,\n",
+ " GoogleGenerativeAI,\n",
+ " GoogleGenerativeAIEmbeddings,\n",
+ ")\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter"
]
},
@@ -341,16 +339,29 @@
"outputs": [],
"source": [
"# LLM model\n",
- "llm = VertexAI(\n",
- " model_name=\"gemini-2.0-flash\",\n",
+ "llm = GoogleGenerativeAI(\n",
+ " model=\"gemini-3-flash-preview\",\n",
" verbose=True,\n",
+ " project=PROJECT_ID,\n",
+ " location=LOCATION,\n",
+ " vertexai=True,\n",
")\n",
"\n",
"# Chat\n",
- "chat = ChatVertexAI(model=\"gemini-2.0-flash\")\n",
+ "chat = ChatGoogleGenerativeAI(\n",
+ " model=\"gemini-3-flash-preview\",\n",
+ " project=PROJECT_ID,\n",
+ " location=LOCATION,\n",
+ " vertexai=True,\n",
+ ")\n",
"\n",
"# Embedding\n",
- "embeddings = VertexAIEmbeddings(model_name=\"text-embedding-005\")"
+ "embeddings = GoogleGenerativeAIEmbeddings(\n",
+ " model=\"gemini-embedding-001\",\n",
+ " project=PROJECT_ID,\n",
+ " location=LOCATION,\n",
+ " vertexai=True,\n",
+ ")"
]
},
{
@@ -490,7 +501,7 @@
"source": [
"### Documents\n",
"\n",
- "Document in LangChain refers to an unstructured text consisting of `page_content` referring to the content of the data and `metadata` (data describing attributes of page content).\n"
+ "`Document` in LangChain refers to an unstructured text consisting of `page_content` referring to the content of the data and `metadata` (data describing attributes of page content).\n"
]
},
{
diff --git a/gemini/orchestration/intro_langgraph_gemini.ipynb b/gemini/orchestration/intro_langgraph_gemini.ipynb
index 9bd797abc71..01f3b40a07d 100644
--- a/gemini/orchestration/intro_langgraph_gemini.ipynb
+++ b/gemini/orchestration/intro_langgraph_gemini.ipynb
@@ -240,6 +240,8 @@
"import logging\n",
"from typing import Annotated, TypedDict\n",
"\n",
+ "import requests\n",
+ "\n",
"# IPython display utilities\n",
"from IPython.display import Image, Markdown, display\n",
"\n",
@@ -254,8 +256,7 @@
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.graph import END, START, StateGraph\n",
"from langgraph.graph.message import add_messages\n",
- "from langgraph.prebuilt import ToolNode\n",
- "import requests"
+ "from langgraph.prebuilt import ToolNode"
]
},
{
@@ -316,7 +317,7 @@
},
"outputs": [],
"source": [
- "model = ChatVertexAI(model=\"gemini-2.0-flash\", temperature=0)"
+ "model = ChatVertexAI(model=\"gemini-3-flash-preview\", temperature=0)"
]
},
{
@@ -403,7 +404,7 @@
"\n",
"\n",
"# Determine if additional tool calls are needed\n",
- "def should_continue(state: AgentState):\n",
+ "def should_continue(state: AgentState) -> str:\n",
" messages = state[\"messages\"]\n",
" last_message = messages[-1]\n",
" if last_message.tool_calls:\n",
diff --git a/gemini/orchestration/langgraph_gemini_podcast.ipynb b/gemini/orchestration/langgraph_gemini_podcast.ipynb
index 907418233be..1fdd3fec22b 100644
--- a/gemini/orchestration/langgraph_gemini_podcast.ipynb
+++ b/gemini/orchestration/langgraph_gemini_podcast.ipynb
@@ -318,10 +318,11 @@
"\n",
"# LangChain integrations for Gemini API in Google AI Studio and Vertex AI\n",
"from langchain_google_vertexai import ChatVertexAI\n",
+ "from pydub import AudioSegment\n",
+ "\n",
"from langgraph.checkpoint.memory import MemorySaver\n",
"from langgraph.graph import END, StateGraph\n",
"from langgraph.prebuilt import ToolNode\n",
- "from pydub import AudioSegment\n",
"\n",
"# Set logging level to ERROR to filter warnings\n",
"logger = logging.getLogger()\n",
@@ -389,7 +390,7 @@
},
"outputs": [],
"source": [
- "model = ChatVertexAI(model=\"gemini-2.0-flash\", temperature=0)"
+ "model = ChatVertexAI(model=\"gemini-3-flash-preview\", temperature=0)"
]
},
{
@@ -423,7 +424,7 @@
"source": [
"@tool\n",
"def search_arxiv(query: str) -> list[Document]:\n",
- " \"\"\"Search for relevant publications on arXiv\"\"\"\n",
+ " \"\"\"Search for relevant publications on arXiv.\"\"\"\n",
" retriever = ArxivRetriever(\n",
" load_max_docs=2,\n",
" get_full_documents=True,\n",
@@ -431,30 +432,27 @@
" docs = retriever.invoke(query)\n",
" if docs:\n",
" return docs\n",
- " else:\n",
- " return [\"No results found on arXiv\"]\n",
+ " return [\"No results found on arXiv\"]\n",
"\n",
"\n",
"@tool\n",
"def search_pubmed(query: str) -> list[Document]:\n",
- " \"\"\"Search for information on PubMed\"\"\"\n",
+ " \"\"\"Search for information on PubMed.\"\"\"\n",
" retriever = PubMedRetriever()\n",
" docs = retriever.invoke(query)\n",
" if docs:\n",
" return docs\n",
- " else:\n",
- " return [\"No results found on PubMed\"]\n",
+ " return [\"No results found on PubMed\"]\n",
"\n",
"\n",
"@tool\n",
"def search_wikipedia(query: str) -> list[Document]:\n",
- " \"\"\"Search for information on Wikipedia\"\"\"\n",
+ " \"\"\"Search for information on Wikipedia.\"\"\"\n",
" retriever = WikipediaRetriever()\n",
" docs = retriever.invoke(query)\n",
" if docs:\n",
" return docs\n",
- " else:\n",
- " return [\"No results found on Wikipedia\"]"
+ " return [\"No results found on Wikipedia\"]"
]
},
{
@@ -623,11 +621,10 @@
"\n",
"\n",
"# Determine whether to continue research based on the number of searches performed\n",
- "def should_continue_tools(state: AgentState):\n",
+ "def should_continue_tools(state: AgentState) -> str:\n",
" if state[\"search_count\"] > state[\"max_searches\"]:\n",
" return \"generate_script\"\n",
- " else:\n",
- " return \"research_plan\""
+ " return \"research_plan\""
]
},
{
@@ -938,8 +935,7 @@
" ) # Remove non-ASCII characters\n",
" agent_result = re.sub(r\"\\\\\\\\n\", \"\\n\", agent_result) # Replace escaped newlines\n",
" agent_result = re.sub(r\"\\\\n\", \"\", agent_result) # Replace newlines\n",
- " agent_result = re.sub(r\"\\\\'\", \"'\", agent_result) # Replace escaped single quotes\n",
- " return agent_result\n",
+ " return re.sub(r\"\\\\'\", \"'\", agent_result) # Replace escaped single quotes\n",
"\n",
"\n",
"# Thread ID for unique history in workflow execution\n",
@@ -1052,7 +1048,7 @@
" )\n",
"\n",
" # Save the generated audio to an MP3 file\n",
- " filename = f\"part-{str(count)}.mp3\"\n",
+ " filename = f\"part-{count!s}.mp3\"\n",
" audio_files.append(filename)\n",
" with open(filename, \"wb\") as out:\n",
" out.write(response.audio_content)\n",
diff --git a/gemini/prompts/intro_prompt_design.ipynb b/gemini/prompts/intro_prompt_design.ipynb
index 37f25b9d471..f8d70ae9b52 100644
--- a/gemini/prompts/intro_prompt_design.ipynb
+++ b/gemini/prompts/intro_prompt_design.ipynb
@@ -251,7 +251,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\" # @param {type: \"string\"}"
+ "MODEL_ID = \"gemini-2.5-flash\" # @param {type: \"string\"}"
]
},
{
@@ -848,7 +848,7 @@
"Gemini 3 Pro is designed for high efficiency and action. It defaults to concise, direct answers and attempts to solve user intent immediately. Because the model prioritizes being helpful, it may occasionally guess when information is missing or prioritize a satisfying answer over strict instructions. Users can steer the model to curb this behavior with prompting.\n",
"\n",
"**Prompt Optimization:**\n",
- "We’ve detailed strategies that can help you control the model's output, ensure factual accuracy, and strictly enforce constraints. For any other quality regressions, please try prompting Gemini 3 Pro with the regression you’re seeing like so:\n",
+ "We've detailed strategies that can help you control the model's output, ensure factual accuracy, and strictly enforce constraints. For any other quality regressions, please try prompting Gemini 3 Pro with the regression you're seeing like so:\n",
"\n",
"```md\n",
"You are an expert prompt engineer. Rewrite the **Original Prompt** below to prevent the specific failure described in the **Bad Response** and **Feedback**. The new prompt should address the constraints and logic gaps that caused the error.\n",
@@ -901,7 +901,7 @@
"source": [
"### I. Distinguish between deduction and external information\n",
"\n",
- "When you instruct the model with “do not infer\" or \"do not guess,\" it may interpret this too strictly and refuse to perform basic logic (like arithmetic) or synthesize information found in different parts of a document.\n",
+ "When you instruct the model with \"do not infer\" or \"do not guess,\" it may interpret this too strictly and refuse to perform basic logic (like arithmetic) or synthesize information found in different parts of a document.\n",
"\n",
"Instead of a blanket negative constraint, explicitly tell the model to use the provided text for deductions while banning outside knowledge."
]
@@ -998,7 +998,7 @@
"\n",
"The model treats the persona it is assigned seriously and will, sometimes, ignore instructions in order to maintain adherence to the described persona.\n",
"\n",
- "Review the persona that’s assigned to the model and avoid ambiguous situations."
+ "Review the persona that's assigned to the model and avoid ambiguous situations."
]
},
{
diff --git a/gemini/rag-engine/intro_rag_engine.ipynb b/gemini/rag-engine/intro_rag_engine.ipynb
index 02ae26a37cc..84094228821 100644
--- a/gemini/rag-engine/intro_rag_engine.ipynb
+++ b/gemini/rag-engine/intro_rag_engine.ipynb
@@ -230,7 +230,7 @@
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
"# See https://cloud.google.com/vertex-ai/generative-ai/docs/rag-engine/rag-overview#supported-regions for location options.\n",
- "vertexai.init(project=PROJECT_ID, location=\"us-east4\")\n",
+ "vertexai.init(project=PROJECT_ID, location=\"us-east1\")\n",
"client = genai.Client(vertexai=True, project=PROJECT_ID, location=\"global\")"
]
},
@@ -529,7 +529,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\""
+ "MODEL_ID = \"gemini-3-flash-preview\""
]
},
{
diff --git a/gemini/rag-engine/rag_engine_feature_store.ipynb b/gemini/rag-engine/rag_engine_feature_store.ipynb
index dc3163c7274..18a01471c39 100644
--- a/gemini/rag-engine/rag_engine_feature_store.ipynb
+++ b/gemini/rag-engine/rag_engine_feature_store.ipynb
@@ -90,9 +90,9 @@
"id": "84f0f73a0f76"
},
"source": [
- "| | |\n",
- "|-|-|\n",
- "| Author(s) | [Holt Skinner](https://github.com/holtskinner) |"
+ "| Author |\n",
+ "| --- |\n",
+ "| [Holt Skinner](https://github.com/holtskinner) |"
]
},
{
@@ -140,7 +140,7 @@
},
"outputs": [],
"source": [
- "%pip install --upgrade --user --quiet google-cloud-aiplatform google-cloud-bigquery"
+ "%pip install --upgrade --user --quiet google-cloud-aiplatform google-genai google-cloud-bigquery"
]
},
{
@@ -233,14 +233,17 @@
"import os\n",
"\n",
"import vertexai\n",
+ "from google import genai\n",
"\n",
- "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\", isTemplate: true}\n",
- "if PROJECT_ID == \"[your-project-id]\":\n",
+ "# fmt: off\n",
+ "PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
+ "if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
- "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n",
- "\n",
- "vertexai.init(project=PROJECT_ID, location=LOCATION)"
+ "# See https://cloud.google.com/vertex-ai/generative-ai/docs/rag-engine/rag-overview#supported-regions for location options.\n",
+ "vertexai.init(project=PROJECT_ID, location=\"us-east1\")\n",
+ "client = genai.Client(vertexai=True, project=PROJECT_ID, location=\"global\")"
]
},
{
@@ -260,9 +263,10 @@
},
"outputs": [],
"source": [
+ "from IPython.display import Markdown, display\n",
"from google.cloud import bigquery\n",
+ "from google.genai.types import GenerateContentConfig, Retrieval, Tool, VertexRagStore\n",
"from vertexai.preview import rag\n",
- "from vertexai.preview.generative_models import GenerativeModel, Tool\n",
"from vertexai.resources.preview import feature_store"
]
},
@@ -314,7 +318,7 @@
},
"outputs": [],
"source": [
- "client = bigquery.Client(project=PROJECT_ID)\n",
+ "bq_client = bigquery.Client(project=PROJECT_ID)\n",
"\n",
"# Define dataset and table name\n",
"dataset_id = \"input_us_central1\" # @param {type:\"string\"}\n",
@@ -333,16 +337,16 @@
"dataset_ref = bigquery.DatasetReference(PROJECT_ID, dataset_id)\n",
"\n",
"try:\n",
- " dataset = client.get_dataset(dataset_ref)\n",
+ " dataset = bq_client.get_dataset(dataset_ref)\n",
" print(f\"Dataset {dataset_id} already exists.\")\n",
"except Exception:\n",
" dataset = bigquery.Dataset(dataset_ref)\n",
" dataset.location = \"US\" # Set the location (optional, adjust if needed)\n",
- " dataset = client.create_dataset(dataset)\n",
+ " dataset = bq_client.create_dataset(dataset)\n",
" print(f\"Created dataset {dataset.dataset_id}\")\n",
"\n",
"table_ref = dataset_ref.table(table_id)\n",
- "table = client.create_table(bigquery.Table(table_ref, schema=schema))\n",
+ "table = bq_client.create_table(bigquery.Table(table_ref, schema=schema))\n",
"print(f\"Created table {PROJECT_ID}.{dataset_id}.{table_id}\")"
]
},
@@ -354,7 +358,7 @@
},
"outputs": [],
"source": [
- "BIGQUERY_TABLE = f'bq://{table.full_table_id.replace(\":\", \".\")}'"
+ "BIGQUERY_TABLE = f\"bq://{table.full_table_id.replace(':', '.')}\""
]
},
{
@@ -507,9 +511,11 @@
},
"outputs": [],
"source": [
+ "# fmt: off\n",
"GCS_BUCKET = \"cloud-samples-data/gen-app-builder/search/cymbal-bank-employee\" # @param {type:\"string\"}\n",
+ "# fmt: on\n",
"\n",
- "response = rag.import_files( # noqa: F704\n",
+ "response = rag.import_files(\n",
" corpus_name=rag_corpus.name,\n",
" paths=[GCS_BUCKET],\n",
" chunk_size=512,\n",
@@ -586,45 +592,48 @@
},
"outputs": [],
"source": [
- "rag_retrieval_tool = Tool.from_retrieval(\n",
- " retrieval=rag.Retrieval(\n",
- " source=rag.VertexRagStore(\n",
- " rag_resources=[\n",
- " rag.RagResource(\n",
- " rag_corpus=rag_corpus.name, # Currently only 1 corpus is allowed.\n",
- " )\n",
- " ],\n",
+ "# Create a tool for the RAG Corpus\n",
+ "rag_retrieval_tool = Tool(\n",
+ " retrieval=Retrieval(\n",
+ " vertex_rag_store=VertexRagStore(\n",
+ " rag_corpora=[rag_corpus.name],\n",
" similarity_top_k=10,\n",
" vector_distance_threshold=0.4,\n",
- " ),\n",
+ " )\n",
" )\n",
- ")\n",
- "\n",
- "rag_model = GenerativeModel(\"gemini-2.0-flash\", tools=[rag_retrieval_tool])"
+ ")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
- "id": "cc0ee39e50f6"
+ "id": "bff529ef3710"
},
"outputs": [],
"source": [
- "GENERATE_CONTENT_PROMPT = \"What is RAG and why it is helpful?\" # @param {type:\"string\"}\n",
- "\n",
- "response = rag_model.generate_content(GENERATE_CONTENT_PROMPT)"
+ "MODEL_ID = \"gemini-3-flash-preview\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
- "id": "8b9b84c58cb0"
+ "id": "cc0ee39e50f6"
},
"outputs": [],
"source": [
- "response.text"
+ "# fmt: off\n",
+ "GENERATE_CONTENT_PROMPT = \"What is RAG and why it is helpful?\" # @param {type:\"string\"}\n",
+ "# fmt: on\n",
+ "\n",
+ "response = client.models.generate_content(\n",
+ " model=MODEL_ID,\n",
+ " contents=GENERATE_CONTENT_PROMPT,\n",
+ " config=GenerateContentConfig(tools=[rag_retrieval_tool]),\n",
+ ")\n",
+ "\n",
+ "display(Markdown(response.text))"
]
},
{
diff --git a/gemini/rag-engine/rag_engine_pinecone.ipynb b/gemini/rag-engine/rag_engine_pinecone.ipynb
index b29e241dd0b..eb941a35dda 100644
--- a/gemini/rag-engine/rag_engine_pinecone.ipynb
+++ b/gemini/rag-engine/rag_engine_pinecone.ipynb
@@ -829,7 +829,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\"\n",
+ "MODEL_ID = \"gemini-2.5-flash\"\n",
"\n",
"rag_retrieval_tool = Tool(\n",
" retrieval=Retrieval(\n",
diff --git a/gemini/rag-engine/rag_engine_vector_search.ipynb b/gemini/rag-engine/rag_engine_vector_search.ipynb
index 92d4cfbf104..045ccadc0f6 100644
--- a/gemini/rag-engine/rag_engine_vector_search.ipynb
+++ b/gemini/rag-engine/rag_engine_vector_search.ipynb
@@ -85,9 +85,9 @@
"id": "84f0f73a0f76"
},
"source": [
- "| | |\n",
- "|-|-|\n",
- "| Author(s) | [Holt Skinner](https://github.com/holtskinner) |"
+ "| Author |\n",
+ "| --- |\n",
+ "| [Holt Skinner](https://github.com/holtskinner) |"
]
},
{
@@ -129,27 +129,7 @@
"metadata": {
"id": "tFy3H3aPgx12"
},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\u001b[33mWARNING: Skipping /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/google_cloud_aiplatform-1.50.0.dist-info due to invalid metadata entry 'name'\u001b[0m\u001b[33m\n",
- "\u001b[0m\u001b[33mWARNING: Skipping /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/google_cloud_storage-2.16.0.dist-info due to invalid metadata entry 'name'\u001b[0m\u001b[33m\n",
- "\u001b[0m\u001b[33mDEPRECATION: Loading egg at /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/fsspec-2024.3.1-py3.11.egg is deprecated. pip 24.3 will enforce this behaviour change. A possible replacement is to use pip for package installation.. Discussion can be found at https://github.com/pypa/pip/issues/12330\u001b[0m\u001b[33m\n",
- "\u001b[0m\u001b[33mDEPRECATION: Loading egg at /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/google_cloud_documentai_toolbox-0.12.2a0-py3.11.egg is deprecated. pip 24.3 will enforce this behaviour change. A possible replacement is to use pip for package installation.. Discussion can be found at https://github.com/pypa/pip/issues/12330\u001b[0m\u001b[33m\n",
- "\u001b[0m\u001b[33mDEPRECATION: Loading egg at /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/google_cloud_documentai_toolbox-0.11.1a0-py3.11.egg is deprecated. pip 24.3 will enforce this behaviour change. A possible replacement is to use pip for package installation.. Discussion can be found at https://github.com/pypa/pip/issues/12330\u001b[0m\u001b[33m\n",
- "\u001b[0m\u001b[33mWARNING: Skipping /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/google_cloud_aiplatform-1.50.0.dist-info due to invalid metadata entry 'name'\u001b[0m\u001b[33m\n",
- "\u001b[0m\u001b[33mWARNING: Skipping /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/google_cloud_storage-2.16.0.dist-info due to invalid metadata entry 'name'\u001b[0m\u001b[33m\n",
- "\u001b[0m\u001b[33mWARNING: Skipping /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/google_cloud_aiplatform-1.50.0.dist-info due to invalid metadata entry 'name'\u001b[0m\u001b[33m\n",
- "\u001b[0m\u001b[33mWARNING: Skipping /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/google_cloud_aiplatform-1.50.0.dist-info due to invalid metadata entry 'name'\u001b[0m\u001b[33m\n",
- "\u001b[0m\n",
- "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m25.0.1\u001b[0m\n",
- "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip3.11 install --upgrade pip\u001b[0m\n",
- "Note: you may need to restart the kernel to use updated packages.\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"%pip install --upgrade --quiet google-cloud-aiplatform google-genai"
]
@@ -246,7 +226,9 @@
"from google import genai\n",
"from google.cloud import aiplatform\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -499,7 +481,7 @@
"source": [
"GCS_BUCKET = \"\" # @param {type:\"string\", \"placeholder\": \"your-gs-bucket\"}\n",
"\n",
- "response = rag.import_files( # noqa: F704\n",
+ "response = rag.import_files(\n",
" corpus_name=rag_corpus.name,\n",
" paths=[GCS_BUCKET],\n",
" transformation_config=rag.TransformationConfig(\n",
@@ -593,7 +575,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\"\n",
+ "MODEL_ID = \"gemini-2.5-flash\"\n",
"\n",
"rag_retrieval_tool = Tool(\n",
" retrieval=Retrieval(\n",
@@ -618,7 +600,9 @@
},
"outputs": [],
"source": [
+ "# fmt: off\n",
"GENERATE_CONTENT_PROMPT = \"What is RAG and why it is helpful?\" # @param {type:\"string\"}\n",
+ "# fmt: on\n",
"\n",
"response = client.models.generate_content(\n",
" model=MODEL_ID,\n",
diff --git a/gemini/use-cases/applying-llms-to-data/bigquery_dataframes_ml_drug_name_generation.ipynb b/gemini/use-cases/applying-llms-to-data/bigquery_dataframes_ml_drug_name_generation.ipynb
index e629877c35b..2fdffa63a59 100644
--- a/gemini/use-cases/applying-llms-to-data/bigquery_dataframes_ml_drug_name_generation.ipynb
+++ b/gemini/use-cases/applying-llms-to-data/bigquery_dataframes_ml_drug_name_generation.ipynb
@@ -90,9 +90,10 @@
"id": "a3a54161ab79"
},
"source": [
- "| | |\n",
- "|-|-|\n",
- "|Author(s) | [Ashley Xu](https://github.com/ashleyxuu) |"
+ "| Authors |\n",
+ "| --- |\n",
+ "| [Ashley Xu](https://github.com/ashleyxuu) |\n",
+ "| [Holt Skinner](https://github.com/holtskinner) |"
]
},
{
@@ -135,7 +136,7 @@
"1. Use `bigframes` to query the FDA dataset of over 100,000 drugs, filtered on the brand name, generic name, and indications & usage columns.\n",
"1. Filter this dataset to find prototypical brand names that can be used as examples in prompt tuning.\n",
"1. Create a prompt with the user input, general instructions, examples and counter-examples for the desired brand name.\n",
- "1. Use [`bigframes.ml.llm.GeminiTextGenerator`](https://cloud.google.com/python/docs/reference/bigframes/latest/bigframes.ml.llm.GeminiTextGenerator) to generate choices of brand names."
+ "1. Use [`bigframes.ml.llm.GeminiTextGenerator`](https://dataframes.bigquery.dev/reference/api/bigframes.ml.llm.GeminiTextGenerator.html) to generate choices of brand names."
]
},
{
@@ -190,43 +191,6 @@
"%pip install --upgrade --quiet bigframes google-cloud-bigquery-connection"
]
},
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "58707a750154"
- },
- "source": [
- "### Restart current runtime\n",
- "\n",
- "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which will restart the current kernel."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "f200f10a1da3"
- },
- "outputs": [],
- "source": [
- "# Automatically restart kernel after installs so that your environment can access the new packages\n",
- "import IPython\n",
- "\n",
- "app = IPython.Application.instance()\n",
- "app.kernel.do_shutdown(True)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "ZleVuNh9rtqj"
- },
- "source": [
- "\n",
- "⚠️ The kernel is going to restart. Please wait until it is finished before continuing to the next step. ⚠️\n",
- " "
- ]
- },
{
"cell_type": "markdown",
"metadata": {
@@ -236,20 +200,6 @@
"### Import libraries"
]
},
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "PyQmSRbKA8r-"
- },
- "outputs": [],
- "source": [
- "from IPython.display import Markdown\n",
- "from bigframes.ml.llm import GeminiTextGenerator\n",
- "import bigframes.pandas as bpd\n",
- "from google.cloud import bigquery_connection_v1 as bq_connection"
- ]
- },
{
"cell_type": "markdown",
"metadata": {
@@ -310,6 +260,11 @@
"source": [
"import sys\n",
"\n",
+ "import bigframes.pandas as bpd\n",
+ "from IPython.display import Markdown\n",
+ "from bigframes.ml.llm import GeminiTextGenerator\n",
+ "from google.cloud import bigquery_connection_v1 as bq_connection\n",
+ "\n",
"if \"google.colab\" in sys.modules:\n",
" from google.colab import auth\n",
"\n",
@@ -362,7 +317,9 @@
"# Use the environment variable if the user doesn't provide Project ID.\n",
"import os\n",
"\n",
- "PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: off\n",
+ "PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -376,9 +333,9 @@
"id": "t5w0GGdYsDjS"
},
"source": [
- "### Load the Gemini 2.0 Flash model\n",
+ "### Load the Gemini model\n",
"\n",
- "To learn more about all [Gemini models on Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-models)."
+ "Learn more about all [Gemini models on Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-models)."
]
},
{
@@ -389,7 +346,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\""
+ "MODEL_ID = \"gemini-2.5-flash\""
]
},
{
@@ -510,9 +467,10 @@
"outputs": [],
"source": [
"GENERIC_NAME = \"Entropofloxacin\" # @param {type:\"string\"}\n",
+ "# fmt: off\n",
"USAGE = \"Entropofloxacin is a fluoroquinolone antibiotic that is used to treat a variety of bacterial infections, including: pneumonia, streptococcus infections, salmonella infections, escherichia coli infections, and pseudomonas aeruginosa infections It is taken by mouth or by injection. The dosage and frequency of administration will vary depending on the type of infection being treated. It should be taken for the full course of treatment, even if symptoms improve after a few days. Stopping the medication early may increase the risk of the infection coming back.\" # @param {type: \"string\"}\n",
- "NUM_NAMES = 10 # @param {type: \"integer\"}\n",
- "TEMPERATURE = 1.0 # @param {type: \"number\"}"
+ "# fmt: on\n",
+ "NUM_NAMES = 10 # @param {type: \"integer\"}"
]
},
{
@@ -560,9 +518,7 @@
},
"outputs": [],
"source": [
- "def predict(\n",
- " model: GeminiTextGenerator, prompt: str, temperature: float = TEMPERATURE\n",
- ") -> str:\n",
+ "def predict(model: GeminiTextGenerator, prompt: str) -> str:\n",
" # Create DataFrame\n",
" input = bpd.DataFrame(\n",
" {\n",
@@ -571,9 +527,7 @@
" )\n",
"\n",
" # Return response\n",
- " return model.predict(\n",
- " input, temperature=temperature\n",
- " ).ml_generate_text_llm_result.iloc[0]"
+ " return model.predict(input).ml_generate_text_llm_result.iloc[0]"
]
},
{
@@ -787,6 +741,7 @@
" df_examples[\"openfda_brand_name\"],\n",
" df_examples[\"openfda_generic_name\"],\n",
" df_examples[\"indications_and_usage\"],\n",
+ " strict=False,\n",
" )\n",
")\n",
"example_prompt"
@@ -928,7 +883,7 @@
"source": [
"df_missing[\"prompt\"] = (\n",
" \"Provide a unique and modern brand name related to this pharmaceutical drug.\"\n",
- " + \"Don't use English words directly; use variants or invented words. The generic name is: \"\n",
+ " \" Don't use English words directly; use variants or invented words. The generic name is: \"\n",
" + df_missing[\"openfda_generic_name\"]\n",
" + \". The indications and usage are: \"\n",
" + df_missing[\"indications_and_usage\"]\n",
@@ -953,10 +908,8 @@
},
"outputs": [],
"source": [
- "def batch_predict(\n",
- " model: GeminiTextGenerator, input: bpd.DataFrame, temperature: float = TEMPERATURE\n",
- ") -> bpd.DataFrame:\n",
- " return model.predict(input, temperature=temperature).ml_generate_text_llm_result\n",
+ "def batch_predict(model: GeminiTextGenerator, input: bpd.DataFrame) -> bpd.DataFrame:\n",
+ " return model.predict(input).ml_generate_text_llm_result\n",
"\n",
"\n",
"response = batch_predict(model, df_missing[\"prompt\"])"
diff --git a/gemini/use-cases/code/analyze_codebase.ipynb b/gemini/use-cases/code/analyze_codebase.ipynb
index b8956084f8c..f4ba4da7131 100644
--- a/gemini/use-cases/code/analyze_codebase.ipynb
+++ b/gemini/use-cases/code/analyze_codebase.ipynb
@@ -107,7 +107,7 @@
"\n",
"With its long-context reasoning, Gemini can analyze an entire codebase for deeper insights.\n",
"\n",
- "In this tutorial, you learn how to analyze an entire codebase with Gemini 2.0 and prompt the model to:\n",
+ "In this tutorial, you learn how to analyze an entire codebase with Gemini 3 and prompt the model to:\n",
"\n",
"- **Analyze**: Summarize codebases effortlessly.\n",
"- **Guide**: Generate clear developer getting-started documentation.\n",
@@ -237,7 +237,9 @@
"source": [
"import os\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -268,16 +270,16 @@
"import os\n",
"import shutil\n",
"\n",
+ "import git\n",
"from IPython.core.interactiveshell import InteractiveShell\n",
"from IPython.display import Markdown, display\n",
- "import git\n",
"from github import Github\n",
"from gitingest import ingest\n",
"\n",
"InteractiveShell.ast_node_interactivity = \"all\"\n",
"\n",
- "from google.genai.types import CreateCachedContentConfig, GenerateContentConfig\n",
"import nest_asyncio\n",
+ "from google.genai.types import CreateCachedContentConfig, GenerateContentConfig\n",
"\n",
"nest_asyncio.apply()"
]
@@ -302,7 +304,9 @@
"outputs": [],
"source": [
"# The GitHub repository URL\n",
+ "# fmt: off\n",
"repo_url = \"https://github.com/GoogleCloudPlatform/microservices-demo\" # @param {type:\"string\"}\n",
+ "# fmt: on\n",
"\n",
"# The location to clone the repo\n",
"repo_dir = \"./repo\""
@@ -334,8 +338,7 @@
"\n",
"\n",
"def get_github_issue(owner: str, repo: str, issue_number: int) -> str | None:\n",
- " \"\"\"\n",
- " Fetch the contents of a GitHub issue.\n",
+ " \"\"\"Fetch the contents of a GitHub issue.\n",
"\n",
" Args:\n",
" owner (str): The owner of the repository.\n",
@@ -427,7 +430,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\" # @param {type:\"string\"}"
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type:\"string\"}"
]
},
{
@@ -440,7 +443,7 @@
"\n",
"We will create a [context cache](https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-overview) of the codebase so we don't have to send the entire context with every request, saving processing time and cost.\n",
"\n",
- "**Note**: Context caching is only available for stable models with fixed versions (for example, `gemini-2.0-flash-001`). You must include the version postfix (for example, the `-001`).\n",
+ "**Note**: Context caching requires a specific model version (for example, `gemini-3-flash-preview`). For stable models, you must include the version postfix (for example, the `-001`).\n",
"\n",
"For more information, see [Available Gemini stable model versions](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/model-versioning#stable-versions-available)."
]
diff --git a/gemini/use-cases/document-processing/document_processing.ipynb b/gemini/use-cases/document-processing/document_processing.ipynb
index 5fbeb5e6577..9068e8740f0 100644
--- a/gemini/use-cases/document-processing/document_processing.ipynb
+++ b/gemini/use-cases/document-processing/document_processing.ipynb
@@ -130,7 +130,7 @@
"You will complete the following tasks:\n",
"\n",
"- Install the SDK\n",
- "- Use the Gemini 2.0 Flash model to:\n",
+ "- Use the Gemini 3 Flash model to:\n",
" - Extract structured entities from an unstructured document\n",
" - Classify document types\n",
" - Combine classification and entity extraction into a single workflow\n",
@@ -183,7 +183,7 @@
},
"outputs": [],
"source": [
- "%pip install --upgrade --quiet google-genai pypdf"
+ "%pip install --upgrade --quiet google-genai pypdf pydantic"
]
},
{
@@ -240,7 +240,9 @@
"\n",
"from google import genai\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -266,14 +268,14 @@
},
"outputs": [],
"source": [
+ "import json\n",
"from datetime import date\n",
"from enum import Enum\n",
- "import json\n",
"\n",
- "from IPython.display import Markdown, display\n",
+ "import pypdf\n",
+ "from IPython.display import HTML, Markdown, display\n",
"from google.genai.types import GenerateContentConfig, Part\n",
"from pydantic import BaseModel, Field\n",
- "import pypdf\n",
"\n",
"PDF_MIME_TYPE = \"application/pdf\"\n",
"JSON_MIME_TYPE = \"application/json\"\n",
@@ -286,9 +288,9 @@
"id": "FTMywdzUORIA"
},
"source": [
- "### Load the Gemini 2.0 Flash model\n",
+ "### Load the Gemini model\n",
"\n",
- "Gemini 2.0 Flash (`gemini-2.0-flash`) is a multimodal model that supports multimodal prompts. You can include text, image(s), and video in your prompt requests and get text or code responses.\n",
+ "Gemini is a multimodal model that supports multimodal prompts. You can include text, image(s), and video in your prompt requests and get text or code responses.\n",
"\n",
"Learn more about all [Gemini models on Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-models)."
]
@@ -301,7 +303,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash\" # @param {type: \"string\"}"
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type: \"string\"}"
]
},
{
@@ -348,7 +350,7 @@
"id": "802016a08f79"
},
"source": [
- "We will use [Controlled generation](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/control-generated-output) to tell the model which fields need to be extracted.\n",
+ "We will use [Structured output](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/control-generated-output) to tell the model which fields need to be extracted.\n",
"\n",
"The response schema is specified in the `response_schema` parameter in `config`, and the model output will strictly follow that schema.\n",
"\n",
@@ -364,51 +366,73 @@
"outputs": [],
"source": [
"class Address(BaseModel):\n",
- " street: str | None = Field(None, example=\"123 Main St\")\n",
- " city: str | None = Field(None, example=\"Springfield\")\n",
- " state: str | None = Field(None, example=\"IL\")\n",
- " postal_code: str | None = Field(None, example=\"62704\")\n",
- " country: str | None = Field(None, example=\"USA\")\n",
+ " \"\"\"Represents the geographic location details of a person or organization.\"\"\"\n",
+ "\n",
+ " street: str | None = Field(\n",
+ " None, description=\"The street address, e.g., 123 Main St\"\n",
+ " )\n",
+ " city: str | None = Field(None, description=\"The city, e.g., Springfield\")\n",
+ " state: str | None = Field(None, description=\"The state or province code, e.g., IL\")\n",
+ " postal_code: str | None = Field(\n",
+ " None, description=\"The postal or ZIP code, e.g., 62704\"\n",
+ " )\n",
+ " country: str | None = Field(None, description=\"The country, e.g., USA\")\n",
"\n",
"\n",
"class LineItem(BaseModel):\n",
- " amount: float = Field(..., example=100.00)\n",
- " description: str | None = Field(None, example=\"Laptop\")\n",
- " product_code: str | None = Field(None, example=\"LPT-001\")\n",
- " quantity: int = Field(..., example=2)\n",
- " unit: str | None = Field(None, example=\"pcs\")\n",
- " unit_price: float = Field(..., example=50.00)\n",
+ " \"\"\"Represents an individual product or service entry listed in a document.\"\"\"\n",
+ "\n",
+ " amount: float = Field(..., description=\"The total amount for this line item\")\n",
+ " description: str | None = Field(\n",
+ " None, description=\"Description of the product/service, e.g., Laptop\"\n",
+ " )\n",
+ " product_code: str | None = Field(\n",
+ " None, description=\"The SKU or product code, e.g., LPT-001\"\n",
+ " )\n",
+ " quantity: int = Field(..., description=\"The number of units\")\n",
+ " unit: str | None = Field(None, description=\"Unit of measure, e.g., pcs\")\n",
+ " unit_price: float = Field(..., description=\"Price per single unit\")\n",
"\n",
"\n",
"class VAT(BaseModel):\n",
- " amount: float = Field(..., example=20.00)\n",
- " category_code: str | None = Field(None, example=\"A\")\n",
- " tax_amount: float | None = Field(None, example=5.00)\n",
+ " \"\"\"Represents Value Added Tax details, including rates and calculated amounts.\"\"\"\n",
+ "\n",
+ " amount: float = Field(..., description=\"Taxable amount\")\n",
+ " category_code: str | None = Field(None, description=\"Tax category code, e.g., A\")\n",
+ " tax_amount: float | None = Field(None, description=\"The calculated tax value\")\n",
" tax_rate: float | None = Field(\n",
- " None, example=10.0\n",
- " ) # Percentage as a float (e.g., 10 for 10%)\n",
- " total_amount: float = Field(..., example=200.00)\n",
+ " None, description=\"Percentage as a float, e.g., 10 for 10%\"\n",
+ " )\n",
+ " total_amount: float = Field(..., description=\"Total amount including tax\")\n",
"\n",
"\n",
"class Party(BaseModel):\n",
- " name: str = Field(..., example=\"Google\")\n",
- " street: str | None = Field(None, example=\"456 Business Rd\")\n",
- " city: str | None = Field(None, example=\"Metropolis\")\n",
- " state: str | None = Field(None, example=\"NY\")\n",
- " postal_code: str | None = Field(None, example=\"10001\")\n",
- " country: str | None = Field(None, example=\"USA\")\n",
- " email: str | None = Field(None, example=\"contact@google.com\")\n",
- " phone: str | None = Field(None, example=\"+1-555-1234\")\n",
- " website: str | None = Field(None, example=\"https://google.com\")\n",
- " tax_id: str | None = Field(None, example=\"123456789\")\n",
- " registration: str | None = Field(None, example=\"Reg-98765\")\n",
- " iban: str | None = Field(None, example=\"US1234567890123456789\")\n",
- " payment_ref: str | None = Field(None, example=\"INV-2024-001\")\n",
+ " \"\"\"Represents the contact and identification details of a transacting entity.\"\"\"\n",
+ "\n",
+ " name: str = Field(..., description=\"Name of the entity, e.g., Google\")\n",
+ " street: str | None = Field(None, description=\"Street address\")\n",
+ " city: str | None = Field(None, description=\"City name\")\n",
+ " state: str | None = Field(None, description=\"State or province\")\n",
+ " postal_code: str | None = Field(None, description=\"Postal code\")\n",
+ " country: str | None = Field(None, description=\"Country name\")\n",
+ " email: str | None = Field(None, description=\"Contact email address\")\n",
+ " phone: str | None = Field(None, description=\"Contact phone number\")\n",
+ " website: str | None = Field(None, description=\"Entity website URL\")\n",
+ " tax_id: str | None = Field(None, description=\"Tax identification number\")\n",
+ " registration: str | None = Field(None, description=\"Business registration number\")\n",
+ " iban: str | None = Field(None, description=\"International Bank Account Number\")\n",
+ " payment_ref: str | None = Field(\n",
+ " None, description=\"Payment reference or invoice number\"\n",
+ " )\n",
"\n",
"\n",
"class Invoice(BaseModel):\n",
- " invoice_id: str = Field(..., example=\"INV-2024-001\")\n",
- " invoice_date: str = Field(..., example=\"2024-02-03\")\n",
+ " \"\"\"Represents the comprehensive structured data extracted from an invoice document.\"\"\"\n",
+ "\n",
+ " invoice_id: str = Field(..., description=\"The unique identifier of the invoice\")\n",
+ " invoice_date: str = Field(\n",
+ " ..., description=\"The date the invoice was issued (YYYY-MM-DD)\"\n",
+ " )\n",
" supplier: Party\n",
" receiver: Party\n",
" line_items: list[LineItem]\n",
@@ -459,7 +483,6 @@
" ],\n",
" config=GenerateContentConfig(\n",
" system_instruction=entity_extraction_system_instruction,\n",
- " temperature=0,\n",
" response_schema=Invoice,\n",
" response_mime_type=JSON_MIME_TYPE,\n",
" ),\n",
@@ -542,6 +565,8 @@
"outputs": [],
"source": [
"class Payslip(BaseModel):\n",
+ " \"\"\"Represents the structured payroll, tax withholding, and employment details extracted from a payslip.\"\"\"\n",
+ "\n",
" employee_id: str = Field(..., description=\"Unique identifier for the employee\")\n",
" employee_name: str = Field(..., description=\"Full name of the employee\")\n",
" pay_period_start: date = Field(..., description=\"Start date of the pay period\")\n",
@@ -585,7 +610,6 @@
" ],\n",
" config=GenerateContentConfig(\n",
" system_instruction=entity_extraction_system_instruction,\n",
- " temperature=0,\n",
" response_schema=Payslip,\n",
" response_mime_type=JSON_MIME_TYPE,\n",
" ),\n",
@@ -629,6 +653,8 @@
"\n",
"\n",
"class DocumentCategory(Enum):\n",
+ " \"\"\"Specifies the supported classifications for financial, tax, and identification documents.\"\"\"\n",
+ "\n",
" TAX_1040_2019 = \"1040_2019\"\n",
" TAX_1040_2020 = \"1040_2020\"\n",
" TAX_1099_R = \"1099-r\"\n",
@@ -667,7 +693,6 @@
" ],\n",
" config=GenerateContentConfig(\n",
" system_instruction=classification_prompt,\n",
- " temperature=0,\n",
" response_schema=DocumentCategory,\n",
" response_mime_type=ENUM_MIME_TYPE,\n",
" ),\n",
@@ -724,41 +749,89 @@
"outputs": [],
"source": [
"class W2Form(BaseModel):\n",
- " control_number: str | None = Field(None)\n",
- " ein: str = Field(...)\n",
+ " \"\"\"Represents the wage and tax information extracted from a standard US W-2 form.\"\"\"\n",
"\n",
- " employee_first_name: str = Field(...)\n",
- " employee_last_name: str = Field(...)\n",
- " employee_address_street: str = Field(...)\n",
- " employee_address_city: str = Field(...)\n",
- " employee_address_state: str = Field(...)\n",
- " employee_address_zip: str = Field(...)\n",
+ " control_number: str | None = Field(\n",
+ " None, description=\"The employer's internal control number, if available\"\n",
+ " )\n",
+ " ein: str = Field(..., description=\"Employer Identification Number (EIN)\")\n",
"\n",
- " employer_name: str = Field(...)\n",
- " employer_address_street: str = Field(...)\n",
- " employer_address_city: str = Field(...)\n",
- " employer_address_state: str = Field(...)\n",
- " employer_address_zip: str = Field(...)\n",
- " employer_state_id_number: str | None = Field(None)\n",
+ " employee_first_name: str = Field(\n",
+ " ..., description=\"Legal first name of the employee\"\n",
+ " )\n",
+ " employee_last_name: str = Field(..., description=\"Legal last name of the employee\")\n",
+ " employee_address_street: str = Field(\n",
+ " ..., description=\"Employee's residential street address\"\n",
+ " )\n",
+ " employee_address_city: str = Field(..., description=\"Employee's residential city\")\n",
+ " employee_address_state: str = Field(\n",
+ " ..., description=\"Employee's residential state abbreviation\"\n",
+ " )\n",
+ " employee_address_zip: str = Field(\n",
+ " ..., description=\"Employee's residential postal code\"\n",
+ " )\n",
"\n",
- " wages_tips_other_compensation: float = Field(...)\n",
- " federal_income_tax_withheld: float = Field(...)\n",
- " social_security_wages: float = Field(...)\n",
- " social_security_tax_withheld: float = Field(...)\n",
- " medicare_wages_and_tips: float = Field(...)\n",
- " medicare_tax_withheld: float = Field(...)\n",
+ " employer_name: str = Field(..., description=\"Legal name of the employing entity\")\n",
+ " employer_address_street: str = Field(\n",
+ " ..., description=\"Employer's business street address\"\n",
+ " )\n",
+ " employer_address_city: str = Field(..., description=\"Employer's business city\")\n",
+ " employer_address_state: str = Field(\n",
+ " ..., description=\"Employer's business state abbreviation\"\n",
+ " )\n",
+ " employer_address_zip: str = Field(\n",
+ " ..., description=\"Employer's business postal code\"\n",
+ " )\n",
+ " employer_state_id_number: str | None = Field(\n",
+ " None, description=\"The employer's state-specific identification number\"\n",
+ " )\n",
"\n",
- " state: str | None = Field(None)\n",
- " state_wages_tips_etc: float | None = Field(None)\n",
- " state_income_tax: float | None = Field(None)\n",
+ " wages_tips_other_compensation: float = Field(\n",
+ " ..., description=\"Box 1: Total taxable wages, tips, and other compensation\"\n",
+ " )\n",
+ " federal_income_tax_withheld: float = Field(\n",
+ " ..., description=\"Box 2: Total federal income tax withheld\"\n",
+ " )\n",
+ " social_security_wages: float = Field(\n",
+ " ..., description=\"Box 3: Total wages subject to Social Security tax\"\n",
+ " )\n",
+ " social_security_tax_withheld: float = Field(\n",
+ " ..., description=\"Box 4: Total Social Security tax withheld\"\n",
+ " )\n",
+ " medicare_wages_and_tips: float = Field(\n",
+ " ..., description=\"Box 5: Total wages and tips subject to Medicare tax\"\n",
+ " )\n",
+ " medicare_tax_withheld: float = Field(\n",
+ " ..., description=\"Box 6: Total Medicare tax withheld\"\n",
+ " )\n",
+ "\n",
+ " state: str | None = Field(\n",
+ " None, description=\"Box 15: State abbreviation for state-level reporting\"\n",
+ " )\n",
+ " state_wages_tips_etc: float | None = Field(\n",
+ " None, description=\"Box 16: Total wages subject to state income tax\"\n",
+ " )\n",
+ " state_income_tax: float | None = Field(\n",
+ " None, description=\"Box 17: Total state income tax withheld\"\n",
+ " )\n",
"\n",
- " box_12_code: str | None = Field(None)\n",
- " box_12_value: str | None = Field(None)\n",
+ " box_12_code: str | None = Field(\n",
+ " None,\n",
+ " description=\"Box 12: Alpha code for specific compensation types (e.g., D, J, AA)\",\n",
+ " )\n",
+ " box_12_value: str | None = Field(\n",
+ " None, description=\"Box 12: The dollar amount associated with the alpha code\"\n",
+ " )\n",
"\n",
- " form_year: int = Field(...)\n",
+ " form_year: int = Field(\n",
+ " ...,\n",
+ " description=\"The specific tax year for which the W-2 was issued (e.g., 2023)\",\n",
+ " )\n",
"\n",
"\n",
"class DriversLicense(BaseModel):\n",
+ " \"\"\"Captures personal identity and licensing details extracted from a government-issued driver's license.\"\"\"\n",
+ "\n",
" address: str = Field(\n",
" ..., title=\"Address\", description=\"The address of the individual.\"\n",
" )\n",
@@ -824,7 +897,6 @@
" ],\n",
" config=GenerateContentConfig(\n",
" system_instruction=classification_prompt,\n",
- " temperature=0,\n",
" response_schema=DocumentCategory,\n",
" response_mime_type=ENUM_MIME_TYPE,\n",
" ),\n",
@@ -836,7 +908,7 @@
" extraction_schema = classification_to_schema.get(classification_response.parsed)\n",
"\n",
" if not extraction_schema:\n",
- " print(f\"Document does not belong to a specified class. Skipping extraction.\")\n",
+ " print(\"Document does not belong to a specified class. Skipping extraction.\")\n",
" continue\n",
"\n",
" # Send to Gemini with Extraction Prompt\n",
@@ -848,7 +920,6 @@
" ],\n",
" config=GenerateContentConfig(\n",
" system_instruction=classification_prompt,\n",
- " temperature=0,\n",
" response_schema=extraction_schema,\n",
" response_mime_type=JSON_MIME_TYPE,\n",
" ),\n",
@@ -902,12 +973,10 @@
" ],\n",
" config=GenerateContentConfig(\n",
" system_instruction=qa_system_instruction,\n",
- " temperature=0,\n",
- " response_mime_type=\"text/plain\",\n",
" ),\n",
")\n",
"\n",
- "print(f\"Answer: {response.text}\")"
+ "display(Markdown(response.text))"
]
},
{
@@ -958,12 +1027,10 @@
" ],\n",
" config=GenerateContentConfig(\n",
" system_instruction=summarization_system_instruction,\n",
- " temperature=0,\n",
- " response_mime_type=\"text/plain\",\n",
" ),\n",
")\n",
"\n",
- "display(Markdown(f\"### Document Summary\"))\n",
+ "display(Markdown(\"### Document Summary\"))\n",
"display(Markdown(response.text))"
]
},
@@ -1007,10 +1074,10 @@
" mime_type=PDF_MIME_TYPE,\n",
" ),\n",
" ],\n",
- " config=GenerateContentConfig(temperature=0),\n",
")\n",
"\n",
- "display(Markdown(response.text))"
+ "display(Markdown(response.text))\n",
+ "display(HTML(response.text.removeprefix(\"```html\").removesuffix(\"```\")))"
]
},
{
@@ -1053,12 +1120,9 @@
" mime_type=PDF_MIME_TYPE,\n",
" ),\n",
" ],\n",
- " config=GenerateContentConfig(\n",
- " temperature=0,\n",
- " ),\n",
")\n",
"\n",
- "display(Markdown(f\"### Translations\"))\n",
+ "display(Markdown(\"### Translations\"))\n",
"display(Markdown(response.text))"
]
},
@@ -1108,10 +1172,9 @@
" mime_type=PDF_MIME_TYPE,\n",
" ),\n",
" ],\n",
- " config=GenerateContentConfig(temperature=0),\n",
")\n",
"\n",
- "display(Markdown(f\"### Comparison\"))\n",
+ "display(Markdown(\"### Comparison\"))\n",
"display(Markdown(response.text))"
]
},
@@ -1155,9 +1218,7 @@
"\n",
"\n",
"def pdf_slice(input_file: str, output_file: str, pages: list[int]) -> None:\n",
- " \"\"\"Using an input pdf file name and a list of page numbers,\n",
- " return the file name of a new pdf with only those pages\n",
- " \"\"\"\n",
+ " \"\"\"Extracts specific pages from a PDF and saves them to a new file.\"\"\"\n",
" pdf_reader = pypdf.PdfReader(input_file)\n",
" pdf_writer = pypdf.PdfWriter()\n",
" for page_num in pages:\n",
@@ -1183,8 +1244,10 @@
},
"outputs": [],
"source": [
+ "# fmt: off\n",
"question = \"From the Consolidated Balance Sheet, what was the difference between the total assets from 2022 to 2023?\" # @param {type: \"string\"}\n",
"pdf_path = \"https://storage.googleapis.com/github-repo/generative-ai/gemini/use-cases/document-processing/CymbalBankFinancialStatements.pdf\" # @param {type: \"string\"}\n",
+ "# fmt: on\n",
"local_pdf = os.path.basename(pdf_path)"
]
},
@@ -1214,7 +1277,6 @@
" PROMPT_PAGES.format(question=question),\n",
" ],\n",
" config=GenerateContentConfig(\n",
- " temperature=0,\n",
" response_mime_type=JSON_MIME_TYPE,\n",
" response_schema=list[int],\n",
" ),\n",
@@ -1252,6 +1314,17 @@
"To ensure we find the answer to the question, we will also retrieve the page immediately after the selected page."
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "3bc18fd65be1"
+ },
+ "outputs": [],
+ "source": [
+ "pages"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
@@ -1260,8 +1333,10 @@
},
"outputs": [],
"source": [
- "expanded_pages = set(pages).union(page + 1 for page in pages)\n",
- "pdf_slice(input_file=local_pdf, output_file=\"sample.pdf\", pages=sorted(expanded_pages))"
+ "# This includes the page and its successor (e.g., [1, 5] -> {1, 2, 5, 6})\n",
+ "expanded_pages = sorted({p for page in pages for p in (page, page + 1)})\n",
+ "\n",
+ "pdf_slice(local_pdf, \"sample1.pdf\", expanded_pages)"
]
}
],
diff --git a/gemini/use-cases/document-processing/patents_understanding.ipynb b/gemini/use-cases/document-processing/patents_understanding.ipynb
index ba3625cfbd8..49dcdd5e4ce 100644
--- a/gemini/use-cases/document-processing/patents_understanding.ipynb
+++ b/gemini/use-cases/document-processing/patents_understanding.ipynb
@@ -90,9 +90,9 @@
"id": "84f0f73a0f76"
},
"source": [
- "| | |\n",
- "|-|-|\n",
- "| Author(s) | [Holt Skinner](https://github.com/holtskinner) |"
+ "| Author |\n",
+ "| --- |\n",
+ "| [Holt Skinner](https://github.com/holtskinner) |"
]
},
{
@@ -114,7 +114,7 @@
"\n",
"In today's world of Generative AI models like [Gemini](https://blog.google/technology/ai/google-gemini-ai/), it's possible to create the same document processing pipeline without training custom models. This significantly simplifies the process and reduces the time & resources required to automate these workflows.\n",
"\n",
- "In this notebook, we'll create a document understanding pipeline on a public dataset of [patents PDFs](https://console.cloud.google.com/marketplace/details/global-patents/labeled-patents) stored in BigQuery and use [Batch Prediction for Gemini 2.0 in Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/batch-prediction-gemini) to:\n",
+ "In this notebook, we'll create a document understanding pipeline on a public dataset of [patents PDFs](https://console.cloud.google.com/marketplace/details/global-patents/labeled-patents) stored in BigQuery and use [Batch Prediction for Gemini 3 in Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/batch-prediction-gemini) to:\n",
"\n",
"- Classify the patent granter (US or EU).\n",
"- Classify the invention type (Medical Tech, Computer Vision, Cryptography, Other).\n",
@@ -225,19 +225,6 @@
"### Import libraries\n"
]
},
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "rrgrbhPmmEWt"
- },
- "outputs": [],
- "source": [
- "from google import genai\n",
- "from google.cloud import bigquery\n",
- "import pandas_gbq"
- ]
- },
{
"cell_type": "markdown",
"metadata": {
@@ -261,7 +248,13 @@
"source": [
"import os\n",
"\n",
+ "import pandas_gbq\n",
+ "from google import genai\n",
+ "from google.cloud import bigquery\n",
+ "\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -277,7 +270,7 @@
"id": "e43229f3ad4f"
},
"source": [
- "### Load the Gemini 2.0 Flash model\n",
+ "### Load the Gemini 3 Flash model\n",
"\n",
"To learn more about all [Gemini models on Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-models)."
]
@@ -290,7 +283,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\" # @param {type: \"string\"}"
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type: \"string\"}"
]
},
{
@@ -530,7 +523,6 @@
"source": [
"import json\n",
"\n",
- "\n",
"def create_request_json(row) -> str:\n",
" return json.dumps(\n",
" {\n",
@@ -552,7 +544,6 @@
" \"parts\": [{\"text\": \"You are an expert at analyzing patent documents.\"}]\n",
" },\n",
" \"generationConfig\": {\n",
- " \"temperature\": 0,\n",
" \"responseMimeType\": \"application/json\",\n",
" \"responseSchema\": {\n",
" \"type\": \"OBJECT\",\n",
@@ -789,7 +780,6 @@
"source": [
"import pandas as pd\n",
"\n",
- "\n",
"def flatten_response(response) -> dict | None:\n",
" try:\n",
" parsed_json = json.loads(\n",
diff --git a/gemini/use-cases/document-processing/sheet_music.ipynb b/gemini/use-cases/document-processing/sheet_music.ipynb
index aee2224f88e..1fa46d65681 100644
--- a/gemini/use-cases/document-processing/sheet_music.ipynb
+++ b/gemini/use-cases/document-processing/sheet_music.ipynb
@@ -136,7 +136,7 @@
},
"outputs": [],
"source": [
- "%pip install --upgrade -q google-genai PyPDF2"
+ "%pip install --upgrade -q google-genai pypdf"
]
},
{
@@ -194,7 +194,9 @@
"\n",
"from google import genai\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -222,8 +224,8 @@
"source": [
"import json\n",
"\n",
+ "import pypdf\n",
"from IPython.display import Markdown, display\n",
- "import PyPDF2\n",
"from google.genai.types import (\n",
" GenerateContentConfig,\n",
" GoogleSearch,\n",
@@ -254,11 +256,9 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash\" # @param {type: \"string\"}\n",
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type: \"string\"}\n",
"\n",
"config = GenerateContentConfig(\n",
- " temperature=1.0,\n",
- " max_output_tokens=8192,\n",
" safety_settings=[\n",
" SafetySetting(\n",
" category=HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,\n",
@@ -463,21 +463,23 @@
"\n",
" Args:\n",
" file_path (str): Path to the PDF file.\n",
- " new_metadata (dict): Dictionary containing the new metadata fields and values.\n",
- " Example: {'/Author': 'John Doe', '/Title': 'My Report'}\n",
+ " new_metadata (dict): Dictionary containing the metadata fields (e.g., {'/Author': 'John Doe', '/Title': 'My Report'})\n",
" \"\"\"\n",
- " with open(file_path, \"rb\") as pdf_file:\n",
- " pdf_reader = PyPDF2.PdfReader(pdf_file)\n",
- " pdf_writer = PyPDF2.PdfWriter()\n",
+ " # 1. Read the existing PDF\n",
+ " reader = pypdf.PdfReader(file_path)\n",
+ " writer = pypdf.PdfWriter()\n",
"\n",
- " pdf_writer.clone_reader_document_root(pdf_reader)\n",
- " pdf_writer.add_metadata(new_metadata)\n",
+ " # 2. Copy all pages and structure (bookmarks, etc.) from reader to writer\n",
+ " writer.append(reader)\n",
"\n",
- " with open(file_path, \"wb\") as out_file:\n",
- " pdf_writer.write(out_file)\n",
+ " # 3. Add/Update metadata\n",
+ " # Note: Keys usually start with a slash, e.g., \"/Title\"\n",
+ " writer.add_metadata(new_metadata)\n",
"\n",
- "\n",
- "edit_pdf_metadata(output_file_name, new_metadata)"
+ " # 4. Save the result\n",
+ " # We open the file in write-binary mode to save the changes\n",
+ " with open(file_path, \"wb\") as f:\n",
+ " writer.write(f)"
]
},
{
@@ -488,8 +490,15 @@
},
"outputs": [],
"source": [
- "pdf_reader = PyPDF2.PdfReader(output_file_name)\n",
- "print(pdf_reader.metadata)"
+ "# View current metadata\n",
+ "reader = pypdf.PdfReader(output_file_name)\n",
+ "print(reader.metadata)\n",
+ "\n",
+ "edit_pdf_metadata(output_file_name, new_metadata)\n",
+ "\n",
+ "# View updated metadata\n",
+ "reader = pypdf.PdfReader(output_file_name)\n",
+ "print(reader.metadata)"
]
}
],
diff --git a/gemini/use-cases/document-processing/summarization_large_documents_langchain.ipynb b/gemini/use-cases/document-processing/summarization_large_documents_langchain.ipynb
index 4d02b7de8ae..8cc78eea86e 100644
--- a/gemini/use-cases/document-processing/summarization_large_documents_langchain.ipynb
+++ b/gemini/use-cases/document-processing/summarization_large_documents_langchain.ipynb
@@ -251,11 +251,13 @@
"# Use the environment variable if the user doesn't provide Project ID.\n",
"import os\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
- "REGION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n",
+ "REGION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"global\")\n",
"\n",
"import vertexai\n",
"\n",
@@ -270,15 +272,15 @@
},
"outputs": [],
"source": [
- "from pathlib import Path as p\n",
"import urllib\n",
"import warnings\n",
+ "from pathlib import Path as p\n",
"\n",
+ "import pandas as pd\n",
"from langchain import PromptTemplate\n",
"from langchain.chains.summarize import load_summarize_chain\n",
"from langchain.document_loaders import PyPDFLoader\n",
"from langchain_google_vertexai import VertexAI\n",
- "import pandas as pd\n",
"\n",
"warnings.filterwarnings(\"ignore\")"
]
@@ -291,7 +293,7 @@
"source": [
"### Import models\n",
"\n",
- "You load the pre-trained text generation model Gemini 2.0 Flash."
+ "You load the pre-trained text generation model Gemini 3 Flash."
]
},
{
@@ -302,7 +304,7 @@
},
"outputs": [],
"source": [
- "vertex_llm_text = VertexAI(model_name=\"gemini-2.0-flash\")"
+ "vertex_llm_text = VertexAI(model_name=\"gemini-3-flash-preview\")"
]
},
{
@@ -620,7 +622,9 @@
"source": [
"final_mp_data = []\n",
"for doc, out in zip(\n",
- " map_reduce_outputs[\"input_documents\"], map_reduce_outputs[\"intermediate_steps\"]\n",
+ " map_reduce_outputs[\"input_documents\"],\n",
+ " map_reduce_outputs[\"intermediate_steps\"],\n",
+ " strict=False,\n",
"):\n",
" output = {}\n",
" output[\"file_name\"] = p(doc.metadata[\"source\"]).stem\n",
@@ -691,7 +695,7 @@
"\n",
"The Refine method is an alternative method to deal with large document summarization. It works by first running an initial prompt on a small chunk of data, generating some output. Then, for each subsequent document, the output from the previous document is passed in along with the new document, and the LLM is asked to refine the output based on the new document.\n",
"\n",
- "In LangChain, you can use `RefineDocumentsChain` as part of the load_summarize_chain method. What you need to do is setting `refine` as `chain_type` of your chain."
+ "In LangChain, you can use `RefineDocumentsChain` as part of the `load_summarize_chain` method. What you need to do is setting `refine` as `chain_type` of your chain."
]
},
{
@@ -821,7 +825,9 @@
"source": [
"final_refine_data = []\n",
"for doc, out in zip(\n",
- " refine_outputs[\"input_documents\"], refine_outputs[\"intermediate_steps\"]\n",
+ " refine_outputs[\"input_documents\"],\n",
+ " refine_outputs[\"intermediate_steps\"],\n",
+ " strict=False,\n",
"):\n",
" output = {}\n",
" output[\"file_name\"] = p(doc.metadata[\"source\"]).stem\n",
diff --git a/gemini/use-cases/document-processing/tax_automation.ipynb b/gemini/use-cases/document-processing/tax_automation.ipynb
index 5b69b6fae39..ef809ea3fbf 100644
--- a/gemini/use-cases/document-processing/tax_automation.ipynb
+++ b/gemini/use-cases/document-processing/tax_automation.ipynb
@@ -232,9 +232,9 @@
"source": [
"from enum import Enum\n",
"\n",
+ "import pandas as pd\n",
"from IPython.display import display\n",
"from google.genai.types import GenerateContentConfig, Part\n",
- "import pandas as pd\n",
"from pydantic import BaseModel, Field\n",
"\n",
"pd.set_option(\"display.max_colwidth\", None)\n",
@@ -249,7 +249,7 @@
"id": "e43229f3ad4f"
},
"source": [
- "### Load the Gemini 2.0 Flash model\n",
+ "### Load the Gemini 3 Flash model\n",
"\n",
"To learn more about all [Gemini models on Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-models)."
]
@@ -262,7 +262,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash\" # @param {type: \"string\"}"
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type: \"string\"}"
]
},
{
@@ -394,9 +394,7 @@
"outputs": [],
"source": [
"class FormW2(BaseModel):\n",
- " \"\"\"\n",
- " Pydantic class to represent data extracted from a Form W-2 (Wage and Tax Statement).\n",
- " \"\"\"\n",
+ " \"\"\"Pydantic class to represent data extracted from a Form W-2 (Wage and Tax Statement).\"\"\"\n",
"\n",
" employee_ssn: str = Field(..., description=\"Employee's Social Security Number\")\n",
" employer_ein: str = Field(\n",
@@ -463,9 +461,7 @@
"\n",
"\n",
"class Form1099DIV(BaseModel):\n",
- " \"\"\"\n",
- " Pydantic class representing data extracted from Form 1099-DIV (Dividends and Distributions).\n",
- " \"\"\"\n",
+ " \"\"\"Pydantic class representing data extracted from Form 1099-DIV (Dividends and Distributions).\"\"\"\n",
"\n",
" payer_name: str | None = Field(\n",
" None, description=\"Name of the payer (company distributing dividends).\"\n",
@@ -538,9 +534,7 @@
"\n",
"\n",
"class Form1099INT(BaseModel):\n",
- " \"\"\"\n",
- " Pydantic class representing data extracted from a Form 1099-INT (Interest Income).\n",
- " \"\"\"\n",
+ " \"\"\"Pydantic class representing data extracted from a Form 1099-INT (Interest Income).\"\"\"\n",
"\n",
" payer_name: str = Field(..., description=\"Name of the payer (bank, institution)\")\n",
" payer_tin: str = Field(\n",
diff --git a/gemini/use-cases/productivity/slide_generation_with_marp.ipynb b/gemini/use-cases/productivity/slide_generation_with_marp.ipynb
index 66d99c71529..6f4d2d82bd3 100644
--- a/gemini/use-cases/productivity/slide_generation_with_marp.ipynb
+++ b/gemini/use-cases/productivity/slide_generation_with_marp.ipynb
@@ -207,7 +207,9 @@
"source": [
"import os\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -246,10 +248,10 @@
"source": [
"import re\n",
"\n",
+ "import nest_asyncio\n",
"from IPython.display import Markdown, display\n",
"from gitingest import ingest\n",
"from google.genai.types import GenerateContentConfig, Part\n",
- "import nest_asyncio\n",
"\n",
"nest_asyncio.apply()"
]
@@ -260,7 +262,7 @@
"id": "eXHJi5B6P5vd"
},
"source": [
- "### Load the Gemini 2.0 Flash Thinking model\n",
+ "### Load the Gemini 3 Flash Thinking model\n",
"\n",
"Learn more about all [Gemini models on Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-models)."
]
@@ -273,7 +275,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-thinking-exp-01-21\" # @param {type: \"string\"}"
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type: \"string\"}"
]
},
{
diff --git a/gemini/use-cases/retail/multimodal_retail_recommendations.ipynb b/gemini/use-cases/retail/multimodal_retail_recommendations.ipynb
index 0d1cfa7bca8..00081cdb368 100644
--- a/gemini/use-cases/retail/multimodal_retail_recommendations.ipynb
+++ b/gemini/use-cases/retail/multimodal_retail_recommendations.ipynb
@@ -105,7 +105,7 @@
"\n",
"For retail companies, recommendation systems improve customer experience and thus can increase sales.\n",
"\n",
- "This notebook shows how you can use the multimodal capabilities of Gemini 2.0 model to rapidly create a multimodal recommendation system out-of-the-box."
+ "This notebook shows how you can use the multimodal capabilities of Gemini 3 model to rapidly create a multimodal recommendation system out-of-the-box."
]
},
{
@@ -142,7 +142,7 @@
"source": [
"### Objectives\n",
"\n",
- "Your main objective is to learn how to create a recommendation system that can provide both recommendations and explanations using a multimodal model: Gemini 2.0.\n",
+ "Your main objective is to learn how to create a recommendation system that can provide both recommendations and explanations using a multimodal model: Gemini 3.\n",
"\n",
"In this notebook, you will begin with a scene (e.g. a living room) and use the Gemini model to perform visual understanding. You will also investigate how the Gemini model can be used to recommend an item (e.g. a chair) from a list of furniture items as input.\n",
"\n",
@@ -248,7 +248,9 @@
"\n",
"from google import genai\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -305,7 +307,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash\""
+ "MODEL_ID = \"gemini-3-flash-preview\""
]
},
{
diff --git a/gemini/use-cases/spatial-understanding/spatial_understanding.ipynb b/gemini/use-cases/spatial-understanding/spatial_understanding.ipynb
index 1da2989c73b..b1314533366 100644
--- a/gemini/use-cases/spatial-understanding/spatial_understanding.ipynb
+++ b/gemini/use-cases/spatial-understanding/spatial_understanding.ipynb
@@ -29,7 +29,7 @@
"id": "JAPoU8Sm5E6e"
},
"source": [
- "# Spatial understanding with Gemini 2.0\n",
+ "# Spatial understanding with Gemini 3\n",
"\n",
"\n",
" \n",
@@ -102,10 +102,10 @@
"This notebook introduces object detection and spatial understanding with the Gemini API in Vertex AI.\n",
"\n",
"\n",
- "**YouTube Video: Building with Gemini 2.0: Spatial understanding**\n",
+ "**YouTube Video: Building with Gemini 3: Spatial understanding**\n",
"\n",
"\n",
- " \n",
+ " \n",
"\n",
"\n",
"\n",
@@ -204,7 +204,9 @@
"source": [
"import os\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -241,11 +243,11 @@
},
"outputs": [],
"source": [
+ "import requests\n",
"from IPython.display import display\n",
"from PIL import Image, ImageColor, ImageDraw, ImageFont\n",
"from google.genai.types import GenerateContentConfig, Part, SafetySetting\n",
- "from pydantic import BaseModel\n",
- "import requests"
+ "from pydantic import BaseModel"
]
},
{
@@ -292,8 +294,7 @@
"outputs": [],
"source": [
"class BoundingBox(BaseModel):\n",
- " \"\"\"\n",
- " Represents a bounding box with its 2D coordinates and associated label.\n",
+ " \"\"\"Represents a bounding box with its 2D coordinates and associated label.\n",
"\n",
" Attributes:\n",
" box_2d (list[int]): A list of integers representing the 2D coordinates of the bounding box,\n",
@@ -340,8 +341,7 @@
"outputs": [],
"source": [
"def plot_bounding_boxes(image_uri: str, bounding_boxes: list[BoundingBox]) -> None:\n",
- " \"\"\"\n",
- " Plots bounding boxes on an image with labels, using PIL and normalized coordinates.\n",
+ " \"\"\"Plots bounding boxes on an image with labels, using PIL and normalized coordinates.\n",
"\n",
" Args:\n",
" image_uri: The URI of the image file.\n",
@@ -407,7 +407,9 @@
"outputs": [],
"source": [
"image_uri = \"https://storage.googleapis.com/generativeai-downloads/images/Cupcakes.jpg\"\n",
+ "# fmt: off\n",
"prompt = \"Detect the 2d bounding boxes of the cupcakes (with `label` as topping description)\" # @param {type:\"string\"}\n",
+ "# fmt: on\n",
"\n",
"response = client.models.generate_content(\n",
" model=MODEL_ID,\n",
@@ -473,7 +475,9 @@
"outputs": [],
"source": [
"image_uri = \"https://storage.googleapis.com/generativeai-downloads/images/socks.jpg\"\n",
+ "# fmt: off\n",
"prompt = \"Show me the positions of the socks with a face. Label according to position in the image.\" # @param [\"Detect all rainbow socks\", \"Find all socks and label them with emojis \", \"Show me the positions of the socks with a face. Label according to position in the image.\", \"Find the sock that goes with the one at the top\"] {\"allow-input\":true}\n",
+ "# fmt: on\n",
"\n",
"response = client.models.generate_content(\n",
" model=MODEL_ID,\n",
@@ -521,7 +525,9 @@
"outputs": [],
"source": [
"image_uri = \"https://storage.googleapis.com/generativeai-downloads/images/origamis.jpg\"\n",
+ "# fmt: off\n",
"prompt = \"Draw a square around the fox shadow\" # @param [\"Find the two origami animals.\", \"Where are the origamis' shadows?\", \"Draw a square around the fox shadow\"] {\"allow-input\":true}\n",
+ "# fmt: on\n",
"\n",
"response = client.models.generate_content(\n",
" model=MODEL_ID,\n",
@@ -569,7 +575,9 @@
"outputs": [],
"source": [
"image_uri = \"https://storage.googleapis.com/generativeai-downloads/images/spill.jpg\"\n",
+ "# fmt: off\n",
"prompt = \"Tell me how to clean my table with an explanation as label\" # @param [\"Show me where my coffee was spilled.\", \"Tell me how to clean my table with an explanation as label\"] {\"allow-input\":true}\n",
+ "# fmt: on\n",
"\n",
"response = client.models.generate_content(\n",
" model=MODEL_ID,\n",
diff --git a/gemini/use-cases/video-analysis/video_analysis.ipynb b/gemini/use-cases/video-analysis/video_analysis.ipynb
deleted file mode 100644
index 3830e7fe23d..00000000000
--- a/gemini/use-cases/video-analysis/video_analysis.ipynb
+++ /dev/null
@@ -1,393 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "ijGzTHJJUCPY"
- },
- "outputs": [],
- "source": [
- "# Copyright 2024 Google LLC\n",
- "#\n",
- "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
- "# you may not use this file except in compliance with the License.\n",
- "# You may obtain a copy of the License at\n",
- "#\n",
- "# https://www.apache.org/licenses/LICENSE-2.0\n",
- "#\n",
- "# Unless required by applicable law or agreed to in writing, software\n",
- "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
- "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
- "# See the License for the specific language governing permissions and\n",
- "# limitations under the License."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "VEqbX8OhE8y9"
- },
- "source": [
- "# Video Analysis with Gemini\n",
- "\n",
- "> **NOTE:** This notebook uses the Vertex AI SDK, which does not support Gemini 2.0; refer to [this updated notebook](https://github.com/GoogleCloudPlatform/generative-ai/blob/main/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb) which uses the Google Gen AI SDK.\n",
- "\n",
- "\n",
- " \n",
- " \n",
- "  Run in Colab\n",
- " \n",
- " | \n",
- " \n",
- " \n",
- "  Run in Colab Enterprise\n",
- " \n",
- " | \n",
- " \n",
- " \n",
- "  View on GitHub\n",
- " \n",
- " | \n",
- " \n",
- " \n",
- "  Open in Vertex AI Workbench\n",
- " \n",
- " | \n",
- " \n",
- " \n",
- "  Open in Cloud Skills Boost\n",
- " \n",
- " | \n",
- " \n",
- "\n",
- "\n",
- "\n",
- "Share to:\n",
- "\n",
- "\n",
- " \n",
- "\n",
- "\n",
- "\n",
- " \n",
- "\n",
- "\n",
- "\n",
- " \n",
- "\n",
- "\n",
- "\n",
- " \n",
- "\n",
- "\n",
- "\n",
- " \n",
- " \n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "5ad877ea09dd"
- },
- "source": [
- "| | |\n",
- "|-|-|\n",
- "|Author(s) | [Holt Skinner](https://github.com/holtskinner) |"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "r11Gu7qNgx1p"
- },
- "source": [
- "## Getting Started\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "No17Cw5hgx12"
- },
- "source": [
- "### Install Vertex AI SDK for Python"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "tFy3H3aPgx12"
- },
- "outputs": [],
- "source": [
- "%pip install --upgrade --user -q google-cloud-aiplatform"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "R5Xep4W9lq-Z"
- },
- "source": [
- "### Restart current runtime\n",
- "\n",
- "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which will restart the current kernel."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "XRvKdaPDTznN"
- },
- "outputs": [],
- "source": [
- "# Restart kernel after installs so that your environment can access the new packages\n",
- "import IPython\n",
- "\n",
- "app = IPython.Application.instance()\n",
- "app.kernel.do_shutdown(True)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "SbmM4z7FOBpM"
- },
- "source": [
- "\n",
- "⚠️ The kernel is going to restart. Please wait until it is finished before continuing to the next step. ⚠️\n",
- " \n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "dmWOrTJ3gx13"
- },
- "source": [
- "### Authenticate your notebook environment (Colab only)\n",
- "\n",
- "If you are running this notebook on Google Colab, run the following cell to authenticate your environment. This step is not required if you are using [Vertex AI Workbench](https://cloud.google.com/vertex-ai-workbench).\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "NyKGtVQjgx13"
- },
- "outputs": [],
- "source": [
- "import sys\n",
- "\n",
- "# Additional authentication is required for Google Colab\n",
- "if \"google.colab\" in sys.modules:\n",
- " # Authenticate user to Google Cloud\n",
- " from google.colab import auth\n",
- "\n",
- " auth.authenticate_user()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "DF4l8DTdWgPY"
- },
- "source": [
- "### Set Google Cloud project information and initialize Vertex AI SDK\n",
- "\n",
- "To get started using Vertex AI, you must have an existing Google Cloud project and [enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).\n",
- "\n",
- "Learn more about [setting up a project and a development environment](https://cloud.google.com/vertex-ai/docs/start/cloud-environment)."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "Nqwi-5ufWp_B"
- },
- "outputs": [],
- "source": [
- "# Define project information\n",
- "PROJECT_ID = \"YOUR_PROJECT_ID\" # @param {type:\"string\"}\n",
- "LOCATION = \"us-central1\" # @param {type:\"string\"}\n",
- "\n",
- "# Initialize Vertex AI\n",
- "import vertexai\n",
- "\n",
- "vertexai.init(project=PROJECT_ID, location=LOCATION)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "jXHfaVS66_01"
- },
- "source": [
- "### Import libraries\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "lslYAvw37JGQ"
- },
- "outputs": [],
- "source": [
- "from IPython.display import Markdown, Video, display\n",
- "from vertexai.preview.generative_models import (\n",
- " GenerationConfig,\n",
- " GenerativeModel,\n",
- " HarmBlockThreshold,\n",
- " HarmCategory,\n",
- " Part,\n",
- ")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "FTMywdzUORIA"
- },
- "source": [
- "### Load the Gemini 2.0 model\n",
- "\n",
- "Gemini 2.0 (`gemini-2.0-flash`) is a multimodal model that supports multimodal prompts. You can include text, image(s), PDFs, audio, and video in your prompt requests and get text or code responses."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "lRyTw2iPhEXG"
- },
- "outputs": [],
- "source": [
- "model = GenerativeModel(\"gemini-2.0-flash\")\n",
- "\n",
- "generation_config = GenerationConfig(temperature=1, top_p=0.95, max_output_tokens=8192)\n",
- "\n",
- "safety_settings = {\n",
- " HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_ONLY_HIGH,\n",
- " HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,\n",
- " HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_ONLY_HIGH,\n",
- " HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,\n",
- "}"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "acf455813c5f"
- },
- "source": [
- "### Helper functions"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "b82d26e475c7"
- },
- "outputs": [],
- "source": [
- "def get_url_from_gcs(gcs_uri: str) -> str:\n",
- " # converts GCS uri to url for display.\n",
- " return gcs_uri.replace(\"gs://\", \"https://storage.googleapis.com/\").replace(\n",
- " \" \", \"%20\"\n",
- " )"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Wy75sLb-yjNn"
- },
- "source": [
- "## Sentiment Analysis from Video and Audio\n",
- "\n",
- "For this example, we will be analyzing a video of the first televised US presidential debate between John F. Kennedy and Richard Nixon. This debate is largely believed to have been critical in securing JFK's victory due to his calm and personable demeanor for the broadcast.\n",
- "\n",
- "The video is an hour long, which is roughly equivalent to 1 Million Tokens for Gemini 2.0."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "75ef371ea16f"
- },
- "outputs": [],
- "source": [
- "video_analysis_prompt = \"\"\"You are an expert in politics and history. Provide a detailed analysis of the video including each speakers facial expressions and mood at key points.\"\"\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "KzqjpEiryjNo"
- },
- "outputs": [],
- "source": [
- "# Load file directly from Google Cloud Storage\n",
- "video_uri = \"gs://github-repo/video/KennedyNixon1960PresidentialDebate.mp4\"\n",
- "\n",
- "# Load contents\n",
- "contents = [\n",
- " Part.from_uri(\n",
- " uri=video_uri,\n",
- " mime_type=\"video/mp4\",\n",
- " ),\n",
- " video_analysis_prompt,\n",
- "]\n",
- "\n",
- "# Display the Video\n",
- "display(Video(get_url_from_gcs(video_uri)))"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "fc7394e34bce"
- },
- "source": [
- "Note: due to the length of the video, this will take a few minutes to complete."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "9548449631b4"
- },
- "outputs": [],
- "source": [
- "# Send to Gemini\n",
- "response = model.generate_content(contents, generation_config=generation_config)\n",
- "\n",
- "# Display results\n",
- "display(Markdown(response.text))"
- ]
- }
- ],
- "metadata": {
- "colab": {
- "name": "video_analysis.ipynb",
- "toc_visible": true
- },
- "kernelspec": {
- "display_name": "Python 3",
- "name": "python3"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb b/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb
index bd2105b5379..aec76a8fae5 100644
--- a/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb
+++ b/gemini/use-cases/video-analysis/youtube_video_analysis.ipynb
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"metadata": {
"id": "ur8xi4C7S06n"
},
@@ -85,9 +85,9 @@
"id": "84f0f73a0f76"
},
"source": [
- "| | |\n",
- "|-|-|\n",
- "| Author(s) | [Alok Pattani](https://github.com/alokpattani/) |"
+ "| Author |\n",
+ "| --- |\n",
+ "| [Alok Pattani](https://github.com/alokpattani/) |"
]
},
{
@@ -133,45 +133,7 @@
},
"outputs": [],
"source": [
- "%pip install --upgrade --quiet google-genai itables"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "R5Xep4W9lq-Z"
- },
- "source": [
- "### Restart runtime\n",
- "\n",
- "To use the newly installed packages in this Jupyter runtime, you must restart the runtime. You can do this by running the cell below, which restarts the current kernel.\n",
- "\n",
- "The restart might take a minute or longer. After it's restarted, continue to the next step."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {
- "id": "XRvKdaPDTznN"
- },
- "outputs": [],
- "source": [
- "import IPython\n",
- "\n",
- "app = IPython.Application.instance()\n",
- "app.kernel.do_shutdown(True)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "SbmM4z7FOBpM"
- },
- "source": [
- "\n",
- "⚠️ The kernel is going to restart. Wait until it's finished before continuing to the next step. ⚠️\n",
- " \n"
+ "%pip install --upgrade --quiet google-genai itables pandas"
]
},
{
@@ -187,7 +149,7 @@
},
{
"cell_type": "code",
- "execution_count": 1,
+ "execution_count": null,
"metadata": {
"id": "NyKGtVQjgx13"
},
@@ -216,7 +178,7 @@
},
{
"cell_type": "code",
- "execution_count": 26,
+ "execution_count": null,
"metadata": {
"id": "Nqwi-5ufWp_B"
},
@@ -225,11 +187,13 @@
"# Use the environment variable if the user doesn't provide Project ID.\n",
"import os\n",
"\n",
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
- "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"us-central1\")\n",
+ "LOCATION = os.environ.get(\"GOOGLE_CLOUD_REGION\", \"global\")\n",
"\n",
"from google import genai\n",
"\n",
@@ -256,7 +220,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": null,
"metadata": {
"id": "6fc324893334"
},
@@ -264,11 +228,11 @@
"source": [
"import json\n",
"\n",
- "from IPython.display import HTML, Markdown, display\n",
- "from google.genai.types import GenerateContentConfig, Part\n",
- "from itables import show\n",
"import itables.options as itable_opts\n",
"import pandas as pd\n",
+ "from IPython.display import Markdown, display\n",
+ "from google.genai.types import GenerateContentConfig, Part\n",
+ "from itables import show\n",
"from tenacity import retry, stop_after_attempt, wait_random_exponential"
]
},
@@ -283,7 +247,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
"metadata": {
"id": "4730b9f09e1e"
},
@@ -297,38 +261,6 @@
"itable_opts.column_filters = \"header\""
]
},
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "9d46fd0dfdf7"
- },
- "source": [
- "### Create a helper function"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "metadata": {
- "id": "e9034b0991f4"
- },
- "outputs": [],
- "source": [
- "def display_youtube_video(url: str) -> None:\n",
- " youtube_video_embed_url = url.replace(\"/watch?v=\", \"/embed/\")\n",
- "\n",
- " # Create HTML code to directly embed video\n",
- " youtube_video_embed_html_code = f\"\"\"\n",
- " \n",
- " \"\"\"\n",
- "\n",
- " # Display embedded YouTube video\n",
- " display(HTML(youtube_video_embed_html_code))"
- ]
- },
{
"cell_type": "markdown",
"metadata": {
@@ -340,15 +272,15 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": null,
"metadata": {
"id": "cf93d5f0ce00"
},
"outputs": [],
"source": [
"# Set Gemini Flash and Pro models to be used in this notebook\n",
- "GEMINI_FLASH_MODEL_ID = \"gemini-2.0-flash-001\"\n",
- "GEMINI_PRO_MODEL_ID = \"gemini-2.0-flash\""
+ "GEMINI_FLASH_MODEL_ID = \"gemini-3-flash-preview\"\n",
+ "GEMINI_PRO_MODEL_ID = \"gemini-3-pro-preview\""
]
},
{
@@ -382,9 +314,7 @@
"# Provide link to a public YouTube video to summarize\n",
"YOUTUBE_VIDEO_URL = (\n",
" \"https://www.youtube.com/watch?v=O_W_VGUeHVI\" # @param {type:\"string\"}\n",
- ")\n",
- "\n",
- "display_youtube_video(YOUTUBE_VIDEO_URL)"
+ ")"
]
},
{
@@ -445,9 +375,7 @@
"# cloud_next_keynote_video_url = \"https://www.youtube.com/watch?v=V6DJYGn2SFk\"\n",
"\n",
"# Uncomment line below to replace with 14-min keynote summary video instead (faster)\n",
- "cloud_next_keynote_video_url = \"https://www.youtube.com/watch?v=M-CzbTUVykg\"\n",
- "\n",
- "display_youtube_video(cloud_next_keynote_video_url)"
+ "cloud_next_keynote_video_url = \"https://www.youtube.com/watch?v=M-CzbTUVykg\""
]
},
{
@@ -498,7 +426,6 @@
"}\n",
"\n",
"video_extraction_json_generation_config = GenerateContentConfig(\n",
- " temperature=0.0,\n",
" max_output_tokens=8192,\n",
" response_mime_type=\"application/json\",\n",
" response_schema=video_extraction_response_schema,\n",
@@ -589,7 +516,7 @@
},
{
"cell_type": "code",
- "execution_count": 18,
+ "execution_count": null,
"metadata": {
"id": "b8589a51547d"
},
@@ -625,7 +552,6 @@
"}\n",
"\n",
"multiple_video_extraction_json_generation_config = GenerateContentConfig(\n",
- " temperature=0.0,\n",
" max_output_tokens=8192,\n",
" response_mime_type=\"application/json\",\n",
" response_schema=multiple_video_extraction_response_schema,\n",
@@ -643,7 +569,7 @@
},
{
"cell_type": "code",
- "execution_count": 27,
+ "execution_count": null,
"metadata": {
"id": "5aa93ca907bc"
},
@@ -653,7 +579,7 @@
"\n",
"\n",
"@retry(wait=wait_random_exponential(multiplier=1, max=120), stop=stop_after_attempt(2))\n",
- "async def async_generate(prompt, yt_link):\n",
+ "async def async_generate(prompt: str, yt_link: str):\n",
" try:\n",
" response = await client.aio.models.generate_content(\n",
" model=GEMINI_PRO_MODEL_ID,\n",
diff --git a/notebook_template.ipynb b/notebook_template.ipynb
index a46ed37b20b..ab06ebb4cf5 100644
--- a/notebook_template.ipynb
+++ b/notebook_template.ipynb
@@ -443,7 +443,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash\" # @param {type:\"string\"}"
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type:\"string\"}"
]
},
{
diff --git a/sdk/intro_genai_sdk.ipynb b/sdk/intro_genai_sdk.ipynb
index ef76a271521..d3e70b232d5 100644
--- a/sdk/intro_genai_sdk.ipynb
+++ b/sdk/intro_genai_sdk.ipynb
@@ -296,7 +296,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\" # @param {type: \"string\"}"
+ "MODEL_ID = \"gemini-3-flash-preview\" # @param {type: \"string\"}"
]
},
{
@@ -941,7 +941,7 @@
"\n",
"[Context caching](https://cloud.google.com/vertex-ai/generative-ai/docs/context-cache/context-cache-overview) lets you to store frequently used input tokens in a dedicated cache and reference them for subsequent requests, eliminating the need to repeatedly pass the same set of tokens to a model.\n",
"\n",
- "**Note**: Context caching is only available for stable models with fixed versions (for example, `gemini-2.0-flash-001`). You must include the version postfix (for example, the `-001`)."
+    "**Note**: Context caching is only available for models with fixed versions (for example, `gemini-3-flash-preview`). For stable models, you must include the version postfix (for example, the `-001`)."
]
},
{
@@ -979,7 +979,7 @@
"]\n",
"\n",
"cached_content = client.caches.create(\n",
- " model=\"gemini-2.0-flash-001\",\n",
+ " model=\"gemini-3-flash-preview\",\n",
" config=CreateCachedContentConfig(\n",
" system_instruction=system_instruction,\n",
" contents=pdf_parts,\n",
@@ -1006,7 +1006,7 @@
"outputs": [],
"source": [
"response = client.models.generate_content(\n",
- " model=\"gemini-2.0-flash-001\",\n",
+ " model=\"gemini-3-flash-preview\",\n",
" contents=\"What is the research goal shared by these research papers?\",\n",
" config=GenerateContentConfig(\n",
" cached_content=cached_content.name,\n",
@@ -1243,7 +1243,7 @@
"Example output:\n",
"\n",
"```json\n",
- "{\"status\": \"\", \"processed_time\": \"2024-11-13T14:04:28.376+00:00\", \"request\": {\"contents\": [{\"parts\": [{\"file_data\": null, \"text\": \"List objects in this image.\"}, {\"file_data\": {\"file_uri\": \"gs://cloud-samples-data/generative-ai/image/gardening-tools.jpeg\", \"mime_type\": \"image/jpeg\"}, \"text\": null}], \"role\": \"user\"}], \"generationConfig\": {\"temperature\": 0.4}}, \"response\": {\"candidates\": [{\"avgLogprobs\": -0.10394711927934126, \"content\": {\"parts\": [{\"text\": \"Here's a list of the objects in the image:\\n\\n* **Watering can:** A green plastic watering can with a white rose head.\\n* **Plant:** A small plant (possibly oregano) in a terracotta pot.\\n* **Terracotta pots:** Two terracotta pots, one containing the plant and another empty, stacked on top of each other.\\n* **Gardening gloves:** A pair of striped gardening gloves.\\n* **Gardening tools:** A small trowel and a hand cultivator (hoe). Both are green with black handles.\"}], \"role\": \"model\"}, \"finishReason\": \"STOP\"}], \"modelVersion\": \"gemini-2.0-flash-001@default\", \"usageMetadata\": {\"candidatesTokenCount\": 110, \"promptTokenCount\": 264, \"totalTokenCount\": 374}}}\n",
+ "{\"status\": \"\", \"processed_time\": \"2024-11-13T14:04:28.376+00:00\", \"request\": {\"contents\": [{\"parts\": [{\"file_data\": null, \"text\": \"List objects in this image.\"}, {\"file_data\": {\"file_uri\": \"gs://cloud-samples-data/generative-ai/image/gardening-tools.jpeg\", \"mime_type\": \"image/jpeg\"}, \"text\": null}], \"role\": \"user\"}], \"generationConfig\": {\"temperature\": 0.4}}, \"response\": {\"candidates\": [{\"avgLogprobs\": -0.10394711927934126, \"content\": {\"parts\": [{\"text\": \"Here's a list of the objects in the image:\\n\\n* **Watering can:** A green plastic watering can with a white rose head.\\n* **Plant:** A small plant (possibly oregano) in a terracotta pot.\\n* **Terracotta pots:** Two terracotta pots, one containing the plant and another empty, stacked on top of each other.\\n* **Gardening gloves:** A pair of striped gardening gloves.\\n* **Gardening tools:** A small trowel and a hand cultivator (hoe). Both are green with black handles.\"}], \"role\": \"model\"}, \"finishReason\": \"STOP\"}], \"modelVersion\": \"gemini-3-flash-preview@default\", \"usageMetadata\": {\"candidatesTokenCount\": 110, \"promptTokenCount\": 264, \"totalTokenCount\": 374}}}\n",
"```"
]
},
diff --git a/workshops/ai-agents/ai_agents_for_engineers.ipynb b/workshops/ai-agents/ai_agents_for_engineers.ipynb
index a7e179d4e42..463b4c7c703 100644
--- a/workshops/ai-agents/ai_agents_for_engineers.ipynb
+++ b/workshops/ai-agents/ai_agents_for_engineers.ipynb
@@ -256,7 +256,9 @@
},
"outputs": [],
"source": [
+ "# fmt: off\n",
"PROJECT_ID = \"[your-project-id]\" # @param {type: \"string\", placeholder: \"[your-project-id]\", isTemplate: true}\n",
+ "# fmt: on\n",
"if not PROJECT_ID or PROJECT_ID == \"[your-project-id]\":\n",
" PROJECT_ID = str(os.environ.get(\"GOOGLE_CLOUD_PROJECT\"))\n",
"\n",
@@ -273,7 +275,7 @@
"source": [
"## Generating Essays Using a Zero-Shot Approach with the Gemini API\n",
"\n",
- "With just a single call to the `generate_content` method, users can create detailed, structured essays on any topic by leveraging state-of-the-art language models such as Gemini 2.0 or Gemini 2.0.\n",
+    "With just a single call to the `generate_content` method, users can create detailed, structured essays on any topic by leveraging state-of-the-art language models such as Gemini 3 Flash or Gemini 3 Pro.\n",
"\n",
" "
]
@@ -338,7 +340,7 @@
"outputs": [],
"source": [
"if not client.vertexai:\n",
- " print(f\"Using Gemini Developer API.\")\n",
+ " print(\"Using Gemini Developer API.\")\n",
"elif client._api_client.project:\n",
" print(\n",
" f\"Using Vertex AI with project: {client._api_client.project} in location: {client._api_client.location}\"\n",
@@ -366,7 +368,7 @@
},
"outputs": [],
"source": [
- "MODEL_ID = \"gemini-2.0-flash-001\""
+ "MODEL_ID = \"gemini-3-flash-preview\""
]
},
{
@@ -683,14 +685,15 @@
"# LangChain integrations for Gemini API in Google AI Studio and Vertex AI\n",
"from langchain_google_genai import ChatGoogleGenerativeAI\n",
"from langchain_google_vertexai import ChatVertexAI\n",
- "from langgraph.checkpoint.memory import MemorySaver\n",
- "from langgraph.graph import END, StateGraph\n",
"\n",
"# Typing utilities for data validation and schema definitions\n",
"from pydantic.v1 import BaseModel\n",
"\n",
"# Tavily client for performing web searches\n",
- "from tavily import TavilyClient"
+ "from tavily import TavilyClient\n",
+ "\n",
+ "from langgraph.checkpoint.memory import MemorySaver\n",
+ "from langgraph.graph import END, StateGraph"
]
},
{
@@ -727,7 +730,7 @@
"\n",
"# Define a schema for search queries\n",
"class Queries(BaseModel):\n",
- " \"\"\"Variants of query to search for\"\"\"\n",
+ " \"\"\"Variants of query to search for.\"\"\"\n",
"\n",
" queries: list[str]"
]
| |