
Commit c2ba9f5

chore: split Multimodal snippets (#1683)
1 parent 93e88da commit c2ba9f5

File tree

1 file changed: +6 −2 lines


samples/snippets/multimodal_test.py

Lines changed: 6 additions & 2 deletions
@@ -77,7 +77,7 @@ def test_multimodal_dataframe(gcs_dst_bucket: str) -> None:
     df_image
     # [END bigquery_dataframes_multimodal_dataframe_image_transform]
 
-    # [START bigquery_dataframes_multimodal_dataframe_ai]
+    # [START bigquery_dataframes_multimodal_dataframe_ml_text]
     from bigframes.ml import llm
 
     gemini = llm.GeminiTextGenerator(model_name="gemini-1.5-flash-002")
@@ -89,7 +89,9 @@ def test_multimodal_dataframe(gcs_dst_bucket: str) -> None:
     df_image = df_image.head(2)
     answer = gemini.predict(df_image, prompt=["what item is it?", df_image["image"]])
     answer[["ml_generate_text_llm_result", "image"]]
+    # [END bigquery_dataframes_multimodal_dataframe_ml_text]
 
+    # [START bigquery_dataframes_multimodal_dataframe_ml_text_alt]
     # Ask different questions
     df_image["question"] = [  # type: ignore
         "what item is it?",
@@ -99,12 +101,14 @@ def test_multimodal_dataframe(gcs_dst_bucket: str) -> None:
         df_image, prompt=[df_image["question"], df_image["image"]]
     )
     answer_alt[["ml_generate_text_llm_result", "image"]]
+    # [END bigquery_dataframes_multimodal_dataframe_ml_text_alt]
 
+    # [START bigquery_dataframes_multimodal_dataframe_ml_embed]
     # Generate embeddings on images
     embed_model = llm.MultimodalEmbeddingGenerator()
     embeddings = embed_model.predict(df_image["image"])
     embeddings
-    # [END bigquery_dataframes_multimodal_dataframe_ai]
+    # [END bigquery_dataframes_multimodal_dataframe_ml_embed]
 
     # [START bigquery_dataframes_multimodal_dataframe_pdf_chunk]
     # PDF chunking
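
For context, a minimal, self-contained sketch of how the three regions read once split. It assumes df_image is a multimodal DataFrame built earlier in the test from image blobs in GCS (for example via bigframes.pandas.from_glob_path, which is not part of this diff); the bucket path and the second question below are illustrative placeholders, not values taken from this commit.

import bigframes.pandas as bpd
from bigframes.ml import llm

# Assumption: the test creates df_image earlier from a GCS glob; this path is illustrative only.
df_image = bpd.from_glob_path(
    "gs://your-bucket/images/*", name="image"
)
df_image = df_image.head(2)

# ml_text region: one shared prompt applied to every row's image.
gemini = llm.GeminiTextGenerator(model_name="gemini-1.5-flash-002")
answer = gemini.predict(df_image, prompt=["what item is it?", df_image["image"]])
answer[["ml_generate_text_llm_result", "image"]]

# ml_text_alt region: a per-row question column instead of a shared prompt.
df_image["question"] = [
    "what item is it?",
    "describe the image",  # placeholder second question, not from this commit
]
answer_alt = gemini.predict(df_image, prompt=[df_image["question"], df_image["image"]])
answer_alt[["ml_generate_text_llm_result", "image"]]

# ml_embed region: multimodal embeddings over the image column.
embed_model = llm.MultimodalEmbeddingGenerator()
embeddings = embed_model.predict(df_image["image"])
embeddings

Splitting the single ..._ai region into ml_text, ml_text_alt, and ml_embed tags lets each example be pulled into the documentation independently.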
