@@ -77,7 +77,7 @@ def test_multimodal_dataframe(gcs_dst_bucket: str) -> None:
     df_image
     # [END bigquery_dataframes_multimodal_dataframe_image_transform]
 
-    # [START bigquery_dataframes_multimodal_dataframe_ai]
+    # [START bigquery_dataframes_multimodal_dataframe_ml_text]
     from bigframes.ml import llm
 
     gemini = llm.GeminiTextGenerator(model_name="gemini-1.5-flash-002")
@@ -89,7 +89,9 @@ def test_multimodal_dataframe(gcs_dst_bucket: str) -> None:
     df_image = df_image.head(2)
     answer = gemini.predict(df_image, prompt=["what item is it?", df_image["image"]])
     answer[["ml_generate_text_llm_result", "image"]]
+    # [END bigquery_dataframes_multimodal_dataframe_ml_text]
 
+    # [START bigquery_dataframes_multimodal_dataframe_ml_text_alt]
     # Ask different questions
     df_image["question"] = [  # type: ignore
         "what item is it?",
@@ -99,12 +101,14 @@ def test_multimodal_dataframe(gcs_dst_bucket: str) -> None:
         df_image, prompt=[df_image["question"], df_image["image"]]
     )
     answer_alt[["ml_generate_text_llm_result", "image"]]
+    # [END bigquery_dataframes_multimodal_dataframe_ml_text_alt]
 
+    # [START bigquery_dataframes_multimodal_dataframe_ml_embed]
     # Generate embeddings on images
     embed_model = llm.MultimodalEmbeddingGenerator()
     embeddings = embed_model.predict(df_image["image"])
     embeddings
-    # [END bigquery_dataframes_multimodal_dataframe_ai]
+    # [END bigquery_dataframes_multimodal_dataframe_ml_embed]
 
     # [START bigquery_dataframes_multimodal_dataframe_pdf_chunk]
     # PDF chunking
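For readers skimming the diff, below is a minimal standalone sketch of the three renamed snippet regions (ml_text, ml_text_alt is analogous with a per-row question column, and ml_embed). The llm calls mirror the lines above; the from_glob_path call and the gs:// path are assumptions standing in for how df_image is built earlier in this sample file.

# Sketch only: assumes a GCS bucket of images and the same bigframes APIs used above.
import bigframes.pandas as bpd
from bigframes.ml import llm

# Load images from Cloud Storage into a multimodal DataFrame (hypothetical path).
df_image = bpd.from_glob_path("gs://my-bucket/images/*", name="image")
df_image = df_image.head(2)

# Text generation over an image plus a prompt, as in the ml_text snippet.
gemini = llm.GeminiTextGenerator(model_name="gemini-1.5-flash-002")
answer = gemini.predict(df_image, prompt=["what item is it?", df_image["image"]])
print(answer[["ml_generate_text_llm_result", "image"]])

# Image embeddings, as in the ml_embed snippet.
embed_model = llm.MultimodalEmbeddingGenerator()
embeddings = embed_model.predict(df_image["image"])
print(embeddings)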