@@ -24,16 +24,14 @@ class UnitTests(absltest.TestCase):
24
24
def test_tokens_context_window(self):
    # [START tokens_context_window]
    model_info = genai.get_model("models/gemini-1.0-pro-001")

    # Returns the "context window" for the model,
    # which is the combined input and output token limits.
    print(f"{model_info.input_token_limit=}")
    print(f"{model_info.output_token_limit=}")
    # ( input_token_limit=30720, output_token_limit=2048 )
    # [END tokens_context_window]
35
def test_tokens_text_only (self ):
38
36
# [START tokens_text_only]
39
37
model = genai .GenerativeModel ("models/gemini-1.5-flash" )
@@ -42,22 +40,18 @@ def test_tokens_text_only(self):
42
40
43
41
# Call `count_tokens` to get the input token count (`total_tokens`).
44
42
print ("total_tokens: " , model .count_tokens (prompt ))
43
+ # ( total_tokens: 10 )
45
44
46
45
response = model .generate_content (prompt )
47
46
48
- # Use `usage_metadata` to get both input and output token counts
49
- # (`prompt_token_count` and `candidates_token_count`, respectively).
47
+ # On the response for `generate_content`, use `usage_metadata`
48
+ # to get separate input and output token counts
49
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
50
+ # as well as the combined token count (`total_token_count`).
50
51
print (response .usage_metadata )
52
+ # ( prompt_token_count: 11, candidates_token_count: 73, total_token_count: 84 )
51
53
# [END tokens_text_only]
52
54
53
- # [START tokens_text_only_return]
54
- # total_tokens: total_tokens: 10
55
- #
56
- # prompt_token_count: 11
57
- # candidates_token_count: 73
58
- # total_token_count: 84
59
- # [END tokens_text_only_return]
60
-
61
55
def test_tokens_chat (self ):
62
56
# [START tokens_chat]
63
57
model = genai .GenerativeModel ("models/gemini-1.5-flash" )
@@ -70,30 +64,26 @@ def test_tokens_chat(self):
70
64
)
71
65
# Call `count_tokens` to get the input token count (`total_tokens`).
72
66
print (model .count_tokens (chat .history ))
67
+ # ( total_tokens: 10 )
73
68
74
69
response = chat .send_message (
75
70
"In one sentence, explain how a computer works to a young child."
76
71
)
77
- # Use `usage_metadata` to get both input and output token counts
78
- # (`prompt_token_count` and `candidates_token_count`, respectively).
72
+
73
+ # On the response for `send_message`, use `usage_metadata`
74
+ # to get separate input and output token counts
75
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
76
+ # as well as the combined token count (`total_token_count`).
79
77
print (response .usage_metadata )
78
+ # ( prompt_token_count: 25, candidates_token_count: 21, total_token_count: 46 )
80
79
81
- # TODO add comment...
82
80
from google .generativeai .types .content_types import to_contents
83
81
82
+ # You can call `count_tokens` on the combined history and content of the next turn.
84
83
print (model .count_tokens (chat .history + to_contents ("What is the meaning of life?" )))
84
+ # ( total_tokens: 56 )
85
85
# [END tokens_chat]
86
86
87
- # [START tokens_chat_return]
88
- # total_tokens: 10
89
- #
90
- # prompt_token_count: 25
91
- # candidates_token_count: 21
92
- # total_token_count: 46
93
- #
94
- # total_tokens: 56
95
- # [END tokens_chat_return]
96
-
97
87
def test_tokens_multimodal_image_inline (self ):
98
88
# [START tokens_multimodal_image_inline]
99
89
import PIL .Image
@@ -103,52 +93,47 @@ def test_tokens_multimodal_image_inline(self):
103
93
prompt = "Tell me about this image"
104
94
your_image_file = PIL .Image .open ("image.jpg" )
105
95
106
- # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`).
107
- # An image's display size does not affect its token count.
108
- # Optionally, you can call `count_tokens` for the prompt and file separately.
96
+ # Call `count_tokens` to get the input token count
97
+ # of the combined text and file (`total_tokens`).
98
+ # An image's display or file size does not affect its token count.
99
+ # Optionally, you can call `count_tokens` for the text and file separately.
109
100
print (model .count_tokens ([prompt , your_image_file ]))
101
+ # ( total_tokens: 263 )
110
102
111
103
response = model .generate_content ([prompt , your_image_file ])
112
- # Use `usage_metadata` to get both input and output token counts
113
- # (`prompt_token_count` and `candidates_token_count`, respectively).
104
+
105
+ # On the response for `generate_content`, use `usage_metadata`
106
+ # to get separate input and output token counts
107
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
108
+ # as well as the combined token count (`total_token_count`).
114
109
print (response .usage_metadata )
110
+ # ( prompt_token_count: 264, candidates_token_count: 80, total_token_count: 345 )
115
111
# [END tokens_multimodal_image_inline]
116
112
117
- # [START tokens_multimodal_image_inline_return]
118
- # total_tokens: 263
119
- #
120
- # prompt_token_count: 264
121
- # candidates_token_count: 81
122
- # total_token_count: 345
123
- # [END tokens_multimodal_image_inline_return]
124
-
125
113
def test_tokens_multimodal_image_file_api(self):
    # [START tokens_multimodal_image_file_api]
    model = genai.GenerativeModel("models/gemini-1.5-flash")

    prompt = "Tell me about this image"
    your_image_file = genai.upload_file(path="image.jpg")

    # Call `count_tokens` to get the input token count
    # of the combined text and file (`total_tokens`).
    # An image's display or file size does not affect its token count.
    # Optionally, you can call `count_tokens` for the text and file separately.
    print(model.count_tokens([prompt, your_image_file]))
    # ( total_tokens: 263 )

    response = model.generate_content([prompt, your_image_file])
    response.text
    # On the response for `generate_content`, use `usage_metadata`
    # to get separate input and output token counts
    # (`prompt_token_count` and `candidates_token_count`, respectively),
    # as well as the combined token count (`total_token_count`).
    print(response.usage_metadata)
    # ( prompt_token_count: 264, candidates_token_count: 80, total_token_count: 345 )
    # [END tokens_multimodal_image_file_api]
152
137
def test_tokens_multimodal_video_audio_file_api (self ):
153
138
# [START tokens_multimodal_video_audio_file_api]
154
139
import time
@@ -164,28 +149,24 @@ def test_tokens_multimodal_video_audio_file_api(self):
164
149
time .sleep (5 )
165
150
your_file = genai .get_file (your_file .name )
166
151
167
- # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`).
152
+ # Call `count_tokens` to get the input token count
153
+ # of the combined text and video/audio file (`total_tokens`).
168
154
# A video or audio file is converted to tokens at a fixed rate of tokens per second.
169
- # Optionally, you can call `count_tokens` for the prompt and file separately.
155
+ # Optionally, you can call `count_tokens` for the text and file separately.
170
156
print (model .count_tokens ([prompt , your_file ]))
157
+ # ( total_tokens: 300 )
171
158
172
159
response = model .generate_content ([prompt , your_file ])
173
160
174
- # Use `usage_metadata` to get both input and output token counts
175
- # (`prompt_token_count` and `candidates_token_count`, respectively).
161
+ # On the response for `generate_content`, use `usage_metadata`
162
+ # to get separate input and output token counts
163
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
164
+ # as well as the combined token count (`total_token_count`).
176
165
print (response .usage_metadata )
166
+ # ( prompt_token_count: 301, candidates_token_count: 60, total_token_count: 361 )
177
167
178
168
# [END tokens_multimodal_video_audio_file_api]
179
169
180
- # [START tokens_multimodal_video_audio_file_api_return]
181
- # processing video...
182
- # total_tokens: 300
183
- #
184
- # prompt_token_count: 301
185
- # candidates_token_count: 60
186
- # total_token_count: 361
187
- # [END tokens_multimodal_video_audio_file_api_return]
188
-
189
170
def test_tokens_cached_content (self ):
190
171
# [START tokens_cached_content]
191
172
import time
@@ -196,71 +177,63 @@ def test_tokens_cached_content(self):
196
177
197
178
cache = genai .caching .CachedContent .create (
198
179
model = "models/gemini-1.5-flash-001" ,
199
- # You could set the system_instruction and tools
180
+ # You can set the system_instruction and tools
200
181
system_instruction = None ,
201
182
tools = None ,
202
183
contents = ["Here the Apollo 11 transcript:" , your_file ],
203
184
)
204
185
205
186
model = genai .GenerativeModel .from_cached_content (cache )
206
187
207
- # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`).
208
- # A video or audio file is converted to tokens at a fixed rate of tokens per second.
209
- # Optionally, you can call `count_tokens` for the prompt and file separately.
210
188
prompt = "Please give a short summary of this file."
189
+
190
+ # Call `count_tokens` to get input token count
191
+ # of the combined text and file (`total_tokens`).
192
+ # A video or audio file is converted to tokens at a fixed rate of tokens per second.
193
+ # Optionally, you can call `count_tokens` for the text and file separately.
211
194
print (model .count_tokens (prompt ))
195
+ # ( total_tokens: 9 )
212
196
213
197
response = model .generate_content (prompt )
214
- # Use `usage_metadata` to get both input and output token counts
215
- # (`prompt_token_count` and `candidates_token_count`, respectively).
198
+
199
+ # On the response for `generate_content`, use `usage_metadata`
200
+ # to get separate input and output token counts
201
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
202
+ # as well as the cached content token count and the combined total token count.
216
203
print (response .usage_metadata )
204
+ # ( prompt_token_count: 323393, cached_content_token_count: 323383, candidates_token_count: 64)
205
+ # ( total_token_count: 323457 )
217
206
218
207
cache .delete ()
219
208
# [END tokens_cached_content]
220
209
221
- # [START tokens_cached_content_return]
222
- # total_tokens: 9
223
- #
224
- # prompt_token_count: 323393
225
- # cached_content_token_count: 323383
226
- # candidates_token_count: 64
227
- # total_token_count: 323457
228
- # [END tokens_cached_content_return]
229
-
230
210
def test_tokens_system_instruction(self):
    # [START tokens_system_instruction]
    model = genai.GenerativeModel(model_name="gemini-1.5-flash")

    prompt = "The quick brown fox jumps over the lazy dog."

    print(model.count_tokens(prompt))
    # total_tokens: 10

    model = genai.GenerativeModel(
        model_name="gemini-1.5-flash", system_instruction="You are a cat. Your name is Neko."
    )

    # The total token count includes everything sent to the `generate_content` request.
    # When you use system instructions, the total token count increases.
    print(model.count_tokens(prompt))
    # ( total_tokens: 21 )
    # [END tokens_system_instruction]
253
229
def test_tokens_tools (self ):
254
230
# [START tokens_tools]
255
231
model = genai .GenerativeModel (model_name = "gemini-1.5-flash" )
256
232
257
- # The total token count includes everything sent to the generate_content request.
258
- print (
259
- model .count_tokens (
260
- "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
261
- )
262
- )
263
- # total_tokens: 10
233
+ prompt = "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
234
+
235
+ print (model .count_tokens (prompt ))
236
+ # ( total_tokens: 22 )
264
237
265
238
def add (a : float , b : float ):
266
239
"""returns a + b."""
@@ -282,19 +255,12 @@ def divide(a: float, b: float):
282
255
"models/gemini-1.5-flash-001" , tools = [add , subtract , multiply , divide ]
283
256
)
284
257
285
- print (
286
- model .count_tokens (
287
- "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
288
- )
289
- )
258
+ # The total token count includes everything sent to the `generate_content` request.
259
+ # When you use tools (like function calling), the total token count increases.
260
+ print (model .count_tokens (prompt ))
261
+ # ( total_tokens: 206 )
290
262
# [END tokens_tools]
291
263
292
- # [START tokens_tools_return]
293
- # total_tokens: 22
294
- #
295
- # total_tokens: 206
296
- # [END tokens_tools_return]
297
-
298
264
299
265
if __name__ == "__main__":
    absltest.main()
0 commit comments