Skip to content

Commit b6bd3ca

Browse files
Count tokens samples (#403)
* Skeleton for samples of count tokens
* Add skeleton for count_tokens
* Add media samples and update count_tokens samples. Still need to work on caching
* Updated all count_tokens functions
* Updated all count_tokens functions
* Move cache.delete() after comment
* Update count tokens samples
1 parent 1ffbcb3 commit b6bd3ca

File tree

3 files changed

+121
-0
lines changed

3 files changed

+121
-0
lines changed

samples/count_tokens.py

Lines changed: 121 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,121 @@
1+
# -*- coding: utf-8 -*-
2+
# Copyright 2023 Google LLC
3+
#
4+
# Licensed under the Apache License, Version 2.0 (the "License");
5+
# you may not use this file except in compliance with the License.
6+
# You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
from absl.testing import absltest
16+
17+
import google.generativeai as genai
18+
import pathlib
19+
20+
# Directory containing the sample media files (organ.jpg, sample.mp3, a11.txt):
# the "third_party" folder one level above this file's directory.
media = pathlib.Path(__file__).parent.parent / "third_party"
class UnitTests(absltest.TestCase):
    """Runnable documentation samples for ``GenerativeModel.count_tokens``.

    Each method is wrapped in ``[START ...]``/``[END ...]`` region markers so
    the sample bodies can be extracted into the published docs; keep the code
    inside those markers self-contained.
    """

    def test_tokens_text_only(self):
        """Count tokens for a plain-text prompt."""
        # [START tokens_text_only]
        model = genai.GenerativeModel("models/gemini-1.5-flash")
        print(model.count_tokens("The quick brown fox jumps over the lazy dog."))
        # [END tokens_text_only]

    def test_tokens_chat(self):
        """Count tokens for a chat history, with and without the next user turn."""
        # [START tokens_chat]
        model = genai.GenerativeModel("models/gemini-1.5-flash")
        chat = model.start_chat(
            history=[
                {"role": "user", "parts": "Hi, my name is Bob."},
                {"role": "model", "parts": "Hi Bob!"},
            ]
        )
        model.count_tokens(chat.history)

        from google.generativeai.types.content_types import to_contents

        model.count_tokens(chat.history + to_contents("What is the meaning of life?"))
        # [END tokens_chat]

    def test_tokens_multimodal_image_inline(self):
        """Count tokens for a prompt that inlines a local image."""
        # [START tokens_multimodal_image_inline]
        import PIL

        model = genai.GenerativeModel("models/gemini-1.5-flash")
        organ = PIL.Image.open(media / "organ.jpg")
        print(model.count_tokens(["Tell me about this instrument", organ]))
        # [END tokens_multimodal_image_inline]

    def test_tokens_multimodal_image_file_api(self):
        """Count tokens for a prompt that references an image via the File API."""
        # [START tokens_multimodal_image_file_api]
        model = genai.GenerativeModel("models/gemini-1.5-flash")
        organ_upload = genai.upload_file(media / "organ.jpg")
        print(model.count_tokens(["Tell me about this instrument", organ_upload]))
        # [END tokens_multimodal_image_file_api]

    def test_tokens_video_audio_file_api(self):
        """Count tokens for an audio file uploaded through the File API."""
        # [START tokens_video_audio_file_api]
        model = genai.GenerativeModel("models/gemini-1.5-flash")
        audio_upload = genai.upload_file(media / "sample.mp3")
        print(model.count_tokens(audio_upload))
        # [END tokens_video_audio_file_api]

    def test_tokens_cached_content(self):
        """Count tokens when the prompt content lives in an explicit cache."""
        # [START tokens_cached_content]
        document = genai.upload_file(path=media / "a11.txt")
        model_name = "gemini-1.5-flash-001"
        cache = genai.caching.CachedContent.create(
            model=model_name,
            contents=[document],
        )
        print(genai.GenerativeModel().count_tokens(cache))
        # [END tokens_cached_content]
        # Deleted outside the region so the cleanup does not appear in the docs.
        cache.delete()  # Clear

    def test_tokens_system_instruction(self):
        """Count tokens for a document plus a model-level system instruction."""
        # [START tokens_system_instruction]
        document = genai.upload_file(path=media / "a11.txt")
        model = genai.GenerativeModel(
            "models/gemini-1.5-flash-001",
            system_instruction="You are an expert analyzing transcripts. Give a summary of this document.",
        )
        print(model.count_tokens(document))
        # [END tokens_system_instruction]

    def test_tokens_tools(self):
        """Count tokens for a prompt when tool (function) declarations are attached."""
        # [START tokens_tools]
        def add(a: float, b: float):
            """returns a + b."""
            return a + b

        def subtract(a: float, b: float):
            """returns a - b."""
            return a - b

        def multiply(a: float, b: float):
            """returns a * b."""
            return a * b

        def divide(a: float, b: float):
            """returns a / b."""
            return a / b

        model = genai.GenerativeModel(
            "models/gemini-1.5-flash-001",
            tools=[add, subtract, multiply, divide],
        )

        print(model.count_tokens("I have 57 cats, each owns 44 mittens, how many mittens is that in total?"))
        # [END tokens_tools]
# Script entry point: run every sample as an absltest case.
if __name__ == "__main__":
    absltest.main()

third_party/organ.jpg

375 KB
Loading

third_party/sample.mp3

39.8 MB
Binary file not shown.

0 commit comments

Comments
 (0)