Skip to content

Commit 23ce3bd

Browse files
Add cache_from_name and cache_chat (#413)

* Add cache_from_name and cache_chat
* Resolve comments.
  Change-Id: I74aa097499fc426e4e39327c2ffcdcf3f68534dd
* update cache sample
  Change-Id: I1b261cdcca6e564471bb8ca5a59e9138d4a5f253

---------

Co-authored-by: Mark Daoust <[email protected]>
1 parent e1c31d7 commit 23ce3bd

File tree

1 file changed: +56 −1 lines changed

samples/cache.py

Lines changed: 56 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,10 @@ def test_cache_create(self):
3232
contents=[document],
3333
)
3434
print(cache)
35+
36+
model = genai.GenerativeModel.from_cached_content(cache)
37+
response = model.generate_content("Please summarize this transcript")
38+
print(response.text)
3539
# [END cache_create]
3640
cache.delete()
3741

@@ -92,10 +96,61 @@ def test_cache_update(self):
9296
print(f"After update:\n {cache}")
9397

9498
# Or you can update the expire_time
95-
cache.update(expire_time=datetime.now() + datetime.timedelta(minutes=15))
99+
cache.update(expire_time=datetime.datetime.now() + datetime.timedelta(minutes=15))
96100
# [END cache_update]
97101
cache.delete()
98102

103+
def test_cache_create_from_name(self):
104+
# [START cache_create_from_name]
105+
document = genai.upload_file(path=media / "a11.txt")
106+
model_name = "gemini-1.5-flash-001"
107+
cache = genai.caching.CachedContent.create(
108+
model=model_name,
109+
system_instruction="You are an expert analyzing transcripts.",
110+
contents=[document],
111+
)
112+
cache_name = cache.name # Save the name for later
113+
114+
# Later
115+
cache = genai.caching.CachedContent.get(cache_name)
116+
apollo_model = genai.GenerativeModel.from_cached_content(cache)
117+
response = apollo_model.generate_content("Find a lighthearted moment from this transcript")
118+
print(response.text)
119+
# [END cache_create_from_name]
120+
cache.delete()
121+
122+
def test_cache_chat(self):
123+
# [START cache_chat]
124+
model_name = "gemini-1.5-flash-001"
125+
system_instruction = "You are an expert analyzing transcripts."
126+
127+
model = genai.GenerativeModel(model_name=model_name, system_instruction=system_instruction)
128+
chat = model.start_chat()
129+
document = genai.upload_file(path=media / "a11.txt")
130+
response = chat.send_message(["Hi, could you summarize this transcript?", document])
131+
print("\n\nmodel: ", response.text)
132+
response = chat.send_message(
133+
["Okay, could you tell me more about the trans-lunar injection"]
134+
)
135+
print("\n\nmodel: ", response.text)
136+
137+
# To cache the conversation so far, pass the chat history as the list of "contents".
138+
cache = genai.caching.CachedContent.create(
139+
model=model_name,
140+
system_instruction=system_instruction,
141+
contents=chat.history,
142+
)
143+
model = genai.GenerativeModel.from_cached_content(cached_content=cache)
144+
145+
# Continue the chat where you left off.
146+
chat = model.start_chat()
147+
response = chat.send_message(
148+
"I didn't understand that last part, could you explain it in simpler language?"
149+
)
150+
print("\n\nmodel: ", response.text)
151+
# [END cache_chat]
152+
cache.delete()
153+
99154

100155
if __name__ == "__main__":
101156
absltest.main()

0 commit comments

Comments (0)