@@ -32,6 +32,10 @@ def test_cache_create(self):
32
32
contents = [document ],
33
33
)
34
34
print (cache )
35
+
36
+ model = genai .GenerativeModel .from_cached_content (cache )
37
+ response = model .generate_content ("Please summarize this transcript" )
38
+ print (response .text )
35
39
# [END cache_create]
36
40
cache .delete ()
37
41
@@ -92,10 +96,61 @@ def test_cache_update(self):
92
96
print (f"After update:\n { cache } " )
93
97
94
98
# Or you can update the expire_time
95
- cache .update (expire_time = datetime .now () + datetime .timedelta (minutes = 15 ))
99
+ cache .update (expire_time = datetime .datetime . now () + datetime .timedelta (minutes = 15 ))
96
100
# [END cache_update]
97
101
cache .delete ()
98
102
103
    def test_cache_create_from_name(self):
        """Sample: create a cached content, save its name, and re-attach to it later.

        Demonstrates that a CachedContent can be looked up by its resource
        name (``cache.name``) long after creation, then used to build a model
        via ``from_cached_content``. Cleans up by deleting the cache at the end.
        """
        # [START cache_create_from_name]
        document = genai.upload_file(path=media / "a11.txt")
        model_name = "gemini-1.5-flash-001"
        cache = genai.caching.CachedContent.create(
            model=model_name,
            system_instruction="You are an expert analyzing transcripts.",
            contents=[document],
        )
        cache_name = cache.name  # Save the name for later

        # Later
        cache = genai.caching.CachedContent.get(cache_name)
        apollo_model = genai.GenerativeModel.from_cached_content(cache)
        response = apollo_model.generate_content("Find a lighthearted moment from this transcript")
        print(response.text)
        # [END cache_create_from_name]
        cache.delete()
122
    def test_cache_chat(self):
        """Sample: cache an in-progress chat and resume it from the cache.

        Runs a couple of turns of an ordinary (uncached) chat, then snapshots
        the conversation by passing ``chat.history`` as the cached ``contents``.
        A new model built from that cache continues the conversation without
        resending the transcript. Cleans up by deleting the cache at the end.
        """
        # [START cache_chat]
        model_name = "gemini-1.5-flash-001"
        system_instruction = "You are an expert analyzing transcripts."

        model = genai.GenerativeModel(model_name=model_name, system_instruction=system_instruction)
        chat = model.start_chat()
        document = genai.upload_file(path=media / "a11.txt")
        response = chat.send_message(["Hi, could you summarize this transcript?", document])
        print("\n\nmodel:  ", response.text)
        response = chat.send_message(
            ["Okay, could you tell me more about the trans-lunar injection"]
        )
        print("\n\nmodel:  ", response.text)

        # To cache the conversation so far, pass the chat history as the list of "contents".
        cache = genai.caching.CachedContent.create(
            model=model_name,
            system_instruction=system_instruction,
            contents=chat.history,
        )
        model = genai.GenerativeModel.from_cached_content(cached_content=cache)

        # Continue the chat where you left off.
        chat = model.start_chat()
        response = chat.send_message(
            "I didn't understand that last part, could you explain it in simpler language?"
        )
        print("\n\nmodel:  ", response.text)
        # [END cache_chat]
        cache.delete()
99
154
100
155
# Run the sample tests with absl's test runner when executed as a script.
if __name__ == "__main__":
    absltest.main()
0 commit comments