@@ -73,7 +73,10 @@ with LangCache(
7373 service_key = os.getenv("LANGCACHE_SERVICE_KEY", ""),
7474) as lang_cache:
7575
76- res = lang_cache.search(prompt="User prompt text", similarity_threshold=0.9)
76+ res = lang_cache.search(
77+ prompt="User prompt text",
78+ similarity_threshold=0.9
79+ )
7780
7881 print(res)
7982```
@@ -173,14 +176,17 @@ Use [`POST /v1/caches/{cacheId}/entries`]({{< relref "/develop/ai/langcache/api-
173176 tab1="REST API"
174177 tab2="Python"
175178 tab3="Javascript" >}}
179+
176180``` sh
177181POST https://[host]/v1/caches/{cacheId}/entries
178182{
179183 "prompt": "User prompt text",
180184 "response": "LLM response text"
181185}
182186```
187+
183188-tab-sep-
189+
184190``` python
185191from langcache import LangCache
186192import os
@@ -199,7 +205,9 @@ with LangCache(
199205
200206 print(res)
201207```
208+
202209-tab-sep-
210+
203211``` js
204212import { LangCache } from "@redis-ai/langcache";
205213
@@ -220,6 +228,7 @@ async function run() {
220228
221229run();
222230```
231+
223232{{< /multitabs >}}
224233
225234Place this call in your client app after you get a response from the LLM. This will store the response in the cache for future use.
@@ -242,6 +251,7 @@ POST https://[host]/v1/caches/{cacheId}/entries
242251}
243252```
244253-tab-sep-
254+
245255``` python
246256from langcache import LangCache
247257import os
@@ -261,8 +271,33 @@ with LangCache(
261271
262272 print(res)
263273```
274+
264275-tab-sep-
265276
277+ ``` js
278+ import { LangCache } from "@redis-ai/langcache";
279+
280+ const langCache = new LangCache({
281+   serverURL: "https://<host>",
282+   cacheId: "<cacheId>",
283+   serviceKey: "<LANGCACHE_SERVICE_KEY>",
284+ });
285+
286+ async function run() {
287+   const result = await langCache.set({
288+     prompt: "User prompt text",
289+     response: "LLM response text",
290+     attributes: {
291+       "customAttributeName": "customAttributeValue",
292+     },
293+   });
294+
295+   console.log(result);
296+ }
297+
298+ run();
299+ ```
300+
266301{{< /multitabs >}}
267302
268303### Delete cached responses
@@ -278,6 +313,7 @@ Use [`DELETE /v1/caches/{cacheId}/entries/{entryId}`]({{< relref "/develop/ai/la
278313DELETE https://[host]/v1/caches/{cacheId}/entries/{entryId}
279314```
280315-tab-sep-
316+
281317``` python
282318from langcache import LangCache
283319import os
@@ -293,7 +329,9 @@ with LangCache(
293329
294330 print(res)
295331```
332+
296333-tab-sep-
334+
297335``` js
299337import { LangCache } from "@redis-ai/langcache";
299337
@@ -313,6 +351,7 @@ async function run() {
313351
314352run();
315353```
354+
316355{{< /multitabs >}}
317356
316355You can also use [`DELETE /v1/caches/{cacheId}/entries`]({{< relref "/develop/ai/langcache/api-reference#tag/Cache-Entries/operation/deleteQuery" >}}) to delete multiple cached responses based on the `attributes` you specify. If you specify multiple `attributes`, LangCache will delete entries that contain all given attributes.
@@ -321,6 +360,8 @@ You can also use [`DELETE /v1/caches/{cacheId}/entries`]({{< relref "/develop/ai
321360If you do not specify any `attributes`, all responses in the cache will be deleted. This cannot be undone.
322361{{< /warning >}}
323362
363+ <br />
364+
324365{{< multitabs id="delete-attributes"
325366 tab1="REST API"
326367 tab2="Python"
@@ -334,7 +375,9 @@ DELETE https://[host]/v1/caches/{cacheId}/entries
334375 }
335376}
336377```
378+
337379-tab-sep-
380+
338381``` python
339382from langcache import LangCache
340383import os
@@ -352,7 +395,9 @@ with LangCache(
352395
353396 print(res)
354397```
398+
355399-tab-sep-
400+
356401``` js
357402import { LangCache } from "@redis-ai/langcache";
358403
@@ -374,5 +419,6 @@ async function run() {
374419
375420run();
376421```
422+
377423{{< /multitabs >}}
378424
0 commit comments