From 2a263bf9c327185e7543e6f3e0a10bd8130882fa Mon Sep 17 00:00:00 2001 From: Cameron Bates Date: Tue, 22 Jul 2025 15:22:34 -0400 Subject: [PATCH 1/3] remove SDK section and move info to the top of the page --- content/develop/ai/langcache/api-examples.md | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/content/develop/ai/langcache/api-examples.md b/content/develop/ai/langcache/api-examples.md index 7c94eeba2e..db8eac605c 100644 --- a/content/develop/ai/langcache/api-examples.md +++ b/content/develop/ai/langcache/api-examples.md @@ -40,9 +40,12 @@ curl -s -X POST "https://$HOST/v1/caches/$CACHE_ID/entires/search" \ This example uses `cURL` and Linux shell scripts to demonstrate the API; you can use any standard REST client or library. {{% /info %}} -You can also use the [LangCache SDKs](#langcache-sdk) for Javascript and Python to access the API. +If your app is written in Javascript or Python, you can also use the LangCache Software Development Kits (SDKs) to access the API: -## API examples +- [LangCache SDK for Javascript](https://www.npmjs.com/package/@redis-ai/langcache) +- [LangCache SDK for Python](https://pypi.org/project/langcache/) + +## Examples ### Search LangCache for similar responses @@ -112,11 +115,4 @@ DELETE https://[host]/v1/caches/{cacheId}/entries } } ``` -## LangCache SDK - -If your app is written in Javascript or Python, you can also use the LangCache Software Development Kits (SDKs) to access the API. 
-To learn how to use the LangCache SDKs: - - [LangCache SDK for Javascript](https://www.npmjs.com/package/@redis-ai/langcache) - [LangCache SDK for Python](https://pypi.org/project/langcache/) From 1c71062f0dd1ba0a1330094b7e01e5c6fe51c5ce Mon Sep 17 00:00:00 2001 From: Cameron Bates Date: Tue, 22 Jul 2025 15:58:07 -0400 Subject: [PATCH 2/3] Add multitabbed code examples --- content/develop/ai/langcache/api-examples.md | 274 ++++++++++++++++++- 1 file changed, 267 insertions(+), 7 deletions(-) diff --git a/content/develop/ai/langcache/api-examples.md b/content/develop/ai/langcache/api-examples.md index db8eac605c..1c97d63f72 100644 --- a/content/develop/ai/langcache/api-examples.md +++ b/content/develop/ai/langcache/api-examples.md @@ -40,30 +40,76 @@ curl -s -X POST "https://$HOST/v1/caches/$CACHE_ID/entires/search" \ This example uses `cURL` and Linux shell scripts to demonstrate the API; you can use any standard REST client or library. {{% /info %}} -If your app is written in Javascript or Python, you can also use the LangCache Software Development Kits (SDKs) to access the API: +If your app is written in Python or Javascript, you can also use the LangCache Software Development Kits (SDKs) to access the API: -- [LangCache SDK for Javascript](https://www.npmjs.com/package/@redis-ai/langcache) - [LangCache SDK for Python](https://pypi.org/project/langcache/) +- [LangCache SDK for Javascript](https://www.npmjs.com/package/@redis-ai/langcache) ## Examples ### Search LangCache for similar responses -Use `POST /v1/caches/{cacheId}/entries/search` to search the cache for matching responses to a user prompt. +Use [`POST /v1/caches/{cacheId}/entries/search`]({{< relref "/develop/ai/langcache/api-reference#tag/Cache-Entries/operation/search" >}}) to search the cache for matching responses to a user prompt. 
+{{< multitabs id="search-basic" + tab1="REST API" + tab2="Python" + tab3="Javascript" >}} ```sh POST https://[host]/v1/caches/{cacheId}/entries/search { "prompt": "User prompt text" } ``` +-tab-sep- +```python +from langcache import LangCache +import os + + +with LangCache( + server_url="https://", + cache_id="", + service_key=os.getenv("LANGCACHE_SERVICE_KEY", ""), +) as lang_cache: + + res = lang_cache.search(prompt="User prompt text", similarity_threshold=0.9) + + print(res) +``` +-tab-sep- +```js +import { LangCache } from "@redis-ai/langcache"; + +const langCache = new LangCache({ + serverURL: "https://", + cacheId: "", + serviceKey: "", +}); + +async function run() { + const result = await langCache.search({ + prompt: "User prompt text", + similarityThreshold: 0.9 + }); + + console.log(result); +} + +run(); +``` +{{< /multitabs >}} Place this call in your client app right before you call your LLM's REST API. If LangCache returns a response, you can send that response back to the user instead of calling the LLM. If LangCache does not return a response, you should call your LLM's REST API to generate a new response. After you get a response from the LLM, you can [store it in LangCache](#store-a-new-response-in-langcache) for future use. -You can also scope the responses returned from LangCache by adding an `attributes` object to the request. LangCache will only return responses that match the attributes you specify. +You can also scope the responses returned from LangCache by adding an `attributes` object to the request. LangCache will only return responses that match the attributes you specify. 
+{{< multitabs id="search-attributes" + tab1="REST API" + tab2="Python" + tab3="Javascript" >}} ```sh POST https://[host]/v1/caches/{cacheId}/entries/search { @@ -73,11 +119,60 @@ POST https://[host]/v1/caches/{cacheId}/entries/search } } ``` +-tab-sep- +```python +from langcache import LangCache +import os + + +with LangCache( + server_url="https://", + cache_id="", + service_key=os.getenv("LANGCACHE_SERVICE_KEY", ""), +) as lang_cache: + + res = lang_cache.search( + prompt="User prompt text", + attributes={"customAttributeName": "customAttributeValue"}, + similarity_threshold=0.9, + ) + + print(res) +``` +-tab-sep- +```js +import { LangCache } from "@redis-ai/langcache"; + +const langCache = new LangCache({ + serverURL: "https://", + cacheId: "", + serviceKey: "", +}); + +async function run() { + const result = await langCache.search({ + prompt: "User prompt text", + similarityThreshold: 0.9, + attributes: { + "customAttributeName": "customAttributeValue", + }, + }); + + console.log(result); +} + +run(); +``` +{{< /multitabs >}} ### Store a new response in LangCache -Use `POST /v1/caches/{cacheId}/entries` to store a new response in the cache. +Use [`POST /v1/caches/{cacheId}/entries`]({{< relref "/develop/ai/langcache/api-reference#tag/Cache-Entries/operation/set" >}}) to store a new response in the cache. 
+{{< multitabs id="store-basic" + tab1="REST API" + tab2="Python" + tab3="Javascript" >}} ```sh POST https://[host]/v1/caches/{cacheId}/entries { @@ -85,11 +180,57 @@ POST https://[host]/v1/caches/{cacheId}/entries "response": "LLM response text" } ``` +-tab-sep- +```python +from langcache import LangCache +import os + + +with LangCache( + server_url="https://[host]", + cache_id="{cacheId}", + service_key=os.getenv("LANGCACHE_SERVICE_KEY", ""), +) as lang_cache: + + res = lang_cache.set( + prompt="User prompt text", + response="LLM response text", + ) + + print(res) +``` +-tab-sep- +```js +import { LangCache } from "@redis-ai/langcache"; + +const langCache = new LangCache({ + serverURL: "https://", + cacheId: "", + serviceKey: "", +}); + +async function run() { + const result = await langCache.set({ + prompt: "User prompt text", + response: "LLM response text", + }); + + console.log(result); +} + +run(); +``` +{{< /multitabs >}} Place this call in your client app after you get a response from the LLM. This will store the response in the cache for future use. You can also store the responses with custom attributes by adding an `attributes` object to the request. +{{< multitabs id="store-attributes" + tab1="REST API" + tab2="Python" + tab3="Javascript" >}} + ```sh POST https://[host]/v1/caches/{cacheId}/entries { @@ -100,12 +241,90 @@ POST https://[host]/v1/caches/{cacheId}/entries } } ``` +-tab-sep- +```python +from langcache import LangCache +import os + + +with LangCache( + server_url="https://[host]", + cache_id="{cacheId}", + service_key=os.getenv("LANGCACHE_SERVICE_KEY", ""), +) as lang_cache: + + res = lang_cache.set( + prompt="User prompt text", + response="LLM response text", + attributes={"customAttributeName": "customAttributeValue"}, + ) + + print(res) +``` +-tab-sep- + +{{< /multitabs >}} ### Delete cached responses -Use `DELETE /v1/caches/{cacheId}/entries/{entryId}` to delete a cached response from the cache. 
+Use [`DELETE /v1/caches/{cacheId}/entries/{entryId}`]({{< relref "/develop/ai/langcache/api-reference#tag/Cache-Entries/operation/delete" >}}) to delete a cached response from the cache. -You can also use `DELETE /v1/caches/{cacheId}/entries` to delete multiple cached responses at once. If you provide an `attributes` object, LangCache will delete all responses that match the attributes you specify. +{{< multitabs id="delete-entry" + tab1="REST API" + tab2="Python" + tab3="Javascript" >}} + +```sh +DELETE https://[host]/v1/caches/{cacheId}/entries/{entryId} +``` +-tab-sep- +```python +from langcache import LangCache +import os + + +with LangCache( + server_url="https://[host]", + cache_id="{cacheId}", + service_key=os.getenv("LANGCACHE_SERVICE_KEY", ""), +) as lang_cache: + + res = lang_cache.delete_by_id(entry_id="{entryId}") + + print(res) +``` +-tab-sep- +```js +import { LangCache } from "@redis-ai/langcache"; + +const langCache = new LangCache({ + serverURL: "https://", + cacheId: "", + serviceKey: "", +}); + +async function run() { + const result = await langCache.deleteById({ + entryId: "", + }); + + console.log(result); +} + +run(); +``` +{{< /multitabs >}} + +You can also use [`DELETE /v1/caches/{cacheId}/entries`]({{< relref "/develop/ai/langcache/api-reference#tag/Cache-Entries/operation/deleteQuery" >}}) to delete multiple cached responses based on the `attributes` you specify. If you specify multiple `attributes`, LangCache will delete entries that contain all given attributes. + +{{< warning >}} +If you do not specify any `attributes`, all responses in the cache will be deleted. This cannot be undone. 
+{{< /warning >}} + +{{< multitabs id="delete-attributes" + tab1="REST API" + tab2="Python" + tab3="Javascript" >}} ```sh DELETE https://[host]/v1/caches/{cacheId}/entries @@ -115,4 +334,45 @@ DELETE https://[host]/v1/caches/{cacheId}/entries } } ``` +-tab-sep- +```python +from langcache import LangCache +import os + + +with LangCache( + server_url="https://[host]", + cache_id="{cacheId}", + service_key=os.getenv("LANGCACHE_SERVICE_KEY", ""), +) as lang_cache: + + res = lang_cache.delete_query( + attributes={"customAttributeName": "customAttributeValue"}, + ) + + print(res) +``` +-tab-sep- +```js +import { LangCache } from "@redis-ai/langcache"; + +const langCache = new LangCache({ + serverURL: "https://", + cacheId: "", + serviceKey: "", +}); + +async function run() { + const result = await langCache.deleteQuery({ + attributes: { + "customAttributeName": "customAttributeValue", + }, + }); + + console.log(result); +} + +run(); +``` +{{< /multitabs >}} From 95fd241f748b9475b7c632877d439e4593d8a942 Mon Sep 17 00:00:00 2001 From: Cameron Bates Date: Tue, 22 Jul 2025 16:10:48 -0400 Subject: [PATCH 3/3] Attempt to fix visual glitches --- content/develop/ai/langcache/api-examples.md | 48 +++++++++++++++++++- 1 file changed, 47 insertions(+), 1 deletion(-) diff --git a/content/develop/ai/langcache/api-examples.md b/content/develop/ai/langcache/api-examples.md index 1c97d63f72..18d3e0e049 100644 --- a/content/develop/ai/langcache/api-examples.md +++ b/content/develop/ai/langcache/api-examples.md @@ -73,7 +73,10 @@ with LangCache( service_key=os.getenv("LANGCACHE_SERVICE_KEY", ""), ) as lang_cache: - res = lang_cache.search(prompt="User prompt text", similarity_threshold=0.9) + res = lang_cache.search( + prompt="User prompt text", + similarity_threshold=0.9 + ) print(res) ``` @@ -173,6 +176,7 @@ Use [`POST /v1/caches/{cacheId}/entries`]({{< relref "/develop/ai/langcache/api- tab1="REST API" tab2="Python" tab3="Javascript" >}} + ```sh POST 
https://[host]/v1/caches/{cacheId}/entries { @@ -180,7 +184,9 @@ POST https://[host]/v1/caches/{cacheId}/entries "response": "LLM response text" } ``` + -tab-sep- + ```python from langcache import LangCache import os @@ -199,7 +205,9 @@ with LangCache( print(res) ``` + -tab-sep- + ```js import { LangCache } from "@redis-ai/langcache"; @@ -220,6 +228,7 @@ async function run() { run(); ``` + {{< /multitabs >}} Place this call in your client app after you get a response from the LLM. This will store the response in the cache for future use. @@ -242,6 +251,7 @@ POST https://[host]/v1/caches/{cacheId}/entries } ``` -tab-sep- + ```python from langcache import LangCache import os @@ -261,8 +271,33 @@ with LangCache( print(res) ``` + -tab-sep- +```js +import { LangCache } from "@redis-ai/langcache"; + +const langCache = new LangCache({ + serverURL: "https://", + cacheId: "", + serviceKey: "", +}); + +async function run() { + const result = await langCache.set({ + prompt: "User prompt text", + response: "LLM response text", + attributes: { + "customAttributeName": "customAttributeValue", + }, + }); + + console.log(result); +} + +run(); +``` + {{< /multitabs >}} ### Delete cached responses @@ -278,6 +313,7 @@ Use [`DELETE /v1/caches/{cacheId}/entries/{entryId}`]({{< relref "/develop/ai/la DELETE https://[host]/v1/caches/{cacheId}/entries/{entryId} ``` -tab-sep- + ```python from langcache import LangCache import os @@ -293,7 +329,9 @@ with LangCache( print(res) ``` + -tab-sep- + ```js import { LangCache } from "@redis-ai/langcache"; @@ -313,6 +351,7 @@ async function run() { run(); ``` + {{< /multitabs >}} You can also use [`DELETE /v1/caches/{cacheId}/entries`]({{< relref "/develop/ai/langcache/api-reference#tag/Cache-Entries/operation/deleteQuery" >}}) to delete multiple cached responses based on the `attributes` you specify. If you specify multiple `attributes`, LangCache will delete entries that contain all given attributes. 
@@ -321,6 +360,8 @@ You can also use [`DELETE /v1/caches/{cacheId}/entries`]({{< relref "/develop/ai If you do not specify any `attributes`, all responses in the cache will be deleted. This cannot be undone. {{< /warning >}} +
+ {{< multitabs id="delete-attributes" tab1="REST API" tab2="Python" @@ -334,7 +375,9 @@ DELETE https://[host]/v1/caches/{cacheId}/entries } } ``` + -tab-sep- + ```python from langcache import LangCache import os @@ -352,7 +395,9 @@ with LangCache( print(res) ``` + -tab-sep- + ```js import { LangCache } from "@redis-ai/langcache"; @@ -374,5 +419,6 @@ async function run() { run(); ``` + {{< /multitabs >}}