@@ -57,14 +57,8 @@ npm install @xenova/transformers

In your JavaScript source file, import the required classes:

-```js
-import * as transformers from '@xenova/transformers';
-import {
-    VectorAlgorithms,
-    createClient,
-    SCHEMA_FIELD_TYPE,
-} from 'redis';
-```
+{{< clients-example set="home_query_vec" step="import" lang_filter="Node.js" >}}
+{{< /clients-example >}}

The `@xenova/transformers` module handles embedding models. This example uses the
[`all-distilroberta-v1`](https://huggingface.co/sentence-transformers/all-distilroberta-v1)
@@ -78,31 +72,15 @@ The `pipe` function generates embeddings. The `pipeOptions` object specifies how
[`all-distilroberta-v1`](https://huggingface.co/sentence-transformers/all-distilroberta-v1)
documentation for details):

-```js
-let pipe = await transformers.pipeline(
-    'feature-extraction', 'Xenova/all-distilroberta-v1'
-);
-
-const pipeOptions = {
-    pooling: 'mean',
-    normalize: true,
-};
-```
+{{< clients-example set="home_query_vec" step="pipeline" lang_filter="Node.js" >}}
+{{< /clients-example >}}
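
A minimal sketch of how the pipeline created in this step can be used, assuming the `pipe` and `pipeOptions` names from the example above:

```js
// Sketch only: generate an embedding for one sentence, using the
// `pipe` function and `pipeOptions` object defined in the step above.
const output = await pipe('That is a very happy person', pipeOptions);

// With mean pooling, `output.data` is a Float32Array of 768 values,
// matching the DIM declared in the index schema below.
console.log(output.data.length); // 768
```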

## Create the index

First, connect to Redis and remove any existing index named `vector_idx`:

-```js
-const client = createClient({url: 'redis://localhost:6379'});
-await client.connect();
-
-try {
-    await client.ft.dropIndex('vector_idx');
-} catch (e) {
-    // Index doesn't exist, which is fine
-}
-```
+{{< clients-example set="home_query_vec" step="connect" lang_filter="Node.js" >}}
+{{< /clients-example >}}

Next, create the index with the following schema:
- `content`: Text field for the content to index
@@ -117,26 +95,8 @@ Next, create the index with the following schema:
  - Float32 values
  - 768 dimensions (matching the embedding model)

-```js
-await client.ft.create('vector_idx', {
-    'content': {
-        type: SchemaFieldTypes.TEXT,
-    },
-    'genre': {
-        type: SchemaFieldTypes.TAG,
-    },
-    'embedding': {
-        type: SchemaFieldTypes.VECTOR,
-        TYPE: 'FLOAT32',
-        ALGORITHM: VectorAlgorithms.HNSW,
-        DISTANCE_METRIC: 'L2',
-        DIM: 768,
-    }
-}, {
-    ON: 'HASH',
-    PREFIX: 'doc:'
-});
-```
+{{< clients-example set="home_query_vec" step="create_index" lang_filter="Node.js" >}}
+{{< /clients-example >}}

## Add data

@@ -149,40 +109,8 @@ For each document:

Use `Promise.all()` to batch the commands and reduce network round trips:

-```js
-const sentence1 = 'That is a very happy person';
-const doc1 = {
-    'content': sentence1,
-    'genre': 'persons',
-    'embedding': Buffer.from(
-        (await pipe(sentence1, pipeOptions)).data.buffer
-    ),
-};
-
-const sentence2 = 'That is a happy dog';
-const doc2 = {
-    'content': sentence2,
-    'genre': 'pets',
-    'embedding': Buffer.from(
-        (await pipe(sentence2, pipeOptions)).data.buffer
-    )
-};
-
-const sentence3 = 'Today is a sunny day';
-const doc3 = {
-    'content': sentence3,
-    'genre': 'weather',
-    'embedding': Buffer.from(
-        (await pipe(sentence3, pipeOptions)).data.buffer
-    )
-};
-
-await Promise.all([
-    client.hSet('doc:1', doc1),
-    client.hSet('doc:2', doc2),
-    client.hSet('doc:3', doc3)
-]);
-```
+{{< clients-example set="home_query_vec" step="add_data" lang_filter="Node.js" >}}
+{{< /clients-example >}}
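
The embeddings for hash documents are stored as raw bytes. A minimal sketch of that conversion in isolation, assuming the `pipe` and `pipeOptions` names from the example (the variable names here are illustrative):

```js
// Sketch only: convert a Float32Array embedding into the raw bytes
// stored in the hash field.
const vector = (await pipe('That is a very happy person', pipeOptions)).data;

// Buffer.from(vector.buffer) wraps the underlying ArrayBuffer, so the
// field holds the raw 4-byte floats expected by the FLOAT32 vector type.
const blob = Buffer.from(vector.buffer);
console.log(blob.length); // 768 floats x 4 bytes = 3072 bytes
```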

## Run a query

@@ -195,27 +123,8 @@ The query returns an array of document objects. Each object contains:
- `id`: The document's key
- `value`: An object with fields specified in the `RETURN` option

-```js
-const similar = await client.ft.search(
-    'vector_idx',
-    '*=>[KNN 3 @embedding $B AS score]',
-    {
-        'PARAMS': {
-            B: Buffer.from(
-                (await pipe('That is a happy person', pipeOptions)).data.buffer
-            ),
-        },
-        'RETURN': ['score', 'content'],
-        'DIALECT': '2'
-    },
-);
-
-for (const doc of similar.documents) {
-    console.log(`${doc.id}: '${doc.value.content}', Score: ${doc.value.score}`);
-}
-
-await client.quit();
-```
+{{< clients-example set="home_query_vec" step="query" lang_filter="Node.js" >}}
+{{< /clients-example >}}

The first run may take longer as it downloads the model data. The output shows results ordered by score (vector distance), with lower scores indicating greater similarity:

@@ -237,78 +146,18 @@ JSON documents support richer data modeling with nested fields. Key differences

Create the index with path aliases:

-```js
-await client.ft.create('vector_json_idx', {
-    '$.content': {
-        type: SchemaFieldTypes.TEXT,
-        AS: 'content',
-    },
-    '$.genre': {
-        type: SchemaFieldTypes.TAG,
-        AS: 'genre',
-    },
-    '$.embedding': {
-        type: SchemaFieldTypes.VECTOR,
-        TYPE: 'FLOAT32',
-        ALGORITHM: VectorAlgorithms.HNSW,
-        DISTANCE_METRIC: 'L2',
-        DIM: 768,
-        AS: 'embedding',
-    }
-}, {
-    ON: 'JSON',
-    PREFIX: 'jdoc:'
-});
-```
+{{< clients-example set="home_query_vec" step="json_index" lang_filter="Node.js" >}}
+{{< /clients-example >}}

Add data using `json.set()`. Convert the `Float32Array` to a standard JavaScript array using the spread operator:

-```js
-const jSentence1 = 'That is a very happy person';
-const jdoc1 = {
-    'content': jSentence1,
-    'genre': 'persons',
-    'embedding': [...(await pipe(jSentence1, pipeOptions)).data],
-};
-
-const jSentence2 = 'That is a happy dog';
-const jdoc2 = {
-    'content': jSentence2,
-    'genre': 'pets',
-    'embedding': [...(await pipe(jSentence2, pipeOptions)).data],
-};
-
-const jSentence3 = 'Today is a sunny day';
-const jdoc3 = {
-    'content': jSentence3,
-    'genre': 'weather',
-    'embedding': [...(await pipe(jSentence3, pipeOptions)).data],
-};
-
-await Promise.all([
-    client.json.set('jdoc:1', '$', jdoc1),
-    client.json.set('jdoc:2', '$', jdoc2),
-    client.json.set('jdoc:3', '$', jdoc3)
-]);
-```
+{{< clients-example set="home_query_vec" step="json_data" lang_filter="Node.js" >}}
+{{< /clients-example >}}

Query JSON documents using the same syntax, but note that the vector parameter must still be a binary string:

-```js
-const jsons = await client.ft.search(
-    'vector_json_idx',
-    '*=>[KNN 3 @embedding $B AS score]',
-    {
-        "PARAMS": {
-            B: Buffer.from(
-                (await pipe('That is a happy person', pipeOptions)).data.buffer
-            ),
-        },
-        'RETURN': ['score', 'content'],
-        'DIALECT': '2'
-    },
-);
-```
+{{< clients-example set="home_query_vec" step="json_query" lang_filter="Node.js" >}}
+{{< /clients-example >}}
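
To underline the point above, a small sketch of the two representations of the same embedding, assuming the same `pipe` output (variable names are illustrative): the JSON document stores a plain array of numbers, while the KNN query parameter is still the raw binary form.

```js
// Sketch only: the same Float32Array embedding in its two forms.
const data = (await pipe('That is a happy person', pipeOptions)).data;

// Stored in the JSON document as an ordinary array of numbers...
const jsonValue = [...data];

// ...but passed to the KNN query as raw bytes, exactly as with hashes.
const queryParam = Buffer.from(data.buffer);
```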

The results are identical to the hash document query, except for the `jdoc:` prefix:
