@@ -242,6 +242,14 @@ def search(self, words):
         Returns:
             `numpy.array`: The vectors of the specified words.
 
+        Examples:
+            .. code-block::
+
+                from paddlenlp.embeddings import TokenEmbedding
+
+                embed = TokenEmbedding()
+                vector = embed.search('Welcome to use PaddlePaddle and PaddleNLP!')
+
         """
         idx_list = self.get_idx_list_from_words(words)
         idx_tensor = paddle.to_tensor(idx_list)
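A note on the new `search` example: the sample string is passed as a single word, so the lookup presumably falls back to the unknown token rather than tokenizing the sentence. The sketch below illustrates the result shape under that assumption; the 300-dimensional shape assumes the default pretrained model that `TokenEmbedding()` downloads on first use, and the printed output is illustrative, not a verified value.

    from paddlenlp.embeddings import TokenEmbedding

    # Downloads the default pretrained embedding on first use.
    embed = TokenEmbedding()

    # A single string is treated as one word: search() wraps it in a
    # one-element list (see get_idx_list_from_words below), so the result
    # has one row per input word.
    vector = embed.search('Welcome to use PaddlePaddle and PaddleNLP!')
    print(vector.shape)  # (1, 300), assuming the 300-dim default model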
@@ -271,6 +279,15 @@ def get_idx_list_from_words(self, words):
         Returns:
             `list`: The index list of the specified words.
 
+        Examples:
+            .. code-block::
+
+                from paddlenlp.embeddings import TokenEmbedding
+
+                embed = TokenEmbedding()
+                index_list = embed.get_idx_list_from_words('Welcome to use PaddlePaddle and PaddleNLP!')
+                # [635963]
+
         """
         if isinstance(words, str):
             idx_list = [self.get_idx_from_word(words)]
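Since the hunk above shows the `isinstance(words, str)` branch, a short sketch of the two accepted input forms may help. That list input is supported is an assumption suggested by the branch; the printed indexes are placeholders, not verified values.

    from paddlenlp.embeddings import TokenEmbedding

    embed = TokenEmbedding()

    # A plain string is wrapped into a one-element list by the
    # isinstance(words, str) branch shown above.
    print(embed.get_idx_list_from_words('PaddlePaddle'))

    # A list of words yields one index per word, in order (assumed
    # from the non-str code path).
    print(embed.get_idx_list_from_words(['PaddlePaddle', 'PaddleNLP']))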
@@ -305,7 +322,16 @@ def dot(self, word_a, word_b):
             word_b (`str`): The second word string.
 
         Returns:
-            `Float`: The dot product of 2 words.
+            float: The dot product of the two words.
+
+        Examples:
+            .. code-block::
+
+                from paddlenlp.embeddings import TokenEmbedding
+
+                embed = TokenEmbedding()
+                dot_product = embed.dot('PaddlePaddle', 'PaddleNLP!')
+                # 0.11827179
 
         """
         dot = self._dot_np
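Given the `self._dot_np` alias visible above, `dot` presumably reduces to a NumPy dot product of the two looked-up word vectors; the cross-check below rests on that assumption.

    import numpy as np
    from paddlenlp.embeddings import TokenEmbedding

    embed = TokenEmbedding()

    # Look up each word's vector, then compare the library result with
    # a manual numpy dot product (assumes _dot_np is numpy.dot).
    vec_a = embed.search('PaddlePaddle')[0]
    vec_b = embed.search('PaddleNLP')[0]
    print(embed.dot('PaddlePaddle', 'PaddleNLP'))
    print(np.dot(vec_a, vec_b))  # should match, up to float precision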
@@ -321,7 +347,16 @@ def cosine_sim(self, word_a, word_b):
             word_b (`str`): The second word string.
 
         Returns:
-            `Float`: The cosine similarity of 2 words.
+            float: The cosine similarity of the two words.
+
+        Examples:
+            .. code-block::
+
+                from paddlenlp.embeddings import TokenEmbedding
+
+                embed = TokenEmbedding()
+                cosine_simi = embed.cosine_sim('PaddlePaddle', 'PaddleNLP!')
+                # 0.99999994
 
         """
         dot = self._dot_np
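One observation on the new example's output: a cosine similarity of 0.99999994 alongside a dot product of 0.11827179 is consistent with both sample strings being out of vocabulary and mapping to the same unknown-token vector, though that is an inference rather than something the diff states. For reference, a sketch of the standard cosine formula the method should implement:

    import numpy as np
    from paddlenlp.embeddings import TokenEmbedding

    embed = TokenEmbedding()

    vec_a = embed.search('PaddlePaddle')[0]
    vec_b = embed.search('PaddleNLP')[0]

    # Cosine similarity: the dot product normalized by the vector norms.
    manual = np.dot(vec_a, vec_b) / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))
    print(embed.cosine_sim('PaddlePaddle', 'PaddleNLP'))
    print(manual)  # should agree, up to float precision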