
Commit a145bca

fix ernie api docstring (#630)
1 parent 5944cc9 · commit a145bca

3 files changed: +12, -12 lines

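Why the fix: for a single sentence, the PaddleNLP tokenizer returns a dict of plain Python lists, so paddle.to_tensor(v) builds 1-D tensors of shape [seq_len], while these models expect a leading batch dimension, i.e. [batch_size, seq_len]. Wrapping v in a list adds that dimension. A minimal sketch of the before/after behavior (hypothetical usage, assuming the 'ernie-1.0' weights can be downloaded; the example sentence "这是个测试样例" means "this is a test example"):

    import paddle
    from paddlenlp.transformers import ErnieTokenizer, ErnieForSequenceClassification

    tokenizer = ErnieTokenizer.from_pretrained('ernie-1.0')
    model = ErnieForSequenceClassification.from_pretrained('ernie-1.0')

    # A single sentence gives a dict of Python lists, e.g.
    # {'input_ids': [1, ...], 'token_type_ids': [0, ...]}.
    inputs = tokenizer("这是个测试样例")

    # Before this commit: 1-D tensors of shape [seq_len], no batch axis.
    old_inputs = {k: paddle.to_tensor(v) for (k, v) in inputs.items()}

    # After this commit: wrapping v in a list yields shape [1, seq_len],
    # the [batch_size, seq_len] layout the model's forward() expects.
    new_inputs = {k: paddle.to_tensor([v]) for (k, v) in inputs.items()}

    logits = model(**new_inputs)  # shape [1, num_classes]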

paddlenlp/transformers/ernie/modeling.py

Lines changed: 4 additions & 4 deletions
@@ -420,7 +420,7 @@ def forward(self,
         model = ErnieForSequenceClassification.from_pretrained('ernie-1.0')

         inputs = tokenizer("这是个测试样例")
-        inputs = {k:paddle.to_tensor(v) for (k, v) in inputs.items()}
+        inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
         logits = model(**inputs)

     """
@@ -505,7 +505,7 @@ def forward(self,
         model = ErnieForQuestionAnswering.from_pretrained('ernie-1.0')

         inputs = tokenizer("这是个测试样例")
-        inputs = {k:paddle.to_tensor(v) for (k, v) in inputs.items()}
+        inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
         logits = model(**inputs)
     """

@@ -601,7 +601,7 @@ def forward(self,
         model = ErnieForTokenClassification.from_pretrained('ernie-1.0')

         inputs = tokenizer("这是个测试样例")
-        inputs = {k:paddle.to_tensor(v) for (k, v) in inputs.items()}
+        inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
         logits = model(**inputs)
     """
     sequence_output, _ = self.ernie(
@@ -742,7 +742,7 @@ def forward(self,
         model = ErnieForTokenClassification.from_pretrained('ernie-1.0')

         inputs = tokenizer("这是个测试样例")
-        inputs = {k:paddle.to_tensor(v) for (k, v) in inputs.items()}
+        inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
         logits = model(**inputs)

     """

paddlenlp/transformers/ernie_gram/modeling.py

Lines changed: 4 additions & 4 deletions
@@ -272,7 +272,7 @@ def forward(self,
         model = ErnieGramModel.from_pretrained('ernie-gram-zh')

         inputs = tokenizer("这是个测试样例")
-        inputs = {k:paddle.to_tensor(v) for (k, v) in inputs.items()}
+        inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
         sequence_output, pooled_output = model(**inputs)

     """
@@ -376,7 +376,7 @@ def forward(self,
         model = ErnieGramForTokenClassification.from_pretrained('ernie-gram-zh')

         inputs = tokenizer("这是个测试样例")
-        inputs = {k:paddle.to_tensor(v) for (k, v) in inputs.items()}
+        inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
         logits = model(**inputs)
     """
     sequence_output, _ = self.ernie_gram(
@@ -460,7 +460,7 @@ def forward(self,
         model = ErnieGramForQuestionAnswering.from_pretrained('ernie-gram-zh')

         inputs = tokenizer("这是个测试样例")
-        inputs = {k:paddle.to_tensor(v) for (k, v) in inputs.items()}
+        inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
         logits = model(**inputs)
     """

@@ -554,7 +554,7 @@ def forward(self,
         model = ErnieGramForSequenceClassification.from_pretrained('ernie-gram-zh')

         inputs = tokenizer("这是个测试样例")
-        inputs = {k:paddle.to_tensor(v) for (k, v) in inputs.items()}
+        inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
         logits = model(**inputs)

     """

paddlenlp/transformers/roberta/modeling.py

Lines changed: 4 additions & 4 deletions
@@ -325,7 +325,7 @@ def forward(self,
         model = RobertaModel.from_pretrained('roberta-wwm-ext')

         inputs = tokenizer("这是个测试样例")
-        inputs = {k:paddle.to_tensor(v) for (k, v) in inputs.items()}
+        inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
         sequence_output, pooled_output = model(**inputs)

     """
@@ -413,7 +413,7 @@ def forward(self, input_ids, token_type_ids=None):
         model = RobertaForSequenceClassification.from_pretrained('roberta-wwm-ext')

         inputs = tokenizer("这是个测试样例")
-        inputs = {k:paddle.to_tensor(v) for (k, v) in inputs.items()}
+        inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
         logits = model(**inputs)

     """
@@ -509,7 +509,7 @@ def forward(self,
         model = RobertaForSequenceClassification.from_pretrained('roberta-wwm-ext')

         inputs = tokenizer("这是个测试样例")
-        inputs = {k:paddle.to_tensor(v) for (k, v) in inputs.items()}
+        inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
         logits = model(**inputs)

     """
@@ -603,7 +603,7 @@ def forward(self,
         model = RobertaForTokenClassification.from_pretrained('roberta-wwm-ext')

         inputs = tokenizer("这是个测试样例")
-        inputs = {k:paddle.to_tensor(v) for (k, v) in inputs.items()}
+        inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}
         logits = model(**inputs)

     """
