
Commit 98e3564

Fix rnn example. (PaddlePaddle#971)
1. Typo fix: stm_hidden_size -> lstm_hidden_size.
2. Import fix: add the missing imports in export_model.py.
3. Nit: remove unused imports.
1 parent c7fb2b9 commit 98e3564
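
For context, the bilstm_attn branch in both export_model.py and predict.py computed the attention width from an undefined name, so selecting that network failed with a NameError. A minimal standalone sketch of the issue and the corrected arithmetic (not the repo's exact code):

```python
# Minimal sketch of the typo's effect; names mirror the example scripts but this
# is not the repo's exact code.
lstm_hidden_size = 196

# Buggy line in the original scripts (would raise NameError, since only
# `lstm_hidden_size` is defined):
#   attention = SelfInteractiveAttention(hidden_size=2 * stm_hidden_size)

# After the fix: a bidirectional LSTM concatenates forward and backward states,
# so the attention layer consumes vectors twice as wide as one direction.
attention_hidden_size = 2 * lstm_hidden_size
print(attention_hidden_size)  # 392
```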

File tree: 3 files changed, +4 −5 lines

examples/text_classification/rnn/deploy/python/predict.py

Lines changed: 0 additions & 1 deletion
```diff
@@ -16,7 +16,6 @@
 
 import numpy as np
 import paddle
-from paddle import inference
 from paddlenlp.data import JiebaTokenizer, Stack, Tuple, Pad, Vocab
 from scipy.special import softmax
 
```

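The removed `from paddle import inference` was unused: per the commit message, the deploy script presumably reaches the inference API through the `paddle` package it already imports. For reference, a minimal sketch of the Paddle Inference flow such a deploy script follows (the file paths and input shape here are placeholders, not values from the repo, and running it requires real exported model files):

```python
import numpy as np
import paddle.inference

# Placeholder paths; the real script builds them from command-line arguments.
config = paddle.inference.Config("static_graph.pdmodel", "static_graph.pdiparams")
config.disable_gpu()  # CPU inference for this sketch

predictor = paddle.inference.create_predictor(config)
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
input_handle.copy_from_cpu(np.zeros((1, 32), dtype="int64"))  # dummy token ids
predictor.run()

output_handle = predictor.get_output_handle(predictor.get_output_names()[0])
logits = output_handle.copy_to_cpu()
print(logits.shape)
```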
examples/text_classification/rnn/export_model.py

Lines changed: 3 additions & 2 deletions
```diff
@@ -15,9 +15,10 @@
 import argparse
 
 import paddle
-import paddlenlp as ppnlp
 from paddlenlp.data import Vocab
 
+from model import BoWModel, BiLSTMAttentionModel, CNNModel, LSTMModel, GRUModel, RNNModel, SelfInteractiveAttention
+
 # yapf: disable
 parser = argparse.ArgumentParser(__doc__)
 parser.add_argument("--vocab_path", type=str, default="./senta_word_dict.txt", help="The path to vocabulary.")
@@ -56,7 +57,7 @@ def main():
             padding_idx=pad_token_id)
     elif network == 'bilstm_attn':
         lstm_hidden_size = 196
-        attention = SelfInteractiveAttention(hidden_size=2 * stm_hidden_size)
+        attention = SelfInteractiveAttention(hidden_size=2 * lstm_hidden_size)
         model = BiLSTMAttentionModel(
             attention_layer=attention,
             vocab_size=vocab_size,
```

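The added `from model import ...` line matters because export_model.py instantiates one of these locally defined networks before saving it. As a rough illustration of the export step it feeds into, here is a sketch of the standard Paddle 2.x dynamic-to-static export, with a toy layer standing in for the networks from model.py (the output path and shapes are placeholders):

```python
# Sketch of the usual Paddle 2.x export flow that export_model.py relies on;
# the real script builds one of the networks imported from model.py instead
# of this toy layer.
import paddle
import paddle.nn as nn


class ToyBoW(nn.Layer):
    def __init__(self, vocab_size=100, emb_dim=16, num_classes=2):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim)
        self.fc = nn.Linear(emb_dim, num_classes)

    def forward(self, token_ids):
        emb = self.embedding(token_ids)   # [batch, seq_len, emb_dim]
        pooled = paddle.sum(emb, axis=1)  # bag-of-words pooling
        return self.fc(pooled)


model = ToyBoW()
model.eval()
# Describe the dynamic input (variable batch and sequence length) and save a
# static-graph program that the inference-side predict.py can load.
model = paddle.jit.to_static(
    model,
    input_spec=[paddle.static.InputSpec(shape=[None, None], dtype="int64")])
paddle.jit.save(model, "output/static_graph")
```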
examples/text_classification/rnn/predict.py

Lines changed: 1 addition & 2 deletions
```diff
@@ -15,7 +15,6 @@
 
 import paddle
 import paddle.nn.functional as F
-import paddlenlp as ppnlp
 from paddlenlp.data import JiebaTokenizer, Stack, Tuple, Pad, Vocab
 
 from model import BoWModel, BiLSTMAttentionModel, CNNModel, LSTMModel, GRUModel, RNNModel, SelfInteractiveAttention
@@ -102,7 +101,7 @@ def predict(model, data, label_map, batch_size=1, pad_token_id=0):
             padding_idx=pad_token_id)
     elif network == 'bilstm_attn':
         lstm_hidden_size = 196
-        attention = SelfInteractiveAttention(hidden_size=2 * stm_hidden_size)
+        attention = SelfInteractiveAttention(hidden_size=2 * lstm_hidden_size)
         model = BiLSTMAttentionModel(
             attention_layer=attention,
             vocab_size=vocab_size,
```

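The same one-character fix applies here. As a quick sanity check on the factor of two (a sketch, not code from the repo): a bidirectional LSTM's output concatenates the forward and backward hidden states along the feature axis, so the attention layer sees vectors of width 2 * lstm_hidden_size.

```python
import paddle

lstm_hidden_size = 196
# Bidirectional LSTM over toy embeddings of size 128.
lstm = paddle.nn.LSTM(input_size=128, hidden_size=lstm_hidden_size,
                      direction="bidirect")
x = paddle.randn([4, 32, 128])  # [batch_size, seq_len, emb_dim]
outputs, _ = lstm(x)
print(outputs.shape)  # [4, 32, 392] -> 2 * lstm_hidden_size features per step
```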