# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import math
import sys
from functools import partial

import numpy as np

import paddle
import paddle.fluid as fluid

CLASS_DIM = 2  # binary sentiment labels: positive / negative
EMB_DIM = 128  # word embedding width
BATCH_SIZE = 128
LSTM_SIZE = 128  # width of the LSTM hidden and cell states


def dynamic_rnn_lstm(data, input_dim, class_dim, emb_dim, lstm_size):
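    """Hand-written LSTM over variable-length sequences.

    DynamicRNN walks the time steps of each LoD sequence; every step
    computes the standard LSTM gate equations from plain fc layers rather
    than calling a fused LSTM op, and the classifier reads each sequence's
    last hidden state.
    """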
    emb = fluid.layers.embedding(
        input=data, size=[input_dim, emb_dim], is_sparse=True)
    sentence = fluid.layers.fc(input=emb, size=lstm_size, act='tanh')

    rnn = fluid.layers.DynamicRNN()
    with rnn.block():
        word = rnn.step_input(sentence)
        prev_hidden = rnn.memory(value=0.0, shape=[lstm_size])
        prev_cell = rnn.memory(value=0.0, shape=[lstm_size])

        # W_x * x_t + W_h * h_{t-1}; only the input projection carries a
        # bias so it is not added twice.
        def gate_common(ipt, hidden, size):
            gate0 = fluid.layers.fc(input=ipt, size=size, bias_attr=True)
            gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False)
            return gate0 + gate1

        forget_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
                                                         lstm_size))
        input_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
                                                        lstm_size))
        output_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
                                                         lstm_size))
        # The candidate cell state takes tanh, not sigmoid, in the standard
        # LSTM equations.
        cell_gate = fluid.layers.tanh(x=gate_common(word, prev_hidden,
                                                    lstm_size))

        cell = forget_gate * prev_cell + input_gate * cell_gate
        hidden = output_gate * fluid.layers.tanh(x=cell)
        rnn.update_memory(prev_cell, cell)
        rnn.update_memory(prev_hidden, hidden)
        rnn.output(hidden)

    # rnn() returns the full output sequence; keep only each sequence's
    # last hidden state for classification.
    last = fluid.layers.sequence_last_step(rnn())
    prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax")
    return prediction


def inference_program(word_dict):
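    """Declare the input layer and build the LSTM classifier on top of it.

    Each example is one variable-length sequence of word ids, hence
    shape=[1] per time step and lod_level=1 (one level of sequence nesting).
    """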
    data = fluid.layers.data(
        name="words", shape=[1], dtype="int64", lod_level=1)

    dict_dim = len(word_dict)
    pred = dynamic_rnn_lstm(data, dict_dim, CLASS_DIM, EMB_DIM, LSTM_SIZE)
    return pred


def train_program(word_dict):
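    """Attach cross-entropy loss and accuracy to the inference network."""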
    prediction = inference_program(word_dict)
    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = fluid.layers.mean(cost)
    accuracy = fluid.layers.accuracy(input=prediction, label=label)
    return [avg_cost, accuracy]


def train(use_cuda, train_program, save_dirname):
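    """Drive training through the fluid.Trainer high-level API.

    The trainer compiles train_program, pulls batches from the reader, and
    reports progress through event_handler at the end of each step and epoch.
    """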
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    optimizer = fluid.optimizer.Adagrad(learning_rate=0.002)

    word_dict = paddle.dataset.imdb.word_dict()
    trainer = fluid.Trainer(
        train_func=partial(train_program, word_dict),
        place=place,
        optimizer=optimizer)

    def event_handler(event):
        if isinstance(event, fluid.EndEpochEvent):
            test_reader = paddle.batch(
                paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
            avg_cost, acc = trainer.test(
                reader=test_reader, feed_order=['words', 'label'])

            print("avg_cost: %s" % avg_cost)
            print("acc     : %s" % acc)

            if acc > 0.2:  # Smaller value to increase CI speed
                trainer.save_params(save_dirname)
                trainer.stop()
            else:
                print('Epoch {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
                    event.epoch + 1, avg_cost, acc))
                if math.isnan(avg_cost):
                    sys.exit("got NaN loss, training failed.")
        elif isinstance(event, fluid.EndStepEvent):
            # list() so the metrics print as values under Python 3 as well.
            print("Step {0}, Epoch {1} Metrics {2}".format(
                event.step, event.epoch, list(map(np.array, event.metrics))))
            if event.step == 1:  # Run 2 iterations to speed CI
                trainer.save_params(save_dirname)
                trainer.stop()

    # buf_size=25000 covers the full IMDB training set, so the shuffle is
    # effectively global rather than a small sliding window.
    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.imdb.train(word_dict), buf_size=25000),
        batch_size=BATCH_SIZE)

    trainer.train(
        num_epochs=1,
        event_handler=event_handler,
        reader=train_reader,
        feed_order=['words', 'label'])


def infer(use_cuda, inference_program, save_dirname=None):
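    """Restore the parameters saved by train() and classify random input."""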
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    word_dict = paddle.dataset.imdb.word_dict()

    inferencer = fluid.Inferencer(
        infer_func=partial(inference_program, word_dict),
        param_path=save_dirname,
        place=place)

    def create_random_lodtensor(lod, place, low, high):
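        """Fill a LoDTensor with random word ids drawn from [low, high].

        An offset-based LoD such as [0, 4, 10] encodes two sequences:
        rows 0-3 (length 4) and rows 4-9 (length 6).
        """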
        # randint's upper bound is exclusive, so use high + 1 to sample
        # word ids from the inclusive range [low, high].
        data = np.random.randint(low, high + 1,
                                 [lod[-1], 1]).astype("int64")
        res = fluid.LoDTensor()
        res.set(data, place)
        res.set_lod([lod])
        return res

    lod = [0, 4, 10]
    tensor_words = create_random_lodtensor(
        lod, place, low=0, high=len(word_dict) - 1)
    results = inferencer.infer({'words': tensor_words})
    print("infer results: ", results)


def main(use_cuda):
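    """Train then infer, skipping the CUDA run when Paddle lacks CUDA."""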
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    save_path = "understand_sentiment_dynamic_rnn.inference.model"
    train(use_cuda, train_program, save_path)
    infer(use_cuda, inference_program, save_path)


if __name__ == '__main__':
    for use_cuda in (False, True):
        main(use_cuda=use_cuda)