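"""Sentiment classification on the IMDB dataset with a stacked LSTM.

Builds an embedding -> stacked dynamic LSTM -> max-pooling -> softmax
network with the experimental paddle.v2.framework API and trains it on
CPU until the cost/accuracy thresholds in main() are reached.
"""
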
import paddle.v2 as paddle
import paddle.v2.framework.layers as layers
import paddle.v2.framework.nets as nets
import paddle.v2.framework.core as core
import paddle.v2.framework.optimizer as optimizer

from paddle.v2.framework.framework import Program, g_main_program, g_startup_program
from paddle.v2.framework.executor import Executor

import numpy as np

def stacked_lstm_net(input_dim,
                     class_dim=2,
                     emb_dim=128,
                     hid_dim=512,
                     stacked_num=3):
    # Embedding -> fc -> stacked_num * [fc + dynamic_lstm] -> max-pooling
    # -> softmax classifier. stacked_num must be odd so that the topmost
    # LSTM layer runs in the forward direction.
    assert stacked_num % 2 == 1
    data = layers.data(name="words", shape=[1], data_type="int64")
    label = layers.data(name="label", shape=[1], data_type="int64")

    emb = layers.embedding(input=data, size=[input_dim, emb_dim])
    # add bias attr

    # TODO(qijun) linear act
    fc1 = layers.fc(input=emb, size=hid_dim)
    lstm1, cell1 = layers.dynamic_lstm(input=fc1, size=hid_dim)

    inputs = [fc1, lstm1]

    # Stack further fc/LSTM pairs, alternating the scan direction.
    for i in range(2, stacked_num + 1):
        fc = layers.fc(input=inputs, size=hid_dim)
        lstm, cell = layers.dynamic_lstm(
            input=fc, size=hid_dim, is_reverse=(i % 2) == 0)
        inputs = [fc, lstm]

    # Max-pool the outputs of the last fc/LSTM pair over the sequence.
    fc_last = layers.sequence_pool(input=inputs[0], pool_type='max')
    lstm_last = layers.sequence_pool(input=inputs[1], pool_type='max')

    prediction = layers.fc(input=[fc_last, lstm_last],
                           size=class_dim,
                           act='softmax')
    cost = layers.cross_entropy(input=prediction, label=label)
    avg_cost = layers.mean(x=cost)
    adam_optimizer = optimizer.AdamOptimizer(learning_rate=0.002)
    opts = adam_optimizer.minimize(avg_cost)
    acc = layers.accuracy(input=prediction, label=label)
    return avg_cost, acc


def to_lodtensor(data, place):
    # Pack a batch of variable-length sequences into a single LoDTensor.
    # The LoD (level-of-details) info stores cumulative offsets, e.g. three
    # sequences of lengths 3, 1 and 2 become lod = [[0, 3, 4, 6]].
    seq_lens = [len(seq) for seq in data]
    cur_len = 0
    lod = [cur_len]
    for l in seq_lens:
        cur_len += l
        lod.append(cur_len)
    flattened_data = np.concatenate(data, axis=0).astype("int64")
    flattened_data = flattened_data.reshape([len(flattened_data), 1])
    res = core.LoDTensor()
    res.set(flattened_data, place)
    res.set_lod([lod])
    return res


def main():
    BATCH_SIZE = 100
    PASS_NUM = 5

    word_dict = paddle.dataset.imdb.word_dict()
    print "loaded word dict successfully"
    dict_dim = len(word_dict)
    class_dim = 2

    cost, acc = stacked_lstm_net(input_dim=dict_dim, class_dim=class_dim)

    train_data = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.imdb.train(word_dict), buf_size=1000),
        batch_size=BATCH_SIZE)
    place = core.CPUPlace()
    exe = Executor(place)

    exe.run(g_startup_program)

    for pass_id in xrange(PASS_NUM):
        for data in train_data():
            tensor_words = to_lodtensor(map(lambda x: x[0], data), place)

            label = np.array(map(lambda x: x[1], data)).astype("int64")
            # Use len(data) rather than BATCH_SIZE so a final, smaller
            # batch does not break the reshape.
            label = label.reshape([len(data), 1])

            tensor_label = core.LoDTensor()
            tensor_label.set(label, place)

            outs = exe.run(g_main_program,
                           feed={"words": tensor_words,
                                 "label": tensor_label},
                           fetch_list=[cost, acc])
            cost_val = np.array(outs[0])
            acc_val = np.array(outs[1])

            print("cost=" + str(cost_val) + " acc=" + str(acc_val))
            # Stop early with success once the thresholds are met;
            # otherwise exit(1) below signals failure after all passes.
            if cost_val < 1.0 and acc_val > 0.7:
                exit(0)
    exit(1)


if __name__ == '__main__':
    main()