# Work-in-progress fluid implementation of a WMT14 machine translation
# example: the bidirectional LSTM encoder is wired up below, while the
# decoder is still a stub (see decoder_trainer and the TODO in main()).
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.core as core
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
from paddle.v2.fluid.executor import Executor
import paddle.v2.fluid as fluid

dict_size = 30000  # vocabulary size for both source and target languages
source_dict_dim = target_dict_dim = dict_size
src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size)
hidden_dim = 512  # LSTM cell size
word_dim = 512  # word embedding size
IS_SPARSE = True
batch_size = 50
max_length = 50  # maximum generated sequence length (unused until the decoder exists)
topk_size = 50  # top-k width (unused until the decoder exists)
trg_dict_size = 10000  # target vocabulary size (unused until the decoder exists)

# The source-word input and its embedding are defined at module scope and
# consumed by encoder() below.
src_word_id = layers.data(name="src_word_id", shape=[1], dtype='int64')
src_embedding = layers.embedding(
    input=src_word_id,
    size=[dict_size, word_dim],
    dtype='float32',
    is_sparse=IS_SPARSE,
    param_attr=fluid.ParamAttr(name='vemb'))

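# For illustration: 'vemb' is a dict_size x word_dim (30000 x 512) lookup
# table, so each int64 word id is replaced by the corresponding
# 512-dimensional float32 row.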

def encoder():
    # dynamic_lstm expects its input to be a linear projection of width
    # 4 * cell size (one slice per gate), so project the embedding first.
    fc_forward = layers.fc(input=src_embedding, size=hidden_dim * 4, act='tanh')
    lstm_hidden0, lstm_cell0 = layers.dynamic_lstm(
        input=fc_forward,
        size=hidden_dim * 4,
        candidate_activation='sigmoid',
        cell_activation='sigmoid')

    fc_backward = layers.fc(input=src_embedding, size=hidden_dim * 4, act='tanh')
    lstm_hidden1, lstm_cell1 = layers.dynamic_lstm(
        input=fc_backward,
        size=hidden_dim * 4,
        candidate_activation='sigmoid',
        cell_activation='sigmoid',
        is_reverse=True)

    # Concatenate the forward and backward hidden states along the feature
    # axis (axis=1), not the sequence axis, so each time step carries
    # 2 * hidden_dim features.
    bidirect_lstm_out = layers.concat([lstm_hidden0, lstm_hidden1], axis=1)

    return bidirect_lstm_out

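# Shape sketch (illustrative, assuming the fc projections above): for a
# mini-batch whose sequences total T tokens, src_embedding is (T, 512),
# each fc projection is (T, 2048), each LSTM hidden state is (T, 512),
# and the axis=1 concat produces a (T, 1024) encoder output.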

def decoder_trainer(context):
    '''
    Decoder and its training logic. Still a stub; see the TODO in main().
    '''
    pass


def to_lodtensor(data, place):
    # Convert a list of int64 sequences into a single level-1 LoDTensor:
    # the sequences are flattened into one (sum_of_lengths, 1) tensor and
    # the LoD records the cumulative offset of each sequence.
    seq_lens = [len(seq) for seq in data]
    cur_len = 0
    lod = [cur_len]
    for l in seq_lens:
        cur_len += l
        lod.append(cur_len)
    flattened_data = np.concatenate(data, axis=0).astype("int64")
    flattened_data = flattened_data.reshape([len(flattened_data), 1])
    res = core.LoDTensor()
    res.set(flattened_data, place)
    res.set_lod([lod])
    return res

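# For illustration: to_lodtensor([[1, 2, 3], [4, 5]], core.CPUPlace())
# yields a (5, 1) int64 tensor with LoD [[0, 3, 5]], i.e. the first
# sequence occupies rows 0-2 and the second rows 3-4.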

def main():
    encoder_out = encoder()
    # TODO(jacquesqiao) call here
    decoder_trainer(encoder_out)

    train_data = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.wmt14.train(8000), buf_size=1000),
        batch_size=batch_size)

    place = core.CPUPlace()
    exe = Executor(place)

    exe.run(framework.default_startup_program())

    batch_id = 0
    for pass_id in xrange(2):
        print 'pass_id', pass_id
        for data in train_data():
            print 'batch', batch_id
            batch_id += 1
            # Smoke test: stop after a handful of batches.
            if batch_id > 10:
                break
            # Each sample's first field holds the source word ids; only
            # those are fed while the decoder remains unimplemented.
            word_data = to_lodtensor(map(lambda x: x[0], data), place)
            # Fetch the encoder output to exercise the graph end to end.
            outs = exe.run(framework.default_main_program(),
                           feed={'src_word_id': word_data},
                           fetch_list=[encoder_out])


if __name__ == '__main__':
    main()