
Commit 4344e57

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into distribute-transpiler-handle-adam-accumulator
2 parents: 39d88eb + a376efb

File tree

3 files changed: +119, -3 lines

python/paddle/fluid/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -65,7 +65,7 @@
     'io',
     'initializer',
     'layers',
-    'transpiler'
+    'transpiler',
     'nets',
     'optimizer',
     'learning_rate_decay',
Lines changed: 116 additions & 0 deletions

@@ -0,0 +1,116 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.fluid as fluid
+import paddle.v2 as paddle
+import numpy as np
+import unittest
+
+
+class TestReaderReset(unittest.TestCase):
+    def prepare_data(self):
+        def fake_data_generator():
+            for n in xrange(self.total_ins_num):
+                yield np.ones(self.ins_shape) * n, n
+
+        # Prepare data
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            reader = paddle.batch(fake_data_generator, batch_size=1)
+            feeder = fluid.DataFeeder(
+                feed_list=[
+                    fluid.layers.data(
+                        name='data', shape=[3], dtype='float32'),
+                    fluid.layers.data(
+                        name='label', shape=[1], dtype='int64'),
+                ],
+                place=fluid.CPUPlace())
+            fluid.recordio_writer.convert_reader_to_recordio_file(
+                self.data_file_name, reader, feeder)
+
+    def setUp(self):
+        self.use_cuda = fluid.core.is_compiled_with_cuda()
+        self.data_file_name = './reader_reset_test.recordio'
+        self.ins_shape = [3]
+        self.batch_size = 5
+        self.total_ins_num = self.batch_size * 20
+        self.test_pass_num = 100
+        self.prepare_data()
+
+    def main(self, with_double_buffer):
+        main_prog = fluid.Program()
+        startup_prog = fluid.Program()
+
+        with fluid.program_guard(main_prog, startup_prog):
+            data_reader_handle = fluid.layers.io.open_files(
+                filenames=[self.data_file_name],
+                shapes=[[-1] + self.ins_shape, [-1, 1]],
+                lod_levels=[0, 0],
+                dtypes=['float32', 'int64'],
+                thread_num=1,
+                pass_num=1)
+            data_reader = fluid.layers.io.batch(data_reader_handle,
+                                                self.batch_size)
+            if with_double_buffer:
+                data_reader = fluid.layers.double_buffer(data_reader)
+            image, label = fluid.layers.read_file(data_reader)
+            fetch_list = [image.name, label.name]
+
+        place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        exe.run(startup_prog)
+
+        build_strategy = fluid.BuildStrategy()
+        if with_double_buffer:
+            build_strategy.enable_data_balance = True
+        exec_strategy = fluid.ExecutionStrategy()
+        parallel_exe = fluid.ParallelExecutor(
+            use_cuda=self.use_cuda,
+            main_program=main_prog,
+            build_strategy=build_strategy,
+            exec_strategy=exec_strategy)
+
+        data_appeared = [False] * self.total_ins_num
+        pass_count = 0
+        while (True):
+            try:
+                data_val, label_val = parallel_exe.run(fetch_list,
+                                                       return_numpy=True)
+                ins_num = data_val.shape[0]
+                broadcasted_label = np.ones((ins_num, ) + tuple(
+                    self.ins_shape)) * label_val.reshape((ins_num, 1))
+                self.assertEqual(data_val.all(), broadcasted_label.all())
+                for l in label_val:
+                    self.assertFalse(data_appeared[l[0]])
+                    data_appeared[l[0]] = True
+
+            except fluid.core.EOFException:
+                pass_count += 1
+                if with_double_buffer:
+                    data_appeared = data_appeared[:-parallel_exe.device_count *
+                                                  self.batch_size]
+                for i in data_appeared:
+                    self.assertTrue(i)
+                if pass_count < self.test_pass_num:
+                    data_appeared = [False] * self.total_ins_num
+                    data_reader_handle.reset()
+                else:
+                    break
+
+    def test_all(self):
+        self.main(with_double_buffer=False)
+        self.main(with_double_buffer=True)
+
+
+if __name__ == '__main__':
+    unittest.main()
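
The new test drives the reader reset path: it keeps fetching batches until the runtime raises fluid.core.EOFException, then calls reset() on the handle returned by fluid.layers.io.open_files to start the next pass. A minimal standalone sketch of that loop follows; it reuses the RecordIO file and shapes produced by prepare_data above, and it assumes a plain fluid.Executor surfaces end-of-data with the same EOFException the ParallelExecutor in the test raises.

import paddle.fluid as fluid

# Sketch only: './reader_reset_test.recordio' is the file written by
# prepare_data() above; the Executor is assumed to raise
# fluid.core.EOFException at end-of-data, like the ParallelExecutor does.
file_reader = fluid.layers.io.open_files(
    filenames=['./reader_reset_test.recordio'],
    shapes=[[-1, 3], [-1, 1]],
    lod_levels=[0, 0],
    dtypes=['float32', 'int64'],
    thread_num=1,
    pass_num=1)
batched_reader = fluid.layers.io.batch(file_reader, 5)
image, label = fluid.layers.read_file(batched_reader)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

for pass_id in range(3):  # read the same file several times
    while True:
        try:
            exe.run(fluid.default_main_program(),
                    fetch_list=[image, label])
        except fluid.core.EOFException:
            # Rewind the underlying file reader so the next pass can start;
            # this is the same call the test makes on data_reader_handle.
            file_reader.reset()
            break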

python/setup.py.in

Lines changed: 2 additions & 2 deletions

@@ -42,12 +42,12 @@ def get_patch():
 
 def is_taged():
     try:
-        cmd = ['git', 'describe', '--exact-match', '--tags']
+        cmd = ['git', 'describe', '--exact-match', '--tags', 'HEAD', '2>/dev/null']
         git_tag = subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0].strip()
     except:
         return False
 
-    if git_tag.replace('v', '') == '@PADDLE_VERSION@':
+    if str(git_tag).replace('v', '') == '@PADDLE_VERSION@':
         return True
     else:
         return False
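
The second hunk hardens the tag comparison for the case where the subprocess output is not a plain str. A minimal sketch of the same check, using a hypothetical helper name head_is_tagged, an explicit decode, and stderr captured through a pipe instead of the shell-style '2>/dev/null' argument:

import subprocess

def head_is_tagged(expected_version):
    # Hypothetical helper mirroring is_taged(): True only when HEAD sits
    # exactly on a tag whose name (minus a leading 'v') equals expected_version.
    try:
        out, _ = subprocess.Popen(
            ['git', 'describe', '--exact-match', '--tags', 'HEAD'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    git_tag = out.decode('utf-8').strip()  # bytes on Python 3, so decode
    return git_tag.replace('v', '') == expected_version

# e.g. head_is_tagged('1.0.0') is True when the checkout sits exactly at tag v1.0.0

Piping stderr quiets git's "no tag exactly matches" message, which appears to be what the original 2>/dev/null was after; in an argument list passed to Popen without a shell, that token reaches git verbatim rather than acting as a redirection.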
