Skip to content

Commit ce1e0d3

Browse files
committed
test_py_reader_using_executor: add test coverage for use_decorate_paddle_reader
1 parent ea97e83 commit ce1e0d3

File tree

1 file changed

+38
-24
lines changed

1 file changed

+38
-24
lines changed

python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py

Lines changed: 38 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -58,19 +58,19 @@ def simple_fc_net(in_size,
5858
if use_feed_list:
5959
data = fluid.layers.data(name="data", dtype='float32', shape=[in_size])
6060
label = fluid.layers.data(name='label', dtype='int64', shape=[1])
61-
reader = fluid.layers.create_py_reader_by_data(
61+
py_reader = fluid.layers.create_py_reader_by_data(
6262
capacity=queue_capacity,
6363
use_double_buffer=False,
6464
feed_list=[data, label])
6565
else:
66-
reader = fluid.layers.py_reader(
66+
py_reader = fluid.layers.py_reader(
6767
capacity=queue_capacity,
6868
shapes=[[-1, in_size], [-1, 1]],
6969
lod_levels=[0, 0],
7070
dtypes=['float32', 'int64'],
7171
use_double_buffer=False)
72-
feed_queue = reader.queue
73-
reader = fluid.layers.batch(reader, batch_size=batch_size)
72+
feed_queue = py_reader.queue
73+
reader = fluid.layers.batch(py_reader, batch_size=batch_size)
7474
if use_double_buffer:
7575
reader = fluid.layers.double_buffer(reader)
7676

@@ -92,7 +92,7 @@ def simple_fc_net(in_size,
9292

9393
optimizer = fluid.optimizer.Adam()
9494
optimizer.minimize(loss)
95-
return in_data, label, loss, optimizer, feed_queue
95+
return in_data, label, loss, optimizer, feed_queue, py_reader
9696

9797

9898
class TestPyReaderUsingExecutor(unittest.TestCase):
@@ -110,17 +110,21 @@ def test(self):
110110
for use_parallel_executor in [False, True]:
111111
for use_double_buffer in [False, True]:
112112
for use_feed_list in [False, True]:
113-
print('Test Parameters:'),
114-
print({
115-
'use_cuda': use_cuda,
116-
'use_parallel_executor': use_parallel_executor,
117-
'use_double_buffer': use_double_buffer,
118-
'use_feed_list': use_feed_list
119-
})
120-
self.main(use_cuda, use_parallel_executor,
121-
use_double_buffer, use_feed_list)
122-
123-
def random_reader(self):
113+
for use_decorate_paddle_reader in [False, True]:
114+
print('Test Parameters:'),
115+
print({
116+
'use_cuda': use_cuda,
117+
'use_parallel_executor': use_parallel_executor,
118+
'use_double_buffer': use_double_buffer,
119+
'use_feed_list': use_feed_list,
120+
'use_decorate_paddle_reader':
121+
use_decorate_paddle_reader
122+
})
123+
self.main(use_cuda, use_parallel_executor,
124+
use_double_buffer, use_feed_list,
125+
use_decorate_paddle_reader)
126+
127+
def tensor_reader(self, use_decorate_paddle_reader):
124128
def reader():
125129
self.inputs = []
126130
cnt = 0
@@ -144,30 +148,36 @@ def reader():
144148
elif not self.use_double_buffer:
145149
break
146150

147-
yield tensors
151+
if use_decorate_paddle_reader:
152+
yield [(in_data, label)]
153+
else:
154+
yield tensors
148155
cnt += 1
149156

150-
yield None
157+
if not use_decorate_paddle_reader:
158+
yield None
151159

152160
return reader
153161

154162
def main(self,
155163
use_cuda=True,
156164
use_parallel_executor=False,
157165
use_double_buffer=False,
158-
use_feed_list=False):
166+
use_feed_list=False,
167+
use_decorate_paddle_reader=False):
159168
assert not use_cuda or use_cuda and core.is_compiled_with_cuda()
160169

161170
self.use_cuda = use_cuda
162171
self.use_parallel_executor = use_parallel_executor
163172
self.use_double_buffer = use_double_buffer
164173
self.use_feed_list = use_feed_list
174+
self.use_decorate_paddle_reader = use_decorate_paddle_reader
165175

166176
startup_program = fluid.Program()
167177
main_program = fluid.Program()
168178

169179
with fluid.program_guard(main_program, startup_program):
170-
in_data, label, loss, optimizer, feed_queue = simple_fc_net(
180+
in_data, label, loss, optimizer, feed_queue, py_reader = simple_fc_net(
171181
in_size=self.in_size,
172182
class_num=self.class_num,
173183
hidden_sizes=self.hidden_sizes,
@@ -192,10 +202,14 @@ def main(self,
192202
main_exe = startup_exe
193203
self.batch_size_times = 1
194204

195-
reader = self.random_reader()
196-
thread = threading.Thread(
197-
target=feed_data, args=(feed_queue, reader))
198-
thread.start()
205+
reader = self.tensor_reader(use_decorate_paddle_reader)
206+
if use_decorate_paddle_reader:
207+
py_reader.decorate_paddle_reader(reader)
208+
py_reader.start()
209+
else:
210+
thread = threading.Thread(
211+
target=feed_data, args=(feed_queue, reader))
212+
thread.start()
199213

200214
self.outputs = []
201215
for _ in range(self.iterations):

0 commit comments

Comments (0)