Skip to content

Commit d84a30b

Browse files
authored
[cherry-pick] append scale to static runner and remove loader place (#24649)
* Append scale for static runner outputs (#24627)
  - add scale for static runner outputs, test=develop
  - fix import relation, test=develop
  - remove len limit, test=develop
* Remove imperative data loader place limit, test=develop (#24641)
1 parent 62047d3 commit d84a30b

File tree

3 files changed

+44
-17
lines changed

3 files changed

+44
-17
lines changed

python/paddle/fluid/dygraph/static_runner.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
from .. import framework
2525
from .. import backward
2626

27+
from ..layers import nn
2728
from .base import switch_to_static_graph
2829
from ... import compat as cpt
2930

@@ -359,8 +360,27 @@ def _load_static_model(self, model_dir, model_filename=None):
359360
# NOTE: reverse feed vars
360361
self._input_names.reverse()
361362

363+
# Step 4. add scale for outputs
364+
tmp_program = self._build_program_by_desc(program_desc)
365+
self._append_scale_to_output(tmp_program)
366+
362367
return program_desc
363368

369+
@switch_to_static_graph
def _append_scale_to_output(self, program):
    """Append a ``scale(x, 1.0)`` op behind every inferred output of
    *program* and repoint the recorded output names/descs at the new
    scaled variables.

    NOTE(review): presumably the identity-scale op exists so each output
    has a trailing op to hook gradients onto — confirm against the
    backward-building code.
    """
    # Step 1: add one identity scale op per recorded output desc.
    scaled_vars = []
    with framework.program_guard(program):
        for idx, out_desc in enumerate(self._output_descs):
            out_var = program.global_block().var(out_desc.name())
            scaled = nn.scale(
                out_var, 1., name="static_model_runner/scale_{}".format(idx))
            scaled_vars.append(scaled)
    # Step 2: make the runner's bookkeeping refer to the scaled outputs.
    for idx, scaled in enumerate(scaled_vars):
        self._output_names[idx] = scaled.name
        self._output_descs[idx] = scaled.desc
383+
364384
@switch_to_static_graph
365385
def _append_backward_desc(self):
366386
assert self._infer_program_desc is not None, "The StaticModelRunner not initialized properly."

python/paddle/fluid/reader.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
import numpy as np
1919
import threading
2020
import paddle
21-
from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, in_dygraph_mode, cpu_places
21+
from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, in_dygraph_mode, cpu_places, _current_expected_place
2222
from .executor import global_scope
2323
from .data_feeder import DataFeeder, BatchedTensorProvider
2424
from .multiprocess_utils import multiprocess_queue_set, CleanupFuncRegistrar, _cleanup_mmap, _cleanup, _set_SIGCHLD_handler
@@ -671,12 +671,12 @@ def __init__(self,
671671

672672
if not iterable:
673673
logging.warning(
674-
"Please NOTE: dygraph can support iterable mode only. Change to iterable mode."
674+
"Please NOTE: imperative mode can support iterable mode only. Change to iterable mode."
675675
)
676676
self._iterable = True
677677
if not return_list:
678678
logging.warning(
679-
"Please NOTE: dygraph can support return as list only. Change to return as list."
679+
"Please NOTE: imperative mode can support return as list only. Change to return as list."
680680
)
681681
self._return_list = True
682682

@@ -941,10 +941,11 @@ def __batch_reader_impl__():
941941

942942
def set_batch_generator(self, reader, places=None):
    """Bind *reader* as this loader's batch generator.

    When *places* is omitted, fall back to the framework's current
    expected place instead of requiring the caller to pass one.
    Returns ``self`` so calls can be chained.
    """
    self._batch_reader = reader
    if places is None:
        # No explicit place given: use the device the imperative
        # (dygraph) context is currently running on.
        places = _current_expected_place()
    converted = _convert_places(places)
    assert len(converted) == 1, \
        "Number of places must be 1 in imperative mode"
    self._places = converted
    return self
949950

950951

python/paddle/fluid/tests/unittests/test_imperative_data_loader_base.py

Lines changed: 18 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,14 @@ def setUp(self):
4141
self.epoch_num = 1
4242
self.capacity = 5
4343

44+
def iter_loader_data(self, loader):
    """Shared helper: drain *loader* for every epoch and verify that the
    image/label batches (and a relu of the image) have the expected shapes."""
    expected_image_shape = [self.batch_size, 784]
    expected_label_shape = [self.batch_size, 1]
    for _ in range(self.epoch_num):
        for image, label in loader():
            activated = fluid.layers.relu(image)
            self.assertEqual(image.shape, expected_image_shape)
            self.assertEqual(label.shape, expected_label_shape)
            self.assertEqual(activated.shape, expected_image_shape)
51+
4452
def test_single_process_loader(self):
4553
with fluid.dygraph.guard():
4654
loader = fluid.io.DataLoader.from_generator(
@@ -49,12 +57,7 @@ def test_single_process_loader(self):
4957
sample_generator_creator(self.batch_size, self.batch_num),
5058
batch_size=self.batch_size,
5159
places=fluid.CPUPlace())
52-
for _ in range(self.epoch_num):
53-
for image, label in loader():
54-
relu = fluid.layers.relu(image)
55-
self.assertEqual(image.shape, [self.batch_size, 784])
56-
self.assertEqual(label.shape, [self.batch_size, 1])
57-
self.assertEqual(relu.shape, [self.batch_size, 784])
60+
self.iter_loader_data(loader)
5861

5962
def test_multi_process_loader(self):
6063
with fluid.dygraph.guard():
@@ -64,12 +67,15 @@ def test_multi_process_loader(self):
6467
sample_generator_creator(self.batch_size, self.batch_num),
6568
batch_size=self.batch_size,
6669
places=fluid.CPUPlace())
67-
for _ in range(self.epoch_num):
68-
for image, label in loader():
69-
relu = fluid.layers.relu(image)
70-
self.assertEqual(image.shape, [self.batch_size, 784])
71-
self.assertEqual(label.shape, [self.batch_size, 1])
72-
self.assertEqual(relu.shape, [self.batch_size, 784])
70+
self.iter_loader_data(loader)
71+
72+
def test_generator_no_places(self):
    """places is deliberately omitted: the loader must fall back to the
    current expected place in imperative mode and still iterate cleanly."""
    with fluid.dygraph.guard():
        data_loader = fluid.io.DataLoader.from_generator(
            capacity=self.capacity)
        generator = sample_generator_creator(self.batch_size,
                                             self.batch_num)
        data_loader.set_sample_generator(
            generator, batch_size=self.batch_size)
        self.iter_loader_data(data_loader)
7379

7480

7581
if __name__ == '__main__':

0 commit comments

Comments (0)