Commit bb166a1

fix API.spec
test=develop
1 parent 3a09693 commit bb166a1

File tree

4 files changed: +81 -83 lines changed

    paddle/fluid/API.spec
    python/paddle/fluid/compiler.py
    python/paddle/fluid/reader.py
    python/paddle/reader/decorator.py


paddle/fluid/API.spec

Lines changed: 4 additions & 4 deletions
@@ -47,7 +47,7 @@ paddle.fluid.AsyncExecutor.run (ArgSpec(args=['self', 'program', 'data_feed', 'f
 paddle.fluid.AsyncExecutor.save_model (ArgSpec(args=['self', 'save_path'], varargs=None, keywords=None, defaults=None), ('document', 'c8ac0dfcb3b187aba25d03af7fea56b2'))
 paddle.fluid.AsyncExecutor.stop (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '5f23d043607bb5d55e466ec3f578e093'))
 paddle.fluid.CompiledProgram.__init__ (ArgSpec(args=['self', 'program_or_graph'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.CompiledProgram.with_data_parallel (ArgSpec(args=['self', 'loss_name', 'build_strategy', 'exec_strategy', 'share_vars_from', 'places'], varargs=None, keywords=None, defaults=(None, None, None, None, None)), ('document', 'dbf542d1384741650a1238ddb05daa37'))
+paddle.fluid.CompiledProgram.with_data_parallel (ArgSpec(args=['self', 'loss_name', 'build_strategy', 'exec_strategy', 'share_vars_from', 'places'], varargs=None, keywords=None, defaults=(None, None, None, None, None)), ('document', '5e8cca4619a5d7c3280fb3cae7021b14'))
 paddle.fluid.CompiledProgram.with_inference_optimize (ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=None), ('document', '9e5b009d850191a010e859189c127fd8'))
 paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.ExecutionStrategy) -> None
 paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy.GradientScaleStrategy, arg0: int) -> None
@@ -61,8 +61,8 @@ paddle.fluid.io.load_params (ArgSpec(args=['executor', 'dirname', 'main_program'
 paddle.fluid.io.load_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '28df5bfe26ca7a077f91156abb0fe6d2'))
 paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', '582d87b8df75a5a639a107db8ff86f9c'))
 paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '7a5255386075dac3c75b7058254fcdcb'))
-paddle.fluid.io.PyReader.__init__ (ArgSpec(args=['self', 'feed_list', 'capacity', 'use_double_buffer', 'iterable'], varargs=None, keywords=None, defaults=(True, False)), ('document', 'b3d72958b2568aae3f90f72abdcb7d1a'))
-paddle.fluid.io.PyReader.decorate_batch_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd10224fef1095247063b6976da793021'))
+paddle.fluid.io.PyReader.__init__ (ArgSpec(args=['self', 'feed_list', 'capacity', 'use_double_buffer', 'iterable'], varargs=None, keywords=None, defaults=(True, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.io.PyReader.decorate_batch_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a3fefec8bacd6ce83f49906a9d05e779'))
 paddle.fluid.io.PyReader.decorate_sample_generator (ArgSpec(args=['self', 'sample_generator', 'batch_size', 'drop_last', 'places'], varargs=None, keywords=None, defaults=(True, None)), ('document', '7abd9cf7d695bab5bb6cf7ded5903cb2'))
 paddle.fluid.io.PyReader.decorate_sample_list_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', 'faef298f73e91aedcfaf5d184f3109b7'))
 paddle.fluid.io.PyReader.reset (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'ff1cc1e2beb8824d453656c72c28ddfb'))
@@ -521,7 +521,7 @@ paddle.fluid.unique_name.guard (ArgSpec(args=['new_generator'], varargs=None, ke
 paddle.fluid.recordio_writer.convert_reader_to_recordio_file (ArgSpec(args=['filename', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None)), ('document', '65c7523e86f0c50bb729b01667f36310'))
 paddle.fluid.recordio_writer.convert_reader_to_recordio_files (ArgSpec(args=['filename', 'batch_per_file', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None)), ('document', 'bc643f0f5f1b9db57ff0d8a57d379bd7'))
 paddle.fluid.Scope Scope() -> paddle.fluid.core._Scope
-paddle.reader.cache (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', '83b94750674c6a04b5f96599d4bf3105'))
+paddle.reader.cache (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', '1676886070eb607cb608f7ba47be0d3c'))
 paddle.reader.map_readers (ArgSpec(args=['func'], varargs='readers', keywords=None, defaults=None), ('document', '77cbadb09df588e21e5cc0819b69c87d'))
 paddle.reader.buffered (ArgSpec(args=['reader', 'size'], varargs=None, keywords=None, defaults=None), ('document', '0d6186f109feceb99f60ec50a0a624cb'))
 paddle.reader.compose (ArgSpec(args=[], varargs='readers', keywords='kwargs', defaults=None), ('document', '884291104e1c3f37f33aae44b7deeb0d'))
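
Note: each API.spec entry pairs a public signature with a ('document', '<hash>') digest of that API's docstring, so editing a docstring shows up here as a changed hash; the hash '6adf97f83acf6453d4a6a4b1070f3754' now shared by CompiledProgram.__init__ and PyReader.__init__ is consistent with PyReader's docstring being moved from __init__ to the class (see reader.py below). A minimal sketch of reproducing such a digest, assuming the field is a plain MD5 of the docstring text (the exact normalization Paddle's spec tooling applies is not shown in this commit):

```python
# Sketch only: assumes the 'document' field is an MD5 hex digest of the raw
# docstring; Paddle's actual spec generator may normalize the text first.
import hashlib
import paddle.fluid as fluid

doc = fluid.io.PyReader.decorate_batch_generator.__doc__ or ""
print(hashlib.md5(doc.encode("utf-8")).hexdigest())
```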

python/paddle/fluid/compiler.py

Lines changed: 4 additions & 6 deletions
@@ -123,7 +123,7 @@ def with_data_parallel(self,
             will share variables from `share_vars_from`. `share_vars_from`
             must be run by the executor before this CompiledProgram so that
             vars are ready.
-            places(list(CUDAPlace)|list(CPUPlace)|None): If provide, only compile
+            places(list(CUDAPlace)|list(CPUPlace)|None): If provided, only compile
             program in the given places. Otherwise, the places used when compiled
             is determined by the Executor, and the places used are controlled
             by environment variables: FLAGS_selected_gpus or CUDA_VISIBLE_DEVICES
@@ -148,7 +148,7 @@ def with_data_parallel(self,
         if places is not None:
             if not isinstance(places, (list, tuple)):
                 places = [places]
-            self._places = [_place_obj(p) for p in places]
+            self._places = places
         else:
             self._places = None
         self._build_strategy.is_distribution = _is_pserver_mode(self._program)
@@ -195,14 +195,12 @@ def _compile_data_parallel(self, use_cuda=False, scope=None):
         self._exec_strategy.use_cuda = use_cuda
         has_set_place = (self._places is not None)
         if has_set_place:
-            desire_place = _place_obj(self._place)
             for p in self._places:
-                assert p._type() == desire_place._type(), \
+                assert p._type() == self._place._type(), \
                     "Place type not match. You may set the wrong type of places"
         else:
-            places = cuda_places(
+            self._places = cuda_places(
             ) if self._exec_strategy.use_cuda else cpu_places()
-            self._places = [_place_obj(p) for p in places]
         assert self._places, "no place for execution"

         if self._exec_strategy.num_threads == 0:
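
Taken together, these hunks mean with_data_parallel now stores the caller's place objects as-is, and _compile_data_parallel falls back to cuda_places()/cpu_places() directly instead of rebuilding the list through _place_obj. A hedged usage sketch under that behavior; the toy network below is hypothetical and only there to make the snippet self-contained:

```python
# Hedged sketch: a toy network compiled with an explicit place list.
import numpy as np
import paddle.fluid as fluid

image = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
prediction = fluid.layers.fc(input=image, size=10, act='softmax')
loss = fluid.layers.mean(fluid.layers.cross_entropy(input=prediction, label=label))

use_cuda = fluid.core.is_compiled_with_cuda()
# A single place keeps the example simple; lists from fluid.cuda_places() /
# fluid.cpu_places() are what this commit now stores on the CompiledProgram as-is.
places = [fluid.CUDAPlace(0)] if use_cuda else [fluid.CPUPlace()]

exe = fluid.Executor(places[0])
exe.run(fluid.default_startup_program())

compiled_prog = fluid.CompiledProgram(
    fluid.default_main_program()).with_data_parallel(
        loss_name=loss.name, places=places)

feed = {'image': np.random.random([8, 784]).astype('float32'),
        'label': np.random.randint(0, 10, size=[8, 1]).astype('int64')}
loss_val, = exe.run(compiled_prog, feed=feed, fetch_list=[loss.name])
print(loss_val)
```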

python/paddle/fluid/reader.py

Lines changed: 72 additions & 71 deletions
@@ -40,83 +40,84 @@ def _convert_places(places):


 class PyReader(object):
+    """
+    Create a reader object for data feeding in Python.
+    Data would be prefetched using Python thread and be pushed
+    into a queue asynchronously. Data in the queue would be extracted
+    automatically when `Executor.run(...)` is called.
+
+    Args:
+        feed_list (list(Variable)|tuple(Variable)): feed variable list.
+            The variables should be created by :code:`fluid.layers.data()`.
+        capacity (int): capacity of the queue maintained in PyReader object.
+        use_double_buffer (bool): whether to use double_buffer_reader to
+            speed up data feeding.
+        iterable (bool): whether the created reader object is iterable.
+
+    Returns:
+        reader (Reader): the created reader object.
+
+    Examples:
+        1. If iterable = False, the created PyReader object is almost the
+           same as :code:`fluid.layers.py_reader()`. Operators would be
+           inserted into the program. User should call :code:`start()`
+           before each epoch and catch :code:`fluid.core.EOFException`
+           thrown by :code:`Executor.run()` when epoch ends. Once the
+           exception is caught, user should call :code:`reset()` to reset
+           the reader manually.
+
+        .. code-block:: python
+
+           image = fluid.layers.data(
+               name='image', shape=[784], dtype='float32')
+           label = fluid.layers.data(
+               name='label', shape=[1], dtype='int64')
+
+           reader = fluid.io.PyReader(feed_list=[image, label],
+               capacity=4, iterable=False)
+           reader.decorate_sample_list_generator(user_defined_reader)
+           ... # definition of network is omitted
+           executor.run(fluid.default_main_program())
+           for _ in range(EPOCH_NUM):
+               reader.start()
+               while True:
+                   try:
+                       executor.run(feed=None, ...)
+                   except fluid.core.EOFException:
+                       reader.reset()
+                       break
+
+        2. If iterable=True, the created PyReader object is decoupled with
+           the program. No operator would be inserted into the program.
+           In this case, the created reader is a Python generator, which
+           is iterable. User should feed the data yielded from PyReader
+           object into :code:`Executor.run(feed=...)`.
+
+        .. code-block:: python
+
+           image = fluid.layers.data(
+               name='image', shape=[784], dtype='float32')
+           label = fluid.layers.data(
+               name='label', shape=[1], dtype='int64')
+
+           reader = fluid.io.PyReader(feed_list=[image, label],
+               capacity=4, iterable=True)
+           reader.decorate_sample_list_generator(user_defined_reader,
+               places=fluid.cuda_places())
+           ... # definition of network is omitted
+           executor.run(fluid.default_main_program())
+           for _ in range(EPOCH_NUM):
+               for data in reader():
+                   executor.run(feed=data, ...)
+    """
+
     unique_name_generator = UniqueNameGenerator()

     def __init__(self,
                  feed_list,
                  capacity,
                  use_double_buffer=True,
                  iterable=False):
-        """
-        Create a reader object for data feeding in Python.
-        Data would be prefetched using Python thread and be pushed
-        into a queue asynchronously. Data in the queue would be extracted
-        automatically when `Executor.run(...)` is called.
-
-        Args:
-            feed_list (list(Variable)|tuple(Variable)): feed variable list.
-                The variables should be created by :code:`fluid.layers.data()`.
-            capacity (int): capacity of the queue maintained in PyReader object.
-            use_double_buffer (bool): whether to use double_buffer_reader to
-                speed up data feeding.
-            iterable (bool): whether the created reader object is iterable.
-
-        Returns:
-            reader (Reader): the created reader object.
-
-        Examples:
-            1. If iterable = False, the created PyReader object is almost the
-               same as :code:`fluid.layers.py_reader()`. Operators would be
-               inserted into the program. User should call :code:`start()`
-               before each epoch and catch :code:`fluid.core.EOFException`
-               thrown by :code:`Executor.run()` when epoch ends. Once the
-               exception is caught, user should call :code:`reset()` to reset
-               the reader manually.
-
-            .. code-block:: python
-
-               image = fluid.layers.data(
-                   name='image', shape=[784], dtype='float32')
-               label = fluid.layers.data(
-                   name='label', shape=[1], dtype='int64')
-
-               reader = fluid.io.PyReader(feed_list=[image, label],
-                   capacity=4, iterable=False)
-               reader.decorate_sample_list_generator(user_defined_reader)
-               ... # definition of network is omitted
-               executor.run(fluid.default_main_program())
-               for _ in range(EPOCH_NUM):
-                   reader.start()
-                   while True:
-                       try:
-                           executor.run(feed=None, ...)
-                       except fluid.core.EOFException:
-                           reader.reset()
-                           break
-
-            2. If iterable=True, the created PyReader object is decoupled with
-               the program. No operator would be inserted into the program.
-               In this case, the created reader is a Python generator, which
-               is iterable. User should feed the data yielded from PyReader
-               object into :code:`Executor.run(feed=...)`.
-
-            .. code-block:: python
-
-               image = fluid.layers.data(
-                   name='image', shape=[784], dtype='float32')
-               label = fluid.layers.data(
-                   name='label', shape=[1], dtype='int64')
-
-               reader = fluid.io.PyReader(feed_list=[image, label],
-                   capacity=4, iterable=True)
-               reader.decorate_sample_list_generator(user_defined_reader,
-                   places=fluid.cuda_places())
-               ... # definition of network is omitted
-               executor.run(fluid.default_main_program())
-               for _ in range(EPOCH_NUM):
-                   for data in reader():
-                       executor.run(feed=data, ...)
-        """
         self._tensor_reader = None
         self._thread = None
         self._iterable = iterable
@@ -361,7 +362,7 @@ def decorate_batch_generator(self, reader, places=None):
         Args:
             reader (generator): Python generator that yields LoDTensor-typed
                 batched data.
-            places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
+            places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
                 be provided when PyReader is iterable.
         '''
         assert self._tensor_reader is None, \
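
The last hunk only trims trailing whitespace in the places description of decorate_batch_generator. For context, a hedged sketch of that method with an iterable PyReader, staying close to the docstring above; the batch generator and batch sizes below are made up for illustration:

```python
# Hedged sketch: an iterable PyReader fed by a batch generator that yields
# LoDTensor-typed batches, as the docstring above describes.
import numpy as np
import paddle.fluid as fluid

image = fluid.layers.data(name='image', shape=[784], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)

def batch_generator():
    place = fluid.CPUPlace()
    for _ in range(10):
        image_t = fluid.LoDTensor()
        image_t.set(np.random.random([32, 784]).astype('float32'), place)
        label_t = fluid.LoDTensor()
        label_t.set(np.random.randint(0, 10, size=[32, 1]).astype('int64'), place)
        yield image_t, label_t  # one whole batch per yield, in feed_list order

# places must be given because this PyReader is iterable.
reader.decorate_batch_generator(batch_generator, places=[fluid.CPUPlace()])

for data in reader():
    pass  # each `data` feeds straight into executor.run(feed=data, ...)
```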

python/paddle/reader/decorator.py

Lines changed: 1 addition & 2 deletions
@@ -46,8 +46,7 @@ def cache(reader):
         data each time.

    Returns:
-        reader (generator): a decorated reader object
-            which yields data from cached memory.
+        generator: a decorated reader object which yields data from cached memory.
    """
    all_data = tuple(reader())

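
As the reworded Returns section says, paddle.reader.cache consumes the wrapped reader once (all_data = tuple(reader())) and then replays the cached samples on every later call. A minimal sketch with a made-up toy reader:

```python
# Sketch: the toy reader below is hypothetical; it is consumed exactly once,
# when cache() is applied, and every call of cached_reader() replays memory.
import paddle.reader

def toy_reader():
    for i in range(3):
        yield i

cached_reader = paddle.reader.cache(toy_reader)
for _ in range(2):
    print(list(cached_reader()))  # [0, 1, 2] both times, served from the cache
```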
