
Commit e6cfb2f

dzhwinter authored and reyoung committed
"non-layer add doc for executor module" (#11602)
* "add doc for exec" * "add more changes" * "fix based on preview" * "chagne code format"
1 parent 49080ac commit e6cfb2f

File tree: 1 file changed


python/paddle/fluid/executor.py

Lines changed: 94 additions & 19 deletions
@@ -18,7 +18,7 @@
 from . import core
 
 __all__ = [
-    'Executor', 'global_scope', 'scope_guard', 'switch_scope', 'fetch_var'
+    'Executor', 'global_scope', 'scope_guard', '_switch_scope', 'fetch_var'
 ]
 
 g_scope = core.Scope()
@@ -35,7 +35,7 @@ def global_scope():
     return g_scope
 
 
-def switch_scope(scope):
+def _switch_scope(scope):
     global g_scope
     ex = g_scope
     g_scope = scope
@@ -57,12 +57,27 @@ def scope_guard(scope):
     Args:
         scope: The new global/default scope.
     """
-    ex = switch_scope(scope)
+    ex = _switch_scope(scope)
     yield
-    switch_scope(ex)
+    _switch_scope(ex)
 
 
 def as_numpy(tensor):
+    """
+    Convert a Tensor to a numpy.ndarray. This only supports Tensors without LoD
+    information; for higher dimensional sequence data, please use LoDTensor directly.
+
+    Examples:
+        >>> import paddle.fluid as fluid
+        >>> outs = executor.run(...)
+        >>> np_outs = map(lambda x: as_numpy(x), outs)
+        >>> ...
+
+    Args:
+        tensor(Variable): an instance of Tensor
+
+    Returns:
+        numpy.ndarray
+    """
    if isinstance(tensor, list):
        return [as_numpy(t) for t in tensor]
    assert isinstance(tensor, core.LoDTensor)
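
With `switch_scope` made private here, `scope_guard` is the supported way to run against a temporary scope. A minimal sketch of the intended usage (the scope name is illustrative):

    import paddle.fluid as fluid

    inference_scope = fluid.core.Scope()  # a fresh scope for this block
    with fluid.scope_guard(inference_scope):
        # Inside the block, global_scope() returns inference_scope, so any
        # executor run here keeps its persistent variables in it.
        pass
    # On exit, the previous global scope is restored.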
@@ -186,7 +201,7 @@ def fetch_var(name, scope=None, return_numpy=True):
     return tensor
 
 
-def get_program_cache_key(feed, fetch_list):
+def _get_program_cache_key(feed, fetch_list):
     feed_var_names = feed.keys()
 
     def to_name_str(var):
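
The renamed helper stays internal; its job is to derive a cache key from the feed names and fetch targets. A rough sketch of the idea (`_cache_key_sketch` is hypothetical; the real key format is internal and may differ):

    def _cache_key_sketch(feed, fetch_list):
        # Feed side: the dict keys are the input variable names.
        feed_part = ",".join(sorted(feed.keys()))
        # Fetch side: entries may be Variable objects or plain name strings.
        fetch_part = ",".join(
            v if isinstance(v, str) else v.name for v in fetch_list)
        return feed_part + "|" + fetch_part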
@@ -205,6 +220,25 @@ def to_name_str(var):
 
 
 class Executor(object):
+    """
+    An Executor in Python. It only supports single-GPU execution; for multiple
+    cards, please refer to ParallelExecutor.
+    The Python executor takes a program and adds feed operators and fetch operators
+    to it according to the feed map and fetch_list. The feed map provides input
+    data for the program; fetch_list holds the variables (or names) the user wants
+    back after the program runs. Note: the executor runs all operators in the
+    program, not only the operators the fetch_list depends on.
+    It stores global variables in the global scope and creates a local scope for
+    temporary variables. The local scope contents are discarded after every
+    minibatch forward/backward pass finishes, while global scope variables persist
+    across different runs.
+    All operators in the program run in sequence.
+
+    Args:
+        place(core.CPUPlace|core.CUDAPlace(n)): the device this executor runs on
+
+    Note: For debugging a complicated network on parallel GPUs, you can first test
+    it on this executor, since the two take exactly the same arguments and are
+    expected to produce the same results.
+    """
+
     def __init__(self, place):
         self.place = place
         p = core.Place()
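
The `place` argument fixes the device at construction time. A small usage sketch (the `use_cuda` flag is illustrative):

    import paddle.fluid as fluid

    use_cuda = False  # flip to True on a machine with a GPU
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)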
@@ -213,6 +247,23 @@ def __init__(self, place):
         self.program_caches = dict()
 
     def as_lodtensor(self, data):
+        """
+        Convert numpy.ndarray to Tensor. This only supports Tensors without LoD
+        information; for higher dimensional sequence data, please use LoDTensor directly.
+
+        Examples:
+            >>> import numpy as np
+            >>> import paddle.fluid as fluid
+            >>> exe = fluid.Executor(fluid.CPUPlace())
+            >>> data = np.random.random(size=(100, 200, 300)).astype('float32')
+            >>> np_outs = map(lambda x: exe.as_lodtensor(x), data)
+            >>> ...
+
+        Args:
+            data(numpy.ndarray): an instance of array
+
+        Returns:
+            LoDTensor
+        """
        if isinstance(data, list):
            raise RuntimeError("Some of your feed data hold LoD information. \
                They can not be completely cast from a list of Python \
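
For the sequence data that `as_lodtensor` rejects, the docstring points at building a LoDTensor directly. A hedged sketch of what that looks like, assuming the offset-based LoD API of this release:

    import numpy as np
    import paddle.fluid as fluid

    t = fluid.core.LoDTensor()
    t.set(np.random.random((5, 3)).astype('float32'), fluid.CPUPlace())
    # Offset-based LoD: two sequences, rows [0, 2) and [2, 5).
    t.set_lod([[0, 2, 5]])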
@@ -304,23 +355,47 @@ def run(self,
             scope=None,
             return_numpy=True,
             use_program_cache=False):
-        """ Run program by this Executor. Feed data by feed map, fetch result by fetch_list.
-
+        """
+        Run program by this Executor. Feed data by feed map, fetch result by fetch_list.
         Python executor takes a program, add feed operators and fetch operators to this program according
         to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides
-        the variables(or names) that user want to get after program run. Note: the executor will run all
+        the variables (or names) that the user wants to get after the program runs.
+
+        Note: the executor will run all
         operators in the program but not only the operators dependent by the fetch_list
 
-        :param program: the program that need to run, if not provied, then default_main_program will be used.
-        :param feed: feed variable map, e.g. {"image": ImageData, "label": LableData}
-        :param fetch_list: a list of variable or variable names that user want to get, run will return them according
-                           to this list.
-        :param feed_var_name: the name for the input variable of feed Operator.
-        :param fetch_var_name: the name for the output variable of feed Operator.
-        :param scope: the scope used to run this program, you can switch it to different scope. default is global_scope
-        :param return_numpy: if convert the fetched tensor to numpy
-        :param use_program_cache: set use_program_cache to true if program not changed compare to the last step.
-        :return: result according to fetch_list.
+        Args:
+            program(Program): the program to run; if not provided, default_main_program will be used.
+            feed(dict): feed variable map, e.g. {"image": ImageData, "label": LabelData}
+            fetch_list(list): a list of variables or variable names to fetch; run will return them in this order.
+            feed_var_name(str): the name of the input variable of the feed operator.
+            fetch_var_name(str): the name of the output variable of the fetch operator.
+            scope(Scope): the scope this program runs in; you can switch to a different scope. Default is global_scope.
+            return_numpy(bool): whether to convert the fetched tensors to numpy.ndarray.
+            use_program_cache(bool): set to True if the program has not changed compared to the last step.
+
+        Returns:
+            list(numpy.array): fetch result according to fetch_list.
+
+        Examples:
+
+            >>> data = layers.data(name='X', shape=[1], dtype='float32')
+            >>> hidden = layers.fc(input=data, size=10)
+            >>> out = layers.create_tensor(dtype='float32')
+            >>> layers.assign(hidden, out)
+            >>> loss = layers.mean(out)
+            >>> adam = fluid.optimizer.Adam()
+            >>> adam.minimize(loss)
+
+            >>> cpu = core.CPUPlace()
+            >>> exe = Executor(cpu)
+            >>> exe.run(default_startup_program())
+
+            >>> x = numpy.random.random(size=(10, 1)).astype('float32')
+            >>> outs = exe.run(
+            >>>     feed={'X': x},
+            >>>     fetch_list=[loss.name])
         """
         if feed is None:
             feed = {}
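
`use_program_cache` pays off in a steady-state loop where the program, feed keys, and fetch_list are identical from step to step. A sketch continuing the docstring example above (reusing its `exe`, `loss`, and `numpy` names):

    for step in range(100):
        x = numpy.random.random(size=(10, 1)).astype('float32')
        # Identical feed/fetch structure each step, so the executor can
        # reuse the cached program with feed/fetch ops already inserted.
        outs = exe.run(feed={'X': x},
                       fetch_list=[loss.name],
                       use_program_cache=True)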
@@ -341,7 +416,7 @@ def run(self,
         if scope is None:
             scope = global_scope()
 
-        cache_key = get_program_cache_key(feed, fetch_list)
+        cache_key = _get_program_cache_key(feed, fetch_list)
         if use_program_cache:
             cached_program = self._get_program_cache(cache_key)
             if cached_program is None:
