from . import core

__all__ = [
-    'Executor', 'global_scope', 'scope_guard', 'switch_scope', 'fetch_var'
+    'Executor', 'global_scope', 'scope_guard', '_switch_scope', 'fetch_var'
]

g_scope = core.Scope()
@@ -35,7 +35,7 @@ def global_scope():
    return g_scope


-def switch_scope(scope):
+def _switch_scope(scope):
    global g_scope
    ex = g_scope
    g_scope = scope
@@ -57,12 +57,27 @@ def scope_guard(scope):
    Args:
        scope: The new global/default scope.
    """
-    ex = switch_scope(scope)
+    ex = _switch_scope(scope)
    yield
-    switch_scope(ex)
+    _switch_scope(ex)


def as_numpy(tensor):
+    """
+    Convert a Tensor to a numpy.ndarray. It only supports a Tensor without LoD information.
+    For higher dimensional sequence data, please use LoDTensor directly.
+
+    Examples:
+        >>> import paddle.fluid as fluid
+        >>> outs = executor.run(...)
+        >>> np_outs = map(lambda x: as_numpy(x), outs)
+        >>> ...
+
+    Args:
+        tensor(Variable): an instance of Tensor
+
+    Returns:
+        numpy.ndarray
+    """
    if isinstance(tensor, list):
        return [as_numpy(t) for t in tensor]
    assert isinstance(tensor, core.LoDTensor)
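
To make the scope/as_numpy changes above easier to review, here is a minimal usage sketch; the tiny program, variable names, and feed data are illustrative only and not part of this diff:

import numpy
import paddle.fluid as fluid
from paddle.fluid.executor import as_numpy

# a trivial program for illustration: y = 2 * x
x = fluid.layers.data(name='x', shape=[1], dtype='float32')
y = fluid.layers.scale(x, scale=2.0)

exe = fluid.Executor(fluid.CPUPlace())

# run inside a temporary scope; variables created here are discarded on exit
with fluid.scope_guard(fluid.core.Scope()):
    exe.run(fluid.default_startup_program())
    feed_x = numpy.ones((3, 1), dtype='float32')
    tensors = exe.run(feed={'x': feed_x},
                      fetch_list=[y],
                      return_numpy=False)
    arrays = [as_numpy(t) for t in tensors]  # LoDTensor -> numpy.ndarray
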
@@ -186,7 +201,7 @@ def fetch_var(name, scope=None, return_numpy=True):
    return tensor


-def get_program_cache_key(feed, fetch_list):
+def _get_program_cache_key(feed, fetch_list):
    feed_var_names = feed.keys()

    def to_name_str(var):
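
fetch_var (whose signature appears in the hunk header above) reads a variable out of a scope by name, while the renamed _get_program_cache_key stays module-private. A short sketch of typical fetch_var use, where the variable name 'fc_0.w_0' is a made-up placeholder:

import paddle.fluid as fluid

# after exe.run(...) has executed a program, persistable variables live in the
# global scope and can be read back by name
w = fluid.fetch_var('fc_0.w_0')                      # numpy.ndarray by default
t = fluid.fetch_var('fc_0.w_0', return_numpy=False)  # the underlying LoDTensor
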
@@ -205,6 +220,25 @@ def to_name_str(var):


class Executor(object):
+    """
+    An Executor in Python, which only supports single-GPU running. For multi-card running, please refer to
+    ParallelExecutor.
+    The Python executor takes a program, and adds feed operators and fetch operators to this program according
+    to the feed map and fetch_list. The feed map provides input data for the program. fetch_list provides
+    the variables (or names) that the user wants to get after the program runs. Note: the executor will run all
+    operators in the program, not only the operators that the fetch_list depends on.
+    It stores the global variables in the global scope, and creates a local scope for the temporary
+    variables. The local scope contents are discarded after every minibatch forward/backward pass finishes,
+    but the global scope variables persist through different runs.
+    All of the ops in the program run in sequence.
+
+    Args:
+        place(core.CPUPlace|core.CUDAPlace(n)): indicates which device the executor runs on
+
+    Note: For debugging a complicated network meant for parallel GPUs, you can test it on this executor.
+    They have exactly the same arguments, and are expected to give the same results.
+    """
+
    def __init__(self, place):
        self.place = place
        p = core.Place()
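
A small construction sketch for the place argument documented in the new class docstring; the device index is assumed for illustration:

import paddle.fluid as fluid

# one Executor is bound to a single device
cpu_exe = fluid.Executor(fluid.CPUPlace())
gpu_exe = fluid.Executor(fluid.CUDAPlace(0))  # GPU card 0; use ParallelExecutor for multi-card runs
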
@@ -213,6 +247,23 @@ def __init__(self, place):
        self.program_caches = dict()

    def as_lodtensor(self, data):
+        """
+        Convert a numpy.ndarray to a Tensor. It only supports a Tensor without LoD information.
+        For higher dimensional sequence data, please use LoDTensor directly.
+
+        Examples:
+            >>> import numpy as np
+            >>> import paddle.fluid as fluid
+            >>> exe = fluid.Executor(fluid.CPUPlace())
+            >>> data = np.random.random(size=(100, 200, 300)).astype('float32')
+            >>> np_outs = map(lambda x: exe.as_lodtensor(x), data)
+            >>> ...
+
+        Args:
+            data(numpy.ndarray): an instance of an array
+
+        Returns:
+            LoDTensor
+        """
        if isinstance(data, list):
            raise RuntimeError("Some of your feed data hold LoD information. \
                They can not be completely cast from a list of Python \
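
Because as_lodtensor rejects nested Python lists, sequence (LoD) data has to be packed into a LoDTensor by hand. A rough sketch, assuming the offset-based LoD format accepted by core.LoDTensor.set_lod at this point:

import numpy as np
import paddle.fluid as fluid

# two sequences of lengths 2 and 3, flattened into one (5, 3) dense block
flat = np.random.random((5, 3)).astype('float32')

t = fluid.core.LoDTensor()
t.set(flat, fluid.CPUPlace())
t.set_lod([[0, 2, 5]])  # offsets: sequence 0 -> rows [0, 2), sequence 1 -> rows [2, 5)
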
@@ -304,23 +355,47 @@ def run(self,
            scope=None,
            return_numpy=True,
            use_program_cache=False):
-        """ Run program by this Executor. Feed data by feed map, fetch result by fetch_list.
-
+        """
+        Run the program by this Executor. Feed data by the feed map, fetch results by fetch_list.
        Python executor takes a program, add feed operators and fetch operators to this program according
        to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides
-        the variables(or names) that user want to get after program run. Note: the executor will run all
+        the variables (or names) that the user wants to get after the program runs.
+
+        Note: the executor will run all
        operators in the program but not only the operators dependent by the fetch_list

-        :param program: the program that need to run, if not provied, then default_main_program will be used.
-        :param feed: feed variable map, e.g. {"image": ImageData, "label": LableData}
-        :param fetch_list: a list of variable or variable names that user want to get, run will return them according
-        to this list.
-        :param feed_var_name: the name for the input variable of feed Operator.
-        :param fetch_var_name: the name for the output variable of feed Operator.
-        :param scope: the scope used to run this program, you can switch it to different scope. default is global_scope
-        :param return_numpy: if convert the fetched tensor to numpy
-        :param use_program_cache: set use_program_cache to true if program not changed compare to the last step.
-        :return: result according to fetch_list.
+        Args:
+            program(Program): the program that needs to run; if not provided, default_main_program will be used.
+            feed(dict): feed variable map, e.g. {"image": ImageData, "label": LabelData}
+            fetch_list(list): a list of variables or variable names that the user wants to get; run will return them according to this list.
+            feed_var_name(str): the name for the input variable of the feed Operator.
+            fetch_var_name(str): the name for the output variable of the fetch Operator.
+            scope(Scope): the scope used to run this program; you can switch it to a different scope. Default is global_scope.
+            return_numpy(bool): whether to convert the fetched tensors to numpy
+            use_program_cache(bool): set use_program_cache to True if the program has not changed compared to the last step.
+
+        Returns:
+            list(numpy.array): fetch result according to fetch_list.
+
+        Examples:
+
+            >>> data = layers.data(name='X', shape=[1], dtype='float32')
+            >>> out = layers.create_tensor(dtype='float32')
+            >>> hidden = layers.fc(input=data, size=10)
+            >>> layers.assign(hidden, out)
+            >>> loss = layers.mean(out)
+            >>> adam = fluid.optimizer.Adam()
+            >>> adam.minimize(loss)
+
+            >>> cpu = core.CPUPlace()
+            >>> exe = Executor(cpu)
+            >>> exe.run(default_startup_program())
+
+            >>> x = numpy.random.random(size=(10, 1)).astype('float32')
+            >>> outs = exe.run(
+            >>>     feed={'X': x},
+            >>>     fetch_list=[loss.name])
        """
        if feed is None:
            feed = {}
@@ -341,7 +416,7 @@ def run(self,
        if scope is None:
            scope = global_scope()

-        cache_key = get_program_cache_key(feed, fetch_list)
+        cache_key = _get_program_cache_key(feed, fetch_list)
        if use_program_cache:
            cached_program = self._get_program_cache(cache_key)
            if cached_program is None:
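
Tying the renamed cache-key helper back to the new run() docstring, a sketch of how use_program_cache is meant to be used; it reuses the exe, X, and loss names from the docstring example above and is illustrative only:

import numpy

# feed and fetch names stay the same across iterations, so the feed/fetch-augmented
# program built by run() can be cached and looked up via _get_program_cache_key
for _ in range(10):
    x = numpy.random.random(size=(10, 1)).astype('float32')
    loss_val, = exe.run(feed={'X': x},
                        fetch_list=[loss.name],
                        use_program_cache=True)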