Skip to content

Commit 10cee7e

Browse files
committed
Add doc of fetch var
1 parent 74d1bf4 commit 10cee7e

File tree

2 files changed

+24
-21
lines changed

2 files changed

+24
-21
lines changed

python/paddle/fluid/data_feeder.py

Lines changed: 17 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -71,25 +71,21 @@ def done(self):
7171

7272
class DataFeeder(object):
7373
"""
74-
DataFeeder converts the data that returned by paddle.reader into a
75-
data structure of Arguments which is defined in the API. The paddle.reader
74+
DataFeeder converts the data that is returned by a reader into a data
75+
structure that can feed into Executor and ParallelExecutor. The reader
7676
usually returns a list of mini-batch data entries. Each data entry in
77-
the list is one sample. Each sample is a list or a tuple with one feature
78-
or multiple features. DataFeeder converts this mini-batch data entries
79-
into Arguments in order to feed it to C++ interface.
77+
the list is one sample. Each sample is a list or a tuple with one
78+
feature or multiple features.
8079
8180
The simple usage shows below:
8281
8382
.. code-block:: python
8483
8584
place = fluid.CPUPlace()
86-
data = fluid.layers.data(
87-
name='data', shape=[1], dtype='int64', lod_level=2)
85+
img = fluid.layers.data(name='image', shape=[1, 28, 28])
8886
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
89-
feeder = fluid.DataFeeder([data, label], place)
90-
91-
result = feeder.feed(
92-
[([[1, 2, 3], [4, 5]], [1]), ([[6, 7, 8, 9]], [1])])
87+
feeder = fluid.DataFeeder([img, label], fluid.CPUPlace())
88+
result = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])])
9389
9490
9591
If you want to feed data into GPU side separately in advance when you
@@ -105,12 +101,15 @@ class DataFeeder(object):
105101
Args:
106102
feed_list(list): The Variables or Variables'name that will
107103
feed into model.
108-
place(Place): fluid.CPUPlace() or fluid.CUDAPlace(i).
104+
place(Place): place indicates feed data into CPU or GPU, if you want to
105+
feed data into GPU, please use `fluid.CUDAPlace(i)` (`i` represents
106+
the GPU id), or if you want to feed data into CPU, please use
107+
`fluid.CPUPlace()`.
109108
program(Program): The Program that will feed data into, if program
110109
is None, it will use default_main_program(). Default None.
111110
112111
Raises:
113-
ValueError: If the some Variable is not in the Program.
112+
ValueError: If some Variable is not in this Program.
114113
115114
Examples:
116115
.. code-block:: python
@@ -119,7 +118,7 @@ class DataFeeder(object):
119118
place = fluid.CPUPlace()
120119
feed_list = [
121120
main_program.global_block().var(var_name) for var_name in feed_vars_name
122-
]
121+
] # feed_vars_name is a list of variables' name.
123122
feeder = fluid.DataFeeder(feed_list, place)
124123
for data in reader():
125124
outs = exe.run(program=main_program,
@@ -156,8 +155,8 @@ def __init__(self, feed_list, place, program=None):
156155

157156
def feed(self, iterable):
158157
"""
159-
According to feed_list and iterable converter the input data
160-
into a dictionary that can feed into Executor or ParallelExecutor.
158+
According to feed_list and iterable, converts the input into
159+
a data structure that can feed into Executor and ParallelExecutor.
161160
162161
Args:
163162
iterable(list|tuple): the input data.
@@ -189,11 +188,11 @@ def feed(self, iterable):
189188
def feed_parallel(self, iterable, num_places=None):
190189
"""
191190
Takes multiple mini-batches. Each mini-batch will be feed on each
192-
device.
191+
device in advance.
193192
194193
Args:
195194
iterable(list|tuple): the input data.
196-
num_places(int): the number of places. Default None.
195+
num_places(int): the number of devices. Default None.
197196
198197
Returns:
199198
dict: the result of conversion.

python/paddle/fluid/executor.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -135,14 +135,18 @@ def has_fetch_operators(block, fetch_targets, fetch_holder_name):
135135

136136
def fetch_var(name, scope=None, return_numpy=True):
137137
"""
138-
Fetch the value of the variable with the given name from the given scope
138+
Fetch the value of the variable with the given name from the
139+
given scope.
140+
139141
Args:
140142
name(str): name of the variable. Typically, only persistable variables
141143
can be found in the scope used for running the program.
142144
scope(core.Scope|None): scope object. It should be the scope where
143145
you pass to Executor.run() when running your program.
144-
If None, global_scope() will be used.
145-
return_numpy(bool): whether convert the tensor to numpy.ndarray
146+
If None, global_scope() will be used. Default None.
147+
return_numpy(bool): whether to convert the tensor to numpy.ndarray.
148+
Default True.
149+
146150
Returns:
147151
LodTensor|numpy.ndarray
148152
"""

0 commit comments

Comments
 (0)