Commit 96f1edf

Merge pull request #11629 from reyoung/hotfix/more_api_reference_docs

Cherry Pick the documentation PRs.

2 parents: 49080ac + 35eb011
File tree: 5 files changed (+480, -117 lines)


python/paddle/fluid/evaluator.py

Lines changed: 74 additions & 32 deletions
@@ -41,7 +41,12 @@ def _clone_var_(block, var):
 
 class Evaluator(object):
     """
-    Base Class for all evaluators
+    Warning: better to use the fluid.metrics.* things, more
+    flexible support via pure Python and Operator, and decoupled
+    with executor. Short doc are intended to urge new user
+    start from Metrics.
+
+    Base Class for all evaluators.
 
     Args:
         name(str): The name of evaluator. such as, "accuracy". Used for generate
@@ -69,6 +74,10 @@ def __init__(self, name, **kwargs):
     def reset(self, executor, reset_program=None):
         """
         reset metric states at the begin of each pass/user specified batch
+
+        Args:
+            executor(Executor|ParallelExecutor): a executor for executing the reset_program
+            reset_program(Program): a single Program for reset process
         """
         if reset_program is None:
             reset_program = Program()
@@ -85,15 +94,16 @@ def reset(self, executor, reset_program=None):
     def eval(self, executor, eval_program=None):
         """
         Evaluate the statistics merged by multiple mini-batches.
+        Args:
+            executor(Executor|ParallelExecutor): a executor for executing the eval_program
+            eval_program(Program): a single Program for eval process
         """
         raise NotImplementedError()
 
-    def create_state(self, suffix, dtype, shape):
+    def _create_state(self, suffix, dtype, shape):
         """
         Create state variable.
 
-        NOTE: It is not a public API.
-
         Args:
             suffix(str): the state suffix.
             dtype(str|core.VarDesc.VarType): the state data type
@@ -113,9 +123,35 @@ def create_state(self, suffix, dtype, shape):
 
 class ChunkEvaluator(Evaluator):
     """
+    Warning: This would be deprecated in the future. Please use fluid.metrics.ChunkEvaluator
+    instead.
+
     Accumulate counter numbers output by chunk_eval from mini-batches and
     compute the precision recall and F1-score using the accumulated counter
     numbers.
+    For some basics of chunking, please refer to
+    'Chunking with Support Vector Machines <https://aclanthology.info/pdf/N/N01/N01-1025.pdf>'.
+
+    Args:
+        input (Variable): prediction output of the network.
+        label (Variable): label of the test data set.
+        chunk_scheme (str): can be IOB/IOE/IOBES and IO. See the chunk_eval op for details.
+        num_chunk_types (int): the number of chunk type.
+        excluded_chunk_types (list): A list including chunk type ids, indicating chunk types that are not counted.
+
+    Returns:
+        tuple: tuple containing: precision, recall, f1_score
+
+    Examples:
+        .. code-block:: python
+
+            exe = fluid.executor(place)
+            evaluator = fluid.Evaluator.ChunkEvaluator(input, label)
+            for epoch in PASS_NUM:
+                evaluator.reset(exe)
+                for data in batches:
+                    loss = exe.run(fetch_list=[cost])
+                distance, instance_error = distance_evaluator.eval(exe)
     """
 
     def __init__(
@@ -130,11 +166,11 @@ def __init__(
         if main_program.current_block().idx != 0:
             raise ValueError("You can only invoke Evaluator in root block")
 
-        self.num_infer_chunks = self.create_state(
+        self.num_infer_chunks = self._create_state(
             dtype='int64', shape=[1], suffix='num_infer_chunks')
-        self.num_label_chunks = self.create_state(
+        self.num_label_chunks = self._create_state(
             dtype='int64', shape=[1], suffix='num_label_chunks')
-        self.num_correct_chunks = self.create_state(
+        self.num_correct_chunks = self._create_state(
             dtype='int64', shape=[1], suffix='num_correct_chunks')
         precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks = layers.chunk_eval(
             input=input,
@@ -178,6 +214,8 @@ def eval(self, executor, eval_program=None):
 
 class EditDistance(Evaluator):
     """
+    Warning: This would be deprecated in the future. Please use fluid.metrics.EditDistance
+    instead.
     Accumulate edit distance sum and sequence number from mini-batches and
     compute the average edit_distance and instance error of all batches.
 
@@ -188,15 +226,16 @@ class EditDistance(Evaluator):
         ignored_tokens(list of int): Tokens that should be removed before
                                      calculating edit distance.
 
-    Example:
+    Examples:
+        .. code-block:: python
 
-        exe = fluid.executor(place)
-        distance_evaluator = fluid.Evaluator.EditDistance(input, label)
-        for epoch in PASS_NUM:
-            distance_evaluator.reset(exe)
-            for data in batches:
-                loss = exe.run(fetch_list=[cost])
-            distance, instance_error = distance_evaluator.eval(exe)
+            exe = fluid.executor(place)
+            distance_evaluator = fluid.Evaluator.EditDistance(input, label)
+            for epoch in PASS_NUM:
+                distance_evaluator.reset(exe)
+                for data in batches:
+                    loss = exe.run(fetch_list=[cost])
+                distance, instance_error = distance_evaluator.eval(exe)
 
     In the above example:
     'distance' is the average of the edit distance in a pass.
@@ -210,11 +249,11 @@ def __init__(self, input, label, ignored_tokens=None, **kwargs):
         if main_program.current_block().idx != 0:
             raise ValueError("You can only invoke Evaluator in root block")
 
-        self.total_distance = self.create_state(
+        self.total_distance = self._create_state(
            dtype='float32', shape=[1], suffix='total_distance')
-        self.seq_num = self.create_state(
+        self.seq_num = self._create_state(
            dtype='int64', shape=[1], suffix='seq_num')
-        self.instance_error = self.create_state(
+        self.instance_error = self._create_state(
            dtype='int64', shape=[1], suffix='instance_error')
        distances, seq_num = layers.edit_distance(
            input=input, label=label, ignored_tokens=ignored_tokens)
@@ -256,9 +295,10 @@ def eval(self, executor, eval_program=None):
 
 class DetectionMAP(Evaluator):
     """
+    Warning: This would be deprecated in the future. Please use fluid.metrics.DetectionMAP
+    instead.
     Calculate the detection mean average precision (mAP).
 
-    TODO (Dang Qingqing): update the following doc.
     The general steps are as follows:
     1. calculate the true positive and false positive according to the input
        of detection and labels.
@@ -293,17 +333,18 @@ class DetectionMAP(Evaluator):
         - 11point: the 11-point interpolated average precision.
         - integral: the natural integral of the precision-recall curve.
 
-    Example:
+    Examples:
+        .. code-block:: python
 
-        exe = fluid.executor(place)
-        map_evaluator = fluid.Evaluator.DetectionMAP(input,
-            gt_label, gt_box, gt_difficult)
-        cur_map, accum_map = map_evaluator.get_map_var()
-        fetch = [cost, cur_map, accum_map]
-        for epoch in PASS_NUM:
-            map_evaluator.reset(exe)
-            for data in batches:
-                loss, cur_map_v, accum_map_v = exe.run(fetch_list=fetch)
+            exe = fluid.executor(place)
+            map_evaluator = fluid.Evaluator.DetectionMAP(input,
+                gt_label, gt_box, gt_difficult)
+            cur_map, accum_map = map_evaluator.get_map_var()
+            fetch = [cost, cur_map, accum_map]
+            for epoch in PASS_NUM:
+                map_evaluator.reset(exe)
+                for data in batches:
+                    loss, cur_map_v, accum_map_v = exe.run(fetch_list=fetch)
 
     In the above example:
 
@@ -340,9 +381,10 @@ def __init__(self,
             evaluate_difficult=evaluate_difficult,
             ap_version=ap_version)
 
-        self.create_state(dtype='int32', shape=None, suffix='accum_pos_count')
-        self.create_state(dtype='float32', shape=None, suffix='accum_true_pos')
-        self.create_state(dtype='float32', shape=None, suffix='accum_false_pos')
+        self._create_state(dtype='int32', shape=None, suffix='accum_pos_count')
+        self._create_state(dtype='float32', shape=None, suffix='accum_true_pos')
+        self._create_state(
+            dtype='float32', shape=None, suffix='accum_false_pos')
 
         self.has_state = None
         var = self.helper.create_variable(
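
Note on the new ChunkEvaluator docstring example above: it constructs `evaluator` but then calls `distance_evaluator.eval(exe)` (apparently carried over from the EditDistance example) and iterates `PASS_NUM` directly. Below is a minimal corrected sketch, assuming only what the docstring itself documents: the reset()/eval() loop, the constructor arguments listed under Args, the (precision, recall, f1_score) return tuple, and that the class lives in the `fluid.evaluator` module this diff edits. `place`, `input`, `label`, `cost`, `PASS_NUM` and `batches` are the same placeholders the docstring uses, not runnable values.

    import paddle.fluid as fluid

    # Placeholders from the docstring example: `place`, `input`, `label`,
    # `cost`, `PASS_NUM` and `batches` come from the surrounding training
    # script.
    exe = fluid.Executor(place)
    chunk_evaluator = fluid.evaluator.ChunkEvaluator(
        input=input, label=label, chunk_scheme='IOB', num_chunk_types=3)
    for epoch in range(PASS_NUM):
        chunk_evaluator.reset(exe)
        for data in batches:
            loss = exe.run(fetch_list=[cost])
        # eval() returns the accumulated metrics documented above.
        precision, recall, f1_score = chunk_evaluator.eval(exe)

As the new warnings in this file say, fluid.metrics.ChunkEvaluator is the preferred replacement going forward.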

python/paddle/fluid/executor.py

Lines changed: 94 additions & 19 deletions
@@ -18,7 +18,7 @@
 from . import core
 
 __all__ = [
-    'Executor', 'global_scope', 'scope_guard', 'switch_scope', 'fetch_var'
+    'Executor', 'global_scope', 'scope_guard', '_switch_scope', 'fetch_var'
 ]
 
 g_scope = core.Scope()
@@ -35,7 +35,7 @@ def global_scope():
     return g_scope
 
 
-def switch_scope(scope):
+def _switch_scope(scope):
     global g_scope
     ex = g_scope
     g_scope = scope
@@ -57,12 +57,27 @@ def scope_guard(scope):
     Args:
         scope: The new global/default scope.
     """
-    ex = switch_scope(scope)
+    ex = _switch_scope(scope)
     yield
-    switch_scope(ex)
+    _switch_scope(ex)
 
 
 def as_numpy(tensor):
+    """
+    Convert a Tensor to a numpy.ndarray, its only support Tensor without LoD information.
+    For higher dimensional sequence data, please use LoDTensor directly.
+    Examples:
+        >>> import paddle.fluid as fluid
+        >>> outs = executor.run(...)
+        >>> np_outs = map(lambda x: as_numpy(x), outs)
+        >>> ...
+
+    Args:
+        tensor(Variable): a instance of Tensor
+
+    Returns:
+        numpy.ndarray
+    """
     if isinstance(tensor, list):
         return [as_numpy(t) for t in tensor]
     assert isinstance(tensor, core.LoDTensor)
@@ -186,7 +201,7 @@ def fetch_var(name, scope=None, return_numpy=True):
     return tensor
 
 
-def get_program_cache_key(feed, fetch_list):
+def _get_program_cache_key(feed, fetch_list):
     feed_var_names = feed.keys()
 
     def to_name_str(var):
@@ -205,6 +220,25 @@ def to_name_str(var):
 
 
 class Executor(object):
+    """
+    An Executor in Python, only support the single-GPU running. For multi-cards, please refer to
+    ParallelExecutor.
+    Python executor takes a program, add feed operators and fetch operators to this program according
+    to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides
+    the variables(or names) that user want to get after program run. Note: the executor will run all
+    operators in the program but not only the operators dependent by the fetch_list.
+    It store the global variables into the global scope, and create a local scope for the temporary
+    variables. The local scope contents will be discarded after every minibatch forward/backward finished.
+    But the global scope variables will be persistent through different runs.
+    All of ops in program will be running in sequence.
+
+    Args:
+        place(core.CPUPlace|core.CUDAPlace(n)): indicate the executor run on which device
+
+    Note: For debugging complicated network in parallel-GPUs, you can test it on the executor.
+    They has the exactly same arguments, and expected the same results.
+    """
+
     def __init__(self, place):
         self.place = place
         p = core.Place()
@@ -213,6 +247,23 @@ def __init__(self, place):
         self.program_caches = dict()
 
     def as_lodtensor(self, data):
+        """
+        Convert numpy.ndarray to Tensor, its only support Tensor without LoD information.
+        For higher dimensional sequence data, please use LoDTensor directly.
+
+        Examples:
+            >>> import paddle.fluid as fluid
+            >>> exe = fluid.executor(fluid.CPUPlace())
+            >>> data = np.array(size=(100, 200, 300))
+            >>> np_outs = map(lambda x: exe.as_lodtensor(x), data)
+            >>> ...
+
+        Args:
+            data(numpy.ndarray): a instance of array
+
+        Returns:
+            LoDTensor
+        """
         if isinstance(data, list):
             raise RuntimeError("Some of your feed data hold LoD information. \
                 They can not be completely cast from a list of Python \
@@ -304,23 +355,47 @@ def run(self,
             scope=None,
             return_numpy=True,
             use_program_cache=False):
-        """ Run program by this Executor. Feed data by feed map, fetch result by fetch_list.
-
+        """
+        Run program by this Executor. Feed data by feed map, fetch result by fetch_list.
         Python executor takes a program, add feed operators and fetch operators to this program according
         to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides
-        the variables(or names) that user want to get after program run. Note: the executor will run all
+        the variables(or names) that user want to get after program run.
+
+        Note: the executor will run all
         operators in the program but not only the operators dependent by the fetch_list
 
-        :param program: the program that need to run, if not provied, then default_main_program will be used.
-        :param feed: feed variable map, e.g. {"image": ImageData, "label": LableData}
-        :param fetch_list: a list of variable or variable names that user want to get, run will return them according
-                           to this list.
-        :param feed_var_name: the name for the input variable of feed Operator.
-        :param fetch_var_name: the name for the output variable of feed Operator.
-        :param scope: the scope used to run this program, you can switch it to different scope. default is global_scope
-        :param return_numpy: if convert the fetched tensor to numpy
-        :param use_program_cache: set use_program_cache to true if program not changed compare to the last step.
-        :return: result according to fetch_list.
+        Args:
+            program(Program): the program that need to run, if not provied, then default_main_program will be used.
+            feed(dict): feed variable map, e.g. {"image": ImageData, "label": LableData}
+            fetch_list(list): a list of variable or variable names that user want to get, run will return them according to this list.
+            feed_var_name(str): the name for the input variable of feed Operator.
+            fetch_var_name(str): the name for the output variable of fetch Operator.
+            scope(Scope): the scope used to run this program, you can switch it to different scope. default is global_scope
+            return_numpy(bool): if convert the fetched tensor to numpy
+            use_program_cache(bool): set use_program_cache to true if program not changed compare to the last step.
+
+        Returns:
+
+            list(numpy.array): fetch result according to fetch_list.
+
+
+        Examples:
+
+            >>> data = layers.data(name='X', shape=[1], dtype='float32')
+            >>> hidden = layers.fc(input=data, size=10)
+            >>> layers.assign(hidden, out)
+            >>> loss = layers.mean(out)
+            >>> adam = fluid.optimizer.Adam()
+            >>> adam.minimize(loss)
+
+            >>> cpu = core.CPUPlace()
+            >>> exe = Executor(cpu)
+            >>> exe.run(default_startup_program())
+
+            >>> x = numpy.random.random(size=(10, 1)).astype('float32')
+            >>> outs = exe.run(
+            >>>     feed={'X': x},
+            >>>     fetch_list=[loss.name])
         """
         if feed is None:
             feed = {}
@@ -341,7 +416,7 @@ def run(self,
         if scope is None:
             scope = global_scope()
 
-        cache_key = get_program_cache_key(feed, fetch_list)
+        cache_key = _get_program_cache_key(feed, fetch_list)
         if use_program_cache:
             cached_program = self._get_program_cache(cache_key)
             if cached_program is None:
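
With `switch_scope` renamed to `_switch_scope`, the supported way to run under a non-default scope is the `scope_guard` context manager that this diff leaves public. A minimal sketch follows, using only names visible in this diff (`scope_guard`, `core.Scope`, `global_scope`) and assuming they are re-exported at the `paddle.fluid` top level as the `__all__` list suggests.

    import paddle.fluid as fluid

    # Create a fresh scope and make it the default for everything run inside
    # the `with` block; scope_guard swaps the global scope on entry and
    # restores the previous one on exit (see the contextmanager above).
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        assert fluid.global_scope() is inference_scope
        # Any Executor.run(...) issued here keeps its persistent variables in
        # inference_scope instead of the process-wide default scope.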

python/paddle/fluid/layers/__init__.py

Lines changed: 3 additions & 3 deletions
@@ -28,8 +28,8 @@
 from math_op_patch import *
 import detection
 from detection import *
-import metric
-from metric import *
+import metric_op
+from metric_op import *
 from learning_rate_scheduler import *
 
 __all__ = []
@@ -41,5 +41,5 @@
 __all__ += ops.__all__
 __all__ += device.__all__
 __all__ += detection.__all__
-__all__ += metric.__all__
+__all__ += metric_op.__all__
 __all__ += learning_rate_scheduler.__all__
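
The `metric` submodule is renamed to `metric_op`, but because `__init__.py` still wildcard-imports it and extends `__all__`, code that goes through the public `fluid.layers` namespace is unaffected; only direct imports of the submodule by its old name need to change. A hypothetical before/after sketch (the user code shown is an assumption, not part of this commit):

    # Before this commit, a direct submodule import looked like:
    #     from paddle.fluid.layers import metric
    # After this commit the same import becomes:
    from paddle.fluid.layers import metric_op

    # Its ops are still re-exported into fluid.layers via
    # "from metric_op import *" and "__all__ += metric_op.__all__".
    print(metric_op.__all__)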
