Skip to content

Commit 8b77448

Browse files
authored
hide misc APIs (#12540)
* hide misc APIs * update * fix transformer test * update API.spec
1 parent d3da0ef commit 8b77448

File tree

15 files changed

+56
-100
lines changed

15 files changed

+56
-100
lines changed

doc/fluid/api/executor.rst

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -38,11 +38,3 @@ _switch_scope
3838
.. autofunction:: paddle.fluid.executor._switch_scope
3939
:noindex:
4040

41-
.. _api_fluid_executor_fetch_var:
42-
43-
fetch_var
44-
---------
45-
46-
.. autofunction:: paddle.fluid.executor.fetch_var
47-
:noindex:
48-

doc/fluid/api/fluid.rst

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -106,22 +106,6 @@ _switch_scope
106106
.. autofunction:: paddle.fluid._switch_scope
107107
:noindex:
108108

109-
.. _api_fluid_fetch_var:
110-
111-
fetch_var
112-
---------
113-
114-
.. autofunction:: paddle.fluid.fetch_var
115-
:noindex:
116-
117-
.. _api_fluid_Go:
118-
119-
Go
120-
--
121-
122-
.. autoclass:: paddle.fluid.Go
123-
:members:
124-
:noindex:
125109

126110
.. _api_fluid_make_channel:
127111

paddle/fluid/API.spec

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -34,21 +34,10 @@ paddle.fluid.default_main_program ArgSpec(args=[], varargs=None, keywords=None,
3434
paddle.fluid.program_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
3535
paddle.fluid.get_var ArgSpec(args=['name', 'program'], varargs=None, keywords=None, defaults=(None,))
3636
paddle.fluid.Executor.__init__ ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None)
37-
paddle.fluid.Executor.as_lodtensor ArgSpec(args=['self', 'data'], varargs=None, keywords=None, defaults=None)
3837
paddle.fluid.Executor.close ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
3938
paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False))
4039
paddle.fluid.global_scope ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
4140
paddle.fluid.scope_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
42-
paddle.fluid.fetch_var ArgSpec(args=['name', 'scope', 'return_numpy'], varargs=None, keywords=None, defaults=(None, True))
43-
paddle.fluid.Go.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
44-
paddle.fluid.Go.construct_go_op ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
45-
paddle.fluid.make_channel ArgSpec(args=['dtype', 'capacity'], varargs=None, keywords=None, defaults=(0,))
46-
paddle.fluid.channel_send ArgSpec(args=['channel', 'value', 'is_copy'], varargs=None, keywords=None, defaults=(False,))
47-
paddle.fluid.channel_recv ArgSpec(args=['channel', 'return_value'], varargs=None, keywords=None, defaults=None)
48-
paddle.fluid.channel_close ArgSpec(args=['channel'], varargs=None, keywords=None, defaults=None)
49-
paddle.fluid.Select.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
50-
paddle.fluid.Select.case ArgSpec(args=['self', 'channel_action_fn', 'channel', 'value', 'is_copy'], varargs=None, keywords=None, defaults=(False,))
51-
paddle.fluid.Select.default ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
5241
paddle.fluid.Trainer.__init__ ArgSpec(args=['self', 'train_func', 'optimizer_func', 'param_path', 'place', 'parallel', 'checkpoint_config'], varargs=None, keywords=None, defaults=(None, None, False, None))
5342
paddle.fluid.Trainer.save_params ArgSpec(args=['self', 'param_path'], varargs=None, keywords=None, defaults=None)
5443
paddle.fluid.Trainer.stop ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
@@ -62,20 +51,16 @@ paddle.fluid.CheckpointConfig.__init__ ArgSpec(args=['self', 'checkpoint_dir', '
6251
paddle.fluid.Inferencer.__init__ ArgSpec(args=['self', 'infer_func', 'param_path', 'place', 'parallel'], varargs=None, keywords=None, defaults=(None, False))
6352
paddle.fluid.Inferencer.infer ArgSpec(args=['self', 'inputs', 'return_numpy'], varargs=None, keywords=None, defaults=(True,))
6453
paddle.fluid.DistributeTranspiler.__init__ ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,))
65-
paddle.fluid.DistributeTranspiler.create_splited_vars ArgSpec(args=['self', 'source_var', 'block', 'tag'], varargs=None, keywords=None, defaults=None)
6654
paddle.fluid.DistributeTranspiler.get_pserver_program ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
6755
paddle.fluid.DistributeTranspiler.get_startup_program ArgSpec(args=['self', 'endpoint', 'pserver_program'], varargs=None, keywords=None, defaults=None)
6856
paddle.fluid.DistributeTranspiler.get_trainer_program ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
6957
paddle.fluid.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True))
7058
paddle.fluid.InferenceTranspiler.__init__
71-
paddle.fluid.InferenceTranspiler.fuse_batch_norm ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=None)
72-
paddle.fluid.InferenceTranspiler.fuse_relu_mkldnn ArgSpec(args=['self', 'program'], varargs=None, keywords=None, defaults=None)
7359
paddle.fluid.InferenceTranspiler.transpile ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,))
7460
paddle.fluid.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level'], varargs=None, keywords=None, defaults=(None, False, 0))
7561
paddle.fluid.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,))
7662
paddle.fluid.DistributeTranspilerConfig.__init__
7763
paddle.fluid.ParallelExecutor.__init__ ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id'], varargs=None, keywords='kwargs', defaults=(None, None, None, None, None, 1, 0))
78-
paddle.fluid.ParallelExecutor.bcast_params ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
7964
paddle.fluid.ParallelExecutor.run ArgSpec(args=['self', 'fetch_list', 'feed', 'feed_dict', 'return_numpy'], varargs=None, keywords=None, defaults=(None, None, True))
8065
paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.ExecutionStrategy) -> None
8166
paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.fluid.core.GradientScaleStrategy, arg0: int) -> None
@@ -338,14 +323,11 @@ paddle.fluid.contrib.BeamSearchDecoder.read_array ArgSpec(args=['self', 'init',
338323
paddle.fluid.contrib.BeamSearchDecoder.update_array ArgSpec(args=['self', 'array', 'value'], varargs=None, keywords=None, defaults=None)
339324
paddle.fluid.contrib.memory_usage ArgSpec(args=['program', 'batch_size'], varargs=None, keywords=None, defaults=None)
340325
paddle.fluid.transpiler.DistributeTranspiler.__init__ ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,))
341-
paddle.fluid.transpiler.DistributeTranspiler.create_splited_vars ArgSpec(args=['self', 'source_var', 'block', 'tag'], varargs=None, keywords=None, defaults=None)
342326
paddle.fluid.transpiler.DistributeTranspiler.get_pserver_program ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
343327
paddle.fluid.transpiler.DistributeTranspiler.get_startup_program ArgSpec(args=['self', 'endpoint', 'pserver_program'], varargs=None, keywords=None, defaults=None)
344328
paddle.fluid.transpiler.DistributeTranspiler.get_trainer_program ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
345329
paddle.fluid.transpiler.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True))
346330
paddle.fluid.transpiler.InferenceTranspiler.__init__
347-
paddle.fluid.transpiler.InferenceTranspiler.fuse_batch_norm ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=None)
348-
paddle.fluid.transpiler.InferenceTranspiler.fuse_relu_mkldnn ArgSpec(args=['self', 'program'], varargs=None, keywords=None, defaults=None)
349331
paddle.fluid.transpiler.InferenceTranspiler.transpile ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,))
350332
paddle.fluid.transpiler.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level'], varargs=None, keywords=None, defaults=(None, False, 0))
351333
paddle.fluid.transpiler.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,))

paddle/fluid/inference/analysis/analyzer.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -48,9 +48,9 @@ class DfgPassManagerImpl final : public DfgPassManager {
4848
if (!node->IsFunction()) return false;
4949

5050
const auto* func = static_cast<const Function*>(node);
51-
if (teller_set.count(func->func_type()))
51+
if (teller_set.count(func->func_type())) {
5252
return true;
53-
else {
53+
} else {
5454
return false;
5555
}
5656
};

paddle/fluid/inference/api/paddle_inference_api.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ class PaddleBuf {
4545
PaddleBuf(void* data, size_t length)
4646
: data_(data), length_(length), memory_owned_{false} {}
4747
// Own memory.
48-
PaddleBuf(size_t length)
48+
explicit PaddleBuf(size_t length)
4949
: data_(new char[length]), length_(length), memory_owned_(true) {}
5050
// Resize to `length` bytes.
5151
void Resize(size_t length);

paddle/fluid/pybind/pybind.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -664,7 +664,7 @@ All parameter, weight, gradient are variables in Paddle.
664664
const std::string &, Scope *, std::vector<Scope *> &,
665665
const ExecutionStrategy &, const BuildStrategy &, size_t,
666666
size_t>())
667-
.def("bcast_params", &ParallelExecutor::BCastParamsToDevices)
667+
.def("_bcast_params", &ParallelExecutor::BCastParamsToDevices)
668668
// NOTE: even we return a vec<Scope*>* to Python use reference policy.
669669
// We still cannot get local_scope from this vector, since the element
670670
// of vec<Scope*> will be freed by Python GC. We can only return Scope*

python/paddle/fluid/__init__.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -48,8 +48,6 @@
4848
from .core import LoDTensor, LoDTensorArray, CPUPlace, CUDAPlace, CUDAPinnedPlace, Scope
4949
from .transpiler import DistributeTranspiler, InferenceTranspiler, \
5050
memory_optimize, release_memory, DistributeTranspilerConfig
51-
from .concurrency import (Go, make_channel, channel_send, channel_recv,
52-
channel_close, Select)
5351
from .lod_tensor import create_lod_tensor, create_random_int_lodtensor
5452
from . import clip
5553
from . import profiler
@@ -61,7 +59,7 @@
6159

6260
Tensor = LoDTensor
6361

64-
__all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + \
62+
__all__ = framework.__all__ + executor.__all__ + \
6563
trainer.__all__ + inferencer.__all__ + transpiler.__all__ + \
6664
parallel_executor.__all__ + lod_tensor.__all__ + [
6765
'io',

python/paddle/fluid/concurrency.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -19,8 +19,7 @@
1919
from . import core
2020

2121
__all__ = [
22-
'Go', 'make_channel', 'channel_send', 'channel_recv', 'channel_close',
23-
'Select'
22+
'make_channel', 'channel_send', 'channel_recv', 'channel_close', 'Select'
2423
]
2524

2625

@@ -35,10 +34,10 @@ def __enter__(self):
3534
def __exit__(self, exc_type, exc_val, exc_tb):
3635
if exc_type is not None:
3736
return False
38-
self.construct_go_op()
37+
self._construct_go_op()
3938
return super(Go, self).__exit__(exc_type, exc_val, exc_tb)
4039

41-
def construct_go_op(self):
40+
def _construct_go_op(self):
4241
main_program = self.helper.main_program
4342
go_block = main_program.current_block()
4443
parent_block = main_program.block(main_program.current_block()

python/paddle/fluid/executor.py

Lines changed: 34 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,7 @@
1818
from .framework import Program, default_main_program, Variable
1919
from . import core
2020

21-
__all__ = [
22-
'Executor', 'global_scope', 'scope_guard', '_switch_scope', 'fetch_var'
23-
]
21+
__all__ = ['Executor', 'global_scope', 'scope_guard', '_switch_scope']
2422

2523
g_scope = core.Scope()
2624

@@ -171,7 +169,7 @@ def has_fetch_operators(block, fetch_targets, fetch_holder_name):
171169
return fetch_count > 0
172170

173171

174-
def fetch_var(name, scope=None, return_numpy=True):
172+
def _fetch_var(name, scope=None, return_numpy=True):
175173
"""
176174
Fetch the value of the variable with the given name from the
177175
given scope.
@@ -222,6 +220,37 @@ def to_name_str(var):
222220
return str(feed_var_names + fetch_var_names)
223221

224222

223+
def _as_lodtensor(data, place):
224+
"""
225+
Convert numpy.ndarray to Tensor; it only supports Tensor without LoD information.
226+
For higher dimensional sequence data, please use LoDTensor directly.
227+
228+
Examples:
229+
>>> import paddle.fluid as fluid
230+
>>> place = fluid.CPUPlace()
231+
>>> exe = fluid.executor(place)
232+
>>> data = np.array(size=(100, 200, 300))
233+
>>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data)
234+
>>> ...
235+
236+
Args:
237+
data(numpy.ndarray): an instance of array
238+
239+
Returns:
240+
LoDTensor
241+
"""
242+
if isinstance(data, list):
243+
raise RuntimeError("Some of your feed data hold LoD information. \
244+
They can not be completely cast from a list of Python \
245+
ndarray to LoDTensor. Please convert data to LoDTensor \
246+
directly before feeding the data.\
247+
")
248+
# single tensor case
249+
tensor = core.LoDTensor()
250+
tensor.set(data, place)
251+
return tensor
252+
253+
225254
class Executor(object):
226255
"""
227256
An Executor in Python, only support the single-GPU running. For multi-cards, please refer to
@@ -250,35 +279,6 @@ def __init__(self, place):
250279
self.program_caches = dict()
251280
self._closed = False
252281

253-
def as_lodtensor(self, data):
254-
"""
255-
Convert numpy.ndarray to Tensor, its only support Tensor without LoD information.
256-
For higher dimensional sequence data, please use LoDTensor directly.
257-
258-
Examples:
259-
>>> import paddle.fluid as fluid
260-
>>> exe = fluid.executor(fluid.CPUPlace())
261-
>>> data = np.array(size=(100, 200, 300))
262-
>>> np_outs = map(lambda x: exe.as_lodtensor(x), data)
263-
>>> ...
264-
265-
Args:
266-
data(numpy.ndarray): a instance of array
267-
268-
Returns:
269-
LoDTensor
270-
"""
271-
if isinstance(data, list):
272-
raise RuntimeError("Some of your feed data hold LoD information. \
273-
They can not be completely cast from a list of Python \
274-
ndarray to LoDTensor. Please convert data to LoDTensor \
275-
directly before feeding the data.\
276-
")
277-
# single tensor case
278-
tensor = core.LoDTensor()
279-
tensor.set(data, self.place)
280-
return tensor
281-
282282
def _get_program_cache(self, program_cache_key):
283283
return self.program_caches.get(program_cache_key, None)
284284

@@ -337,7 +337,7 @@ def _feed_data(self, program, feed, feed_var_name, scope):
337337
feed_target_name = op.desc.output('Out')[0]
338338
cur_feed = feed[feed_target_name]
339339
if not isinstance(cur_feed, core.LoDTensor):
340-
cur_feed = self.as_lodtensor(cur_feed)
340+
cur_feed = _as_lodtensor(cur_feed, self.place)
341341
idx = op.desc.attr('col')
342342
core.set_feed_variable(scope, cur_feed, feed_var_name, idx)
343343
else:

python/paddle/fluid/parallel_executor.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -273,19 +273,19 @@ def run(self, fetch_list, feed=None, feed_dict=None, return_numpy=True):
273273
arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array()
274274

275275
if self.is_dist:
276-
self.bcast_params()
276+
self._bcast_params()
277277

278278
if return_numpy:
279279
return executor.as_numpy(arr)
280280

281281
return [arr[i] for i in range(len(arr))]
282282

283-
def bcast_params(self):
283+
def _bcast_params(self):
284284
"""
285285
Broadcast the parameters to other devices. It is used during
286286
distributed training.
287287
"""
288-
self.executor.bcast_params(set(self.persistable_vars))
288+
self.executor._bcast_params(set(self.persistable_vars))
289289

290290
@property
291291
def device_count(self):

0 commit comments

Comments
 (0)