
Commit 1c9c8e8

Merge pull request #12065 from reyoung/feature/hide_apis
Add deprecated annotation and hide a lot of APIs
2 parents e568acb + ff07af8 · commit 1c9c8e8

File tree

13 files changed (+90, -48 lines)

python/paddle/fluid/annotations.py

Lines changed: 38 additions & 0 deletions
@@ -0,0 +1,38 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import sys
+
+__all__ = ['deprecated']
+
+
+def deprecated(since, instead, extra_message=""):
+    def decorator(func):
+        err_msg = "API {0} is deprecated since {1}. Please use {2} instead.".format(
+            func.__name__, since, instead)
+        if len(extra_message) != 0:
+            err_msg += "\n"
+            err_msg += extra_message
+
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            print >> sys.stderr, err_msg
+            return func(*args, **kwargs)
+
+        wrapper.__doc__ += "\n "
+        wrapper.__doc__ += err_msg
+        return wrapper
+
+    return decorator
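The decorator builds the deprecation message once per function, prints it to stderr on every call, and appends it to the wrapped function's docstring. A minimal usage sketch with a hypothetical old_helper/new_helper pair (the print statement in wrapper is Python 2 syntax, so this targets Python 2, as the codebase did at the time):

from paddle.fluid.annotations import deprecated


@deprecated(since='0.15.0', instead='new_helper',
            extra_message='new_helper covers the same use cases.')
def old_helper():
    """Do some work."""
    return 42


old_helper()  # returns 42, and prints to stderr:
              #   API old_helper is deprecated since 0.15.0. Please use new_helper instead.
              #   new_helper covers the same use cases.

One caveat: functools.wraps copies __doc__ verbatim, so the `wrapper.__doc__ += ...` lines assume the decorated function has a docstring; on a function without one, __doc__ is None and the += would raise a TypeError.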

python/paddle/fluid/backward.py

Lines changed: 1 addition & 4 deletions
@@ -18,10 +18,7 @@
 import copy
 import unique_name

-__all__ = [
-    'append_backward',
-    'calc_gradient',
-]
+__all__ = ['append_backward']


 def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None):

python/paddle/fluid/layers/device.py

Lines changed: 3 additions & 1 deletion
@@ -18,10 +18,12 @@
 from layer_function_generator import autodoc
 from ..framework import unique_name
 from ..layer_helper import LayerHelper
+from ..annotations import deprecated

-__all__ = ['get_places']
+__all__ = []


+@deprecated(since='0.15.0', instead="ParallelExecutor")
 @autodoc()
 def get_places(device_count=None, device_type=None):
     helper = LayerHelper('get_places', **locals())
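The user-visible effect: get_places is dropped from __all__, so it is no longer re-exported through fluid.layers, and every caller updated in this commit switches to importing it from its defining module. A sketch of the migration:

# Before this commit:
#     places = fluid.layers.get_places()
# After it, import the function from its defining module; calling it still
# works but now prints the deprecation notice to stderr.
from paddle.fluid.layers.device import get_places

places = get_places()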

python/paddle/fluid/optimizer.py

Lines changed: 12 additions & 12 deletions
@@ -29,7 +29,7 @@
     'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'Ftrl',
     'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer',
     'AdamaxOptimizer', 'DecayedAdagradOptimizer', 'RMSPropOptimizer',
-    'FtrlOptimizer', 'Adadelta', 'ModelAverage', 'Optimizer', 'RMSPropOptimizer'
+    'FtrlOptimizer', 'Adadelta', 'ModelAverage', 'RMSPropOptimizer'
 ]


@@ -67,7 +67,7 @@ def __init__(self,
         self._LARS_weight_decay = LARS_weight_decay

     def _create_global_learning_rate(self):
-        lr = self.global_learning_rate()
+        lr = self._global_learning_rate()

         if isinstance(lr, framework.Variable):
             return
@@ -86,7 +86,7 @@ def _create_global_learning_rate(self):
             dtype='float32' if self._dtype == None else self._dtype,
             persistable=True)

-    def global_learning_rate(self, program=None):
+    def _global_learning_rate(self, program=None):
         """
         get global decayed learning rate
         :return:
@@ -110,9 +110,9 @@ def _create_param_lr(self, param_and_grad):
             return param_lr
         else:
             if param_lr == 1.0:
-                return self.global_learning_rate()
+                return self._global_learning_rate()
             else:
-                return self.global_learning_rate() * param_lr
+                return self._global_learning_rate() * param_lr

     def _create_accumulators(self, block, parameters):
         """Create all accumulators needed by the parameters
@@ -185,10 +185,10 @@ def _get_accumulator(self, name, param):
                 format(name, param.name))
         return self._accumulators[name][param.name]

-    def create_optimization_pass(self,
-                                 parameters_and_grads,
-                                 loss,
-                                 startup_program=None):
+    def _create_optimization_pass(self,
+                                  parameters_and_grads,
+                                  loss,
+                                  startup_program=None):
         """Add optimization operators to update gradients to variables.

         Args:
@@ -221,7 +221,7 @@ def create_optimization_pass(self,
         self._create_global_learning_rate()
         if self._LARS_weight_decay > 0.0:
             layers.append_LARS(parameters_and_grads,
-                               self.global_learning_rate(),
+                               self._global_learning_rate(),
                                self._LARS_weight_decay)

         optimize_ops = []
@@ -262,8 +262,8 @@ def minimize(self,
         params_grads = append_regularization_ops(params_grads,
                                                  self.regularization)

-        optimize_ops = self.create_optimization_pass(params_grads, loss,
-                                                     startup_program)
+        optimize_ops = self._create_optimization_pass(params_grads, loss,
+                                                      startup_program)
         return optimize_ops, params_grads

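Beyond dropping the base Optimizer class from __all__, this file renames global_learning_rate and create_optimization_pass with leading underscores, making minimize the supported entry point; minimize now calls _create_optimization_pass internally. A rough sketch of that public path, assuming a typical fluid program with a scalar loss (the layer calls are standard fluid APIs of that era, not taken from this diff):

import paddle.fluid as fluid

# Minimal program with a scalar loss for the optimizer to work on.
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_pred = fluid.layers.fc(input=x, size=1)
avg_cost = fluid.layers.mean(
    fluid.layers.square_error_cost(input=y_pred, label=y))

sgd = fluid.optimizer.SGD(learning_rate=0.01)
# Public entry point; internally this now routes through the renamed
# _create_optimization_pass rather than the old public method.
opt_ops, params_grads = sgd.minimize(avg_cost)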

python/paddle/fluid/tests/book/notest_understand_sentiment.py

Lines changed: 2 additions & 2 deletions
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from __future__ import print_function
-
+from paddle.fluid.layers.device import get_places
 import unittest
 import paddle.fluid as fluid
 import paddle
@@ -144,7 +144,7 @@ def train(word_dict,
         cost, acc_out, prediction = net_method(
             data, label, input_dim=dict_dim, class_dim=class_dim)
     else:
-        places = fluid.layers.get_places()
+        places = get_places()
         pd = fluid.layers.ParallelDo(places)
         with pd.do():
             cost, acc, _ = net_method(

python/paddle/fluid/tests/book/test_recognize_digits.py

Lines changed: 10 additions & 8 deletions
@@ -12,15 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from __future__ import print_function
-import argparse
-import paddle.fluid as fluid
-import paddle
-import sys
-import numpy
-import unittest
+
 import math
-import sys
 import os
+import sys
+import unittest
+
+import numpy
+
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places

 BATCH_SIZE = 64

@@ -76,7 +78,7 @@ def train(nn_type,
         net_conf = conv_net

     if parallel:
-        places = fluid.layers.get_places()
+        places = get_places()
         pd = fluid.layers.ParallelDo(places)
         with pd.do():
             img_ = pd.read_input(img)

python/paddle/fluid/tests/book/test_word2vec.py

Lines changed: 2 additions & 1 deletion
@@ -14,6 +14,7 @@

 import paddle
 import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places
 import unittest
 import os
 import numpy as np
@@ -80,7 +81,7 @@ def __network__(words):
         avg_cost, predict_word = __network__(
             [first_word, second_word, third_word, forth_word, next_word])
     else:
-        places = fluid.layers.get_places()
+        places = get_places()
         pd = fluid.layers.ParallelDo(places)
         with pd.do():
             avg_cost, predict_word = __network__(

python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py

Lines changed: 5 additions & 4 deletions
@@ -12,12 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import numpy as np
-import paddle
-import paddle.fluid as fluid
 import math
 import sys

+import paddle
+import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places
+
 # need to fix random seed and training data to compare the loss
 # value accurately calculated by the default and the memory optimization
 # version.
@@ -34,7 +35,7 @@
 use_nccl = False
 place = fluid.CUDAPlace(0)

-places = fluid.layers.get_places(device_count=0, device_type=device_type)
+places = get_places(device_count=0, device_type=device_type)
 pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl)
 with pd.do():
     x_ = pd.read_input(x)

python/paddle/fluid/tests/unittests/test_calc_gradient.py

Lines changed: 0 additions & 2 deletions
@@ -16,8 +16,6 @@

 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
-import paddle.fluid.framework as framework
-import paddle.fluid.optimizer as optimizer
 from paddle.fluid.backward import calc_gradient

python/paddle/fluid/tests/unittests/test_get_places_op.py

Lines changed: 2 additions & 1 deletion
@@ -13,14 +13,15 @@
 # limitations under the License.

 import paddle.fluid as fluid
+from paddle.fluid.layers.device import get_places
 import decorators
 import unittest


 class TestGetPlaces(unittest.TestCase):
     @decorators.prog_scope()
     def test_get_places(self):
-        places = fluid.layers.get_places()
+        places = get_places()
         cpu = fluid.CPUPlace()
         exe = fluid.Executor(cpu)
         exe.run(fluid.default_main_program())
