Commit 76086df

Merge pull request #12097 from reyoung/feature/hide_api_cont
Hide internal API of LoDTensors, Clipping, etc.
2 parents bc8c7cc + 4ff1bde commit 76086df

File tree

12 files changed, +86 -91 lines changed


paddle/fluid/pybind/pybind.cc

Lines changed: 14 additions & 14 deletions
@@ -87,37 +87,37 @@ PYBIND11_PLUGIN(core) {
   py::class_<Tensor>(m, "Tensor", py::buffer_protocol())
       .def_buffer(
           [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); })
-      .def("get_dims",
+      .def("_get_dims",
            [](const Tensor &self) { return vectorize(self.dims()); })
-      .def("set_dims",
+      .def("_set_dims",
           [](Tensor &self, const std::vector<int64_t> &dim) {
             self.Resize(make_ddim(dim));
           })
-      .def("set_layout",
+      .def("_set_layout",
           [](Tensor &self, const std::string &layout) {
             self.set_layout(StringToDataLayout(layout));
           })
-      .def("alloc_float",
+      .def("_alloc_float",
           [](Tensor &self, paddle::platform::CUDAPlace &place) {
             self.mutable_data<float>(place);
           })
-      .def("alloc_float",
+      .def("_alloc_float",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<float>(place);
           })
-      .def("alloc_int",
+      .def("_alloc_int",
           [](Tensor &self, paddle::platform::CPUPlace &place) {
             self.mutable_data<int>(place);
           })
-      .def("alloc_int",
+      .def("_alloc_int",
           [](Tensor &self, paddle::platform::CUDAPlace &place) {
             self.mutable_data<int>(place);
           })
-      .def("alloc_int",
+      .def("_alloc_int",
           [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
             self.mutable_data<int>(place);
           })
-      .def("alloc_float",
+      .def("_alloc_float",
           [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) {
             self.mutable_data<float>(place);
           })
@@ -145,11 +145,11 @@ PYBIND11_PLUGIN(core) {
       .def("set", PyCUDAPinnedTensorSetFromArray<uint8_t>)
 #endif
       .def("shape", [](Tensor &self) { return vectorize(self.dims()); })
-      .def("set_float_element", TensorSetElement<float>)
-      .def("get_float_element", TensorGetElement<float>)
-      .def("set_double_element", TensorSetElement<double>)
-      .def("get_double_element", TensorGetElement<double>)
-      .def("dtype", [](Tensor &self) { return ToDataType(self.type()); });
+      .def("_set_float_element", TensorSetElement<float>)
+      .def("_get_float_element", TensorGetElement<float>)
+      .def("_set_double_element", TensorSetElement<double>)
+      .def("_get_double_element", TensorGetElement<double>)
+      .def("_dtype", [](Tensor &self) { return ToDataType(self.type()); });

   py::class_<LoDTensor, Tensor>(m, "LoDTensor")
       .def_buffer(
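
For context, the renamed bindings stay callable from Python; the leading underscore only marks them as internal. A minimal sketch of calling code after this change, assuming the Scope/Variable access pattern used by the unit tests later in this diff (the variable name "x" and the CPUPlace are illustrative):

    import paddle.fluid.core as core

    scope = core.Scope()
    t = scope.var("x").get_tensor()
    t._set_dims([2, 3])              # formerly t.set_dims([2, 3])
    t._alloc_float(core.CPUPlace())  # formerly t.alloc_float(core.CPUPlace())
    print(t.shape())                 # public accessors such as shape() keep their names
    print(t._dtype())                # formerly t.dtype()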

python/paddle/fluid/clip.py

Lines changed: 15 additions & 15 deletions
@@ -31,7 +31,7 @@ class BaseErrorClipAttr(object):
     def __str__(self):
         raise NotImplementedError()

-    def append_clip_op(self, block, grad_name):
+    def _append_clip_op(self, block, grad_name):
         raise NotImplementedError()


@@ -67,7 +67,7 @@ def __init__(self, max, min=None):
     def __str__(self):
         return "ByValue, min=%f, max=%f" % (self.min, self.max)

-    def append_clip_op(self, block, grad_name):
+    def _append_clip_op(self, block, grad_name):
         clip_op_desc = block.desc.append_op()
         clip_op_desc.set_type("clip")
         clip_op_desc.set_input("X", [grad_name])
@@ -90,28 +90,28 @@ def error_clip_callback(block, context):
                 "Variable's error_clip should be an instance of BaseErrorClipAttr or None."
             )
         if error_clip is not None:
-            error_clip.append_clip_op(block, grad_n)
+            error_clip._append_clip_op(block, grad_n)


 class BaseGradientClipAttr(object):
     def __str__(self):
         raise NotImplementedError()

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         raise NotImplementedError()

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         raise NotImplementedError()


 class NullGradientClipAttr(BaseGradientClipAttr):
     def __str__(self):
         return "Null"

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         pass

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         return param, grad


@@ -153,10 +153,10 @@ def __init__(self, max, min=None):
     def __str__(self):
         return "ByValue, min=%f, max=%f" % (self.min, self.max)

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         pass

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         new_grad = layers.clip(x=grad, min=self.min, max=self.max)
         return param, new_grad

@@ -199,10 +199,10 @@ def __init__(self, clip_norm):
     def __str__(self):
         return "ByNorm, clip_norm=%f" % self.clip_norm

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         pass

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         new_grad = layers.clip_by_norm(x=grad, max_norm=self.clip_norm)
         return param, new_grad

@@ -257,7 +257,7 @@ def __str__(self):
         return "ByGlobalNorm, group_name=%s, clip_norm=%f" % (self.group_name,
                                                               self.clip_norm)

-    def process_context(self, context, param, grad):
+    def _process_context(self, context, param, grad):
         if self.group_name not in context:
             context[self.group_name] = []
             context[self.group_name + "_clip_value"] = self.clip_norm
@@ -274,7 +274,7 @@ def process_context(self, context, param, grad):

         self.context = context

-    def create_operators(self, param, grad):
+    def _create_operators(self, param, grad):
         group_scale_name = self.group_name + "_scale"
         if group_scale_name not in self.context:
             group_norm_var = layers.sums(input=self.context[self.group_name])
@@ -336,12 +336,12 @@ def append_gradient_clip_ops(param_grad):
                 "clip attribute should be an instance of BaseGradientClipAttr"
             )

-        clip_attr.process_context(context=context, param=p, grad=g)
+        clip_attr._process_context(context=context, param=p, grad=g)

     res = []
     for p, g in param_grad:
         with p.block.program.optimized_guard(p):
-            res.append(clip_attr.create_operators(param=p, grad=g))
+            res.append(clip_attr._create_operators(param=p, grad=g))

     return res
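
With the clipping hooks made private, a user-defined policy now overrides the underscore-prefixed methods rather than the old public names. A rough sketch of such a subclass, assuming only the fluid APIs visible in this diff (the halve-every-gradient policy is purely illustrative):

    import paddle.fluid.layers as layers
    from paddle.fluid.clip import BaseGradientClipAttr

    class HalveGradientClipAttr(BaseGradientClipAttr):
        """Illustrative policy: scale every gradient by 0.5."""

        def __str__(self):
            return "Halve"

        def _process_context(self, context, param, grad):
            pass  # no cross-parameter state needed for this policy

        def _create_operators(self, param, grad):
            # append_gradient_clip_ops() calls this once per (param, grad) pair
            return param, layers.scale(x=grad, scale=0.5)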

python/paddle/fluid/layer_helper.py

Lines changed: 11 additions & 11 deletions
@@ -68,11 +68,11 @@ def input(self, input_param_name='input'):

     @property
     def param_attr(self):
-        return ParamAttr.to_attr(self.kwargs.get('param_attr', None))
+        return ParamAttr._to_attr(self.kwargs.get('param_attr', None))

     @property
     def bias_attr(self):
-        return ParamAttr.to_attr(self.kwargs.get('bias_attr', None))
+        return ParamAttr._to_attr(self.kwargs.get('bias_attr', None))

     def multiple_param_attr(self, length):
         param_attr = self.param_attr
@@ -262,11 +262,11 @@ def __weight_normalize(g, v, dim):
         g_param = self.startup_program.global_block().create_parameter(
             dtype=dtype,
             shape=g_param_shape,
-            **g_param_attr.to_kwargs(with_initializer=False))
+            **g_param_attr._to_kwargs(with_initializer=False))
         v_param = self.startup_program.global_block().create_parameter(
             dtype=dtype,
             shape=v_param_shape,
-            **v_param_attr.to_kwargs(with_initializer=True))
+            **v_param_attr._to_kwargs(with_initializer=True))
         __norm_except_dim(
             x=v_param,
             out=g_param,
@@ -275,9 +275,9 @@ def __weight_normalize(g, v, dim):

         # Add weight normalization to main_program
         g_param = self.main_program.global_block().create_parameter(
-            dtype=dtype, shape=g_param_shape, **g_param_attr.to_kwargs())
+            dtype=dtype, shape=g_param_shape, **g_param_attr._to_kwargs())
         v_param = self.main_program.global_block().create_parameter(
-            dtype=dtype, shape=v_param_shape, **v_param_attr.to_kwargs())
+            dtype=dtype, shape=v_param_shape, **v_param_attr._to_kwargs())
         w_param = __weight_normalize(g_param, v_param, dim=attr.dim)
         return w_param

@@ -296,11 +296,11 @@ def create_parameter(self,

         if default_initializer is None and attr.initializer is None:
             if is_bias:
-                attr.set_default_bias_initializer()
+                attr._set_default_bias_initializer()
             else:
-                attr.set_default_param_initializer()
+                attr._set_default_param_initializer()
         else:
-            attr.set_default_initializer(default_initializer)
+            attr._set_default_initializer(default_initializer)

         # If weight normalization is set, insert extra parameters and ops.
         # Refer to https://arxiv.org/pdf/1602.07868.pdf
@@ -310,9 +310,9 @@ def create_parameter(self,
             return param

         self.startup_program.global_block().create_parameter(
-            dtype=dtype, shape=shape, **attr.to_kwargs(with_initializer=True))
+            dtype=dtype, shape=shape, **attr._to_kwargs(with_initializer=True))
         return self.main_program.global_block().create_parameter(
-            dtype=dtype, shape=shape, **attr.to_kwargs())
+            dtype=dtype, shape=shape, **attr._to_kwargs())

     def get_parameter(self, name):
         param = self.main_program.global_block().var(name)
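
LayerHelper's public create_parameter interface is unchanged; only the ParamAttr helpers it calls internally have gone private. An illustrative sketch of how a layer implementation reaches this code path (the 'fc' layer type and the shape are assumptions, not taken from this diff):

    from paddle.fluid.layer_helper import LayerHelper

    helper = LayerHelper('fc', param_attr=None, bias_attr=None)
    # helper.param_attr calls ParamAttr._to_attr() to normalize the kwarg, and
    # create_parameter() falls back to attr._set_default_param_initializer()
    # (Xavier) because no default_initializer is supplied here.
    w = helper.create_parameter(
        attr=helper.param_attr, shape=[784, 10], dtype='float32', is_bias=False)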

python/paddle/fluid/param_attr.py

Lines changed: 9 additions & 9 deletions
@@ -67,7 +67,7 @@ def __init__(self,
         self.gradient_clip = gradient_clip
         self.model_average = do_model_average

-    def set_default_initializer(self, initializer):
+    def _set_default_initializer(self, initializer):
         """
         Set the default initializer, the initializer should be Constant,
         Uniform, Normal, Xavier, MSRA.
@@ -88,7 +88,7 @@ def set_default_initializer(self, initializer):

         self.initializer = initializer

-    def set_default_param_initializer(self):
+    def _set_default_param_initializer(self):
         """
         Set the default initializer for the parameter with Xavier.

@@ -98,9 +98,9 @@ def set_default_param_initializer(self):
         Returns:
             None.
         """
-        self.set_default_initializer(Xavier())
+        self._set_default_initializer(Xavier())

-    def set_default_bias_initializer(self):
+    def _set_default_bias_initializer(self):
         """
         Set the default initializer for the bias with Constant(0.0).

@@ -110,10 +110,10 @@ def set_default_bias_initializer(self):
         Returns:
             None.
         """
-        self.set_default_initializer(Constant(0.0))
+        self._set_default_initializer(Constant(0.0))

     @staticmethod
-    def to_attr(arg):
+    def _to_attr(arg):
         """
         Create ParamAttr[s].

@@ -131,7 +131,7 @@ def to_attr(arg):
         if arg is None:
             return ParamAttr()
         elif isinstance(arg, list) or isinstance(arg, tuple):
-            return [ParamAttr.to_attr(a) for a in arg]
+            return [ParamAttr._to_attr(a) for a in arg]
         elif isinstance(arg, ParamAttr):
             return arg
         elif isinstance(arg, str) or isinstance(arg, unicode):
@@ -141,11 +141,11 @@ def to_attr(arg):
         elif isinstance(arg, WeightDecayRegularizer):
             return ParamAttr(regularizer=arg)
         elif isinstance(arg, bool):
-            return ParamAttr.to_attr(None) if arg else False
+            return ParamAttr._to_attr(None) if arg else False
         else:
             raise TypeError("{0} cast to ParamAttr".format(type(arg)))

-    def to_kwargs(self, with_initializer=False):
+    def _to_kwargs(self, with_initializer=False):
         """
         Returns the attributes of this parameter.
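
The user-facing ParamAttr constructor is untouched; _to_attr and _to_kwargs are the conversion helpers the framework now calls on the user's behalf. A small sketch of the conversions covered by the branches above (the parameter name "fc_w" is illustrative):

    from paddle.fluid.param_attr import ParamAttr

    ParamAttr._to_attr(None)          # a default ParamAttr()
    ParamAttr._to_attr([None, None])  # a list of ParamAttr, one per element
    ParamAttr._to_attr(False)         # False passes through (parameter disabled)
    attr = ParamAttr(name="fc_w")
    kwargs = attr._to_kwargs(with_initializer=True)  # kwargs for create_parameter()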

python/paddle/fluid/regularizer.py

Lines changed: 1 addition & 4 deletions
@@ -15,10 +15,7 @@
 import framework
 from . import core

-__all__ = [
-    'append_regularization_ops', 'L1Decay', 'L2Decay', 'L1DecayRegularizer',
-    'L2DecayRegularizer'
-]
+__all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer']


 def append_regularization_ops(parameters_and_grads, regularization=None):
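
Dropping append_regularization_ops from __all__ keeps it importable by explicit path but signals that only the decay classes are public. A sketch of the intended user-facing pattern, assuming the fluid optimizer API of this release (coefficient and learning rate are illustrative):

    import paddle.fluid as fluid
    from paddle.fluid.regularizer import L2Decay  # still exported

    optimizer = fluid.optimizer.SGD(
        learning_rate=0.01,
        regularization=L2Decay(regularization_coeff=1e-4))
    # The optimizer is expected to call append_regularization_ops internally
    # during minimize(); user code no longer needs to reach for it directly.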

python/paddle/fluid/tests/unittests/op_test.py

Lines changed: 8 additions & 8 deletions
@@ -60,8 +60,8 @@ def get_output():
         return np.array(sum).mean()

     tensor_to_check = scope.find_var(input_to_check).get_tensor()
-    tensor_size = product(tensor_to_check.get_dims())
-    tensor_to_check_dtype = tensor_to_check.dtype()
+    tensor_size = product(tensor_to_check.shape())
+    tensor_to_check_dtype = tensor_to_check._dtype()
     if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
         tensor_to_check_dtype = np.float32
     elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:
@@ -74,15 +74,15 @@ def get_output():

     def __get_elem__(tensor, i):
         if tensor_to_check_dtype == np.float32:
-            return tensor.get_float_element(i)
+            return tensor._get_float_element(i)
         else:
-            return tensor.get_double_element(i)
+            return tensor._get_double_element(i)

     def __set_elem__(tensor, i, e):
         if tensor_to_check_dtype == np.float32:
-            tensor.set_float_element(i, e)
+            tensor._set_float_element(i, e)
         else:
-            tensor.set_double_element(i, e)
+            tensor._set_double_element(i, e)

     # we only compute gradient of one element each time.
     # we use a for loop to compute the gradient of every element.
@@ -107,7 +107,7 @@ def __set_elem__(tensor, i, e):
         __set_elem__(tensor_to_check, i, origin)
         gradient_flat[i] = (y_pos - y_neg) / delta / 2

-    return gradient_flat.reshape(tensor_to_check.get_dims())
+    return gradient_flat.reshape(tensor_to_check.shape())


 class OpTest(unittest.TestCase):
@@ -125,7 +125,7 @@ def setUpClass(cls):

     @classmethod
     def tearDownClass(cls):
-        '''Restore random seeds'''
+        """Restore random seeds"""
         np.random.set_state(cls._np_rand_state)
         random.setstate(cls._py_rand_state)
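
The gradient checker's logic is unchanged: it still perturbs one element at a time and applies a central difference, now through the underscore-prefixed element accessors. A condensed sketch of that loop, with tensor, size, output_fn, and delta as stand-ins for the surrounding test machinery rather than names from this file:

    import numpy as np

    def numeric_grad(tensor, size, output_fn, delta=0.005):
        grad = np.zeros(size, dtype=np.float32)
        for i in range(size):
            origin = tensor._get_float_element(i)
            tensor._set_float_element(i, origin + delta)  # f(x + delta)
            y_pos = output_fn()
            tensor._set_float_element(i, origin - delta)  # f(x - delta)
            y_neg = output_fn()
            tensor._set_float_element(i, origin)          # restore the element
            grad[i] = (y_pos - y_neg) / delta / 2         # central difference
        return grad.reshape(tensor.shape())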

python/paddle/fluid/tests/unittests/test_batch_norm_op.py

Lines changed: 0 additions & 1 deletion
@@ -129,7 +129,6 @@ def create_or_get_tensor(scope, var_name, var, place):
     if var is not None:
         assert isinstance(var, np.ndarray)
         tensor.set_recursive_sequence_lengths([])
-        tensor.set_dims(var.shape)
         tensor.set(var, place)
     return tensor
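
The deleted set_dims call appears redundant because tensor.set(...) sizes the tensor from the NumPy array it receives; a minimal sketch of the surviving pattern (the variable name and shape are illustrative):

    import numpy as np
    import paddle.fluid.core as core

    scope = core.Scope()
    tensor = scope.var("x").get_tensor()
    arr = np.random.random((2, 3)).astype("float32")
    tensor.set(arr, core.CPUPlace())  # set() takes the shape from arr
    assert tensor.shape() == [2, 3]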
