
Commit 0ac8c74

Unify fluid submodules to fluid module (#5924)
Change the book examples to just use `import fluid`, not individual submodules.
1 parent e6546ba commit 0ac8c74

19 files changed: +381, -425 lines changed
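
The practical effect for user code: instead of importing each fluid submodule separately, the book examples only need a single import, and submodules plus the most common classes hang off the package itself. A minimal sketch of the intended before/after (illustrative, not taken from the diff itself):

# Before: every submodule was imported on its own, e.g.
#   import paddle.v2.fluid.core as core
#   import paddle.v2.fluid.layers as layers
# After: one import is enough.
import paddle.v2.fluid as fluid

place = fluid.CPUPlace()   # re-exported from core by the new __init__.py
fc = fluid.layers.fc       # submodules are plain attributes of the package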

python/paddle/v2/fluid/__init__.py

Lines changed: 41 additions & 11 deletions
@@ -1,11 +1,41 @@
-import sys
-import core
-__all__ = ['proto']
-argv = []
-if core.is_compile_gpu():
-    argv = list(sys.argv) + [
-        "--tryfromenv=fraction_of_gpu_memory_to_use,use_pinned_memory"
-    ]
-else:
-    argv = list(sys.argv) + ["--tryfromenv=use_pinned_memory"]
-core.init_gflags(argv)
+# import all class inside framework into fluid module
+import framework
+from framework import *
+# import all class inside executor into fluid module
+import executor
+from executor import *
+
+import io
+import evaluator
+import initializer
+import layers
+import nets
+import optimizer
+import backward
+import regularizer
+
+from core import LoDTensor, CPUPlace, GPUPlace
+
+Tensor = LoDTensor
+__all__ = framework.__all__ + executor.__all__ + [
+    'io', 'initializer', 'layers', 'nets', 'optimizer', 'backward',
+    'regularizer', 'LoDTensor', 'CPUPlace', 'GPUPlace', 'Tensor'
+]
+
+
+def __read_gflags_from_env__():
+    """
+    Enable reading gflags from environment variables.
+
+    Returns:
+        None
+    """
+    import sys
+    import core
+    read_env_flags = ['use_pinned_memory']
+    if core.is_compile_gpu():
+        read_env_flags.append('fraction_of_gpu_memory_to_use')
+    core.init_gflags(sys.argv + ["--tryfromenv=" + ",".join(read_env_flags)])
+
+
+__read_gflags_from_env__()
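
Because `__read_gflags_from_env__()` runs at import time with `--tryfromenv`, the listed flags can now be set through environment variables instead of command-line arguments. A sketch of the intended use, assuming gflags' usual `FLAGS_<name>` environment convention (the values shown are illustrative):

import os

# Must be set before paddle.v2.fluid is imported, since the flags are read
# during __init__.py; 'fraction_of_gpu_memory_to_use' is only registered on
# GPU builds (core.is_compile_gpu()).
os.environ['FLAGS_use_pinned_memory'] = 'true'

import paddle.v2.fluid as fluid  # init_gflags picks the value up here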

python/paddle/v2/fluid/evaluator.py

Lines changed: 3 additions & 4 deletions
@@ -1,9 +1,8 @@
 import numpy as np

-import paddle.v2.fluid.layers as layers
-from paddle.v2.fluid.framework import Program, unique_name, \
-    Variable
-from paddle.v2.fluid.layer_helper import LayerHelper
+import layers
+from framework import Program, unique_name, Variable
+from layer_helper import LayerHelper

 __all__ = ['Accuracy']

python/paddle/v2/fluid/executor.py

Lines changed: 4 additions & 2 deletions
@@ -1,6 +1,8 @@
 import numpy as np
-import paddle.v2.fluid.core as core
-from paddle.v2.fluid.framework import Block, Program, g_main_program
+from . import core
+from framework import Program, g_main_program
+
+__all__ = ['Executor', 'g_scope']

 g_scope = core.Scope()
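
`Executor` and `g_scope` are now listed in `executor.__all__`, so after the `from executor import *` in `__init__.py` they can be used straight from the `fluid` package. A minimal sketch:

import paddle.v2.fluid as fluid

place = fluid.CPUPlace()      # or fluid.GPUPlace(0) on a GPU build
exe = fluid.Executor(place)   # exported via executor.__all__
scope = fluid.g_scope         # the default global scope, also exported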

python/paddle/v2/fluid/framework.py

Lines changed: 4 additions & 4 deletions
@@ -1,12 +1,12 @@
-import paddle.v2.fluid.core as core
-import paddle.v2.fluid.proto.framework_pb2 as framework_pb2
 import collections
+
 import numpy as np
-import copy
+from . import core
+import proto.framework_pb2 as framework_pb2

 __all__ = [
     'Block', 'Variable', 'Program', 'Operator', 'default_startup_program',
-    'default_main_program'
+    'default_main_program', 'g_startup_program', 'g_main_program'
 ]
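
Adding `g_startup_program` and `g_main_program` to `__all__` means the global programs are also re-exported by the package, so book code no longer needs to import `framework` explicitly. A minimal sketch:

import paddle.v2.fluid as fluid

# The default global programs are now attributes of the fluid package itself.
startup = fluid.g_startup_program
main = fluid.g_main_program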

python/paddle/v2/fluid/initializer.py

Lines changed: 18 additions & 5 deletions
@@ -1,10 +1,7 @@
-import paddle.v2.fluid.framework as framework
+import framework
 import numpy as np

-__all__ = [
-    'ConstantInitializer', 'UniformInitializer', 'NormalInitializer',
-    'XavierInitializer'
-]
+__all__ = ['Constant', 'Uniform', 'Normal', 'Xavier']


 class Initializer(object):
@@ -368,3 +365,19 @@ def __call__(self, var, block):
             })
         var.op = op
         return op
+
+
+# We short the class name, since users will use the initializer with the package
+# name. The sample code:
+#
+# import paddle.fluid as fluid
+#
+# hidden = fluid.layers.fc(...,
+#                          param_attr=ParamAttr(fluid.initializer.Xavier()))
+#
+# It is no need to add an `Initializer` as the class suffix
+Constant = ConstantInitializer
+Uniform = UniformInitializer
+Normal = NormalInitializer
+Xavier = XavierInitializer
+MSRA = MSRAInitializer
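
The aliases keep the original classes intact and simply expose shorter public names, so both spellings refer to the same objects. A minimal sketch:

import paddle.v2.fluid as fluid

# The short names are plain aliases of the existing classes.
assert fluid.initializer.Xavier is fluid.initializer.XavierInitializer
assert fluid.initializer.Constant is fluid.initializer.ConstantInitializer

w_init = fluid.initializer.Xavier()        # was XavierInitializer()
b_init = fluid.initializer.Constant(0.0)   # was ConstantInitializer(0.0)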

python/paddle/v2/fluid/layer_helper.py

Lines changed: 5 additions & 6 deletions
@@ -1,10 +1,9 @@
 import copy
 import itertools

-from paddle.v2.fluid.framework import Variable, g_main_program, \
-    g_startup_program, unique_name, Program, dtype_is_floating
-from paddle.v2.fluid.initializer import ConstantInitializer, \
-    UniformInitializer, XavierInitializer
+from framework import Variable, g_main_program, \
+    g_startup_program, unique_name, dtype_is_floating
+from paddle.v2.fluid.initializer import Constant, Xavier


 class LayerHelper(object):
@@ -209,7 +208,7 @@ def append_activation(self, input_var):

     def _get_default_initializer(self, dtype):
         if dtype is None or dtype_is_floating(dtype) is True:
-            return XavierInitializer()
+            return Xavier()
         else:
             # For integer and boolean types, initialize with all zeros
-            return ConstantInitializer()
+            return Constant()

python/paddle/v2/fluid/layers.py

Lines changed: 16 additions & 20 deletions
@@ -1,9 +1,7 @@
-import paddle.v2.fluid.core as core
-import paddle.v2.fluid.proto.framework_pb2 as framework_pb2
-from paddle.v2.fluid.framework import OpProtoHolder, Variable, Program, \
-    Operator
-from paddle.v2.fluid.initializer import ConstantInitializer, \
-    NormalInitializer, XavierInitializer
+from . import core
+import proto.framework_pb2 as framework_pb2
+from framework import OpProtoHolder, Variable, Program, Operator
+from initializer import Constant, Normal, Xavier
 from paddle.v2.fluid.layer_helper import LayerHelper, unique_name
 import re
 import cStringIO

@@ -58,10 +56,10 @@ def fc(input,
     """

     def _get_default_param_initializer():
-        return XavierInitializer()
+        return Xavier()

     def _get_default_bias_initializer():
-        return ConstantInitializer()
+        return Constant()

     helper = LayerHelper('fc', **locals())

@@ -139,7 +137,7 @@ def embedding(input,
     """

     def _get_default_param_initializer():
-        return XavierInitializer()
+        return Xavier()

     helper = LayerHelper('embedding', **locals())
     w = helper.create_parameter(

@@ -477,7 +475,7 @@ def linear_chain_crf(input,
                      main_program=None,
                      startup_program=None):
     def _get_default_param_initializer():
-        return XavierInitializer()
+        return Xavier()

     helper = LayerHelper('linear_chain_crf', **locals())
     size = input.shape[1]

@@ -661,10 +659,10 @@ def sequence_conv(input,
     """

     def _get_default_bias_initializer():
-        return ConstantInitializer()
+        return Constant()

     def _get_default_param_initializer():
-        return XavierInitializer()
+        return Xavier()

     # FIXME(dzh) : want to unify the argument of python layer
     # function. So we ignore some unecessary attributes.

@@ -725,11 +723,11 @@ def conv2d(input,
     """

     def _get_default_bias_initializer():
-        return ConstantInitializer()
+        return Constant()

     def _get_default_param_initializer(filter_size, num_channels):
         std = (2.0 / (filter_size[0]**2 * num_channels))**0.5
-        return NormalInitializer(0.0, std, 0)
+        return Normal(0.0, std, 0)

     helper = LayerHelper('conv2d', **locals())
     dtype = helper.input_dtype()

@@ -878,22 +876,20 @@ def batch_norm(input,
         attr=helper.param_attr,
         shape=param_shape,
         dtype=dtype,
-        initializer=ConstantInitializer(1.0))
+        initializer=Constant(1.0))
     bias = helper.create_parameter(
         attr=helper.param_attr,
         shape=param_shape,
         dtype=dtype,
-        initializer=ConstantInitializer(0.0))
+        initializer=Constant(0.0))

     mean = helper.create_global_variable(
         dtype=input.dtype, shape=param_shape, persistable=True)
-    helper.set_variable_initializer(
-        var=mean, initializer=ConstantInitializer(0.0))
+    helper.set_variable_initializer(var=mean, initializer=Constant(0.0))

     variance = helper.create_global_variable(
         dtype=input.dtype, shape=param_shape, persistable=True)
-    helper.set_variable_initializer(
-        var=variance, initializer=ConstantInitializer(1.0))
+    helper.set_variable_initializer(var=variance, initializer=Constant(1.0))

     # create output
     # mean and mean_out share the same memory
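
With the renames in place, the layer defaults behave as before: `fc`, `embedding`, `sequence_conv`, and `linear_chain_crf` fall back on `Xavier()` weights and `Constant()` biases, while `conv2d` draws weights from `Normal(0.0, std, 0)` with an MSRA-style std. A hedged sketch of relying on those defaults (the layer argument names are assumptions, not shown in this diff):

import paddle.v2.fluid as fluid

def two_layer_net(x):
    # `x` stands in for an input variable built elsewhere (e.g. a data layer).
    # No initializers are passed, so fc uses _get_default_param_initializer()
    # -> Xavier() for the weight and _get_default_bias_initializer()
    # -> Constant() for the bias.
    hidden = fluid.layers.fc(input=x, size=128, act='relu')
    return fluid.layers.fc(input=hidden, size=10, act='softmax')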

python/paddle/v2/fluid/nets.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-import paddle.v2.fluid.layers as layers
+import layers

 __all__ = ["simple_img_conv_pool", "sequence_conv_pool"]

python/paddle/v2/fluid/optimizer.py

Lines changed: 28 additions & 15 deletions
@@ -1,16 +1,13 @@
 from collections import defaultdict

-import paddle.v2.fluid.framework as framework
-from paddle.v2.fluid.framework import unique_name, Program
-from paddle.v2.fluid.backward import append_backward_ops
-from paddle.v2.fluid.initializer import ConstantInitializer
-from paddle.v2.fluid.regularizer import append_regularization_ops
-from paddle.v2.fluid.layer_helper import LayerHelper
+import framework
+from backward import append_backward_ops
+from framework import unique_name
+from initializer import Constant
+from layer_helper import LayerHelper
+from regularizer import append_regularization_ops

-__all__ = [
-    'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer',
-    'AdamaxOptimizer', 'DecayedAdagradOptimizer'
-]
+__all__ = ['SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad']


 class Optimizer(object):

@@ -48,7 +45,7 @@ def _create_param_lr(self, param_and_grad):
             persistable=True)
         param_lr = param_lr * self._learning_rate
         self.helper.set_variable_initializer(
-            var=param_lr_var, initializer=ConstantInitializer(param_lr))
+            var=param_lr_var, initializer=Constant(param_lr))
         return param_lr_var

     def _create_accumulators(self, block, parameters):

@@ -96,7 +93,7 @@ def _add_accumulator(self, name, param, dtype=None, fill_value=0.0):
             type=param.type,
             shape=param.shape)
         self.helper.set_variable_initializer(
-            var, initializer=ConstantInitializer(value=float(fill_value)))
+            var, initializer=Constant(value=float(fill_value)))
         self._accumulators[name][param.name] = var

     def _get_accumulator(self, name, param):

@@ -360,7 +357,7 @@ def _create_accumulators(self, block, parameters):
             lod_level=0,
             persistable=True)
         self.helper.set_variable_initializer(
-            self._beta1_pow_acc, initializer=ConstantInitializer(self._beta1))
+            self._beta1_pow_acc, initializer=Constant(self._beta1))

         self._beta2_pow_acc = self.helper.create_global_variable(
             name=unique_name('beta2_pow_acc'),

@@ -370,7 +367,7 @@ def _create_accumulators(self, block, parameters):
             persistable=True)

         self.helper.set_variable_initializer(
-            self._beta2_pow_acc, initializer=ConstantInitializer(self._beta2))
+            self._beta2_pow_acc, initializer=Constant(self._beta2))

         # Create accumulator tensors for first and second moments
         for p in parameters:

@@ -462,7 +459,7 @@ def _create_accumulators(self, block, parameters):
             lod_level=0,
             persistable=True)
         self.helper.set_variable_initializer(
-            self._beta1_pow_acc, initializer=ConstantInitializer(self._beta1))
+            self._beta1_pow_acc, initializer=Constant(self._beta1))

         # Create accumulator tensors for first moment and infinity norm
         for p in parameters:

@@ -559,3 +556,19 @@ def _append_optimize_op(self, block, param_and_grad):
             attrs={"epsilon": self._epsilon})

         return decayed_adagrad_op
+
+
+# We short the class name, since users will use the optimizer with the package
+# name. The sample code:
+#
+# import paddle.fluid as fluid
+#
+# sgd = fluid.optimizer.SGD(...)
+#
+# It is no need to add an `Optimizer` as the class suffix
+SGD = SGDOptimizer
+Momentum = MomentumOptimizer
+Adagrad = AdagradOptimizer
+Adam = AdamOptimizer
+Adamax = AdamaxOptimizer
+DecayedAdagrad = DecayedAdagradOptimizer
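
The optimizers follow the same aliasing pattern as the initializers. A minimal sketch (the `learning_rate` argument and the `minimize` call are assumptions based on the existing classes, not part of this diff):

import paddle.v2.fluid as fluid

# SGD is literally the existing SGDOptimizer under a shorter public name.
assert fluid.optimizer.SGD is fluid.optimizer.SGDOptimizer

sgd = fluid.optimizer.SGD(learning_rate=0.01)   # was SGDOptimizer(...)
# sgd.minimize(avg_cost)  # avg_cost: a loss variable built elsewhere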

python/paddle/v2/fluid/regularizer.py

Lines changed: 15 additions & 4 deletions
@@ -1,8 +1,6 @@
-import paddle.v2.fluid.framework as framework
+import framework

-__all__ = [
-    'append_regularization_ops', 'L2DecayRegularizer', 'L1DecayRegularizer'
-]
+__all__ = ['append_regularization_ops', 'L1Decay', 'L2Decay']


 def append_regularization_ops(parameters_and_grads):
@@ -139,3 +137,16 @@ def __call__(self, param, block):
             attrs={"scale": self._regularization_coeff})

         return decay
+
+
+# We short the class name, since users will use the regulaizer with the package
+# name. The sample code:
+#
+# import paddle.fluid as fluid
+#
+# hidden = fluid.layers.fc(...,
+#                          param_attr=ParamAttr(fluid.regularizer.Xavier()))
+#
+# It is no need to add a `Regularizer` as the class suffix
+L1Decay = L1DecayRegularizer
+L2Decay = L2DecayRegularizer
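
The regularizers get the same treatment. A minimal sketch (the `regularization_coeff` keyword is inferred from the `_regularization_coeff` attribute in the context above and is an assumption):

import paddle.v2.fluid as fluid

assert fluid.regularizer.L2Decay is fluid.regularizer.L2DecayRegularizer

# was L2DecayRegularizer(regularization_coeff=1e-4)
l2 = fluid.regularizer.L2Decay(regularization_coeff=1e-4)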
