
Commit 28ff1cd

create learning rate for each program
1 parent 50a6e7c commit 28ff1cd

File tree

1 file changed: +31 / -19 lines

1 file changed

+31
-19
lines changed

python/paddle/v2/fluid/optimizer.py

Lines changed: 31 additions & 19 deletions
@@ -36,10 +36,15 @@ class Optimizer(object):
     """
 
     def __init__(self, learning_rate, global_step=None, regularization=None):
-        assert learning_rate is not None
+        if not isinstance(learning_rate, float) and \
+                not isinstance(learning_rate, framework.Variable):
+            raise ValueError("learning rate should be float or Variable")
         self._global_step = global_step
         self.regularization = regularization
-        self._global_learning_rate = learning_rate
+        self._learning_rate = learning_rate
+        # each program should have an independent learning rate
+        # program -> Variable(learning_rate)
+        self._learning_rate_map = defaultdict(lambda: None)
         # Dictionary of accumulators. Some optimizer subclasses need to
         # allocate and manage extra variables associated with the parameters
         # to train. These variables are called accumulators.
@@ -48,26 +53,33 @@ def __init__(self, learning_rate, global_step=None, regularization=None):
         self.helper = None
 
     def _create_global_learning_rate(self):
-        if isinstance(self._global_learning_rate, float):
-            self._global_learning_rate = layers.create_global_var(
-                name=unique_name.generate("learning_rate"),
-                shape=[1],
-                value=float(self._global_learning_rate),
-                dtype='float32',
-                persistable=True)
-
-        if not isinstance(self._global_learning_rate, framework.Variable):
-            raise ValueError("learning rate should be a Variable, "
-                             "actual type is %s",
-                             type(self._global_learning_rate))
-
-    @property
-    def global_learning_rate(self):
+        lr = self.global_learning_rate()
+
+        if isinstance(lr, framework.Variable):
+            return
+        else:
+            if not isinstance(self._learning_rate, float):
+                raise ValueError(
+                    "learning rate variable was created outside the optimizer, "
+                    "cannot create a new learning rate variable for a new program")
+
+        # create learning rate in the current main program
+        self._learning_rate_map[framework.default_main_program(
+        )] = layers.create_global_var(
+            name=unique_name.generate("learning_rate"),
+            shape=[1],
+            value=float(self._learning_rate),
+            dtype='float32',
+            persistable=True)
+
+    def global_learning_rate(self, program=None):
         """
         get global decayed learning rate
         :return:
         """
-        return self._global_learning_rate
+        if program is None:
+            program = framework.default_main_program()
+        return self._learning_rate_map[program]
 
     def _append_optimize_op(self, block, param_and_grad):
         """ append optimize operator to block and return all the added optimize_op
@@ -78,7 +90,7 @@ def _create_param_lr(self, param_and_grad):
         # create learning rate variable for every parameter
         param = param_and_grad[0]
         param_lr = param.optimize_attr['learning_rate']
-        return self._global_learning_rate * param_lr
+        return self.global_learning_rate() * param_lr
 
     def _create_accumulators(self, block, parameters):
         """Create all accumulators needed by the parameters
