Commit 316afbb

add new API:LambdaDecay,test=develop (#25367)
1 parent 39d85bf commit 316afbb

File tree

2 files changed: +95 -1 lines changed

python/paddle/fluid/dygraph/learning_rate_scheduler.py

Lines changed: 68 additions & 1 deletion
@@ -23,7 +23,7 @@
 __all__ = [
     'NoamDecay', 'PiecewiseDecay', 'NaturalExpDecay', 'ExponentialDecay',
     'InverseTimeDecay', 'PolynomialDecay', 'CosineDecay', 'LinearLrWarmup',
-    'ReduceLROnPlateau', 'StepDecay', 'MultiStepDecay'
+    'ReduceLROnPlateau', 'StepDecay', 'MultiStepDecay', 'LambdaDecay'
 ]


@@ -1086,3 +1086,70 @@ def get_lr(self):
                 return self.base_lr * (decay_rate**i)

         return self.base_lr * (decay_rate**len(self.milestones))
+
+
+class LambdaDecay(_LearningRateEpochDecay):
+    """
+    :api_attr: imperative
+
+    Sets the learning rate of ``optimizer`` to the initial lr times a multiplicative factor,
+    and this multiplicative factor is computed by the function ``lr_lambda``, which receives
+    the current ``epoch`` as its argument.
+
+    The algorithm can be described as the code below.
+
+    .. code-block:: text
+
+        learning_rate = 0.5        # init learning_rate
+        lr_lambda = lambda epoch: 0.95 ** epoch
+
+        learning_rate = 0.5        # epoch 0
+        learning_rate = 0.475      # epoch 1
+        learning_rate = 0.45125    # epoch 2
+
+    Parameters:
+        learning_rate (float|int): The initial learning rate. It can be set to a python float or int number.
+        lr_lambda (function): A function which computes a multiplicative factor given an integer
+            parameter ``epoch``; the initial learning rate is then multiplied by this factor.
+
+    Returns:
+        None.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle.fluid as fluid
+            import numpy as np
+            with fluid.dygraph.guard():
+                x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
+                linear = fluid.dygraph.Linear(10, 10)
+                input = fluid.dygraph.to_variable(x)
+                scheduler = fluid.dygraph.LambdaDecay(0.5, lr_lambda=lambda x: 0.95**x)
+                adam = fluid.optimizer.Adam(learning_rate=scheduler, parameter_list=linear.parameters())
+
+                for epoch in range(6):
+                    for batch_id in range(5):
+                        out = linear(input)
+                        loss = fluid.layers.reduce_mean(out)
+                        adam.minimize(loss)
+                    scheduler.epoch()
+
+                    print("epoch: {}, current lr is {}".format(epoch, adam.current_step_lr()))
+                    # epoch: 0, current lr is 0.5
+                    # epoch: 1, current lr is 0.475
+                    # epoch: 2, current lr is 0.45125
+
+    """
+
+    def __init__(self, learning_rate, lr_lambda):
+        if not callable(lr_lambda):
+            raise TypeError(
+                "The type of 'lr_lambda' in 'LambdaDecay' must be 'function', but received %s."
+                % type(lr_lambda))
+
+        self.lr_lambda = lr_lambda
+        super(LambdaDecay, self).__init__(learning_rate)
+
+    def get_lr(self):
+        # Scale the initial learning rate by the factor returned by lr_lambda for the current epoch.
+        base_lr = self.create_lr_var(self.base_lr)
+
+        return self.base_lr * self.lr_lambda(self.epoch_num)
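The schedule the docstring describes is simply the initial learning rate multiplied by lr_lambda(epoch) once per epoch. As a minimal standalone sketch (plain Python, no Paddle required; the variable names here are illustrative only), the values quoted in the docstring can be reproduced like this:

# Sketch of the LambdaDecay arithmetic described in the docstring above.
base_lr = 0.5
lr_lambda = lambda epoch: 0.95 ** epoch

for epoch in range(3):
    lr = base_lr * lr_lambda(epoch)
    print("epoch: {}, lr: {:.5f}".format(epoch, lr))
# epoch: 0, lr: 0.50000
# epoch: 1, lr: 0.47500
# epoch: 2, lr: 0.45125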

python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py

Lines changed: 27 additions & 0 deletions
@@ -116,6 +116,10 @@ def step_decay(global_step, learning_rate, step_size, decay_rate=0.1):
     return learning_rate * math.pow(decay_rate, global_step // step_size)


+def lambda_decay(global_step, learning_rate, lr_lambda):
+    return learning_rate * lr_lambda(global_step)
+
+
 class TestLearningRateDecayDygraph(unittest.TestCase):
     def test_NoamDecay(self):
         with fluid.dygraph.guard():
@@ -217,6 +221,29 @@ def test_StepDecay(self):
             with self.assertRaises(ValueError):
                 lr = fluid.dygraph.MultiStepDecay(2.0, [20, 30, 50])

+    def test_LambdaDecay(self):
+        with fluid.dygraph.guard():
+            learning_rate = 0.5
+            lr_lambda = lambda x: 0.95**x
+            scheduler = fluid.dygraph.LambdaDecay(learning_rate, lr_lambda)
+
+            linear = fluid.dygraph.nn.Linear(10, 10)
+            adam = fluid.optimizer.Adam(
+                scheduler, parameter_list=linear.parameters())
+
+            for epoch in range(30):
+                right_result = lambda_decay(epoch, learning_rate, lr_lambda)
+                fluid_result = scheduler().numpy()[0]
+                scheduler.epoch()
+                self.assertAlmostEqual(
+                    right_result,
+                    fluid_result,
+                    msg='Failed lr scheduler in epoch {0}, Python result is {1}, Fluid result is {2}'.
+                    format(epoch, right_result, fluid_result))
+
+            with self.assertRaises(TypeError):
+                lr = fluid.dygraph.LambdaDecay(learning_rate, "test")
+

 class TestLearningRateDecay(unittest.TestCase):
     def check_decay(self, python_decay_fn, fluid_decay_fn, kwargs):
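The test above compares the scheduler against the plain-Python lambda_decay reference once per epoch, then checks the TypeError guard. Below is a minimal self-contained sketch of that same verification pattern, using a hypothetical SimpleLambdaDecay stand-in (not Paddle's class) so the logic can run without a Paddle install:

import unittest

# Hypothetical plain-Python stand-in for LambdaDecay (not Paddle's class), used
# only to illustrate the per-epoch verification pattern of the test above.
class SimpleLambdaDecay(object):
    def __init__(self, learning_rate, lr_lambda):
        if not callable(lr_lambda):
            raise TypeError("lr_lambda must be callable, but received %s." %
                            type(lr_lambda))
        self.base_lr = learning_rate
        self.lr_lambda = lr_lambda
        self.epoch_num = 0

    def __call__(self):
        # Current lr = initial lr * multiplicative factor for the current epoch.
        return self.base_lr * self.lr_lambda(self.epoch_num)

    def epoch(self):
        self.epoch_num += 1


class TestSimpleLambdaDecay(unittest.TestCase):
    def test_matches_reference(self):
        learning_rate = 0.5
        lr_lambda = lambda x: 0.95**x
        scheduler = SimpleLambdaDecay(learning_rate, lr_lambda)
        for epoch in range(30):
            # Reference value computed directly, mirroring lambda_decay() above.
            expected = learning_rate * lr_lambda(epoch)
            self.assertAlmostEqual(expected, scheduler())
            scheduler.epoch()
        with self.assertRaises(TypeError):
            SimpleLambdaDecay(learning_rate, "test")


if __name__ == "__main__":
    unittest.main()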
