
Commit 381baca

Fix piecewise_decay and fix a unittest error
1 parent 982d423 commit 381baca

2 files changed: +29 -28 lines changed

python/paddle/fluid/layers/learning_rate_scheduler.py

Lines changed: 22 additions & 22 deletions
```diff
@@ -277,28 +277,28 @@ def piecewise_decay(boundaries, values):
 
     global_step = _decay_step_counter()
 
-    with init_on_cpu():
-        lr = tensor.create_global_var(
-            shape=[1],
-            value=0.0,
-            dtype='float32',
-            persistable=True,
-            name="learning_rate")
-
-        with control_flow.Switch() as switch:
-            for i in range(len(boundaries)):
-                boundary_val = tensor.fill_constant(
-                    shape=[1], dtype='float32', value=float(boundaries[i]))
-                value_var = tensor.fill_constant(
-                    shape=[1], dtype='float32', value=float(values[i]))
-                with switch.case(global_step < boundary_val):
-                    tensor.assign(value_var, lr)
-            last_value_var = tensor.fill_constant(
+    lr = tensor.create_global_var(
+        shape=[1],
+        value=0.0,
+        dtype='float32',
+        persistable=True,
+        name="learning_rate")
+
+    with control_flow.Switch() as switch:
+        for i in range(len(boundaries)):
+            boundary_val = tensor.fill_constant(
                 shape=[1],
                 dtype='float32',
-                value=float(values[len(values) - 1]))
-            with switch.default():
-                tensor.assign(last_value_var, lr)
+                value=float(boundaries[i]),
+                force_cpu=True)
+            value_var = tensor.fill_constant(
+                shape=[1], dtype='float32', value=float(values[i]))
+            with switch.case(global_step < boundary_val):
+                tensor.assign(value_var, lr)
+        last_value_var = tensor.fill_constant(
+            shape=[1], dtype='float32', value=float(values[len(values) - 1]))
+        with switch.default():
+            tensor.assign(last_value_var, lr)
 
     return lr
 
```

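The fix drops the init_on_cpu() wrapper and instead pins only the boundary constants to CPU with force_cpu=True, presumably so the global_step < boundary_val comparison inside the Switch is evaluated against the CPU-resident step counter without forcing the learning-rate variable itself onto the CPU. The arithmetic of the schedule is unchanged; as a minimal pure-Python sketch of what it computes (piecewise_decay_value is a hypothetical name for illustration, not part of the Paddle API):

```python
def piecewise_decay_value(global_step, boundaries, values):
    # values needs exactly one more entry than boundaries:
    # values[i] applies while global_step < boundaries[i],
    # and values[-1] applies once every boundary is passed.
    assert len(values) == len(boundaries) + 1
    for i, boundary in enumerate(boundaries):
        if global_step < boundary:
            return values[i]
    return values[-1]

# With boundaries=[10000, 20000] and values=[1.0, 0.5, 0.1]:
assert piecewise_decay_value(0, [10000, 20000], [1.0, 0.5, 0.1]) == 1.0
assert piecewise_decay_value(15000, [10000, 20000], [1.0, 0.5, 0.1]) == 0.5
assert piecewise_decay_value(25000, [10000, 20000], [1.0, 0.5, 0.1]) == 0.1
```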
```diff
@@ -333,9 +333,9 @@ def _balanced_weight(param_norm, grad_norm):
         grad_norm = ops.sqrt(nn.reduce_sum(input=ops.square(grad)))
         if type(param_lr) == float and param_lr == 1.0:
             decayed_lr = learning_rate * param_norm \
-                / _balanced_weight(param_norm, grad_norm)
+                         / _balanced_weight(param_norm, grad_norm)
         else:
             decayed_lr = learning_rate * param_lr * param_norm \
-                / _balanced_weight(param_norm, grad_norm)
+                         / _balanced_weight(param_norm, grad_norm)
         # set back param local learning rate
         param.optimize_attr['learning_rate'] = decayed_lr
```

python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py

Lines changed: 7 additions & 6 deletions
```diff
@@ -91,20 +91,21 @@ def check_decay(self, python_decay_fn, fluid_decay_fn, kwargs):
 
     def check_decay_with_place(self, place, python_decay_fn, fluid_decay_fn,
                                kwargs):
+        main_prog = fluid.Program()
+        startup_prog = fluid.Program()
 
-        decayed_lr = fluid_decay_fn(**kwargs)
+        with fluid.program_guard(main_prog, startup_prog):
+            decayed_lr = fluid_decay_fn(**kwargs)
 
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
 
-        exe.run(fluid.default_startup_program())
+        exe.run(startup_prog)
 
-        fluid.memory_optimize(fluid.default_main_program())
+        # fluid.memory_optimize(main_prog)
 
         for step in range(10):
-            lr_val, = exe.run(fluid.default_main_program(),
-                              feed={},
-                              fetch_list=[decayed_lr])
+            lr_val, = exe.run(main_prog, feed={}, fetch_list=[decayed_lr])
             python_decayed_lr = python_decay_fn(
                 global_step=float(step), **kwargs)
             self.assertAlmostEqual(
```

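The unittest fix builds each schedule in fresh Program objects through fluid.program_guard instead of mutating the process-wide defaults, so one test case can no longer leak its operators into the next via fluid.default_main_program(). A minimal standalone sketch of that pattern, assuming the scheduler is exposed as fluid.layers.piecewise_decay in this Paddle version (the boundaries and values here are made-up example numbers):

```python
import paddle.fluid as fluid

# Build the schedule in fresh programs rather than the global defaults.
main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    lr = fluid.layers.piecewise_decay(
        boundaries=[100, 200], values=[1.0, 0.5, 0.1])

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)

# Each run advances the step counter held inside main_prog only.
for step in range(3):
    lr_val, = exe.run(main_prog, feed={}, fetch_list=[lr])
    print(step, lr_val)
```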