
Commit 154e1d0

Merge pull request #4972 from typhoonzero/fix_v2_optimizer_order

Fix v2 optimizer define order

2 parents: 48173e8 + 6942eb2

File tree

5 files changed: +128 -0 lines changed

python/paddle/v2/parameters.py

Lines changed: 4 additions & 0 deletions

@@ -101,6 +101,10 @@ def __append_config__(self, param_conf):
         self.__param_conf__[param_conf.name] = param_conf
 
+    def update_param_conf(self, model_config):
+        for p in model_config.parameters:
+            self.__param_conf__[p.name] = p
+
     def keys(self):
         """
         keys are the names of each parameter.
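
The parameters.py change adds a small helper: update_param_conf overwrites the cached ParameterConfig protos with the ones carried by a ModelConfig, keyed by parameter name, so a Parameters object created before the optimizer can be brought up to date afterwards. A minimal sketch of the call pattern (the same call is wired into trainer.SGD further below); cost is assumed to be an already-defined v2 cost layer:

    import paddle.v2 as paddle
    from paddle.v2.topology import Topology

    parameters = paddle.parameters.create(cost)
    topology = Topology(cost)
    # Re-sync the cached ParameterConfig protos from the topology's ModelConfig.
    parameters.update_param_conf(topology.proto())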

python/paddle/v2/tests/CMakeLists.txt

Lines changed: 1 addition & 0 deletions

@@ -5,3 +5,4 @@ py_test(test_topology SRCS test_topology.py)
 py_test(test_rnn_layer SRCS test_rnn_layer.py)
 py_test(test_parameters SRCS test_parameters.py)
 py_test(test_data_feeder SRCS test_data_feeder.py)
+py_test(test_paramconf_order SRCS test_paramconf_order.py)
python/paddle/v2/tests/test_paramconf_order.py

Lines changed: 85 additions & 0 deletions

@@ -0,0 +1,85 @@
+# Copyright PaddlePaddle contributors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import unittest
+import math
+import paddle.v2 as paddle
+
+
+def wordemb(inlayer):
+    wordemb = paddle.layer.table_projection(
+        input=inlayer,
+        size=5,
+        param_attr=paddle.attr.Param(
+            name="_proj", initial_std=0.001, learning_rate=1, l2_rate=0))
+    return wordemb
+
+
+def train():
+    word_dict = paddle.dataset.imikolov.build_dict()
+    dict_size = len(word_dict)
+    # Every layer takes integer value of range [0, dict_size)
+    firstword = paddle.layer.data(
+        name="firstw", type=paddle.data_type.integer_value(dict_size))
+    secondword = paddle.layer.data(
+        name="secondw", type=paddle.data_type.integer_value(dict_size))
+    thirdword = paddle.layer.data(
+        name="thirdw", type=paddle.data_type.integer_value(dict_size))
+    fourthword = paddle.layer.data(
+        name="fourthw", type=paddle.data_type.integer_value(dict_size))
+    nextword = paddle.layer.data(
+        name="fifthw", type=paddle.data_type.integer_value(dict_size))
+
+    Efirst = wordemb(firstword)
+    Esecond = wordemb(secondword)
+    Ethird = wordemb(thirdword)
+    Efourth = wordemb(fourthword)
+
+    contextemb = paddle.layer.concat(input=[Efirst, Esecond, Ethird, Efourth])
+    hidden1 = paddle.layer.fc(name="fc1",
+                              input=contextemb,
+                              size=128,
+                              act=paddle.activation.Sigmoid(),
+                              layer_attr=paddle.attr.Extra(drop_rate=0.5),
+                              bias_attr=paddle.attr.Param(learning_rate=2),
+                              param_attr=paddle.attr.Param(
+                                  initial_std=1. / math.sqrt(5 * 8),
+                                  learning_rate=1,
+                                  l2_rate=6e-4))
+    predictword = paddle.layer.fc(input=hidden1,
+                                  size=dict_size,
+                                  bias_attr=paddle.attr.Param(learning_rate=2),
+                                  act=paddle.activation.Softmax())
+
+    return paddle.layer.classification_cost(input=predictword, label=nextword)
+
+
+class TestParamConfOrder(unittest.TestCase):
+    def test_param_conf_order(self):
+        paddle.init()
+        cost = train()
+        parameters = paddle.parameters.create(cost)
+        adagrad = paddle.optimizer.AdaGrad(
+            learning_rate=3e-3,
+            regularization=paddle.optimizer.L2Regularization(rate=8e-4))
+
+        trainer = paddle.trainer.SGD(cost, parameters, adagrad)
+        for p in trainer.get_topology_proto().parameters:
+            if p.name == "_fc1.w0":
+                self.assertEqual(p.decay_rate, 6e-4)
+            else:
+                self.assertEqual(p.decay_rate, 8e-4)
+
+
+if __name__ == '__main__':
+    unittest.main()
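
What this test pins down: the layer definition gives the "fc1" weight an explicit per-parameter l2_rate of 6e-4, while the AdaGrad optimizer, created only after all layers, asks for a global L2 rate of 8e-4. With the ordering fix, the merged topology proto must keep decay_rate 6e-4 on "_fc1.w0" and apply 8e-4 to every other parameter; on a hedged reading of the intent, parameters defined ahead of the optimizer would previously never have picked up the optimizer's 8e-4 default at all.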

python/paddle/v2/topology.py

Lines changed: 30 additions & 0 deletions

@@ -19,6 +19,7 @@
 import layer as v2_layer
 import config_base
 import cPickle
+from paddle.trainer import config_parser as cp
 
 __all__ = ['Topology']
 
@@ -50,6 +51,35 @@ def __check__(layers):
         assert isinstance(self.__model_config__, ModelConfig)
 
+    def update_from_default(self):
+        # HACK(typhoonzero): update ParameterConfig(proto) in case of
+        # optimizers are defined after layers, or between layers.
+        # Must be called from trainer.__init__()
+        for parameter in self.__model_config__.parameters:
+            if parameter.momentum == 0.0 and cp.g_default_momentum:
+                parameter.momentum = cp.g_default_momentum
+            if parameter.decay_rate == 0.0 and cp.g_default_decay_rate:
+                parameter.decay_rate = cp.g_default_decay_rate
+            if parameter.initial_mean == 0.0:
+                parameter.initial_mean = cp.g_default_initial_mean
+            if parameter.initial_std == 0.01:
+                parameter.initial_std = cp.g_default_initial_std
+            if parameter.initial_strategy == 0:
+                parameter.initial_strategy = cp.g_default_initial_strategy
+            if parameter.initial_smart == False:
+                parameter.initial_smart = cp.g_default_initial_smart
+            if parameter.num_batches_regularization == 1 and \
+                    cp.g_default_num_batches_regularization:
+                parameter.num_batches_regularization = \
+                    cp.g_default_num_batches_regularization
+            if parameter.gradient_clipping_threshold == 0.0 and \
+                    cp.g_default_gradient_clipping_threshold:
+                parameter.gradient_clipping_threshold = \
+                    cp.g_default_gradient_clipping_threshold
+            if parameter.device == -1 and cp.g_default_device:
+                parameter.device = cp.g_default_device
+            # FIXME(typhoonzero): ignored: update_hooks, g_default_compact_func
+
     def use_sparse_updater(self):
         """
         check if any parameter require to use sparse_update
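
How update_from_default works: the v2 optimizer records its settings as module-level defaults in paddle.trainer.config_parser (e.g. cp.g_default_decay_rate, cp.g_default_momentum). If the optimizer is constructed after the layers, the ParameterConfig protos already created for those layers still hold the protobuf defaults, so this method walks the ModelConfig and copies each global default over any field that is still untouched. A minimal sketch of the effect, assuming cost is a v2 cost layer and the optimizer has already been constructed:

    from paddle.v2.topology import Topology

    topology = Topology(cost)
    # Fold the optimizer-driven defaults (decay rate, momentum, clipping, ...)
    # into every ParameterConfig that still carries the proto default value.
    topology.update_from_default()
    for param in topology.proto().parameters:
        print(param.name, param.decay_rate, param.momentum)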

python/paddle/v2/trainer.py

Lines changed: 8 additions & 0 deletions

@@ -64,6 +64,11 @@ def __init__(self,
                              "paddle.v2.optimizer.Optimizer")
         import py_paddle.swig_paddle as api
         topology = Topology(cost, extra_layers=extra_layers)
+        # HACK(typhoonzero): update ParameterConfig(proto) in case of optimizers
+        # are defined after layers, or between layers.
+        topology.update_from_default()
+        parameters.update_param_conf(topology.proto())
+
         self.__optimizer__ = update_equation
         self.__topology__ = topology
         self.__parameters__ = parameters

@@ -91,6 +96,9 @@ def __init__(self,
         self.__parameters__.append_gradient_machine(gm)
         self.__parameter_updater__ = None
 
+    def get_topology_proto(self):
+        return self.__topology_in_proto__
+
     def __use_remote_sparse_updater__(self):
         return self.__use_sparse_updater__ and not self.__is_local__
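
The trainer change is where the fix is wired in: when paddle.trainer.SGD is constructed it first builds the Topology, then merges the optimizer's global defaults into the parameter protos (update_from_default) and pushes the merged protos back into the Parameters object (update_param_conf), so existing user code needs no changes. The new get_topology_proto accessor exposes the merged ModelConfig, which is what the test above inspects. A minimal usage sketch, reusing cost, parameters and adagrad from the test:

    trainer = paddle.trainer.SGD(cost, parameters, adagrad)
    # The returned proto reflects the optimizer settings even though the
    # optimizer was defined after the layers.
    proto = trainer.get_topology_proto()
    decay_rates = {p.name: p.decay_rate for p in proto.parameters}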
