Skip to content

Commit ce32599

Browse files
authored
Merge pull request #1479 from reyoung/feature/simplize_v2_layer
Simplify layer.v2
2 parents 59f7778 + 3758993 commit ce32599

File tree

4 files changed

+61
-98
lines changed

4 files changed

+61
-98
lines changed

python/paddle/trainer_config_helpers/default_decorators.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,10 @@ def __wrapper__(*args, **kwargs):
5252
kwargs[name] = default_factory(func)
5353
return func(*args, **kwargs)
5454

55+
if hasattr(func, 'argspec'):
56+
__wrapper__.argspec = func.argspec
57+
else:
58+
__wrapper__.argspec = inspect.getargspec(func)
5559
return __wrapper__
5660

5761
return __impl__

python/paddle/trainer_config_helpers/layers.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414

1515
import functools
1616
import collections
17+
import inspect
1718

1819
from paddle.trainer.config_parser import *
1920
from .activations import LinearActivation, SigmoidActivation, TanhActivation, \
@@ -316,6 +317,11 @@ def wrapper(*args, **kwargs):
316317
val.check(method.__name__)
317318
return method(*args, **kwargs)
318319

320+
if hasattr(method, 'argspec'):
321+
wrapper.argspec = method.argspec
322+
else:
323+
wrapper.argspec = inspect.getargspec(method)
324+
319325
return wrapper
320326

321327
return decorator

python/paddle/v2/layer.py

Lines changed: 50 additions & 93 deletions
Original file line numberDiff line numberDiff line change
@@ -67,33 +67,22 @@ class for each layer creation function in paddle.trainer_config_helpers.layers.
6767
"""
6868

6969
import collections
70+
import inspect
7071

7172
import paddle.trainer_config_helpers as conf_helps
7273
from paddle.trainer_config_helpers.config_parser_utils import \
7374
parse_network_config as __parse__
7475

7576
from paddle.trainer_config_helpers.default_decorators import wrap_name_default
7677
from paddle.trainer_config_helpers.default_decorators import wrap_act_default
77-
from paddle.trainer_config_helpers.default_decorators import wrap_bias_attr_default
78+
from paddle.trainer_config_helpers.default_decorators import \
79+
wrap_bias_attr_default
7880
from paddle.trainer_config_helpers.layers import layer_support
7981

8082
import data_type
8183
import activation
82-
import attr
83-
84-
__all__ = [
85-
'parse_network', 'data', 'fc', 'conv_shift', 'img_conv', 'img_pool', 'spp',
86-
'maxout', 'img_cmrnorm', 'batch_norm', 'sum_to_one_norm', 'recurrent',
87-
'lstmemory', 'grumemory', 'pool', 'last_seq', 'first_seq', 'concat',
88-
'seq_concat', 'block_expand', 'expand', 'repeat', 'seq_reshape', 'addto',
89-
'linear_comb', 'interpolation', 'bilinear_interp', 'power', 'scaling',
90-
'slope_intercept', 'tensor', 'cos_sim', 'trans', 'max_id', 'sampling_id',
91-
'pad', 'classification_cost', 'cross_entropy_cost',
92-
'cross_entropy_with_selfnorm_cost', 'regression_cost',
93-
'multi_binary_label_cross_entropy_cost', 'rank_cost', 'lambda_cost',
94-
'sum_cost', 'huber_cost', 'crf', 'crf_decoding', 'ctc', 'warp_ctc', 'nce',
95-
'hsigmoid', 'eos'
96-
]
84+
85+
__all__ = ['parse_network', 'data']
9786

9887
__projection_names__ = filter(lambda x: x.endswith('_projection'),
9988
dir(conf_helps))
@@ -289,83 +278,51 @@ def mixed(size=0,
289278
AggregateLevel = conf_helps.layers.AggregateLevel
290279
ExpandLevel = conf_helps.layers.ExpandLevel
291280

292-
layer_list = [
293-
# [V2LayerImpl, V1_method_name, parent_names]
294-
# fully connected layers
295-
['fc', 'fc_layer', ['input']],
296-
# conv layers
297-
['conv_shift', 'conv_shift_layer', ['a', 'b']],
298-
['img_conv', 'img_conv_layer', ['input']],
299-
# image pooling layers
300-
['img_pool', 'img_pool_layer', ['input']],
301-
['spp', 'spp_layer', ['input']],
302-
['maxout', 'maxout_layer', ['input']],
303-
# norm layers
304-
['img_cmrnorm', 'img_cmrnorm_layer', ['input']],
305-
['batch_norm', 'batch_norm_layer', ['input']],
306-
['sum_to_one_norm', 'sum_to_one_norm_layer', ['input']],
307-
# recurrent layers
308-
['recurrent', 'recurrent_layer', ['input']],
309-
['lstmemory', 'lstmemory', ['input']],
310-
['grumemory', 'grumemory', ['input']],
311-
# aggregate layers
312-
['pool', 'pooling_layer', ['input']],
313-
['last_seq', 'last_seq', ['input']],
314-
['first_seq', 'first_seq', ['input']],
315-
['concat', 'concat_layer', ['input']],
316-
['seq_concat', 'seq_concat_layer', ['a', 'b']],
317-
# reshaping layers
318-
['block_expand', 'block_expand_layer', ['input']],
319-
['expand', 'expand_layer', ['input', 'expand_as']],
320-
['repeat', 'repeat_layer', ['input']],
321-
['rotate', 'rotate_layer', ['input']],
322-
['seq_reshape', 'seq_reshape_layer', ['input']],
323-
# math layers
324-
['addto', 'addto_layer', ['input']],
325-
['linear_comb', 'linear_comb_layer', ['weights', 'vectors']],
326-
['interpolation', 'interpolation_layer', ['input', 'weight']],
327-
['bilinear_interp', 'bilinear_interp_layer', ['input']],
328-
['power', 'power_layer', ['input', 'weight']],
329-
['scaling', 'scaling_layer', ['input', 'weight']],
330-
['slope_intercept', 'slope_intercept_layer', ['input']],
331-
['tensor', 'tensor_layer', ['a', 'b']],
332-
['cos_sim', 'cos_sim', ['a', 'b']],
333-
['trans', 'trans_layer', ['input']],
334-
# sampling layers
335-
['max_id', 'maxid_layer', ['input']],
336-
['sampling_id', 'sampling_id_layer', ['input']],
337-
# slicing and joining layers
338-
['pad', 'pad_layer', ['input']],
339-
# cost layers
340-
[
341-
'classification_cost', 'classification_cost',
342-
['input', 'label', 'weight']
343-
],
344-
['regression_cost', 'regression_cost', ['input', 'label', 'weight']],
345-
['cross_entropy_cost', 'cross_entropy', ['input', 'label']],
346-
[
347-
'cross_entropy_with_selfnorm_cost', 'cross_entropy_with_selfnorm',
348-
['input', 'label']
349-
],
350-
[
351-
'multi_binary_label_cross_entropy_cost',
352-
'multi_binary_label_cross_entropy', ['input', 'label']
353-
],
354-
['rank_cost', 'rank_cost', ['left', 'right', 'label', 'weight']],
355-
['lambda_cost', 'lambda_cost', ['input', 'score']],
356-
['sum_cost', 'sum_cost', ['input']],
357-
['huber_cost', 'huber_cost', ['input', 'label']],
358-
['crf', 'crf_layer', ['input', 'label']],
359-
['crf_decoding', 'crf_decoding_layer', ['input']],
360-
['ctc', 'ctc_layer', ['input', 'label']],
361-
['warp_ctc', 'warp_ctc_layer', ['input', 'label']],
362-
['nce', 'nce_layer', ['input', 'label']],
363-
['hsigmoid', 'hsigmoid', ['input', 'label']],
364-
# check layers
365-
['eos', 'eos_layer', ['input']]
366-
]
367-
for l in layer_list:
368-
globals()[l[0]] = __convert_to_v2__(l[1], l[2])
281+
282+
def __layer_name_mapping__(inname):
283+
if inname in ['data_layer', 'memory', 'mixed_layer']:
284+
# Do Not handle these layers
285+
return
286+
elif inname == 'maxid_layer':
287+
return 'max_id'
288+
elif inname.endswith('memory') or inname.endswith(
289+
'_seq') or inname.endswith('_sim') or inname == 'hsigmoid':
290+
return inname
291+
elif inname in [
292+
'cross_entropy', 'multi_binary_label_cross_entropy',
293+
'cross_entropy_with_selfnorm'
294+
]:
295+
return inname + "_cost"
296+
elif inname.endswith('_cost'):
297+
return inname
298+
elif inname.endswith("_layer"):
299+
return inname[:-len("_layer")]
300+
301+
302+
def __layer_name_mapping_parent_names__(inname):
    """Return the v1 layer's argument names that denote parent layers.

    Looks up the v1 function on conf_helps and keeps, in declaration
    order, the arguments whose names conventionally refer to input
    (parent) layers.
    """
    # Argument names that refer to an input layer in v1 signatures.
    parent_arg_names = [
        'input1', 'input2', 'label', 'input', 'a', 'b', 'expand_as',
        'weights', 'vectors', 'weight', 'score', 'left', 'right'
    ]
    all_args = getattr(conf_helps, inname).argspec.args
    return [each_arg for each_arg in all_args if each_arg in parent_arg_names]
308+
309+
310+
def __convert_layer__(_new_name_, _old_name_, _parent_names_):
    """Register a v2 wrapper for the v1 layer `_old_name_`.

    Exports `_new_name_` through this module's __all__ and binds the
    converted layer function into the module globals under that name.
    """
    global __all__
    __all__.append(_new_name_)
    # Bug fix: bind under the `_new_name_` parameter, not the caller's
    # module-global loop variable `new_name`, which only matched by
    # accident during the import-time conversion loop.
    globals()[_new_name_] = __convert_to_v2__(_old_name_, _parent_names_)
314+
315+
316+
# Import-time pass: auto-generate a v2 wrapper for every convertible v1
# layer exposed by conf_helps.
for __v1_layer_name__ in dir(conf_helps):
    new_name = __layer_name_mapping__(__v1_layer_name__)
    if new_name is None:
        continue
    parent_names = __layer_name_mapping_parent_names__(__v1_layer_name__)
    # Every auto-converted layer must declare at least one parent input.
    assert len(parent_names) != 0, __v1_layer_name__
    __convert_layer__(new_name, __v1_layer_name__, parent_names)

# Drop the loop temporaries so they do not leak into the module namespace.
del parent_names
del new_name
del __v1_layer_name__
369326

370327
# convert projection
371328
for prj in __projection_names__:

python/paddle/v2/tests/test_layer.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -11,17 +11,13 @@
1111
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
14-
import difflib
1514
import unittest
1615

17-
import paddle.trainer_config_helpers as conf_helps
1816
import paddle.v2.activation as activation
1917
import paddle.v2.attr as attr
2018
import paddle.v2.data_type as data_type
2119
import paddle.v2.layer as layer
2220
import paddle.v2.pooling as pooling
23-
from paddle.trainer_config_helpers.config_parser_utils import \
24-
parse_network_config as parse_network
2521

2622
pixel = layer.data(name='pixel', type=data_type.dense_vector(128))
2723
label = layer.data(name='label', type=data_type.integer_value(10))
@@ -70,7 +66,7 @@ def test_norm_layer(self):
7066

7167
class AggregateLayerTest(unittest.TestCase):
7268
def test_aggregate_layer(self):
73-
pool = layer.pool(
69+
pool = layer.pooling(
7470
input=pixel,
7571
pooling_type=pooling.Avg(),
7672
agg_level=layer.AggregateLevel.EACH_SEQUENCE)

0 commit comments

Comments (0)