Skip to content

Commit 458271c

Browse files
author
chengduozh
committed
follow comment
test=develop
1 parent 8c58237 commit 458271c

File tree

2 files changed

+16
-9
lines changed

2 files changed

+16
-9
lines changed

paddle/fluid/API.spec

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ paddle.fluid.DistributeTranspiler.get_trainer_program ArgSpec(args=['self', 'wai
2323
paddle.fluid.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode', 'startup_program', 'current_endpoint'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True, None, '127.0.0.1:6174'))
2424
paddle.fluid.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level', 'skip_grads'], varargs=None, keywords=None, defaults=(None, False, 0, False))
2525
paddle.fluid.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,))
26-
paddle.fluid.DistributeTranspilerConfig.__init__
26+
paddle.fluid.DistributeTranspilerConfig.__init__
2727
paddle.fluid.ParallelExecutor.__init__ ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id', 'scope'], varargs=None, keywords=None, defaults=(None, None, None, None, None, 1, 0, None))
2828
paddle.fluid.ParallelExecutor.run ArgSpec(args=['self', 'fetch_list', 'feed', 'feed_dict', 'return_numpy'], varargs=None, keywords=None, defaults=(None, None, True))
2929
paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.ExecutionStrategy) -> None
@@ -61,11 +61,11 @@ paddle.fluid.layers.cos_sim ArgSpec(args=['X', 'Y'], varargs=None, keywords=None
6161
paddle.fluid.layers.cross_entropy ArgSpec(args=['input', 'label', 'soft_label', 'ignore_index'], varargs=None, keywords=None, defaults=(False, -100))
6262
paddle.fluid.layers.square_error_cost ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None)
6363
paddle.fluid.layers.chunk_eval ArgSpec(args=['input', 'label', 'chunk_scheme', 'num_chunk_types', 'excluded_chunk_types'], varargs=None, keywords=None, defaults=(None,))
64-
paddle.fluid.layers.sequence_conv ArgSpec(args=['input', 'num_filters', 'filter_size', 'filter_stride', 'padding', 'bias_attr', 'param_attr', 'act'], varargs=None, keywords=None, defaults=(3, 1, None, None, None, None))
64+
paddle.fluid.layers.sequence_conv ArgSpec(args=['input', 'num_filters', 'filter_size', 'filter_stride', 'padding', 'bias_attr', 'param_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(3, 1, None, None, None, None, None))
6565
paddle.fluid.layers.conv2d ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None))
6666
paddle.fluid.layers.conv3d ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None))
6767
paddle.fluid.layers.sequence_pool ArgSpec(args=['input', 'pool_type'], varargs=None, keywords=None, defaults=None)
68-
paddle.fluid.layers.sequence_softmax ArgSpec(args=['input', 'use_cudnn'], varargs=None, keywords=None, defaults=(False,))
68+
paddle.fluid.layers.sequence_softmax ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None))
6969
paddle.fluid.layers.softmax ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(True, None))
7070
paddle.fluid.layers.pool2d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None))
7171
paddle.fluid.layers.pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None))
@@ -312,7 +312,7 @@ paddle.fluid.transpiler.HashName.reset ArgSpec(args=['self'], varargs=None, keyw
312312
paddle.fluid.transpiler.RoundRobin.__init__ ArgSpec(args=['self', 'pserver_endpoints'], varargs=None, keywords=None, defaults=None)
313313
paddle.fluid.transpiler.RoundRobin.dispatch ArgSpec(args=['self', 'varlist'], varargs=None, keywords=None, defaults=None)
314314
paddle.fluid.transpiler.RoundRobin.reset ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
315-
paddle.fluid.transpiler.DistributeTranspilerConfig.__init__
315+
paddle.fluid.transpiler.DistributeTranspilerConfig.__init__
316316
paddle.fluid.nets.simple_img_conv_pool ArgSpec(args=['input', 'num_filters', 'filter_size', 'pool_size', 'pool_stride', 'pool_padding', 'pool_type', 'global_pooling', 'conv_stride', 'conv_padding', 'conv_dilation', 'conv_groups', 'param_attr', 'bias_attr', 'act', 'use_cudnn'], varargs=None, keywords=None, defaults=(0, 'max', False, 1, 0, 1, 1, None, None, None, True))
317317
paddle.fluid.nets.sequence_conv_pool ArgSpec(args=['input', 'num_filters', 'filter_size', 'param_attr', 'act', 'pool_type'], varargs=None, keywords=None, defaults=(None, 'sigmoid', 'max'))
318318
paddle.fluid.nets.glu ArgSpec(args=['input', 'dim'], varargs=None, keywords=None, defaults=(-1,))
@@ -380,4 +380,4 @@ paddle.fluid.Scope.__init__ __init__(self: paddle.fluid.core.Scope) -> None
380380
paddle.fluid.Scope.drop_kids drop_kids(self: paddle.fluid.core.Scope) -> None
381381
paddle.fluid.Scope.find_var find_var(self: paddle.fluid.core.Scope, arg0: unicode) -> paddle.fluid.core.Variable
382382
paddle.fluid.Scope.new_scope new_scope(self: paddle.fluid.core.Scope) -> paddle.fluid.core.Scope
383-
paddle.fluid.Scope.var var(self: paddle.fluid.core.Scope, arg0: unicode) -> paddle.fluid.core.Variable
383+
paddle.fluid.Scope.var var(self: paddle.fluid.core.Scope, arg0: unicode) -> paddle.fluid.core.Variable

python/paddle/fluid/layers/nn.py

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1265,7 +1265,8 @@ def sequence_conv(input,
12651265
padding=None,
12661266
bias_attr=None,
12671267
param_attr=None,
1268-
act=None):
1268+
act=None,
1269+
name=None):
12691270
"""
12701271
This function creates the op for sequence_conv, using the inputs and
12711272
other convolutional configurations for the filters and stride as given
@@ -1287,6 +1288,8 @@ def sequence_conv(input,
12871288
will create ParamAttr as param_attr. If the Initializer of the param_attr
12881289
is not set, the parameter is initialized with Xavier. Default: None.
12891290
act (str): the activation type
1291+
name (str|None): A name for this layer (optional). If set None, the layer
1292+
will be named automatically. Default: None.
12901293
12911294
Returns:
12921295
Variable: output of sequence_conv
@@ -1315,7 +1318,7 @@ def sequence_conv(input,
13151318
return helper.append_activation(pre_act)
13161319

13171320

1318-
def sequence_softmax(input, use_cudnn=False):
1321+
def sequence_softmax(input, use_cudnn=False, name=None):
13191322
"""
13201323
This function computes the softmax activation among all time-steps for each
13211324
sequence. The dimension of each time-step should be 1. Thus, the shape of
@@ -1336,7 +1339,9 @@ def sequence_softmax(input, use_cudnn=False):
13361339
Args:
13371340
input (Variable): The input variable which is a LoDTensor.
13381341
use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn \
1339-
library is installed. Default: False
1342+
library is installed. Default: False.
1343+
name (str|None): A name for this layer (optional). If set None, the layer
1344+
will be named automatically. Default: None.
13401345
13411346
Returns:
13421347
Variable: output of sequence_softmax
@@ -1388,7 +1393,9 @@ def softmax(input, use_cudnn=True, name=None):
13881393
Args:
13891394
input (Variable): The input variable.
13901395
use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn \
1391-
library is installed.
1396+
library is installed.
1397+
name (str|None): A name for this layer (optional). If set None, the layer
1398+
will be named automatically. Default: None.
13921399
13931400
Returns:
13941401
Variable: output of softmax

0 commit comments

Comments
 (0)