
Commit c8d00cb

Sequence mask support tensor (#18249) (#18318)
* sequence mask support max length tensor input; test=develop
* add rnn_impl.py; test=develop
* add basic gru lstm unittest; test=develop
* fix api spec; test=develop
* fix sequence_mask op bug; test=develop test=document_preview
* change +-*x to elementwise_op; test=develop
* add mkl flag; test=develop
* fix rnn impl bug; test=develop
* update api spec; test=develop
* fix doc bug; test=develop
* fix lstm bugs; test=develop
1 parent 5cd4bbf commit c8d00cb

19 files changed: +1888 −56 lines

paddle/fluid/API.spec

Lines changed: 32 additions & 0 deletions
@@ -429,6 +429,38 @@ paddle.fluid.contrib.multi_upload (ArgSpec(args=['client', 'hdfs_path', 'local_p
 paddle.fluid.contrib.extend_with_decoupled_weight_decay (ArgSpec(args=['base_optimizer'], varargs=None, keywords=None, defaults=None), ('document', 'a1095dfd4ec725747f662d69cd7659d4'))
 paddle.fluid.contrib.mixed_precision.decorate (ArgSpec(args=['optimizer', 'init_loss_scaling', 'incr_every_n_steps', 'decr_every_n_nan_or_inf', 'incr_ratio', 'decr_ratio', 'use_dynamic_loss_scaling'], varargs=None, keywords=None, defaults=(1.0, 1000, 2, 2.0, 0.8, False)), ('document', 'bdb8f9dbb0d94b3957272c53eeee9818'))
 paddle.fluid.contrib.fused_elemwise_activation (ArgSpec(args=['x', 'y', 'functor_list', 'axis', 'scale', 'save_intermediate_out'], varargs=None, keywords=None, defaults=(-1, 0.0, True)), ('document', '1c4b247a2858cea8d9d8750693688270'))
+paddle.fluid.contrib.BasicGRUUnit.__init__ (ArgSpec(args=['self', 'name_scope', 'hidden_size', 'param_attr', 'bias_attr', 'gate_activation', 'activation', 'dtype'], varargs=None, keywords=None, defaults=(None, None, None, None, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
+paddle.fluid.contrib.BasicGRUUnit.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
+paddle.fluid.contrib.BasicGRUUnit.backward (ArgSpec(args=['self'], varargs='inputs', keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.clear_gradients (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.create_parameter (ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'a6420ca1455366eaaf972191612de0b6'))
+paddle.fluid.contrib.BasicGRUUnit.create_variable (ArgSpec(args=['self', 'name', 'persistable', 'dtype', 'type'], varargs=None, keywords=None, defaults=(None, None, None, VarType.LOD_TENSOR)), ('document', '171cccfceba636d5bbf7bbae672945d8'))
+paddle.fluid.contrib.BasicGRUUnit.eval (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.forward (ArgSpec(args=['self', 'input', 'pre_hidden'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.full_name (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '23ce4f961f48ed0f79cadf93a3938ed2'))
+paddle.fluid.contrib.BasicGRUUnit.load_dict (ArgSpec(args=['self', 'stat_dict', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.parameters (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '5aec25a854eb57abc798dccccbb507d5'))
+paddle.fluid.contrib.BasicGRUUnit.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicGRUUnit.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
+paddle.fluid.contrib.BasicGRUUnit.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.basic_gru (ArgSpec(args=['input', 'init_hidden', 'hidden_size', 'num_layers', 'sequence_length', 'dropout_prob', 'bidirectional', 'batch_first', 'param_attr', 'bias_attr', 'gate_activation', 'activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(1, None, 0.0, False, True, None, None, None, None, 'float32', 'basic_gru')), ('document', '0afcbe4fbe1b8c35eda58b4efe48f9fd'))
+paddle.fluid.contrib.BasicLSTMUnit.__init__ (ArgSpec(args=['self', 'name_scope', 'hidden_size', 'param_attr', 'bias_attr', 'gate_activation', 'activation', 'forget_bias', 'dtype'], varargs=None, keywords=None, defaults=(None, None, None, None, 1.0, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
+paddle.fluid.contrib.BasicLSTMUnit.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
+paddle.fluid.contrib.BasicLSTMUnit.backward (ArgSpec(args=['self'], varargs='inputs', keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.clear_gradients (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.create_parameter (ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'a6420ca1455366eaaf972191612de0b6'))
+paddle.fluid.contrib.BasicLSTMUnit.create_variable (ArgSpec(args=['self', 'name', 'persistable', 'dtype', 'type'], varargs=None, keywords=None, defaults=(None, None, None, VarType.LOD_TENSOR)), ('document', '171cccfceba636d5bbf7bbae672945d8'))
+paddle.fluid.contrib.BasicLSTMUnit.eval (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.forward (ArgSpec(args=['self', 'input', 'pre_hidden', 'pre_cell'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.full_name (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '23ce4f961f48ed0f79cadf93a3938ed2'))
+paddle.fluid.contrib.BasicLSTMUnit.load_dict (ArgSpec(args=['self', 'stat_dict', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.parameters (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '5aec25a854eb57abc798dccccbb507d5'))
+paddle.fluid.contrib.BasicLSTMUnit.state_dict (ArgSpec(args=['self', 'destination', 'include_sublayers'], varargs=None, keywords=None, defaults=(None, True)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.BasicLSTMUnit.sublayers (ArgSpec(args=['self', 'include_sublayers'], varargs=None, keywords=None, defaults=(True,)), ('document', '00a881005ecbc96578faf94513bf0d62'))
+paddle.fluid.contrib.BasicLSTMUnit.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.contrib.basic_lstm (ArgSpec(args=['input', 'init_hidden', 'init_cell', 'hidden_size', 'num_layers', 'sequence_length', 'dropout_prob', 'bidirectional', 'batch_first', 'param_attr', 'bias_attr', 'gate_activation', 'activation', 'forget_bias', 'dtype', 'name'], varargs=None, keywords=None, defaults=(1, None, 0.0, False, True, None, None, None, None, 1.0, 'float32', 'basic_lstm')), ('document', 'fe4d0c3c55a162b8cfe10b05fabb7ce4'))
 paddle.fluid.dygraph.Layer.__init__ (ArgSpec(args=['self', 'name_scope', 'dtype'], varargs=None, keywords=None, defaults=(VarType.FP32,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.dygraph.Layer.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
 paddle.fluid.dygraph.Layer.add_sublayer (ArgSpec(args=['self', 'name', 'sublayer'], varargs=None, keywords=None, defaults=None), ('document', '839ff3c0534677ba6ad8735c3fd4e995'))
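The block above is the commit's public-API surface. For orientation, here is a minimal static-graph sketch of calling the new basic_gru with a per-example length tensor; the shapes and the two-value return are assumptions read off the ArgSpec, not taken from this diff:

    import paddle.fluid as fluid
    from paddle.fluid.contrib.layers import basic_gru

    # Batch-first input: [batch, max_seq_len, embedding_dim] (sizes illustrative).
    x = fluid.layers.data(name='x', shape=[-1, 16, 32], dtype='float32',
                          append_batch_size=False)
    # Real length of each example, used to mask out padded steps.
    seq_len = fluid.layers.data(name='seq_len', shape=[-1], dtype='int32',
                                append_batch_size=False)
    # init_hidden=None lets the layer create a zero initial state.
    rnn_out, last_hidden = basic_gru(x, None, hidden_size=64, num_layers=1,
                                     sequence_length=seq_len, batch_first=True)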

paddle/fluid/operators/math/math_function.cu

Lines changed: 3 additions & 1 deletion
@@ -35,7 +35,9 @@ template struct SetConstant<platform::CUDADeviceContext, bool>;
   template struct Transpose<platform::CUDADeviceContext, float, RANK>;   \
   template struct Transpose<platform::CUDADeviceContext, double, RANK>;  \
   template struct Transpose<platform::CUDADeviceContext, float16, RANK>; \
-  template struct Transpose<platform::CUDADeviceContext, int8_t, RANK>;
+  template struct Transpose<platform::CUDADeviceContext, int8_t, RANK>;  \
+  template struct Transpose<platform::CUDADeviceContext, int32_t, RANK>; \
+  template struct Transpose<platform::CUDADeviceContext, int64_t, RANK>;
 
 DEFINE_GPU_TRANS(1);
 DEFINE_GPU_TRANS(2);

paddle/fluid/operators/sequence_ops/sequence_mask_op.cc

Lines changed: 74 additions & 0 deletions
@@ -13,6 +13,80 @@
 // limitations under the License.
 
 #include "paddle/fluid/operators/sequence_ops/sequence_mask_op.h"
+#include <string>
+
+namespace paddle {
+namespace operators {
+
+class SequenceMaskOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must exist");
+    PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) must exist");
+
+    int maxlen = ctx->Attrs().Get<int>("maxlen");
+    auto dim = framework::vectorize2int(ctx->GetInputDim("X"));
+
+    if (ctx->HasInputs("MaxLenTensor")) {
+      dim.push_back(-1);
+    } else {
+      dim.push_back(maxlen > 0 ? maxlen : -1);
+    }
+    ctx->SetOutputDim("Y", framework::make_ddim(dim));
+  }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(ctx.Input<framework::LoDTensor>("X")->type(),
+                                   ctx.device_context());
+  }
+  framework::OpKernelType GetKernelTypeForVar(
+      const std::string& var_name, const Tensor& tensor,
+      const framework::OpKernelType& expected_kernel_type) const override {
+    if (var_name == "MaxLenTensor") {
+      return expected_kernel_type;
+    }
+    return framework::OpKernelType(expected_kernel_type.data_type_,
+                                   tensor.place(), tensor.layout());
+  }
+};
+
+class SequenceMaskOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("X", "The input tensor of sequence_mask op.");
+    AddOutput("Y", "The output mask of sequence_mask op.");
+    AddInput("MaxLenTensor",
+             "Max length tensor; "
+             "has higher priority than the maxlen attribute")
+        .AsDispensable();
+    AddAttr<int>("maxlen",
+                 "The maximum length of the sequence. If maxlen < 0, maxlen "
+                 "= max(Input(X)).")
+        .SetDefault(-1)
+        .AddCustomChecker([](const int& v) {
+          PADDLE_ENFORCE(v < 0 || v >= 1,
+                         "Attr(maxlen) must be negative or no less than 1");
+        });
+    AddAttr<int>("out_dtype", "Output data type");
+    AddComment(R"DOC(
+SequenceMask Operator
+
+This operator outputs a Mask according to Input(X) and Attr(maxlen).
+Supposing Input(X) is a Tensor with shape [d_1, d_2, ..., d_n], the
+Output(Y) is a mask with shape [d_1, d_2, ..., d_n, maxlen], where:
+
+Y(i_1, i_2, ..., i_n, j) = (j < X(i_1, i_2, ..., i_n))
+
+If maxlen < 0, maxlen = max(X)
+)DOC");
+  }
+};
+}  // namespace operators
+}  // namespace paddle
 
 REGISTER_OPERATOR(sequence_mask, paddle::operators::SequenceMaskOp,
                   paddle::operators::SequenceMaskOpMaker,
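The DOC string pins down the mask semantics exactly, so a NumPy reference is a useful cross-check. This sketch is illustrative only and not part of the commit:

    import numpy as np

    def sequence_mask_ref(x, maxlen=-1):
        # x holds sequence lengths, shape [d_1, ..., d_n].
        if maxlen < 0:
            maxlen = int(x.max())        # "If maxlen < 0, maxlen = max(X)"
        j = np.arange(maxlen)
        # Y(i_1, ..., i_n, j) = (j < X(i_1, ..., i_n))
        return j < x[..., np.newaxis]

    print(sequence_mask_ref(np.array([1, 3, 2])).astype(np.int64))
    # [[1 0 0]
    #  [1 1 1]
    #  [1 1 0]]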

paddle/fluid/operators/sequence_ops/sequence_mask_op.h

Lines changed: 21 additions & 45 deletions
@@ -28,48 +28,8 @@
 namespace paddle {
 namespace operators {
 
-class SequenceMaskOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must exist");
-    PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) must exist");
-
-    int maxlen = ctx->Attrs().Get<int>("maxlen");
-    auto dim = framework::vectorize2int(ctx->GetInputDim("X"));
-    dim.push_back(maxlen > 0 ? maxlen : -1);
-    ctx->SetOutputDim("Y", framework::make_ddim(dim));
-  }
-};
-
-class SequenceMaskOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X", "The input tensor of sequence_mask op.");
-    AddOutput("Y", "The output mask of sequence_mask op.");
-    AddAttr<int>("maxlen",
-                 "The maximum length of the sequence. If maxlen < 0, maxlen "
-                 "= max(Input(X)).")
-        .SetDefault(-1)
-        .AddCustomChecker([](const int &v) {
-          PADDLE_ENFORCE(v < 0 || v >= 1,
-                         "Attr(maxlen) must be less than 0 or larger than 1");
-        });
-    AddAttr<int>("out_dtype", "Output data type");
-    AddComment(R"DOC(
-SequenceMask Operator
-
-This operator outputs a Mask according to Input(X) and Attr(maxlen).
-Supposing Input(X) is a Tensor with shape [d_1, d_2, ..., d_n], the
-Output(Y) is a mask with shape [d_1, d_2, ..., d_n, maxlen], where:
-
-Y(i_1, i_2, ..., i_n, j) = (j < X(i_1, i_2, ..., i_n))
-
-If maxlen < 0, maxlen = max(X)
-)DOC");
-  }
-};
+using LoDTensor = framework::LoDTensor;
+using Tensor = framework::Tensor;
 
 template <typename Tx, typename Ty>
 struct SequenceMaskForRangeFunctor {

@@ -90,8 +50,6 @@ struct SequenceMaskForRangeFunctor {
 
 template <typename DeviceContext, typename Tx>
 struct SequenceMaskFunctor {
-  using Tensor = framework::LoDTensor;
-
   SequenceMaskFunctor(const DeviceContext &ctx, const Tx *x, Tensor *y,
                       int limits, int maxlen)
       : ctx_(ctx), x_(x), y_(y), limits_(limits), maxlen_(maxlen) {}

@@ -119,7 +77,25 @@ class SequenceMaskKernel : public framework::OpKernel<Tx> {
   void Compute(const framework::ExecutionContext &ctx) const override {
     auto *x = ctx.Input<Tensor>("X");
     auto *y = ctx.Output<Tensor>("Y");
-    auto maxlen = ctx.Attr<int>("maxlen");
+    int maxlen = ctx.Attr<int>("maxlen");
+    if (ctx.HasInput("MaxLenTensor")) {
+      auto max_len_tensor = ctx.Input<Tensor>("MaxLenTensor");
+      PADDLE_ENFORCE(max_len_tensor != NULL, "MaxLenTensor is NULL");
+      if (platform::is_gpu_place(max_len_tensor->place())) {
+        framework::Tensor temp;
+        TensorCopySync(*max_len_tensor, platform::CPUPlace(), &temp);
+        maxlen = *temp.data<int32_t>();
+      } else {
+        maxlen = *max_len_tensor->data<int32_t>();
+      }
+
+      auto y_dim = framework::vectorize2int(x->dims());
+      y_dim.push_back(maxlen);
+      y->Resize(framework::make_ddim(y_dim));
+
+      PADDLE_ENFORCE_GT(maxlen, 0,
+                        "MaxLenTensor value should be greater than 0");
+    }
 
     auto *x_data = x->data<Tx>();
     auto x_numel = x->numel();
paddle/fluid/operators/shape_op.cc

Lines changed: 2 additions & 1 deletion
@@ -55,4 +55,5 @@ namespace ops = paddle::operators;
 REGISTER_OPERATOR(shape, ops::ShapeOp, ops::ShapeOpMaker,
                   paddle::framework::EmptyGradOpMaker);
 REGISTER_OP_CPU_KERNEL(shape, ops::ShapeKernel<int>, ops::ShapeKernel<int32_t>,
-                       ops::ShapeKernel<float>, ops::ShapeKernel<double>);
+                       ops::ShapeKernel<int64_t>, ops::ShapeKernel<float>,
+                       ops::ShapeKernel<double>);

paddle/fluid/operators/shape_op.cu

Lines changed: 1 addition & 0 deletions
@@ -16,5 +16,6 @@ limitations under the License. */
 
 REGISTER_OP_CUDA_KERNEL(shape, paddle::operators::ShapeKernel<int>,
                         paddle::operators::ShapeKernel<int32_t>,
+                        paddle::operators::ShapeKernel<int64_t>,
                         paddle::operators::ShapeKernel<float>,
                         paddle::operators::ShapeKernel<double>);

paddle/fluid/operators/slice_op.cc

Lines changed: 1 addition & 1 deletion
@@ -92,7 +92,7 @@ class SliceOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext& ctx) const override {
     return framework::OpKernelType(ctx.Input<Tensor>("Input")->type(),
-                                   ctx.GetPlace());
+                                   ctx.Input<Tensor>("Input")->place());
   }
 };
 
paddle/fluid/operators/transpose_op.cc

Lines changed: 5 additions & 0 deletions
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/transpose_op.h"
+#include <memory>
 #include <string>
 #include <vector>
 

@@ -289,8 +290,12 @@ REGISTER_OPERATOR(transpose2_grad, ops::Transpose2OpGrad);
 
 REGISTER_OP_CPU_KERNEL(
     transpose2, ops::TransposeKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::TransposeKernel<paddle::platform::CPUDeviceContext, int32_t>,
+    ops::TransposeKernel<paddle::platform::CPUDeviceContext, int64_t>,
     ops::TransposeKernel<paddle::platform::CPUDeviceContext, double>);
 REGISTER_OP_CPU_KERNEL(
     transpose2_grad,
+    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, int32_t>,
+    ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, int64_t>,
     ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, float>,
     ops::TransposeGradKernel<paddle::platform::CPUDeviceContext, double>);

paddle/fluid/operators/transpose_op.cu.cc

Lines changed: 4 additions & 0 deletions
@@ -30,11 +30,15 @@ REGISTER_OP_CUDA_KERNEL(
 
 REGISTER_OP_CUDA_KERNEL(
     transpose2,
+    ops::TransposeKernel<paddle::platform::CUDADeviceContext, int32_t>,
+    ops::TransposeKernel<paddle::platform::CUDADeviceContext, int64_t>,
     ops::TransposeKernel<paddle::platform::CUDADeviceContext, float>,
     ops::TransposeKernel<paddle::platform::CUDADeviceContext, double>,
     ops::TransposeKernel<paddle::platform::CUDADeviceContext, plat::float16>);
 REGISTER_OP_CUDA_KERNEL(
     transpose2_grad,
+    ops::TransposeGradKernel<paddle::platform::CUDADeviceContext, int32_t>,
+    ops::TransposeGradKernel<paddle::platform::CUDADeviceContext, int64_t>,
     ops::TransposeGradKernel<paddle::platform::CUDADeviceContext, float>,
     ops::TransposeGradKernel<paddle::platform::CUDADeviceContext, double>,
     ops::TransposeGradKernel<paddle::platform::CUDADeviceContext,
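The integer kernel registrations above (transpose2, shape, and the Transpose functor instantiations earlier) exist so the RNN wrappers can reorder integer tensors, e.g. between batch-first and time-first layouts. A small sketch of what now has registered kernels; shapes and values are illustrative:

    import paddle.fluid as fluid

    # int64 tensors can now go through transpose2 and shape on CPU and GPU.
    x = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=1)
    y = fluid.layers.transpose(x, perm=[1, 0])
    s = fluid.layers.shape(y)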

python/paddle/fluid/contrib/layers/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -16,6 +16,8 @@
 
 from . import nn
 from .nn import *
+from .rnn_impl import *
 
 __all__ = []
 __all__ += nn.__all__
+__all__ += rnn_impl.__all__
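With rnn_impl re-exported, the new helpers can be imported straight from contrib.layers; this assumes rnn_impl.__all__ lists the four names added to API.spec above:

    from paddle.fluid.contrib.layers import (BasicGRUUnit, BasicLSTMUnit,
                                             basic_gru, basic_lstm)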
