Skip to content

Commit 479c47f

Browse files
author
Xing Wu
authored
fix error info for transpose sequence_conv_pool max_sequence_len sequence_erase (#24437)
* fix error info for transpose sequence_conv_pool max_sequence_len sequence_erase, test=develop
* fix error info for transpose sequence_conv_pool max_sequence_len sequence_erase, test=develop
* update modify, test=develop
* update modify, test=develop
* fixed some modifications, test=develop
1 parent 1c00732 commit 479c47f

File tree

7 files changed

+51
-36
lines changed

7 files changed

+51
-36
lines changed

paddle/fluid/operators/max_sequence_len_op.cc

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,8 @@ class MaxSeqenceLenOpProtoMaker : public framework::OpProtoAndCheckerMaker {
5757
class MaxSeqenceLenInferShape : public framework::InferShapeBase {
5858
public:
5959
void operator()(framework::InferShapeContext *context) const override {
60-
PADDLE_ENFORCE(context->HasInput("RankTable"));
60+
OP_INOUT_CHECK(context->HasInput("RankTable"), "Input", "RankTable",
61+
"MaxSeqenceLen");
6162
context->SetOutputDim("Out", {1});
6263
}
6364
};

paddle/fluid/operators/sequence_ops/sequence_erase_op.cc

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,14 +23,15 @@ class SequenceEraseOp : public framework::OperatorWithKernel {
2323
using framework::OperatorWithKernel::OperatorWithKernel;
2424

2525
void InferShape(framework::InferShapeContext* ctx) const override {
26-
PADDLE_ENFORCE(ctx->HasInput("X"),
27-
"Input(X) of SequenceErase operator should not be null.");
28-
PADDLE_ENFORCE(ctx->HasOutput("Out"),
29-
"Output(Out) of SequenceErase operator should not be null.");
26+
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceErase");
27+
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceErase");
3028
auto x_dims = ctx->GetInputDim("X");
3129
PADDLE_ENFORCE(x_dims.size() == 2 && x_dims[1] == 1,
32-
"Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
33-
"with the 2nd dimension equal to 1.");
30+
platform::errors::InvalidArgument(
31+
"Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
32+
"with the 2nd dimension equal to 1,"
33+
"but received size %d with the 2nd dimension %d.",
34+
x_dims.size(), x_dims[1]));
3435
ctx->SetOutputDim("Out", x_dims);
3536
// The output LoDTensor's lod_level should be input X's lod_level.
3637
// For compile-time, we call SetLoDLevel to set output's lod_level.

paddle/fluid/operators/sequence_ops/sequence_erase_op.cu

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -64,8 +64,10 @@ class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> {
6464
auto* out = ctx.Output<LoDTensor>("Out");
6565

6666
auto lod = in->lod();
67-
PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(),
68-
"The actual size mismatches with the LoD information.");
67+
PADDLE_ENFORCE_EQ(
68+
lod[lod.size() - 1].back(), (size_t)in->numel(),
69+
platform::errors::InvalidArgument(
70+
"The actual size mismatches with the LoD information."));
6971
auto tokens = ctx.Attr<std::vector<int>>("tokens");
7072
auto in_len = in->numel();
7173
auto in_dat = in->data<T>();

paddle/fluid/operators/sequence_ops/sequence_erase_op.h

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,9 +30,13 @@ class SequenceEraseKernel : public framework::OpKernel<T> {
3030
auto lod = in->lod();
3131
PADDLE_ENFORCE_EQ(
3232
lod.empty(), false,
33-
"Input(X) Tensor of SequenceEraseOp does not contain LoD information.");
33+
platform::errors::InvalidArgument("Input(X) Tensor of SequenceEraseOp "
34+
"does not contain LoD information."));
3435
PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(),
35-
"The actual size mismatches with the LoD information.");
36+
platform::errors::InvalidArgument(
37+
"The actual input size %d mismatches with the LoD "
38+
"information size %d.",
39+
lod[lod.size() - 1].back(), (size_t)in->numel()));
3640
auto tokens = ctx.Attr<std::vector<int>>("tokens");
3741
auto in_len = in->numel();
3842
auto in_dat = in->data<T>();

paddle/fluid/operators/transpose_op.cc

Lines changed: 26 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -31,31 +31,33 @@ class TransposeOp : public framework::OperatorWithKernel {
3131
using framework::OperatorWithKernel::OperatorWithKernel;
3232

3333
void InferShape(framework::InferShapeContext *ctx) const override {
34-
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
35-
PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null");
34+
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Transpose");
35+
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Transpose");
3636
auto x_dims = ctx->GetInputDim("X");
3737
std::vector<int> axis = ctx->Attrs().Get<std::vector<int>>("axis");
3838
size_t x_rank = x_dims.size();
3939
size_t axis_size = axis.size();
4040

4141
PADDLE_ENFORCE_EQ(x_rank, axis_size,
42-
"ShapeError: The input tensor's dimension "
43-
"should be equal to the axis's size. "
44-
"But received input tensor's dimension is %d, "
45-
"axis's size is %d",
46-
x_rank, axis_size);
42+
platform::errors::InvalidArgument(
43+
"The input tensor's dimension "
44+
"should be equal to the axis's size. "
45+
"But received input tensor's dimension is %d, "
46+
"axis's size is %d",
47+
x_rank, axis_size));
4748

4849
std::vector<int> count(axis_size, 0);
4950
for (size_t i = 0; i < axis_size; i++) {
50-
PADDLE_ENFORCE(
51-
axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1,
52-
"ValueError: Each element of Attribute axis should "
53-
"be a unique value range from 0 to (dims - 1), "
54-
"where the dims is the axis's size, "
55-
"unique value means this axis value can appear only once. "
56-
"But received axis[%d] is %d, axis_size is %d, "
57-
"count[axis[%d]] is %d",
58-
i, axis[i], axis_size, i, count[axis[i]]);
51+
PADDLE_ENFORCE_EQ(
52+
axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1, true,
53+
platform::errors::InvalidArgument(
54+
"Each element of Attribute axis should "
55+
"be a unique value range from 0 to (dims - 1), "
56+
"where the dims is the axis's size, "
57+
"unique value means this axis value can appear only once. "
58+
"But received axis[%d] is %d, axis_size is %d, "
59+
"count[axis[%d]] is %d",
60+
i, axis[i], axis_size, i, count[axis[i]]));
5961
}
6062

6163
framework::DDim out_dims(x_dims);
@@ -149,9 +151,9 @@ class TransposeOpGrad : public framework::OperatorWithKernel {
149151
using framework::OperatorWithKernel::OperatorWithKernel;
150152

151153
void InferShape(framework::InferShapeContext *ctx) const override {
152-
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
153-
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
154-
"Input(Out@GRAD) should not be null");
154+
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "TransposeOpGrad");
155+
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
156+
framework::GradVarName("Out"), "TransposeOpGrad");
155157
auto x_dims = ctx->GetInputDim("X");
156158
ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
157159
if (ctx->HasOutput(framework::GradVarName("X"))) {
@@ -193,8 +195,7 @@ class Transpose2Op : public TransposeOp {
193195

194196
void InferShape(framework::InferShapeContext *ctx) const override {
195197
TransposeOp::InferShape(ctx);
196-
PADDLE_ENFORCE(ctx->HasOutput("XShape"),
197-
"Output(XShape) should not be null");
198+
OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Transpose2");
198199
const auto &in_dims = ctx->GetInputDim("X");
199200
std::vector<int64_t> x_shape_dim(in_dims.size() + 1);
200201
x_shape_dim[0] = 0;
@@ -259,9 +260,10 @@ class Transpose2OpGrad : public framework::OperatorWithKernel {
259260
using framework::OperatorWithKernel::OperatorWithKernel;
260261

261262
void InferShape(framework::InferShapeContext *ctx) const override {
262-
PADDLE_ENFORCE(ctx->HasInput("XShape"), "Input(XShape) should not be null");
263-
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
264-
"Input(Out@GRAD) should not be null");
263+
OP_INOUT_CHECK(ctx->HasInput("XShape"), "Input", "XShape",
264+
"Transpose2OpGrad");
265+
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
266+
framework::GradVarName("Out"), "Transpose2OpGrad");
265267
if (ctx->HasOutput(framework::GradVarName("X"))) {
266268
auto xshape_dim = ctx->GetInputDim("XShape");
267269
auto x_shape_dim =

paddle/fluid/operators/transpose_op.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,10 @@ inline void TransCompute(const int dim, const DeviceContext& dev_ctx,
5353
trans6(dev_ctx, in, out, axis);
5454
break;
5555
default:
56-
PADDLE_THROW("Tensors with rank at most 6 are supported");
56+
PADDLE_THROW(platform::errors::InvalidArgument(
57+
"Tensors with rank at most 6 are supported"
58+
", but received input tensor's rank is %d,",
59+
dim));
5760
}
5861
}
5962

python/paddle/fluid/nets.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -305,6 +305,8 @@ def sequence_conv_pool(input,
305305
act="tanh",
306306
pool_type="sqrt")
307307
"""
308+
309+
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'input')
308310
conv_out = layers.sequence_conv(
309311
input=input,
310312
num_filters=num_filters,

0 commit comments

Comments
 (0)