Skip to content

Commit 1437648

Browse files
authored
Refine error message for some ops (#24479)
test=release/1.8 l1_norm norm squared_l2_norm squared_l2_distance conv_shift sample_logits
1 parent db8f408 commit 1437648

File tree

10 files changed

+332
-116
lines changed

10 files changed

+332
-116
lines changed

paddle/fluid/operators/conv_shift_op.cc

Lines changed: 41 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -29,25 +29,49 @@ class ConvShiftOp : public framework::OperatorWithKernel {
2929
using framework::OperatorWithKernel::OperatorWithKernel;
3030

3131
void InferShape(framework::InferShapeContext *ctx) const override {
32-
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
33-
PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should be not null.");
34-
PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should be not null.");
32+
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ConvShiftOp");
33+
OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ConvShiftOp");
34+
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ConvShiftOp");
3535

3636
auto x_dims = ctx->GetInputDim("X");
3737
auto y_dims = ctx->GetInputDim("Y");
38-
PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2.");
39-
PADDLE_ENFORCE_EQ(y_dims.size(), 2, "Input(Y)'s rank should be 2.");
38+
PADDLE_ENFORCE_EQ(
39+
x_dims.size(), 2,
40+
platform::errors::InvalidArgument(
41+
"Input(X)'s dimensions of ConvShiftOp should be 2. "
42+
"But received X's shape = [%s] and the dimension is %d.",
43+
x_dims, x_dims.size()));
44+
PADDLE_ENFORCE_EQ(
45+
y_dims.size(), 2,
46+
platform::errors::InvalidArgument(
47+
"Input(Y)'s dimensions of ConvShiftOp should be 2. "
48+
"But received Y's shape = [%s] and the dimension is %d.",
49+
y_dims, y_dims.size()));
4050
if (ctx->IsRuntime() || (x_dims[0] > 0 && y_dims[0] > 0))
41-
PADDLE_ENFORCE_EQ(x_dims[0], y_dims[0],
42-
"The 1st dimension of Input(X) and Input(Y) should "
43-
"be equal.");
51+
PADDLE_ENFORCE_EQ(
52+
x_dims[0], y_dims[0],
53+
platform::errors::InvalidArgument(
54+
"The first dimension of Input(X) and Input(Y) of ConvShiftOp "
55+
"should be equal. "
56+
"But received X's shape = [%s], Y's shape = [%s], "
57+
"and the first dimensions are %d and %d respectively.",
58+
x_dims, y_dims, x_dims[0], y_dims[0]));
4459
if (ctx->IsRuntime() || y_dims[1] > 0)
45-
PADDLE_ENFORCE_EQ(y_dims[1] % 2, 1,
46-
"The 2nd dimension of Input(Y) should be odd.");
60+
PADDLE_ENFORCE_EQ(
61+
y_dims[1] % 2, 1,
62+
platform::errors::InvalidArgument(
63+
"The second dimension of Input(Y) of ConvShiftOp should be odd."
64+
"But received Y's shape = [%s] and the second dimension is %d.",
65+
y_dims, y_dims[1]));
4766
if (ctx->IsRuntime() || (x_dims[1] > 0 && y_dims[1] > 0))
48-
PADDLE_ENFORCE_LE(y_dims[1], x_dims[1],
49-
"The 2nd dimension of Input(Y) should be less than or "
50-
"equal to the 2nd dimension of Input(X).");
67+
PADDLE_ENFORCE_LE(
68+
y_dims[1], x_dims[1],
69+
platform::errors::InvalidArgument(
70+
"The second dimension of Input(Y) of ConvShiftOp should be less "
71+
"than or equal to the 2nd dimension of Input(X)."
72+
"But received X's shape = [%s], Y's shape = [%s], "
73+
"and the second dimensions are %d and %d respectively.",
74+
x_dims, y_dims, x_dims[1], y_dims[1]));
5175
ctx->ShareDim("X", /*->*/ "Out");
5276
ctx->ShareLoD("X", /*->*/ "Out");
5377
}
@@ -58,10 +82,10 @@ class ConvShiftGradOp : public framework::OperatorWithKernel {
5882
using framework::OperatorWithKernel::OperatorWithKernel;
5983

6084
void InferShape(framework::InferShapeContext *ctx) const override {
61-
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
62-
PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should be not null.");
63-
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
64-
"Input(Out@GRAD) should be not null.");
85+
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ConvShiftGradOp");
86+
OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ConvShiftGradOp");
87+
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
88+
"Out@GRAD", "ConvShiftGradOp");
6589

6690
auto x_grad_name = framework::GradVarName("X");
6791
if (ctx->HasOutput(x_grad_name)) {

paddle/fluid/operators/l1_norm_op.cc

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,8 @@ class L1NormOp : public framework::OperatorWithKernel {
2525
using framework::OperatorWithKernel::OperatorWithKernel;
2626

2727
void InferShape(framework::InferShapeContext* ctx) const override {
28-
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
29-
PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should be not null.");
28+
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "L1NormOp");
29+
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "L1NormOp");
3030

3131
ctx->SetOutputDim("Out", {1});
3232
}
@@ -37,11 +37,11 @@ class L1NormGradOp : public framework::OperatorWithKernel {
3737
using framework::OperatorWithKernel::OperatorWithKernel;
3838

3939
void InferShape(framework::InferShapeContext* ctx) const override {
40-
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
41-
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
42-
"Input(Out@GRAD) should be not null.");
43-
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
44-
"Output(X@GRAD) should be not null.");
40+
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "L1NormGradOp");
41+
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
42+
"Out@GRAD", "L1NormGradOp");
43+
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
44+
"X@GRAD", "L1NormGradOp");
4545

4646
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
4747
}

paddle/fluid/operators/l1_norm_op.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,10 @@ class L1NormGradKernel : public framework::OpKernel<T> {
4545
const framework::Tensor *x = context.Input<framework::Tensor>("X");
4646
const framework::Tensor *d_out =
4747
context.Input<framework::Tensor>(framework::GradVarName("Out"));
48-
PADDLE_ENFORCE(d_out->numel() == 1, "L1 Norm Gradient should be scalar");
48+
PADDLE_ENFORCE_EQ(
49+
d_out->numel(), 1,
50+
platform::errors::InvalidArgument(
51+
"Input(GRAD@Out) of L1NormGradOP should be a scalar."));
4952
framework::Tensor *dx =
5053
context.Output<framework::Tensor>(framework::GradVarName("X"));
5154
dx->mutable_data<T>(context.GetPlace());

paddle/fluid/operators/norm_op.cc

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -55,10 +55,8 @@ class NormOp : public framework::OperatorWithKernel {
5555
public:
5656
using framework::OperatorWithKernel::OperatorWithKernel;
5757
void InferShape(framework::InferShapeContext* ctx) const override {
58-
PADDLE_ENFORCE(ctx->HasInput("X"),
59-
"Input(X) of NormOp should not be null.");
60-
PADDLE_ENFORCE(ctx->HasOutput("Out"),
61-
"Output(Out) of NormOp should not be null.");
58+
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "NormOp");
59+
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "NormOp");
6260
auto xdim = ctx->GetInputDim("X");
6361
ctx->SetOutputDim("Out", xdim);
6462
int axis = ctx->Attrs().Get<int>("axis");
@@ -72,9 +70,9 @@ class NormOpGrad : public framework::OperatorWithKernel {
7270
public:
7371
using framework::OperatorWithKernel::OperatorWithKernel;
7472
void InferShape(framework::InferShapeContext* ctx) const override {
75-
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
76-
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
77-
"Input(X@GRAD) should not be null.");
73+
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "NormOpGrad");
74+
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
75+
"X@GRAD", "NormOpGrad");
7876
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
7977
}
8078
};

paddle/fluid/operators/sample_logits_op.cc

Lines changed: 48 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -112,32 +112,37 @@ class SampleLogitsOp : public framework::OperatorWithKernel {
112112
using framework::OperatorWithKernel::OperatorWithKernel;
113113

114114
void InferShape(framework::InferShapeContext* ctx) const override {
115-
PADDLE_ENFORCE(ctx->HasInput("Logits"),
116-
"Input(Logits) should be not null.");
117-
PADDLE_ENFORCE(ctx->HasInput("Labels"),
118-
"Input(Labels) should be not null.");
119-
120-
PADDLE_ENFORCE(ctx->HasOutput("Samples"),
121-
"Output(Samples) should be not null.");
122-
PADDLE_ENFORCE(ctx->HasOutput("Probabilities"),
123-
"Output(Probabilities) should be not null.");
124-
PADDLE_ENFORCE(ctx->HasOutput("SampledLogits"),
125-
"Output(SampledLogits) should be not null.");
126-
PADDLE_ENFORCE(ctx->HasOutput("SampledLabels"),
127-
"Output(SampledLabels) should be not null.");
128-
PADDLE_ENFORCE(ctx->HasOutput("LogitsDim"),
129-
"Output(LogitsDim) should be not null.");
130-
PADDLE_ENFORCE(ctx->HasOutput("LabelsDim"),
131-
"Output(LabelsDim) should be not null.");
115+
OP_INOUT_CHECK(ctx->HasInput("Labels"), "Input", "Logits",
116+
"SampleLogitsOp");
117+
OP_INOUT_CHECK(ctx->HasInput("Labels"), "Input", "Logits",
118+
"SampleLogitsOp");
119+
120+
OP_INOUT_CHECK(ctx->HasOutput("Samples"), "Output", "Samples",
121+
"SampleLogitsOp");
122+
OP_INOUT_CHECK(ctx->HasOutput("Probabilities"), "Output", "Probabilities",
123+
"SampleLogitsOp");
124+
OP_INOUT_CHECK(ctx->HasOutput("SampledLogits"), "Output", "SampledLogits",
125+
"SampleLogitsOp");
126+
OP_INOUT_CHECK(ctx->HasOutput("SampledLabels"), "Output", "SampledLabels",
127+
"SampleLogitsOp");
128+
OP_INOUT_CHECK(ctx->HasOutput("LogitsDim"), "Output", "LogitsDim",
129+
"SampleLogitsOp");
130+
OP_INOUT_CHECK(ctx->HasOutput("LabelsDim"), "Output", "LabelsDim",
131+
"SampleLogitsOp");
132132

133133
auto logits_dims = ctx->GetInputDim("Logits");
134134
auto labels_dims = ctx->GetInputDim("Labels");
135135

136-
PADDLE_ENFORCE_EQ(
137-
logits_dims.size(), 2UL,
138-
"The logits of softmax_with_cross_entropy should be a 2-D tensor.");
136+
PADDLE_ENFORCE_EQ(logits_dims.size(), 2UL,
137+
platform::errors::InvalidArgument(
138+
"Input(Logits) of SampleLogitsOp should be 2D. "
139+
"But received shape = [%s] and dimension is %d.",
140+
logits_dims, logits_dims.size()));
139141
PADDLE_ENFORCE_EQ(labels_dims.size(), 2UL,
140-
"The labels should be a 2-D tensor.");
142+
platform::errors::InvalidArgument(
143+
"Input(Labels) of SampleLogitsOp should be 2D. "
144+
"But received shape = [%s] and dimension is %d.",
145+
labels_dims, labels_dims.size()));
141146

142147
const int num_samples = ctx->Attrs().Get<int>("num_samples");
143148
int num_sampled_classes = labels_dims[1] + num_samples;
@@ -175,25 +180,33 @@ class SampleLogitsOpGrad : public framework::OperatorWithKernel {
175180
using framework::OperatorWithKernel::OperatorWithKernel;
176181

177182
void InferShape(framework::InferShapeContext* ctx) const override {
178-
PADDLE_ENFORCE(ctx->HasInput("LogitsDim"),
179-
"Input(LogitsDim) should not be null.");
180-
PADDLE_ENFORCE(ctx->HasInput("LabelsDim"),
181-
"Input(LabelsDim) should be not null.");
182-
PADDLE_ENFORCE(ctx->HasInput("Samples"),
183-
"Input(Samples) should be not null.");
184-
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("SampledLogits")),
185-
"Input(SampledLogits@Grad) should not be null.");
186-
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Logits")),
187-
"Output(Logits@Grad) should be not null.");
183+
OP_INOUT_CHECK(ctx->HasInput("LogitsDim"), "Input", "LogitsDim",
184+
"SampleLogitsOpGrad");
185+
OP_INOUT_CHECK(ctx->HasInput("LabelsDim"), "Input", "LabelsDim",
186+
"SampleLogitsOpGrad");
187+
OP_INOUT_CHECK(ctx->HasInput("Samples"), "Input", "SamplesabelsDim",
188+
"SampleLogitsOpGrad");
189+
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("SampledLogits")),
190+
"Input", "SampledLogits@GRAD", "SampleLogitsOpGrad");
191+
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("Logits")), "Output",
192+
"Logits@GRAD", "SampleLogitsOpGrad");
188193

189194
auto logits_dims = ctx->GetInputDim("LogitsDim");
190195
logits_dims = framework::DDim(logits_dims.Get(), logits_dims.size() - 1);
191196
auto labels_dims = ctx->GetInputDim("LabelsDim");
192197
labels_dims = framework::DDim(labels_dims.Get(), labels_dims.size() - 1);
193-
PADDLE_ENFORCE_EQ(labels_dims.size(), 2UL,
194-
"The label should be a 2-D tensor.");
195-
PADDLE_ENFORCE_EQ(logits_dims.size(), 2UL,
196-
"The logits should be a 2-D tensor.");
198+
PADDLE_ENFORCE_EQ(
199+
logits_dims.size(), 2UL,
200+
platform::errors::InvalidArgument(
201+
"Input(LogitsDim) of SampleLogitsOpGrad should be 2D. "
202+
"But received shape = [%s] and dimension is %d.",
203+
logits_dims, logits_dims.size()));
204+
PADDLE_ENFORCE_EQ(
205+
labels_dims.size(), 2UL,
206+
platform::errors::InvalidArgument(
207+
"Input(LabelsDim) of SampleLogitsOpGrad should be 2D. "
208+
"But received shape = [%s] and dimension is %d.",
209+
labels_dims, labels_dims.size()));
197210

198211
ctx->SetOutputDim(framework::GradVarName("Logits"), logits_dims);
199212
}

paddle/fluid/operators/sample_logits_op.h

Lines changed: 55 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -49,17 +49,37 @@ static void CPUTakeAlongD1(const platform::DeviceContext& ctx,
4949
const framework::Tensor& array,
5050
const framework::Tensor& index,
5151
framework::Tensor* value) {
52-
PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true);
52+
PADDLE_ENFORCE_EQ(
53+
platform::is_cpu_place(ctx.GetPlace()), true,
54+
platform::errors::InvalidArgument("This kernel only runs on CPU."));
5355
// UNDERSTAND: check shape src(B, C), index(B, K), out should also be (B, K)
54-
PADDLE_ENFORCE_EQ(index.dims().size(), 2);
55-
PADDLE_ENFORCE_EQ(array.dims().size(), 2);
56-
PADDLE_ENFORCE_EQ(index.dims()[0], array.dims()[0]);
57-
PADDLE_ENFORCE_EQ(index.dims(), value->dims());
58-
5956
const auto batch_size = index.dims()[0];
6057
const auto num_take = index.dims()[1];
6158
const auto array_dims = array.dims();
6259
const auto idx_dims = index.dims();
60+
PADDLE_ENFORCE_EQ(idx_dims.size(), 2,
61+
platform::errors::InvalidArgument(
62+
"index of CPUTakeAlongD1 should be 2D. "
63+
"But received shape = [%s] and dimension is %d.",
64+
idx_dims, idx_dims.size()));
65+
PADDLE_ENFORCE_EQ(array_dims.size(), 2,
66+
platform::errors::InvalidArgument(
67+
"array of CPUTakeAlongD1 should be 2D. "
68+
"But received shape = [%s] and dimension is %d.",
69+
array_dims, array_dims.size()));
70+
PADDLE_ENFORCE_EQ(idx_dims[0], array_dims[0],
71+
platform::errors::InvalidArgument(
72+
"The first dimension of index and array of "
73+
"CPUTakeAlongD1 should be equal. "
74+
"But received index shape = [%s], array shape = [%s], "
75+
"and the first dimensions are %d and %d.",
76+
idx_dims, array_dims, idx_dims[0], array_dims[0]));
77+
PADDLE_ENFORCE_EQ(
78+
idx_dims, value->dims(),
79+
platform::errors::InvalidArgument(
80+
"index and array of CPUTakeAlongD1 should have the same shape. "
81+
"But received index shape = [%s], array shape = [%s].",
82+
idx_dims, value->dims()));
6383

6484
// UNDERSTAND: no allocations here
6585
const T* p_array = array.data<T>();
@@ -89,16 +109,37 @@ static void CPUPutAlongD1(const platform::DeviceContext& ctx,
89109
framework::Tensor* array,
90110
const framework::Tensor& index,
91111
const framework::Tensor& value) {
92-
PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true);
112+
PADDLE_ENFORCE_EQ(
113+
platform::is_cpu_place(ctx.GetPlace()), true,
114+
platform::errors::InvalidArgument("This kernel only runs on CPU."));
93115
// UNDERSTAND: check shape src(B, C), index(B, K), out should also be (B, K)
94-
PADDLE_ENFORCE_EQ(index.dims().size(), 2);
95-
PADDLE_ENFORCE_EQ(array->dims().size(), 2);
96-
PADDLE_ENFORCE_EQ(index.dims()[0], array->dims()[0]);
97-
PADDLE_ENFORCE_EQ(index.dims(), value.dims());
98116
const auto batch_size = index.dims()[0];
99117
const auto num_put = index.dims()[1];
100118
auto array_dims = array->dims();
101119
auto idx_dims = index.dims();
120+
PADDLE_ENFORCE_EQ(idx_dims.size(), 2,
121+
platform::errors::InvalidArgument(
122+
"index of CPUPutAlongD1 should be 2D. "
123+
"But received shape = [%s] and dimension is %d.",
124+
idx_dims, idx_dims.size()));
125+
PADDLE_ENFORCE_EQ(array_dims.size(), 2,
126+
platform::errors::InvalidArgument(
127+
"array of CPUPutAlongD1 should be 2D. "
128+
"But received shape = [%s] and dimension is %d.",
129+
array_dims, array_dims.size()));
130+
PADDLE_ENFORCE_EQ(idx_dims[0], array_dims[0],
131+
platform::errors::InvalidArgument(
132+
"The first dimension of index and array of "
133+
"CPUPutAlongD1 should be equal. "
134+
"But received index shape = [%s], array shape = [%s], "
135+
"and the first dimensions are %d and %d.",
136+
idx_dims, array_dims, idx_dims[0], array_dims[0]));
137+
PADDLE_ENFORCE_EQ(
138+
idx_dims, value.dims(),
139+
platform::errors::InvalidArgument(
140+
"index and array of CPUPutAlongD1 should have the same shape. "
141+
"But received index shape = [%s], array shape = [%s].",
142+
idx_dims, value.dims()));
102143

103144
// UNDERSTAND: no allocations here
104145
T* p_array = array->data<T>();
@@ -149,8 +190,9 @@ class SampleLogitsKernel : public framework::OpKernel<T> {
149190
public:
150191
using Tensor = framework::Tensor;
151192
void Compute(const framework::ExecutionContext& context) const override {
152-
PADDLE_ENFORCE_EQ(platform::is_cpu_place(context.GetPlace()), true,
153-
"This kernel only runs on CPU.");
193+
PADDLE_ENFORCE_EQ(
194+
platform::is_cpu_place(context.GetPlace()), true,
195+
platform::errors::InvalidArgument("this kernel only runs on cpu."));
154196
VLOG(3) << "Enter SampleLogitsKernel";
155197
// get necessary inputs
156198
const Tensor* logits = context.Input<Tensor>("Logits");

0 commit comments

Comments
 (0)