Commit 0650ae0

FrostML and guoshengCS authored
[cherry-pick] error message enhancement (#24631)
* API (dynamic_gru, chunk_eval, BeamSearchDecoder) error message enhancement (#24513)
  * dynamic_gru err_msg enhancement, test=develop
  * chunk_eval err_msg enhancement and fix crf_decoding output type, test=develop
  * BeamSearchDecoder err msg enhancement, test=develop
* API/OP(sequence_expand_as) error message enhancement (#23712)

Co-authored-by: Guo Sheng <[email protected]>
1 parent 47bd526 commit 0650ae0
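
The change applies one pattern throughout: existence checks on operator inputs and outputs move from bare PADDLE_ENFORCE calls to the OP_INOUT_CHECK helper, and value/shape checks gain a typed platform::errors payload that reports the offending values. A minimal sketch of the before/after shape — the op name "some_op" and the dims x_dims/y_dims are illustrative placeholders, not taken from the diff:

    // Before: condition plus a bare message string.
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of SomeOp should not be null.");

    // After: a dedicated existence macro naming the op and the variable...
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "some_op");

    // ...and comparison macros carrying a typed, value-bearing error.
    PADDLE_ENFORCE_EQ(x_dims, y_dims,
                      platform::errors::InvalidArgument(
                          "Input(X)'s shape must match Input(Y)'s shape, "
                          "but received [%s] (X) vs [%s] (Y).",
                          x_dims, y_dims));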

File tree

13 files changed, +303 −115 lines

paddle/fluid/operators/chunk_eval_op.cc

Lines changed: 32 additions & 29 deletions
@@ -24,45 +24,48 @@ class ChunkEvalOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Inference"), true,
-                      "Input(Inference) of ChunkEvalOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Label"), true,
-                      "Input(Label) of ChunkEvalOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Precision"), true,
-                      "Output(Precision) of ChunkEvalOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Recall"), true,
-                      "Output(Recall) of ChunkEvalOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("F1-Score"), true,
-                      "Output(F1-Score) of ChunkEvalOp should not be null.");
-    PADDLE_ENFORCE_EQ(
-        ctx->HasOutput("NumInferChunks"), true,
-        "Output(NumInferChunks) of ChunkEvalOp should not be null.");
-    PADDLE_ENFORCE_EQ(
-        ctx->HasOutput("NumLabelChunks"), true,
-        "Output(NumLabelChunks) of ChunkEvalOp should not be null.");
-    PADDLE_ENFORCE_EQ(
-        ctx->HasOutput("NumCorrectChunks"), true,
-        "Output(NumCorrectChunks) of ChunkEvalOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Inference"), "Input", "Inference",
+                   "chunk_eval");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "chunk_eval");
+
+    OP_INOUT_CHECK(ctx->HasOutput("Precision"), "Output", "Precision",
+                   "chunk_eval");
+    OP_INOUT_CHECK(ctx->HasOutput("Recall"), "Output", "Recall", "chunk_eval");
+    OP_INOUT_CHECK(ctx->HasOutput("F1-Score"), "Output", "F1-Score",
+                   "chunk_eval");
+    OP_INOUT_CHECK(ctx->HasOutput("NumInferChunks"), "Output", "NumInferChunks",
+                   "chunk_eval");
+    OP_INOUT_CHECK(ctx->HasOutput("NumLabelChunks"), "Output", "NumLabelChunks",
+                   "chunk_eval");
+    OP_INOUT_CHECK(ctx->HasOutput("NumCorrectChunks"), "Output",
+                   "NumCorrectChunks", "chunk_eval");
 
     auto inference_dim = ctx->GetInputDim("Inference");
     auto label_dim = ctx->GetInputDim("Label");
 
     PADDLE_ENFORCE_EQ(
         inference_dim, label_dim,
-        "Input(Inference)'s shape must be the same as Input(Label)'s shape.");
+        platform::errors::InvalidArgument(
+            "Input(Inference)'s shape must be the same as Input(Label)'s "
+            "shape, but received [%s] (Inference) vs [%s] (Label).",
+            inference_dim, label_dim));
 
     bool use_padding = ctx->HasInput("SeqLength");
     if (use_padding) {
-      PADDLE_ENFORCE_EQ((inference_dim.size() == 3 && inference_dim[2] == 1) ||
-                            inference_dim.size() == 2,
-                        true,
-                        "when Input(SeqLength) is provided, Input(Inference) "
-                        "should be of dim 3 (batch_size, bucket, 1) or dim 2 "
-                        "(batch_size, bucket).");
+      PADDLE_ENFORCE_EQ(
+          (inference_dim.size() == 3 && inference_dim[2] == 1) ||
+              inference_dim.size() == 2,
+          true, platform::errors::InvalidArgument(
+                    "when Input(SeqLength) is provided, Input(Inference) "
+                    "should be of dim 3 (batch_size, bucket, 1) or dim 2 "
+                    "(batch_size, bucket), but received [%s].",
+                    inference_dim));
       auto seq_length_dim = ctx->GetInputDim("SeqLength");
-      PADDLE_ENFORCE_LE(
-          seq_length_dim.size(), 2,
-          "Input(SeqLength)'s rank should not be greater than 2.");
+      PADDLE_ENFORCE_LE(seq_length_dim.size(), 2,
+                        platform::errors::InvalidArgument(
+                            "Input(SeqLength)'s rank should not be greater "
+                            "than 2, but received %d.",
+                            seq_length_dim.size()));
     }
 
     ctx->SetOutputDim("Precision", {1});
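
OP_INOUT_CHECK(expr, type, name, op_name) bundles the HasInput/HasOutput test with a standardized message. As a rough mental model only — this is an assumed expansion, not the actual macro definition in the Paddle framework headers — each call behaves like:

    // Assumed expansion (sketch); the real macro's wording may differ.
    PADDLE_ENFORCE_EQ(ctx->HasInput("Inference"), true,
                      platform::errors::NotFound(
                          "No Input(Inference) found in chunk_eval operator."));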

paddle/fluid/operators/chunk_eval_op.h

Lines changed: 17 additions & 5 deletions
@@ -51,7 +51,13 @@ class ChunkEvalKernel : public framework::OpKernel<T> {
     for (int i = 0; i < length; ++i) {
       int prev_tag = tag;
       int prev_type = type;
-      PADDLE_ENFORCE_LE(label[i], num_chunk_types * num_tag_types);
+      PADDLE_ENFORCE_LE(
+          label[i], num_chunk_types * num_tag_types,
+          platform::errors::InvalidArgument(
+              "The value of Input(Label) should be less than the number of "
+              "chunk types times the number of tag types, but received %d "
+              "(Label) vs %d (chunk types) * %d (tag types).",
+              label[i], num_chunk_types, num_tag_types));
       tag = label[i] % num_tag_types;
       type = label[i] / num_tag_types;
       if (in_chunk && ChunkEnd(prev_tag, prev_type, tag, type, other_chunk_type,
@@ -191,10 +197,16 @@ class ChunkEvalKernel : public framework::OpKernel<T> {
            tag_inside, tag_end, tag_single, excluded_chunk_types);
       }
     } else {
-      PADDLE_ENFORCE_EQ(lod.size(), 1UL,
-                        "Only support one level sequence now.");
-      PADDLE_ENFORCE(lod == inference->lod(),
-                     "LoD must be same between Inference and Label.");
+      PADDLE_ENFORCE_EQ(
+          lod.size(), 1UL,
+          platform::errors::InvalidArgument(
+              "Only support one level LoD sequence now, but received %d.",
+              lod.size()));
+      PADDLE_ENFORCE_EQ(
+          lod, inference->lod(),
+          platform::errors::InvalidArgument(
+              "Input(Inference) and Input(Label) of Op(chunk_eval) should have "
+              "same LoD information."));
       num_sequences = lod[0].size() - 1;
 
       for (int i = 0; i < num_sequences; ++i) {
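
The new bound check in the kernel reflects how a label encodes a (chunk type, tag) pair, visible in the two lines right after the check. A small sketch of the decode, with names taken from the diff:

    // label = type * num_tag_types + tag, so valid labels stay below
    // num_chunk_types * num_tag_types.
    int tag = label[i] % num_tag_types;   // tag within the chunk (e.g. B/I/E/S)
    int type = label[i] / num_tag_types;  // chunk type index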

paddle/fluid/operators/gru_op.cc

Lines changed: 89 additions & 50 deletions
@@ -31,44 +31,58 @@ class GRUOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Input"),
-                   "Input(%s) of GRUOp should not be null.", "Input");
-    PADDLE_ENFORCE(ctx->HasInput("Weight"),
-                   "Input(%s) of GRUOp should not be null.", "Weight");
-    PADDLE_ENFORCE(ctx->HasOutput("BatchGate"),
-                   "Output(%s) of GRUOp should not be null.", "BatchGate");
-    PADDLE_ENFORCE(ctx->HasOutput("BatchResetHiddenPrev"),
-                   "Output(%s) of GRUOp should not be null.",
-                   "BatchResetHiddenPrev");
-    PADDLE_ENFORCE(ctx->HasOutput("BatchHidden"),
-                   "Output(%s) of GRUOp should not be null.", "BatchHidden");
-    PADDLE_ENFORCE(ctx->HasOutput("Hidden"),
-                   "Output(%s) of GRUOp should not be null.", "Hidden");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "GRU");
+    OP_INOUT_CHECK(ctx->HasInput("Weight"), "Input", "Weight", "GRU");
+    OP_INOUT_CHECK(ctx->HasOutput("BatchGate"), "Output", "BatchGate", "GRU");
+    OP_INOUT_CHECK(ctx->HasOutput("BatchResetHiddenPrev"), "Output",
+                   "BatchResetHiddenPrev", "GRU");
+    OP_INOUT_CHECK(ctx->HasOutput("BatchHidden"), "Output", "BatchHidden",
+                   "GRU");
+    OP_INOUT_CHECK(ctx->HasOutput("Hidden"), "Output", "Hidden", "GRU");
 
     auto input_dims = ctx->GetInputDim("Input");
     auto weight_dims = ctx->GetInputDim("Weight");
     int input_size = input_dims[1];
     int frame_size = weight_dims[0];
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(
-          input_size, frame_size * 3,
-          "The input_size must be 3 times of frame_size in GRUOp.");
+      PADDLE_ENFORCE_EQ(input_size, frame_size * 3,
+                        platform::errors::InvalidArgument(
+                            "The second dimension of Input(Input) must be 3 "
+                            "times of frame_size in GRUOp, but received %d "
+                            "(Input) vs %d (frame_size).",
+                            input_size, frame_size));
     }
     PADDLE_ENFORCE_EQ(
         weight_dims[1], frame_size * 3,
-        "The shape of Weight matrix must be [frame_size, frame_size * 3].");
+        platform::errors::InvalidArgument(
+            "The shape of Input(Weight) matrix must be [frame_size, frame_size "
+            "* 3], but received [%d, %d] (Weight) vs [%d, %d] (frame_size).",
+            weight_dims[0], weight_dims[1], frame_size, frame_size * 3));
     if (ctx->HasInput("H0")) {
       auto h0_dims = ctx->GetInputDim("H0");
-      PADDLE_ENFORCE_EQ(h0_dims[1], frame_size,
-                        "The width of H0 must be equal to frame_size.");
+      PADDLE_ENFORCE_EQ(
+          h0_dims[1], frame_size,
+          platform::errors::InvalidArgument(
+              "The width of Input(H0) must be equal to frame_size, but "
+              "received %d (width of H0) vs %d (frame_size).",
+              h0_dims[1], frame_size));
     }
     if (ctx->HasInput("Bias")) {
       auto bias_dims = ctx->GetInputDim("Bias");
       int bias_height = bias_dims[0];
       int bias_width = bias_dims[1];
-      PADDLE_ENFORCE_EQ(bias_height, 1,
-                        "The shape of Bias must be [1, frame_size * 3].");
-      PADDLE_ENFORCE_EQ(bias_width, frame_size * 3,
-                        "The shape of Bias must be [1, frame_size * 3].");
+      PADDLE_ENFORCE_EQ(
+          bias_height, 1,
+          platform::errors::InvalidArgument(
+              "The shape of Bias must be [1, frame_size * 3], but received "
+              "[%d, %d] (Bias) vs [1, %d] (frame_size * 3).",
+              bias_height, bias_width, frame_size * 3));
+      PADDLE_ENFORCE_EQ(
+          bias_width, frame_size * 3,
+          platform::errors::InvalidArgument(
+              "The shape of Bias must be [1, frame_size * 3], but received "
+              "[%d, %d] (Bias) vs [1, %d] (frame_size * 3).",
+              bias_height, bias_width, frame_size * 3));
     }
     ctx->SetOutputDim("BatchGate", input_dims);
     ctx->SetOutputDim("BatchResetHiddenPrev", {input_dims[0], frame_size});
@@ -166,39 +180,50 @@ class GRUGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Input"),
-                   "Input(%s) of GRUGradOp should not be null.", "Input");
-    PADDLE_ENFORCE(ctx->HasInput("Weight"),
-                   "Input(%s) of GRUGradOp should not be null.", "Weight");
-    PADDLE_ENFORCE(ctx->HasInput("BatchGate"),
-                   "Input(%s) of GRUGradOp should not be null.", "BatchGate");
-    PADDLE_ENFORCE(ctx->HasInput("BatchResetHiddenPrev"),
-                   "Input(%s) of GRUGradOp should not be null.",
-                   "BatchResetHiddenPrev");
-    PADDLE_ENFORCE(ctx->HasInput("BatchHidden"),
-                   "Input(%s) of GRUOp should not be null.", "BatchHidden");
-    PADDLE_ENFORCE(ctx->HasInput("Hidden"),
-                   "Input(%s) of GRUGradOp should not be null.", "Hidden");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Hidden")),
-                   "Input(%s@GRAD) of GRUGradOp should not be null.", "Hidden");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "GRU@Grad");
+    OP_INOUT_CHECK(ctx->HasInput("Weight"), "Input", "Weight", "GRU@Grad");
+    OP_INOUT_CHECK(ctx->HasInput("BatchGate"), "Input", "BatchGate",
+                   "GRU@Grad");
+    OP_INOUT_CHECK(ctx->HasInput("BatchResetHiddenPrev"), "Input",
+                   "BatchResetHiddenPrev", "GRU@Grad");
+    OP_INOUT_CHECK(ctx->HasInput("BatchHidden"), "Input", "BatchHidden",
+                   "GRU@Grad");
+    OP_INOUT_CHECK(ctx->HasInput("Hidden"), "Input", "Hidden", "GRU@Grad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Hidden")), "Input",
+                   framework::GradVarName("Hidden"), "GRU@Grad");
 
     auto input_dims = ctx->GetInputDim("Input");
     auto weight_dims = ctx->GetInputDim("Weight");
     int input_size = input_dims[1];
     int frame_size = weight_dims[0];
     int weight_height = weight_dims[0];
     int weight_width = weight_dims[1];
-    PADDLE_ENFORCE_EQ(input_size, frame_size * 3,
-                      "The input_size must be 3 times of frame_size in GRUOp.");
+    PADDLE_ENFORCE_EQ(
+        input_size, frame_size * 3,
+        platform::errors::InvalidArgument(
+            "The second dimension of Input(Input) must be 3 times of "
+            "frame_size in GRUOp, but received %d (Input) vs %d (frame_size).",
+            input_size, frame_size));
     PADDLE_ENFORCE_EQ(
         weight_height, frame_size,
-        "The shape of Weight matrix must be [frame_size, frame_size * 3].");
+        platform::errors::InvalidArgument(
+            "The shape of Input(Weight) matrix must be [frame_size, frame_size "
+            "* 3], but received [%d, %d] (Weight) vs [%d, %d] (frame_size).",
+            weight_height, weight_width, frame_size, frame_size * 3));
     PADDLE_ENFORCE_EQ(
         weight_width, frame_size * 3,
-        "The shape of Weight matrix must be [frame_size, frame_size * 3].");
+        platform::errors::InvalidArgument(
+            "The shape of Input(Weight) matrix must be [frame_size, frame_size "
+            "* 3], but received [%d, %d] (Weight) vs [%d, %d] (frame_size).",
+            weight_height, weight_width, frame_size, frame_size * 3));
     if (ctx->HasInput("H0")) {
       auto h0_dims = ctx->GetInputDim("H0");
-      PADDLE_ENFORCE_EQ(h0_dims[1], frame_size,
-                        "The width of H0 must be equal to frame_size.");
+      PADDLE_ENFORCE_EQ(
+          h0_dims[1], frame_size,
+          platform::errors::InvalidArgument(
+              "The width of Input(H0) must be equal to frame_size, but "
+              "received %d (width of H0) vs %d (frame_size).",
+              h0_dims[1], frame_size));
       auto h0_grad_name = framework::GradVarName("H0");
       if (ctx->HasOutput(h0_grad_name))
         ctx->SetOutputDim(h0_grad_name, h0_dims);
@@ -207,10 +232,18 @@ class GRUGradOp : public framework::OperatorWithKernel {
       auto bias_dims = ctx->GetInputDim("Bias");
       int bias_height = bias_dims[0];
       int bias_width = bias_dims[1];
-      PADDLE_ENFORCE_EQ(bias_height, 1,
-                        "The shape of Bias must be [1, frame_size * 3].");
-      PADDLE_ENFORCE_EQ(bias_width, frame_size * 3,
-                        "The shape of Bias must be [1, frame_size * 3].");
+      PADDLE_ENFORCE_EQ(
+          bias_height, 1,
+          platform::errors::InvalidArgument(
+              "The shape of Bias must be [1, frame_size * 3], but received "
+              "[%d, %d] (Bias) vs [1, %d] (frame_size * 3).",
+              bias_height, bias_width, frame_size * 3));
+      PADDLE_ENFORCE_EQ(
+          bias_width, frame_size * 3,
+          platform::errors::InvalidArgument(
+              "The shape of Bias must be [1, frame_size * 3], but received "
+              "[%d, %d] (Bias) vs [1, %d] (frame_size * 3).",
+              bias_height, bias_width, frame_size * 3));
       auto bias_grad_name = framework::GradVarName("Bias");
       if (ctx->HasOutput(bias_grad_name))
         ctx->SetOutputDim(bias_grad_name, bias_dims);
@@ -298,14 +331,20 @@ class GRUCPUKernel : public framework::OpKernel<T> {
     T* packed_gate = blas.GEMM_ALLOC(CblasBMatrix, 1 /*height of C*/,
                                      frame_size * 2 /*width of weight*/,
                                      frame_size /*height of height*/);
-    PADDLE_ENFORCE(packed_gate);
+    PADDLE_ENFORCE_NOT_NULL(
+        packed_gate, platform::errors::NotFound(
+                         "The calculation result of packed_gate by "
+                         "GEMM_ALLOC should not be null when using MKL."));
     blas.GEMM_PACK(CblasBMatrix, CblasNoTrans, 1 /*cur bs?*/, frame_size * 2,
                    frame_size, T(1.0), gru_value.gate_weight, frame_size * 2,
                    packed_gate);
     T* packed_state = blas.GEMM_ALLOC(CblasBMatrix, 1 /*height of C*/,
                                       frame_size /*width of weight*/,
                                       frame_size /*height of height*/);
-    PADDLE_ENFORCE(packed_state);
+    PADDLE_ENFORCE_NOT_NULL(
+        packed_state, platform::errors::NotFound(
+                          "The calculation result of packed_state by "
+                          "GEMM_ALLOC should not be null when using MKL."));
    blas.GEMM_PACK(CblasBMatrix, CblasNoTrans, 1 /*cur bs?*/, frame_size,
                   frame_size, T(1.0), gru_value.state_weight, frame_size,
                   packed_state);
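
The GRU checks all derive from one layout: the projections for the three gate computations (update, reset, candidate) are concatenated along the second dimension. A comment-only sketch of the shapes the enforcements above pin down:

    // Input:  [batch_size, frame_size * 3]  (3 concatenated gate projections)
    // Weight: [frame_size, frame_size * 3]
    // H0:     [batch_size, frame_size]
    // Bias:   [1, frame_size * 3]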

paddle/fluid/operators/lstm_op.h

Lines changed: 7 additions & 1 deletion
@@ -219,7 +219,13 @@ class LSTMGradKernel : public framework::OpKernel<T> {
     auto in_dims = input->dims();
     auto out_dims = hidden_g->dims();
     int frame_size = static_cast<int>(in_dims[1] / 4);
-    PADDLE_ENFORCE_EQ(frame_size, out_dims[1]);
+    PADDLE_ENFORCE_EQ(
+        frame_size, out_dims[1],
+        platform::errors::InvalidArgument(
+            "The second dimension of Input(" +
+                framework::GradVarName("Hidden") +
+                ") should be %d, but received %d in LSTM@Grad operator.",
+            frame_size, out_dims[1]));
 
     math::LstmMetaValue<T> lstm_value;
     if (bias && ctx.Attr<bool>("use_peepholes")) {
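
The message here is assembled by string concatenation so it names the actual gradient variable: framework::GradVarName("Hidden") yields the gradient name with Paddle's "@GRAD" suffix (the same suffix visible in the old GRUGradOp message above). Assuming that suffix, a sketch of the literal-equivalent form:

    // Sketch only (assumes GradVarName appends "@GRAD"):
    platform::errors::InvalidArgument(
        "The second dimension of Input(Hidden@GRAD) should be %d, "
        "but received %d in LSTM@Grad operator.",
        frame_size, out_dims[1]);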

paddle/fluid/operators/lstmp_op.h

Lines changed: 5 additions & 1 deletion
@@ -327,7 +327,11 @@ class LSTMPGradKernel : public framework::OpKernel<T> {
     auto out_dims = cell_out->dims();
     framework::DDim proj_dims({in_dims[0], proj_weight->dims()[1]});
     int frame_size = static_cast<int>(in_dims[1] / 4);
-    PADDLE_ENFORCE_EQ(frame_size, out_dims[1]);
+    PADDLE_ENFORCE_EQ(frame_size, out_dims[1],
+                      platform::errors::InvalidArgument(
+                          "The second dimension of Input(Cell) should be %d, "
+                          "but received %d in LSTMP@Grad operator.",
+                          frame_size, out_dims[1]));
 
     math::LstmMetaValue<T> lstmp_value;
     if (bias && ctx.Attr<bool>("use_peepholes")) {
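
Both the LSTM and LSTMP gradient kernels recover frame_size the same way: the input's second dimension holds the four concatenated gate projections, so dividing by 4 must reproduce the width being checked. A sketch, with names from the diff:

    // in_dims[1] == 4 * frame_size when the four gate blocks are concatenated.
    int frame_size = static_cast<int>(in_dims[1] / 4);
    // out_dims[1] (width of Cell here, of Hidden@GRAD in lstm_op.h) must match.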
