Commit 7f85dfe

Upgrade Error Message for AucOP & MultiplexOP (#24458)
1 parent 027f995 commit 7f85dfe

File tree: 9 files changed, +120 -36 lines changed


paddle/fluid/operators/metrics/auc_op.cc

Lines changed: 13 additions & 9 deletions
@@ -23,29 +23,33 @@ class AucOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Predict"),
-                   "Input of Out should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"),
-                   "Input of Label should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Predict"), "Input", "Predict", "Auc");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "Auc");
     auto predict_width = ctx->GetInputDim("Predict")[1];
     if (ctx->IsRuntime()) {
       PADDLE_ENFORCE_LE(predict_width, 2,
-                        "Only support binary classification,"
-                        "prediction dims[1] should be 1 or 2");
+                        platform::errors::InvalidArgument(
+                            "Only support binary classification,"
+                            "prediction dims[1] should be 1 or 2"));
     }
     auto predict_height = ctx->GetInputDim("Predict")[0];
     auto label_height = ctx->GetInputDim("Label")[0];
 
     if (ctx->IsRuntime()) {
       PADDLE_ENFORCE_EQ(predict_height, label_height,
-                        "Out and Label should have same height.");
+                        platform::errors::InvalidArgument(
+                            "Out and Label should have same height."));
     }
 
     int num_pred_buckets = ctx->Attrs().Get<int>("num_thresholds") + 1;
     int slide_steps = ctx->Attrs().Get<int>("slide_steps");
 
-    PADDLE_ENFORCE_GE(num_pred_buckets, 1, "num_thresholds must larger than 1");
-    PADDLE_ENFORCE_GE(slide_steps, 0, "slide_steps must be natural number");
+    PADDLE_ENFORCE_GE(
+        num_pred_buckets, 1,
+        platform::errors::InvalidArgument("num_thresholds must larger than 1"));
+    PADDLE_ENFORCE_GE(slide_steps, 0,
+                      platform::errors::InvalidArgument(
+                          "slide_steps must be natural number"));
 
     ctx->SetOutputDim("AUC", {1});
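For context, the checks above encode the operator's shape contract: Predict must be [batch, 1 or 2] and Label must share its batch size. A minimal Python-level sketch of a call that satisfies that contract (variable names, shapes, and dtypes here are illustrative assumptions, not part of this commit):

    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program(), fluid.Program()):
        # Predict: [batch, 2] float32; Label: same batch size, integer dtype.
        predict = fluid.data(name="predict", shape=[-1, 2], dtype="float32")
        label = fluid.data(name="label", shape=[-1, 1], dtype="int64")
        auc_out, batch_auc_out, auc_states = fluid.layers.auc(
            input=predict, label=label)

Violating these constraints now surfaces as a platform::errors::InvalidArgument message rather than a bare PADDLE_ENFORCE string.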

paddle/fluid/operators/multiplex_op.cc

Lines changed: 30 additions & 17 deletions
@@ -26,28 +26,39 @@ class MultiplexOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Ids"), "Input(Ids) shouldn't be null.");
-    PADDLE_ENFORCE(!ctx->Inputs("X").empty(),
-                   "MultiInput(X) shouldn't be empty.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) shouldn't be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Ids"), "Input", "Ids", "Multiplex");
+    PADDLE_ENFORCE_NE(
+        ctx->Inputs("X").empty(), true,
+        platform::errors::InvalidArgument("MultiInput(X) shouldn't be empty."));
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Multiplex");
     auto ids_dim = ctx->GetInputDim("Ids");
-    PADDLE_ENFORCE(
-        ids_dim.size() == 2 && ids_dim[1] == 1,
-        "The index tensor must be a vector with size batchSize x 1.");
+    PADDLE_ENFORCE_EQ(
+        ids_dim.size(), 2,
+        platform::errors::PreconditionNotMet(
+            "The index tensor must be a vector with 2 dimensions"));
+    PADDLE_ENFORCE_EQ(
+        ids_dim[1], 1,
+        platform::errors::PreconditionNotMet(
+            "The index tensor must be a vector with batchSize x 1."));
 
     auto ins_dims = ctx->GetInputsDim("X");
     auto num_ins = ins_dims.size();
-    PADDLE_ENFORCE(num_ins > 1,
-                   "multiplex operator should have more than "
-                   "one candidate input tensors.");
+    PADDLE_ENFORCE_GT(num_ins, 1,
+                      platform::errors::InvalidArgument(
+                          "multiplex operator should have more than "
+                          "one candidate input tensors."));
 
     auto in_dim = ins_dims[0];
-    PADDLE_ENFORCE(in_dim.size() >= 2,
-                   "The rank of candidate tensors must be not less than 2.");
+    PADDLE_ENFORCE_GE(
+        in_dim.size(), 2,
+        platform::errors::InvalidArgument(
+            "The rank of candidate tensors must be not less than 2."));
     for (size_t i = 1; i < num_ins; i++) {
       auto dim = ins_dims[i];
-      PADDLE_ENFORCE(in_dim == dim,
-                     "All the candidate tensors must have the same size.");
+      PADDLE_ENFORCE_EQ(
+          in_dim, dim,
+          platform::errors::PreconditionNotMet(
+              "All the candidate tensors must have the same size."));
     }
     ctx->SetOutputDim("Out", in_dim);
   }
@@ -115,9 +126,11 @@ class MultiplexGradOp : public framework::OperatorWithKernel {
 
   void InferShape(framework::InferShapeContext* ctx) const override {
     auto dxs = ctx->Outputs(framework::GradVarName("X"));
-    PADDLE_ENFORCE(!dxs.empty(), "Output(X@Grad) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null.");
+    PADDLE_ENFORCE_NE(dxs.empty(), true,
+                      platform::errors::InvalidArgument(
+                          "Output(X@Grad) should not be null."));
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "MultiplexGrad");
     auto dout_dim = ctx->GetInputDim(framework::GradVarName("Out"));
     ctx->SetOutputsDim(framework::GradVarName("X"),
                        std::vector<framework::DDim>(dxs.size(), dout_dim));
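The rewritten checks spell out what a well-formed multiplex call must provide: an Ids tensor of shape [batch, 1], at least two candidate inputs, all with identical shapes and rank >= 2. A minimal construction that satisfies them, sketched with the fluid API (names, shapes, and dtypes are illustrative assumptions):

    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program(), fluid.Program()):
        # Two candidates with the same [batch, 2] shape; index is [batch, 1] int32.
        x1 = fluid.data(name="x1", shape=[None, 2], dtype="float32")
        x2 = fluid.data(name="x2", shape=[None, 2], dtype="float32")
        index = fluid.data(name="index", shape=[None, 1], dtype="int32")
        out = fluid.layers.multiplex(inputs=[x1, x2], index=index)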

paddle/fluid/operators/multiplex_op.cu

Lines changed: 5 additions & 3 deletions
@@ -40,9 +40,11 @@ class MultiplexGPUKernel : public framework::OpKernel<T> {
         BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
     for (auto i = 0; i < rows; i++) {
       int32_t k = index[i];
-      PADDLE_ENFORCE_GE(k, 0, "index must be nonnegative.");
-      PADDLE_ENFORCE_LT((size_t)k, ins.size(),
-                        "index exceeds the number of candidate tensors.");
+      PADDLE_ENFORCE_GE(k, 0, platform::errors::PreconditionNotMet(
+                                  "index must be nonnegative."));
+      PADDLE_ENFORCE_LT(static_cast<size_t>(k), ins.size(),
+                        platform::errors::PreconditionNotMet(
+                            "index exceeds the number of candidate tensors."));
       memory::Copy(place, out->data<T>() + i * cols, place,
                    ins[k]->data<T>() + i * cols, cols * sizeof(T), stream);
     }

paddle/fluid/operators/multiplex_op.h

Lines changed: 4 additions & 2 deletions
@@ -38,9 +38,11 @@ class MultiplexCPUKernel : public framework::OpKernel<T> {
         BOOST_GET_CONST(platform::CPUPlace, ctx.GetPlace());
     for (auto i = 0; i < rows; i++) {
       int32_t k = index[i];
-      PADDLE_ENFORCE_GE(k, 0, "index must be nonnegative.");
+      PADDLE_ENFORCE_GE(k, 0, platform::errors::PreconditionNotMet(
+                                  "index must be nonnegative."));
       PADDLE_ENFORCE_LT(static_cast<size_t>(k), ins.size(),
-                        "index exceeds the number of candidate tensors.");
+                        platform::errors::PreconditionNotMet(
+                            "index exceeds the number of candidate tensors."));
       memory::Copy(place, out->data<T>() + i * cols, place,
                    ins[k]->data<T>() + i * cols, cols * sizeof(T));
     }
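The bounds checks in the CPU and GPU kernels guard the row-wise gather that multiplex performs: for each row i, index[i] selects which candidate tensor supplies row i of the output, so every index value must lie in [0, number of candidates). A NumPy sketch of that reference computation (illustrative only, not Paddle code):

    import numpy as np

    def multiplex_reference(candidates, index):
        # candidates: list of arrays sharing the same [rows, cols] shape
        # index: integer array of shape [rows, 1]; index[i] picks the source of row i
        out = np.empty_like(candidates[0])
        for i in range(candidates[0].shape[0]):
            k = int(index[i, 0])
            # mirrors the PADDLE_ENFORCE_GE / PADDLE_ENFORCE_LT checks in the kernels
            assert 0 <= k < len(candidates), "index exceeds the number of candidate tensors"
            out[i] = candidates[k][i]
        return out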

python/paddle/fluid/layers/metric_op.py

Lines changed: 2 additions & 0 deletions
@@ -175,6 +175,8 @@ def auc(input,
             #[array([0.5])]
     """
     helper = LayerHelper("auc", **locals())
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'auc')
+    check_variable_and_dtype(label, 'label', ['int32', 'int64'], 'auc')
     auc_out = helper.create_variable_for_type_inference(dtype="float64")
     batch_auc_out = helper.create_variable_for_type_inference(dtype="float64")
     # make tp, tn, fp, fn persistable, so that can accumulate all batches.
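With these two checks in place, a dtype mismatch is rejected at layer-construction time with a Python TypeError instead of failing later inside the C++ operator. A small sketch of the behaviour the new unit test below also exercises (variable names are illustrative):

    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program(), fluid.Program()):
        predict = fluid.data(name="predict", shape=[-1, 2], dtype="float32")
        bad_label = fluid.data(name="bad_label", shape=[-1, 1], dtype="float32")
        try:
            fluid.layers.auc(input=predict, label=bad_label)  # label must be int32/int64
        except TypeError as exc:
            print("rejected during graph construction:", exc)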

python/paddle/fluid/layers/nn.py

Lines changed: 9 additions & 3 deletions
@@ -5607,9 +5607,15 @@ def multiplex(inputs, index):
     """
     helper = LayerHelper('multiplex', **locals())
 
-    if not isinstance(inputs, list) and len(inputs) < 2:
-        raise ValueError("inputs should be a list object and contains at least "
-                         "2 elements.")
+    check_type(inputs, 'inputs', (list), 'multiplex')
+    if len(inputs) < 2:
+        raise ValueError(
+            "inputs should be a list object with at least 2 elements.")
+    for id, x in enumerate(inputs):
+        check_variable_and_dtype(x, 'input[' + str(id) + ']',
+                                 ['float32', 'float64', 'int32', 'int64'],
+                                 'multiplex')
+    check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex')
 
     out = helper.create_variable_for_type_inference(inputs[0].dtype)
     helper.append_op(

python/paddle/fluid/tests/unittests/test_auc_op.py

Lines changed: 22 additions & 1 deletion
@@ -18,6 +18,7 @@
 import numpy as np
 from op_test import OpTest
 from paddle.fluid import metrics
+import paddle.fluid as fluid
 
 
 class TestAucOp(OpTest):
@@ -104,5 +105,25 @@ def test_check_output(self):
         self.check_output()
 
 
-if __name__ == "__main__":
+class TestAucOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+
+            def test_type1():
+                data1 = fluid.data(name="input1", shape=[-1, 2], dtype="int")
+                label1 = fluid.data(name="label1", shape=[-1], dtype="int")
+                result1 = fluid.layers.auc(input=data1, label=label1)
+
+            self.assertRaises(TypeError, test_type1)
+
+            def test_type2():
+                data2 = fluid.data(
+                    name="input2", shape=[-1, 2], dtype="float32")
+                label2 = fluid.data(name="label2", shape=[-1], dtype="float32")
+                result2 = fluid.layers.auc(input=data2, label=label2)
+
+            self.assertRaises(TypeError, test_type2)
+
+
+if __name__ == '__main__':
     unittest.main()

python/paddle/fluid/tests/unittests/test_inference_model_io.py

Lines changed: 1 addition & 1 deletion
@@ -142,7 +142,7 @@ def test_save_inference_model_with_auc(self):
         # fake program without feed/fetch
         with program_guard(program, init_program):
             x = layers.data(name='x', shape=[2], dtype='float32')
-            y = layers.data(name='y', shape=[1], dtype='float32')
+            y = layers.data(name='y', shape=[1], dtype='int32')
             predict = fluid.layers.fc(input=x, size=2, act='softmax')
             acc = fluid.layers.accuracy(input=predict, label=y)
             auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict,

python/paddle/fluid/tests/unittests/test_multiplex_op.py

Lines changed: 34 additions & 0 deletions
@@ -17,6 +17,7 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
 
 
 class TestMultiplexOp(OpTest):
@@ -57,5 +58,38 @@ def test_check_grad_ignore_x3(self):
         self.check_grad(['x1', 'x2', 'x4'], 'Out', no_grad_set=set('x3'))
 
 
+class TestMultiplexOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            x1 = fluid.data(name='x1', shape=[None, 2], dtype='int64')
+            x2 = fluid.data(name='x2', shape=[None, 2], dtype='int64')
+            index = fluid.data(name='index', shape=[None, 1], dtype='int32')
+
+            def test_list():
+                # the inputs type must be list
+                fluid.layers.multiplex(inputs=x1, index=index)
+
+            self.assertRaises(TypeError, test_list)
+
+            def test_len():
+                fluid.layers.multiplex(inputs=[x1], index=index)
+
+            self.assertRaises(ValueError, test_len)
+
+            def test_type():
+                y1 = fluid.data(name='y1', shape=[None, 2], dtype='int16')
+                y2 = fluid.data(name='y2', shape=[None, 2], dtype='int16')
+                fluid.layers.multiplex(inputs=[y1, y2], index=index)
+
+            self.assertRaises(TypeError, test_type)
+
+            def test_type2():
+                index2 = fluid.data(
+                    name='index2', shape=[None, 1], dtype='int16')
+                fluid.layers.multiplex(inputs=[x1, x2], index=index2)
+
+            self.assertRaises(TypeError, test_type2)
+
+
 if __name__ == '__main__':
     unittest.main()
