
Commit d98e118

fix check and error message for flatten hash is_empty op (#24434)
fix check info for flatten hash is_empty op; test=develop
Parent: 30efee3

File tree: 8 files changed (+130, -34 lines)

paddle/fluid/operators/flatten_op.cc

Lines changed: 17 additions & 18 deletions
@@ -29,17 +29,17 @@ class FlattenOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input (X) of Flatten op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output (Output) of Flatten op should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Flatten");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Flatten");
     const auto &axis = ctx->Attrs().Get<int>("axis");
     const auto &in_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_GE(axis, 0,
-                      "The axis should be greater than or equal to 0.");
+                      platform::errors::InvalidArgument(
+                          "The axis should be greater than or equal to 0."));
     PADDLE_ENFORCE_LE(
         axis, in_dims.size(),
-        "The axis should be less than or equal to input tensor's rank.");
+        platform::errors::InvalidArgument(
+            "The axis should be less than or equal to input tensor's rank."));

     const auto &out_dims = GetOutputShape(axis, in_dims);
     ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
@@ -161,17 +161,17 @@ class Flatten2Op : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input (X) of Flatten op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output (Output) of Flatten op should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Flatten2");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Flatten2");
     const auto &axis = ctx->Attrs().Get<int>("axis");
     const auto &in_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_GE(axis, 0,
-                      "The axis should be greater than or equal to 0.");
+                      platform::errors::InvalidArgument(
+                          "The axis should be greater than or equal to 0."));
     PADDLE_ENFORCE_LE(
         axis, in_dims.size(),
-        "The axis should be less than or equal to input tensor's rank.");
+        platform::errors::InvalidArgument(
+            "The axis should be less than or equal to input tensor's rank"));

     const auto &out_dims = FlattenOp::GetOutputShape(axis, in_dims);
     ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
@@ -181,8 +181,7 @@ class Flatten2Op : public framework::OperatorWithKernel {
       ctx->ShareLoD("X", "Out");
     }

-    PADDLE_ENFORCE_EQ(ctx->HasOutput("XShape"), true,
-                      "Output (XShape) of Flatten op should not be null.");
+    OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Flatten2");
     std::vector<int64_t> xshape_dims(in_dims.size() + 1);
     xshape_dims[0] = 0;
     for (int i = 0; i < in_dims.size(); ++i) {
@@ -223,10 +222,10 @@ class Flatten2GradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE_EQ(context->HasInput("XShape"), true,
-                      "Input(XShape) shouldn't be null.");
-    PADDLE_ENFORCE_EQ(context->HasInput(framework::GradVarName("Out")), true,
-                      "Input(Out@GRAD) shouldn't be null.");
+    OP_INOUT_CHECK(context->HasInput("XShape"), "Input", "XShape",
+                   "Flatten2Grad");
+    OP_INOUT_CHECK(context->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "Flatten2Grad");
     auto xshape_dims = context->GetInputDim("XShape");
     auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());
     context->SetOutputDim(framework::GradVarName("X"), x_dims);
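
A note on the axis bounds enforced above: flatten collapses the input into a 2-D matrix, multiplying the first axis dimensions into the row count and the remaining dimensions into the column count, so any axis in [0, rank] is legal. A plain-NumPy sketch of the shape rule (an illustration of GetOutputShape's contract, not Paddle's code):

import numpy as np

def flatten_shape(in_shape, axis):
    # Mirrors FlattenOp::GetOutputShape: dims [0, axis) multiply into the
    # row count, dims [axis, rank) into the column count. np.prod([]) is 1,
    # so axis = 0 and axis = rank both work.
    assert 0 <= axis <= len(in_shape), "axis must be in [0, rank]"
    rows = int(np.prod(in_shape[:axis]))
    cols = int(np.prod(in_shape[axis:]))
    return [rows, cols]

print(flatten_shape([3, 100, 100, 4], axis=2))  # [300, 400]
print(flatten_shape([4, 4, 3], axis=2))         # [16, 3]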

paddle/fluid/operators/hash_op.cc

Lines changed: 4 additions & 5 deletions
@@ -26,14 +26,13 @@ class HashOp : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of HashOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of HashOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Hash");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Hash");

     auto dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_EQ(dims.size(), 2UL,
-                      "The input of hash_op's dimensions must be 2");
+                      platform::errors::InvalidArgument(
+                          "The input of hash_op's dimensions must be 2"));
     std::vector<int64_t> out_dims;
     int num_hash = ctx->Attrs().Get<int>("num_hash");
     HashOutputSize(dims, out_dims, num_hash);
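
For context on HashOutputSize: the op maps every input id to num_hash bucket ids in [0, hash_size), stacked along a new axis. A rough Python mock of that contract (mock_hash_op is hypothetical; the real kernel uses an xxHash-based function, not Python's built-in hash):

def mock_hash_op(ids, hash_size, num_hash=1):
    # One bucket per (seed, id) pair, reduced modulo hash_size, mirroring
    # the op's [rows, num_hash, 1] output layout.
    return [[[hash((seed, i)) % hash_size] for seed in range(num_hash)]
            for i in ids]

print(mock_hash_op([7, 42], hash_size=2**10, num_hash=2))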

paddle/fluid/operators/is_empty_op.cc

Lines changed: 2 additions & 4 deletions
@@ -25,10 +25,8 @@ class IsEmptyOp : public framework::OperatorWithKernel {

  protected:
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of IsEmptyOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of IsEmptyOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "IsEmpty");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "IsEmpty");
     ctx->SetOutputDim("Out", {1});
   }
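
The hard-coded [1] output shape follows from the op's semantics: a single boolean that is true iff the input holds zero elements. A NumPy reference sketch of those semantics (not the actual kernel):

import numpy as np

def is_empty_ref(x):
    # True iff x contains no elements, matching the op's single-bool output.
    return np.array([x.size == 0])

print(is_empty_ref(np.zeros((3, 2))))  # [False]
print(is_empty_ref(np.zeros((0, 2))))  # [ True]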

python/paddle/fluid/layers/control_flow.py

Lines changed: 6 additions & 6 deletions
@@ -26,7 +26,7 @@
 import warnings
 import six
 from functools import reduce, partial
-from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type
+from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from ... import compat as cpt
 from ..backward import _infer_var_data_type_shape_

@@ -3725,15 +3725,15 @@ def is_empty(x, cond=None):
         # fluid.layers.is_empty(x=input, cond=res)

     """
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'is_empty')
+    check_type(cond, 'cond', (Variable, type(None)), 'is_empty')
     helper = LayerHelper("is_empty", **locals())
     if cond is None:
         cond = helper.create_variable_for_type_inference(dtype='bool')
         cond.stop_gradient = True
-    elif not isinstance(cond, Variable):
-        raise TypeError("cond takes a variable")
-    elif cond.dtype != 'bool':
-        raise TypeError("The data type of cond must be bool")
-
+    else:
+        check_dtype(cond.dtype, 'cond', ['bool'], 'is_empty')
     helper.append_op(
         type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]})
     return cond
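
The dropped elif branch compared Variable.dtype, a core.VarDesc.VarType enum, against the string 'bool', a comparison that can never succeed; check_dtype converts the enum to its string name before comparing. A small sketch of the mismatch (assumes a Paddle 1.x fluid build):

import paddle.fluid as fluid

with fluid.program_guard(fluid.Program(), fluid.Program()):
    cond = fluid.layers.data(name='cond', shape=[1], dtype='bool')
    print(cond.dtype)            # a VarType enum, not the string 'bool'
    print(cond.dtype != 'bool')  # True, so the old branch raised even for bool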

python/paddle/fluid/layers/nn.py

Lines changed: 5 additions & 0 deletions
@@ -9628,6 +9628,8 @@ def flatten(x, axis=1, name=None):
         out = fluid.layers.flatten(x=x, axis=2)
         # out shape is [16, 3]
     """
+    check_variable_and_dtype(
+        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64'], 'flatten')
     helper = LayerHelper('flatten', **locals())

     if not (isinstance(x, Variable)):
@@ -12466,6 +12468,9 @@ def hash(input, hash_size, num_hash=1, name=None):
         #   [386]
         #   [901]]]
     """
+    check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'hash')
+    check_type(hash_size, 'hash_size', ['int32', 'int64'], 'hash')
+    check_type(num_hash, 'num_hash', ['int32', 'int64'], 'hash')
     helper = LayerHelper('hash', **locals())
     out = helper.create_variable_for_type_inference(
         helper.input_dtype(), stop_gradient=True)
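
check_variable_and_dtype and check_type come from paddle.fluid.data_feeder and fail fast in Python with an error that names the offending argument and op, rather than deferring to a C++ enforce failure. A rough mock of that contract (check_type_mock is hypothetical, not fluid's implementation):

def check_type_mock(value, name, expected_types, op_name):
    # Mirrors the intent of fluid's check_type: reject a wrong Python type
    # early and name both the argument and the op in the error message.
    if not isinstance(value, expected_types):
        raise TypeError("The type of '%s' in %s must be %s, but received %s."
                        % (name, op_name, expected_types, type(value)))

check_type_mock(2**32, 'hash_size', (int,), 'hash')   # passes silently
# check_type_mock(2.5, 'num_hash', (int,), 'hash')    # would raise TypeError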

python/paddle/fluid/tests/unittests/test_flatten2_op.py

Lines changed: 21 additions & 1 deletion
@@ -16,7 +16,7 @@

 import unittest
 import numpy as np
-
+import paddle.fluid as fluid
 from op_test import OpTest


@@ -69,5 +69,25 @@ def init_test_case(self):
         self.new_shape = (36, 16)


+class TestFlatten2OpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            input_data = np.random.random((3, 2, 4, 5)).astype("float64")
+
+            def test_Variable():
+                # the input type must be Variable
+                fluid.layers.flatten(input_data, axis=1)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            def test_type():
+                # dtype must be float32, float64, int8, int32, int64.
+                x2 = fluid.layers.data(
+                    name='x2', shape=[3, 2, 4, 5], dtype='float16')
+                fluid.layers.flatten(x2, axis=1)
+
+            self.assertRaises(TypeError, test_type)
+
+
 if __name__ == "__main__":
     unittest.main()

python/paddle/fluid/tests/unittests/test_hash_op.py

Lines changed: 37 additions & 0 deletions
@@ -15,6 +15,7 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid


 class TestHashOp(OpTest):
@@ -102,5 +103,41 @@ def test_check_output(self):
         self.check_output()


+class TestHashOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            input_data = np.random.randint(0, 10, (8, 1)).astype("int32")
+
+            def test_Variable():
+                # the input type must be Variable
+                fluid.layers.hash(input=input_data, hash_size=2**32)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            def test_type():
+                # dtype must be int32, int64.
+                x2 = fluid.layers.data(
+                    name='x2', shape=[1], dtype="float32", lod_level=1)
+                fluid.layers.hash(input=x2, hash_size=2**32)
+
+            self.assertRaises(TypeError, test_type)
+
+            def test_hash_size_type():
+                # hash_size dtype must be int32, int64.
+                x3 = fluid.layers.data(
+                    name='x3', shape=[1], dtype="int32", lod_level=1)
+                fluid.layers.hash(input=x3, hash_size=1024.5)
+
+            self.assertRaises(TypeError, test_hash_size_type)
+
+            def test_num_hash_type():
+                # num_hash dtype must be int32, int64.
+                x4 = fluid.layers.data(
+                    name='x4', shape=[1], dtype="int32", lod_level=1)
+                fluid.layers.hash(input=x4, hash_size=2**32, num_hash=2.5)
+
+            self.assertRaises(TypeError, test_num_hash_type)
+
+
 if __name__ == "__main__":
     unittest.main()

python/paddle/fluid/tests/unittests/test_is_empty_op.py

Lines changed: 38 additions & 0 deletions
@@ -17,6 +17,7 @@
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid


 class TestEmpty(OpTest):
@@ -36,5 +37,42 @@ def setUp(self):
         self.outputs = {'Out': np.array([True])}


+class TestIsEmptyOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            input_data = np.random.random((3, 2)).astype("float64")
+
+            def test_Variable():
+                # the input type must be Variable
+                fluid.layers.is_empty(x=input_data)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            def test_cond_Variable():
+                # cond type must be Variable or None
+                x2 = fluid.layers.data(name="x2", shape=[3, 2], dtype="float32")
+                cond_data = np.random.random((3, 2)).astype("float32")
+                fluid.layers.is_empty(x=x2, cond=cond_data)
+
+            self.assertRaises(TypeError, test_cond_Variable)
+
+            def test_type():
+                # dtype must be float32, float64, int32, int64
+                x3 = fluid.layers.data(
+                    name="x3", shape=[4, 32, 32], dtype="bool")
+                res = fluid.layers.is_empty(x=x3)
+
+            self.assertRaises(TypeError, test_type)
+
+            def test_cond_type():
+                # cond dtype must be bool.
+                x4 = fluid.layers.data(name="x4", shape=[3, 2], dtype="float32")
+                cond = fluid.layers.data(
+                    name="cond", shape=[1], dtype="float32")
+                fluid.layers.is_empty(x=x4, cond=cond)
+
+            self.assertRaises(TypeError, test_cond_type)
+
+
 if __name__ == "__main__":
     unittest.main()
