
Commit 19d9846

fix error message, test=develop (#24425) (#24547)
1 parent 5582719 commit 19d9846


12 files changed: +178 additions, −111 deletions

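In every file below the change follows the same pattern: untyped PADDLE_ENFORCE(cond, msg) calls are replaced by comparison macros such as PADDLE_ENFORCE_EQ, and each message is wrapped in a platform::errors category (InvalidArgument, PreconditionNotMet, NotFound). A minimal before/after sketch of that pattern, lifted from the gather_op.cc hunk below and assuming the macros from paddle/fluid/platform/enforce.h and the error types from paddle/fluid/platform/errors.h are in scope:

  // Before: a bare condition with an untyped message string.
  PADDLE_ENFORCE(ctx->HasInput("X"),
                 "Input(X) of GatherOp should not be null.");

  // After: an explicit equality check plus an error category, so the raised
  // exception carries both the failed comparison and a typed message.
  PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
                    platform::errors::InvalidArgument(
                        "Input(X) of GatherOp should not be null."));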

paddle/fluid/operators/distributed_ops/allreduce_op.h

Lines changed: 6 additions & 4 deletions

@@ -33,8 +33,9 @@ class AllReduceOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto place = ctx.GetPlace();
-    PADDLE_ENFORCE(is_gpu_place(place),
-                   "AllReduce op can run on gpu place only for now.");
+    PADDLE_ENFORCE_EQ(is_gpu_place(place), true,
+                      platform::errors::PreconditionNotMet(
+                          "AllReduce op can run on gpu place only for now."));
 #if defined(PADDLE_WITH_NCCL)
     auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
     auto in = ctx.Input<framework::Tensor>("X");
@@ -49,7 +50,8 @@ class AllReduceOpKernel : public framework::OpKernel<T> {
     auto* comm = dev_ctx.nccl_comm();
     // FIXME(typhoonzero): should use nccl stream here.
     auto stream = dev_ctx.stream();
-    PADDLE_ENFORCE_NOT_NULL(stream, "Should initialize NCCL firstly.");
+    PADDLE_ENFORCE_NOT_NULL(
+        stream, platform::errors::NotFound("Should initialize NCCL firstly."));
 
     int reduce_type = ctx.Attr<int>("reduce_type");
     ncclRedOp_t red_type = ncclSum;
@@ -67,7 +69,7 @@ class AllReduceOpKernel : public framework::OpKernel<T> {
         red_type = ncclMin;
         break;
     }
-    PADDLE_ENFORCE(platform::dynload::ncclAllReduce(
+    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce(
         sendbuff, recvbuff, numel, static_cast<ncclDataType_t>(dtype), red_type,
         comm, stream));
     if (ctx.Attr<bool>("sync_mode")) {
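Besides the boolean checks, this kernel uses two more specific forms that the commit also migrates: PADDLE_ENFORCE_NOT_NULL now takes an error category as its second argument, and the NCCL call is validated with PADDLE_ENFORCE_CUDA_SUCCESS, which inspects the returned status code instead of a generic condition. A condensed sketch using the locals from the hunk above (stream, comm, sendbuff, recvbuff, numel, dtype, red_type belong to the kernel):

  // Null check with a typed message instead of a bare string.
  PADDLE_ENFORCE_NOT_NULL(
      stream, platform::errors::NotFound("Should initialize NCCL firstly."));

  // The CUDA/NCCL success macro wraps the library call and raises with the
  // failing status if ncclAllReduce does not return ncclSuccess.
  PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce(
      sendbuff, recvbuff, numel, static_cast<ncclDataType_t>(dtype), red_type,
      comm, stream));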

paddle/fluid/operators/distributed_ops/broadcast_op.cc

Lines changed: 6 additions & 4 deletions

@@ -26,10 +26,12 @@ class BroadcastOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of BroadcastOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Output) of ConvOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of BroadcastOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(Output) of ConvOp should not be null."));
   }
 };

paddle/fluid/operators/distributed_ops/broadcast_op.cu.cc

Lines changed: 12 additions & 7 deletions

@@ -34,28 +34,33 @@ template <typename T>
 class NCCLBroadcastOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "The place of ExecutionContext should be CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        platform::errors::PreconditionNotMet(
+            "The place of ExecutionContext should be CUDAPlace."));
 
 #if defined(PADDLE_WITH_NCCL)
     int dev_id = boost::get<platform::CUDAPlace>(ctx.GetPlace()).device;
     int root_dev_id = ctx.Attr<int>("root");
 
     auto in = ctx.Input<framework::Tensor>("X");
     auto out = ctx.Output<framework::Tensor>("Out");
-    PADDLE_ENFORCE(out->IsInitialized(),
-                   "Currently, the output of broadcast op must be initialized, "
-                   "because this op can only be an In-Place operation.");
+    PADDLE_ENFORCE_EQ(
+        out->IsInitialized(), true,
+        platform::errors::PreconditionNotMet(
+            "Currently, the output of broadcast op must be initialized,"
+            "because this op can only be an In-Place operation."));
     void* send_recv_buffer = out->mutable_data<T>(ctx.GetPlace());
     PADDLE_ENFORCE_EQ(
         send_recv_buffer, in->data<void>(),
-        "Currently, the broadcast op can only be an In-Place operation.");
+        platform::errors::PreconditionNotMet("Currently, the broadcast op can "
+                                             "only be an In-Place operation."));
 
     auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
     auto comm = dev_ctx.nccl_comm();
     auto stream = dev_ctx.stream();
 
-    PADDLE_ENFORCE(platform::dynload::ncclBcast(
+    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclBcast(
        send_recv_buffer, static_cast<size_t>(in->numel()),
        platform::ToNCCLDataType(in->type()), root_dev_id, comm, stream));

paddle/fluid/operators/eye_op.cc

Lines changed: 11 additions & 7 deletions

@@ -22,16 +22,20 @@ class EyeOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of EyeOP should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(Out) of EyeOP should not be null."));
     auto num_rows = ctx->Attrs().Get<int64_t>("num_rows");
-    PADDLE_ENFORCE(num_rows >= 0,
-                   "The value of Input(num_rows) should be non-negative int.");
+    PADDLE_ENFORCE_EQ(
+        num_rows >= 0, true,
+        platform::errors::InvalidArgument(
+            "The value of Input(num_rows) should be non-negative int."));
     auto num_columns = ctx->Attrs().Get<int64_t>("num_columns");
     if (num_columns == -1) num_columns = num_rows;
-    PADDLE_ENFORCE(
-        num_columns >= 0,
-        "The value of Input(num_columns) should be non-negative int.");
+    PADDLE_ENFORCE_EQ(
+        num_columns >= 0, true,
+        platform::errors::InvalidArgument(
+            "The value of Input(num_columns) should be non-negative int."));
     ctx->SetOutputDim("Out", {num_rows, num_columns});
   }

paddle/fluid/operators/gather.cu.h

Lines changed: 6 additions & 4 deletions

@@ -78,12 +78,14 @@ void GPUGather(const platform::DeviceContext& ctx, const Tensor& src,
   // check index of shape 1-D
   if (index.dims().size() == 1) {
     PADDLE_ENFORCE_GT(index.dims()[0], 0,
-                      "The index of gather_op should not be empty when the "
-                      "index's rank is 1.");
+                      platform::errors::InvalidArgument(
+                          "The index of gather_op should not be empty"
+                          "when the index's rank is 1."));
   } else if (index.dims().size() == 2) {
     PADDLE_ENFORCE_EQ(index.dims()[1], 1,
-                      " If the index's rank of gather_op is 2, the second "
-                      "dimension should be 1.");
+                      platform::errors::InvalidArgument(
+                          "If the index's rank of gather_op is 2,"
+                          " the second dimension should be 1."));
   }
 
   int index_size = index.dims()[0];

paddle/fluid/operators/gather.h

Lines changed: 24 additions & 12 deletions

@@ -36,15 +36,23 @@ using framework::Tensor;
 template <typename T, typename IndexT = int>
 void CPUGather(const platform::DeviceContext& ctx, const Tensor& src,
                const Tensor& index, Tensor* output) {
-  PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true);
+  PADDLE_ENFORCE_EQ(
+      platform::is_cpu_place(ctx.GetPlace()), true,
+      platform::errors::PreconditionNotMet("It should be running on the CPU."));
   // check index of shape 1-D
   if (index.dims().size() == 2) {
-    PADDLE_ENFORCE_EQ(index.dims()[1], 1,
-                      "index.dims()[1] should be 1 when index.dims().size() == "
-                      "2 in gather_op.");
+    PADDLE_ENFORCE_EQ(
+        index.dims()[1], 1,
+        platform::errors::InvalidArgument(
+            "index.dims()[1] should be 1 when index.dims().size() = 2"
+            "in gather_op, but received value is [%d].",
+            index.dims()[1]));
   } else {
     PADDLE_ENFORCE_EQ(index.dims().size(), 1,
-                      "index.dims().size() should be 1 or 2 in gather_op.");
+                      platform::errors::InvalidArgument(
+                          "index.dims().size() should be 1 or 2 in gather_op,"
+                          "but received shape's size is [%d].",
+                          index.dims().size()));
   }
   int64_t index_size = index.dims()[0];
 
@@ -69,8 +77,9 @@ void CPUGather(const platform::DeviceContext& ctx, const Tensor& src,
 template <typename T, typename IndexT = int>
 void CPUGatherNd(const platform::DeviceContext& ctx, const Tensor& input,
                  const Tensor& index, Tensor* output) {
-  PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
-                    "It should be running on the CPU");
+  PADDLE_ENFORCE_EQ(
+      platform::is_cpu_place(ctx.GetPlace()), true,
+      platform::errors::PreconditionNotMet("It should be running on the CPU."));
 
   auto index_dims = index.dims();
   auto index_dims_size = index_dims.size();
@@ -98,11 +107,14 @@ void CPUGatherNd(const platform::DeviceContext& ctx, const Tensor& input,
     int64_t temp = 1;
     for (int64_t j = end_size - 1; j >= 0; --j) {
       IndexT index_value = p_index[i * end_size + j];
-      PADDLE_ENFORCE_LT(index_value, input_dims[j],
-                        "Input(index[-1)] has wrong value, it is %d",
-                        index_value);
-      PADDLE_ENFORCE_GE(index_value, 0UL,
-                        "The value of Input(index) must be no less than 0");
+      PADDLE_ENFORCE_LT(
+          index_value, input_dims[j],
+          platform::errors::InvalidArgument(
+              "Input(index[-1)] has wrong value, it is [%d]", index_value));
+      PADDLE_ENFORCE_GE(
+          index_value, 0UL,
+          platform::errors::InvalidArgument(
+              "The value of Input(index) must be no less than 0"));
 
       index_ += (index_value * temp);
       temp *= input_dims[j];
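The gather checks also begin reporting the value that triggered the failure: the errors:: constructors accept printf-style format arguments, so the offending dimension or index is embedded in the message. A small sketch taken from the CPUGather hunk above (index is the index Tensor argument):

  // The received value is forwarded to the formatter, so the raised message
  // includes the actual dimension, e.g. [3], not just the expectation.
  PADDLE_ENFORCE_EQ(
      index.dims()[1], 1,
      platform::errors::InvalidArgument(
          "index.dims()[1] should be 1 when index.dims().size() = 2"
          "in gather_op, but received value is [%d].",
          index.dims()[1]));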

paddle/fluid/operators/gather_nd_op.cc

Lines changed: 10 additions & 5 deletions

@@ -27,11 +27,14 @@ class GatherNdOp : public framework::OperatorWithKernel {
 
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of GatherNdOp should not be null.");
+                      platform::errors::InvalidArgument(
+                          "Input(X) of GatherNdOp should not be null."));
     PADDLE_ENFORCE_EQ(ctx->HasInput("Index"), true,
-                      "Input(Index) of GatherNdOp should not be null.");
+                      platform::errors::InvalidArgument(
+                          "Input(Index) of GatherNdOp should not be null."));
     PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of GatherNdOp should not be null.");
+                      platform::errors::InvalidArgument(
+                          "Output(Out) of GatherNdOp should not be null."));
 
     auto x_dims = ctx->GetInputDim("X");
     auto x_dims_size = x_dims.size();
@@ -40,9 +43,11 @@ class GatherNdOp : public framework::OperatorWithKernel {
 
     PADDLE_ENFORCE_LE(
         index_dims[index_dims_size - 1], x_dims_size,
-        "Input(Index).shape[-1] should be no greater than Input(X).rank");
+        platform::errors::InvalidArgument(
+            "Input(Index).shape[-1] should be no greater than Input(X).rank"));
     PADDLE_ENFORCE_GE(index_dims_size, 2UL,
-                      "The rank of Input(Index) should be greater than 1");
+                      platform::errors::InvalidArgument(
+                          "The rank of Input(Index) should be greater than 1"));
 
     std::vector<int64_t> result_dims;
     // The result dims is

paddle/fluid/operators/gather_nd_op.cu

Lines changed: 22 additions & 14 deletions

@@ -25,7 +25,8 @@ class GatherNdOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const override {
     PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "This kernel only runs on GPU device.");
+                      platform::errors::PreconditionNotMet(
+                          "This kernel only runs on GPU device."));
     auto *x = ctx.Input<Tensor>("X");
     auto *index = ctx.Input<Tensor>("Index");
     auto *output = ctx.Output<Tensor>("Out");
@@ -35,12 +36,15 @@ class GatherNdOpCUDAKernel : public framework::OpKernel<T> {
     const auto &index_type = index->type();
     bool index_type_match = index_type == framework::proto::VarType::INT32 ||
                             index_type == framework::proto::VarType::INT64;
-    PADDLE_ENFORCE_EQ(
-        index_type_match, true,
-        "Index holds the wrong type, it holds %s, but desires to be %s or %s",
-        paddle::framework::DataTypeToString(index_type),
-        paddle::framework::DataTypeToString(framework::proto::VarType::INT32),
-        paddle::framework::DataTypeToString(framework::proto::VarType::INT64));
+    PADDLE_ENFORCE_EQ(index_type_match, true,
+                      platform::errors::InvalidArgument(
+                          "Index holds the wrong type, it holds [%s], but "
+                          "desires to be [%s] or [%s].",
+                          paddle::framework::DataTypeToString(index_type),
+                          paddle::framework::DataTypeToString(
+                              framework::proto::VarType::INT32),
+                          paddle::framework::DataTypeToString(
+                              framework::proto::VarType::INT64)));
     if (index_type == framework::proto::VarType::INT32) {
       GPUGatherNd<DeviceContext, T, int>(ctx, *x, *index, output);
     } else if (index_type == framework::proto::VarType::INT64) {
@@ -54,7 +58,8 @@ class GatherNdGradOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const override {
     PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
-                      "This kernel only runs on GPU device.");
+                      platform::errors::PreconditionNotMet(
+                          "This kernel only runs on GPU device."));
     auto *index = ctx.Input<Tensor>("Index");
     auto *dX = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto *dO = ctx.Input<Tensor>(framework::GradVarName("Out"));
@@ -70,12 +75,15 @@ class GatherNdGradOpCUDAKernel : public framework::OpKernel<T> {
     bool index_type_match = index_type == framework::proto::VarType::INT32 ||
                             index_type == framework::proto::VarType::INT64;
 
-    PADDLE_ENFORCE_EQ(
-        index_type_match, true,
-        "Index holds the wrong type, it holds %s, but desires to be %s or %s",
-        paddle::framework::DataTypeToString(index_type),
-        paddle::framework::DataTypeToString(framework::proto::VarType::INT32),
-        paddle::framework::DataTypeToString(framework::proto::VarType::INT64));
+    PADDLE_ENFORCE_EQ(index_type_match, true,
+                      platform::errors::InvalidArgument(
+                          "Index holds the wrong type, it holds [%s],"
+                          "but desires to be [%s] or [%s].",
+                          paddle::framework::DataTypeToString(index_type),
+                          paddle::framework::DataTypeToString(
+                              framework::proto::VarType::INT32),
+                          paddle::framework::DataTypeToString(
+                              framework::proto::VarType::INT64)));
 
     if (index_type == framework::proto::VarType::INT32) {
       GPUScatterNdAdd<DeviceContext, T, int>(ctx, *dO, *index, dX);
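Both gather_nd kernels (CUDA above, CPU in gather_nd_op.h below) keep the same dtype gate on the Index input; only the enforcement changes, routing the names of the actual and the allowed types through InvalidArgument. A condensed sketch of the reworked check, with index standing for the Index input tensor from these kernels:

  const auto &index_type = index->type();
  bool index_type_match = index_type == framework::proto::VarType::INT32 ||
                          index_type == framework::proto::VarType::INT64;
  // DataTypeToString turns the VarType enum into a readable name, so the
  // exception lists the received type next to the two accepted types.
  PADDLE_ENFORCE_EQ(index_type_match, true,
                    platform::errors::InvalidArgument(
                        "Index holds the wrong type, it holds [%s], but "
                        "desires to be [%s] or [%s].",
                        paddle::framework::DataTypeToString(index_type),
                        paddle::framework::DataTypeToString(
                            framework::proto::VarType::INT32),
                        paddle::framework::DataTypeToString(
                            framework::proto::VarType::INT64)));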

paddle/fluid/operators/gather_nd_op.h

Lines changed: 24 additions & 16 deletions

@@ -27,8 +27,9 @@ template <typename T>
 class GatherNdOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const override {
-    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
-                      "This kernel only runs on CPU.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(ctx.GetPlace()), true,
+        platform::errors::PreconditionNotMet("This kernel only runs on CPU."));
 
     auto *x = ctx.Input<Tensor>("X");
     auto *index = ctx.Input<Tensor>("Index");
@@ -40,12 +41,15 @@ class GatherNdOpKernel : public framework::OpKernel<T> {
     const auto &index_type = index->type();
     bool index_type_match = index_type == framework::proto::VarType::INT32 ||
                             index_type == framework::proto::VarType::INT64;
-    PADDLE_ENFORCE_EQ(
-        index_type_match, true,
-        "Index holds the wrong type, it holds %s, but desires to be %s or %s",
-        paddle::framework::DataTypeToString(index_type),
-        paddle::framework::DataTypeToString(framework::proto::VarType::INT32),
-        paddle::framework::DataTypeToString(framework::proto::VarType::INT64));
+    PADDLE_ENFORCE_EQ(index_type_match, true,
+                      platform::errors::InvalidArgument(
+                          "Index holds the wrong type, it holds [%s],"
+                          "but desires to be [%s] or [%s]",
+                          paddle::framework::DataTypeToString(index_type),
+                          paddle::framework::DataTypeToString(
+                              framework::proto::VarType::INT32),
+                          paddle::framework::DataTypeToString(
+                              framework::proto::VarType::INT64)));
     if (index_type == framework::proto::VarType::INT32) {
       CPUGatherNd<T, int>(ctx.device_context(), *x, *index, output);
     } else if (index_type == framework::proto::VarType::INT64) {
@@ -58,8 +62,9 @@ template <typename T>
 class GatherNdGradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const override {
-    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
-                      "This kernel only runs on CPU.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(ctx.GetPlace()), true,
+        platform::errors::PreconditionNotMet("This kernel only runs on CPU."));
     auto *index = ctx.Input<Tensor>("Index");
     auto *dX = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto *dO = ctx.Input<Tensor>(framework::GradVarName("Out"));
@@ -73,12 +78,15 @@ class GatherNdGradOpKernel : public framework::OpKernel<T> {
     const auto &index_type = index->type();
     bool index_type_match = index_type == framework::proto::VarType::INT32 ||
                             index_type == framework::proto::VarType::INT64;
-    PADDLE_ENFORCE_EQ(
-        index_type_match, true,
-        "Index holds the wrong type, it holds %s, but desires to be %s or %s",
-        paddle::framework::DataTypeToString(index_type),
-        paddle::framework::DataTypeToString(framework::proto::VarType::INT32),
-        paddle::framework::DataTypeToString(framework::proto::VarType::INT64));
+    PADDLE_ENFORCE_EQ(index_type_match, true,
+                      platform::errors::InvalidArgument(
+                          "Index holds the wrong type, it holds [%s],"
+                          "but desires to be [%s] or [%s]",
+                          paddle::framework::DataTypeToString(index_type),
+                          paddle::framework::DataTypeToString(
+                              framework::proto::VarType::INT32),
+                          paddle::framework::DataTypeToString(
+                              framework::proto::VarType::INT64)));
     if (index_type == framework::proto::VarType::INT32) {
       ScatterNdAdd<T, int32_t>(ctx, *dO, *index, dX);
     } else if (index_type == framework::proto::VarType::INT64) {

paddle/fluid/operators/gather_op.cc

Lines changed: 9 additions & 6 deletions

@@ -26,12 +26,15 @@ class GatherOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of GatherOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Index"),
-                   "Input(Index) of GatherOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of GatherOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of GatherOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Index"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(Index) of GatherOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(Out) of GatherOp should not be null."));
 
     auto index_dims = ctx->GetInputDim("Index");
     PADDLE_ENFORCE(index_dims.size() == 1 ||
