@@ -46,7 +46,11 @@ class SplitSelectedRowsOpKernel : public framework::OpKernel<T> {
46
46
// split rows index into output sparse vars
47
47
for (size_t i = 0 ; i < x_rows.size (); ++i) {
48
48
auto & id = x_rows[i];
49
- PADDLE_ENFORCE_LT (id, height);
49
+ PADDLE_ENFORCE_LT (id, height,
50
+ platform::errors::OutOfRange (
51
+ " Each row_id in x.rows must be less than x.height. "
52
+ " But received x.rows[%d] = %d, x.height = %d" ,
53
+ i, id, height));
50
54
int out_idx = GetSectionIndex (id, abs_sections);
51
55
outs_rows_idx[out_idx].push_back (id);
52
56
outs_dense_idx[out_idx].push_back (i);
@@ -63,7 +67,12 @@ class SplitSelectedRowsOpKernel : public framework::OpKernel<T> {
63
67
if (rows_idx.size () > 0 ) {
64
68
for (auto idx : rows_idx) {
65
69
auto id_offset = idx - abs_sections[i];
66
- PADDLE_ENFORCE_LT (id_offset, height_sections[i]);
70
+ PADDLE_ENFORCE_LT (
71
+ id_offset, height_sections[i],
72
+ platform::errors::OutOfRange (" Each row_id in out.rows must be "
73
" less than out.height. But received "
74
+ " out.rows = [%d], out.height = [%d]" ,
75
+ id_offset, height_sections[i]));
67
76
outs[i]->mutable_rows ()->push_back (id_offset);
68
77
}
69
78
auto dst = outs[i]->mutable_value ()->mutable_data <T>(ctx.GetPlace ());
@@ -80,13 +89,17 @@ class SplitSelectedRowsOpKernel : public framework::OpKernel<T> {
80
89
src + outs_dense_idx[i][j] * row_numel,
81
90
sizeof (T) * row_numel, stream);
82
91
#else
83
- PADDLE_THROW (" Paddle is not compiled with GPU" );
92
+ PADDLE_THROW (platform::errors::Unavailable (
93
+ " Paddle is not compiled with CUDA. Cannot visit cuda device" ));
84
94
#endif
85
95
}
86
96
}
87
97
}
88
98
PADDLE_ENFORCE_EQ (rows_idx.size (), outs[i]->rows ().size (),
89
- " rows should has the same size with tensor dim 0" );
99
+ platform::errors::InvalidArgument (
100
+ " rows should have the same size as tensor dim 0. "
101
+ " But received rows = %d, tensor's dim[0] = %d." ,
102
+ rows_idx.size (), outs[i]->rows ().size ()));
90
103
}
91
104
}
92
105
};
0 commit comments