Skip to content

Commit a5bdeb0

Browse files
authored
replace TORCH_CHECK with IPEX_CHECK for fast fallback (#89)
* replace TORCH_CHECK with IPEX_CHECK for fast fallback
* quick path of IPEX_CHECK
1 parent 05e2754 commit a5bdeb0

File tree

3 files changed

+37
-23
lines changed

3 files changed

+37
-23
lines changed

torch_ipex/csrc/cpu/DevOPs.cpp

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -263,7 +263,7 @@ at::Tensor& dil_add_common(
263263
CHECK_DNNL_OP_PRE_COND(self);
264264
CHECK_DNNL_OP_PRE_COND(other);
265265

266-
TORCH_CHECK(self.sizes().equals(other.sizes()),
266+
IPEX_CHECK(self.sizes().equals(other.sizes()),
267267
"dil add not support broadcast yet");
268268

269269
dbl::comm::reorder_to_bf16_for_mix_prec(self);
@@ -308,7 +308,7 @@ at::Tensor& dil_mul_common(
308308
CHECK_DNNL_OP_PRE_COND(self);
309309
CHECK_DNNL_OP_PRE_COND(other);
310310

311-
TORCH_CHECK(self.sizes().equals(other.sizes()),
311+
IPEX_CHECK(self.sizes().equals(other.sizes()),
312312
"dil mul not support broadcast yet");
313313

314314
dbl::comm::reorder_to_bf16_for_mix_prec(self);
@@ -438,7 +438,7 @@ at::Tensor& dil_baddbmm_common(
438438

439439
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(batch1.dim() == 3 && batch2.dim() == 3);
440440
dil::dims inferred_size{batch1.size(0), batch1.size(1), batch2.size(2)};
441-
TORCH_CHECK(self.sizes().equals(inferred_size),
441+
IPEX_CHECK(self.sizes().equals(inferred_size),
442442
"dil baddbmm not support broadcast yet");
443443

444444
dbl::comm::reorder_to_bf16_for_mix_prec(self);
@@ -506,7 +506,7 @@ at::Tensor& dil_addmm_common(
506506

507507
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(mat1.dim() == 2 && mat2.dim() == 2);
508508
dil::dims inferred_size{mat1.size(0), mat2.size(1)};
509-
TORCH_CHECK(self.sizes().equals(inferred_size),
509+
IPEX_CHECK(self.sizes().equals(inferred_size),
510510
"dil addmm not support broadcast yet");
511511

512512
dbl::comm::reorder_to_bf16_for_mix_prec(self);
@@ -568,7 +568,7 @@ at::Tensor& dil_addbmm_common(
568568

569569
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(batch1.dim() == 3 && batch2.dim() == 3);
570570
dil::dims inferred_size{batch1.size(1), batch2.size(2)};
571-
TORCH_CHECK(self.sizes().equals(inferred_size),
571+
IPEX_CHECK(self.sizes().equals(inferred_size),
572572
"dil addbmm not support broadcast yet");
573573

574574
dbl::comm::reorder_to_bf16_for_mix_prec(self);
@@ -635,7 +635,7 @@ at::Tensor AtenIpexCPUDev::dil_linear(
635635
DEBUG("AtenIpexCPUDev::dil_linear\n");
636636
CHECK_DNNL_OP_PRE_COND(self);
637637
CHECK_DNNL_OP_PRE_COND(weight);
638-
TORCH_CHECK(self.dim() >= 2,
638+
IPEX_CHECK(self.dim() >= 2,
639639
"dil_linear: input needs to has dim at least 2, input dim ", self.dim());
640640

641641
dbl::comm::reorder_to_bf16_for_mix_prec(self);
@@ -672,7 +672,7 @@ at::Tensor AtenIpexCPUDev::dil_linear_fuse_relu(
672672
DEBUG("AtenIpexCPUDev::dil_linear\n");
673673
CHECK_DNNL_OP_PRE_COND(self);
674674
CHECK_DNNL_OP_PRE_COND(weight);
675-
TORCH_CHECK(self.dim() >= 2,
675+
IPEX_CHECK(self.dim() >= 2,
676676
"dil_linear: input needs to has dim at least 2, input dim ", self.dim());
677677

678678
dbl::comm::reorder_to_bf16_for_mix_prec(self);
@@ -791,7 +791,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> AtenIpexCPUDev::dil_linear_backwa
791791
std::tuple<at::Tensor, at::Tensor> _dil_dropout(
792792
const at::Tensor& self,
793793
double ratio) {
794-
TORCH_CHECK(
794+
IPEX_CHECK(
795795
ratio >= 0 && ratio < 1 && self.numel() != 0,
796796
"dropout probability has to be between 0 and 1, but got ",
797797
ratio);
@@ -845,9 +845,9 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> AtenIpexCPUDev::dil_native_batch_
845845
DEBUG("AtenIpexCPUDev::dil_native_batch_norm\n");
846846
CHECK_DNNL_OP_PRE_COND(input);
847847
CHECK_DNNL_OP_PRE_COND(weight);
848-
TORCH_CHECK(input.dim() == 4 || input.dim() == 5,
848+
IPEX_CHECK(input.dim() == 4 || input.dim() == 5,
849849
"mkldnn_batch_norm: currently mkldnn only support 2d and 3d batchnorm");
850-
TORCH_CHECK(weight.defined() && bias.defined(),
850+
IPEX_CHECK(weight.defined() && bias.defined(),
851851
"mkldnn_batch_norm: currently mkldnn only support affine model");
852852

853853
dbl::comm::reorder_to_bf16_for_mix_prec(input);
@@ -906,7 +906,7 @@ std::tuple<at::Tensor, at::Tensor, at::Tensor> AtenIpexCPUDev::dil_native_batch_
906906
CHECK_DNNL_OP_PRE_COND(input);
907907
CHECK_DNNL_OP_PRE_COND(weight);
908908

909-
TORCH_CHECK(train, "mkldnn_batch_norm_backward: currently mkldnn only support train model");
909+
IPEX_CHECK(train, "mkldnn_batch_norm_backward: currently mkldnn only support train model");
910910
auto grad_output_contiguous = grad_output.is_contiguous() ? grad_output : grad_output.contiguous();
911911

912912
dbl::comm::reorder_to_bf16_for_mix_prec(grad_output);
@@ -960,7 +960,7 @@ at::Tensor AtenIpexCPUDev::dil_avg_pool2d(
960960
c10::optional<int64_t> divisor_override) {
961961
DEBUG("AtenIpexCPUDev::dil_avg_pool2d\n");
962962
CHECK_DNNL_OP_PRE_COND(input);
963-
TORCH_CHECK(!divisor_override.has_value(),
963+
IPEX_CHECK(!divisor_override.has_value(),
964964
"dil_avg_pooling operator does not support divisor");
965965

966966
dbl::comm::reorder_to_bf16_for_mix_prec(input);
@@ -986,7 +986,7 @@ at::Tensor AtenIpexCPUDev::dil_avg_pool3d(
986986
c10::optional<int64_t> divisor_override) {
987987
DEBUG("AtenIpexCPUDev::dil_avg_pool3d\n");
988988
CHECK_DNNL_OP_PRE_COND(input);
989-
TORCH_CHECK(!divisor_override.has_value(),
989+
IPEX_CHECK(!divisor_override.has_value(),
990990
"dil_avg_pooling operator does not support divisor");
991991

992992
dbl::comm::reorder_to_bf16_for_mix_prec(input);
@@ -1016,8 +1016,8 @@ at::Tensor AtenIpexCPUDev::dil_adaptive_avg_pool2d(
10161016
for (int64_t i = 2; i < input.dim(); ++i) {
10171017
auto s1 = input.size(i);
10181018
auto s2 = output_size_vec[i - 2];
1019-
TORCH_CHECK(s2 != 0, "output size can not be zero");
1020-
TORCH_CHECK(
1019+
IPEX_CHECK(s2 != 0, "output size can not be zero");
1020+
IPEX_CHECK(
10211021
s1 % s2 == 0,
10221022
"input size is not divisible by the output size is not supported yet");
10231023
kernel_size[i - 2] = s1 / s2;
@@ -1144,8 +1144,8 @@ at::Tensor AtenIpexCPUDev::dil_adaptive_avg_pool2d_backward(
11441144
for (size_t i = 2; i < input.dim(); ++i) {
11451145
auto s1 = input.size(i);
11461146
auto s2 = output_size_vec[i];
1147-
TORCH_CHECK(s2 != 0, "output size can not be zero");
1148-
TORCH_CHECK(
1147+
IPEX_CHECK(s2 != 0, "output size can not be zero");
1148+
IPEX_CHECK(
11491149
s1 % s2 == 0,
11501150
"input size is not divisible by the output size is not supported yet");
11511151
kernel_size[i - 2] = s1 / s2;
@@ -1365,7 +1365,7 @@ at::Tensor AtenIpexCPUDev::dil_transpose(const at::Tensor & self, int64_t dim0,
13651365
dbl::comm::reorder_to_bf16_for_mix_prec(self);
13661366

13671367
dil::tensor x = dbl::comm::try_gen_dil_tensor(self);
1368-
TORCH_CHECK(x.ndims() > 0, "DNNL transpose cannot generate DNNL tensor for the input aten Tensor. input tensor dim: ", self.dim());
1368+
IPEX_CHECK(x.ndims() > 0, "DNNL transpose cannot generate DNNL tensor for the input aten Tensor. input tensor dim: ", self.dim());
13691369
dil::tensor y;
13701370
std::vector<int> axes(x.ndims());
13711371
std::iota(axes.begin(), axes.end(), 0);
@@ -1379,7 +1379,7 @@ at::Tensor AtenIpexCPUDev::dil_transpose(const at::Tensor & self, int64_t dim0,
13791379
inline void check_cat_no_zero_dim(at::TensorList tensors) {
13801380
for (size_t i = 0; i < tensors.size(); ++i) {
13811381
auto& t = tensors[i];
1382-
TORCH_CHECK(t.dim() > 0,
1382+
IPEX_CHECK(t.dim() > 0,
13831383
"zero-dimensional tensor (at position ", i, ") cannot be concatenated");
13841384
}
13851385
}
@@ -1394,7 +1394,7 @@ at::Tensor& AtenIpexCPUDev::dil_cat_out(at::Tensor& result, at::TensorList tenso
13941394
dim = at::legacy_cat_wrap_dim(dim, tensors);
13951395
std::vector<dil::tensor> x;
13961396
for (auto i =0; i< tensors.size(); i++) {
1397-
TORCH_CHECK(!(tensors[i].dim() == 1 && tensors[i].sizes()[0] == 0),
1397+
IPEX_CHECK(!(tensors[i].dim() == 1 && tensors[i].sizes()[0] == 0),
13981398
"Currently Mkldnn cat operators do not support empty tensor.");
13991399

14001400
dbl::comm::reorder_to_bf16_for_mix_prec(tensors[i]);
@@ -1416,7 +1416,7 @@ at::Tensor AtenIpexCPUDev::dil_cat(at::TensorList tensors, int64_t dim) {
14161416
std::vector<dil::tensor> x;
14171417
at::Tensor tensors_contiguous[tensors.size()];
14181418
for (auto i = 0; i < tensors.size(); i++) {
1419-
TORCH_CHECK(!(tensors[i].dim() == 1 && tensors[i].sizes()[0] == 0),
1419+
IPEX_CHECK(!(tensors[i].dim() == 1 && tensors[i].sizes()[0] == 0),
14201420
"Currently Mkldnn cat operators do not support empty tensor.");
14211421
tensors_contiguous[i] = tensors[i].is_contiguous() ? tensors[i] : tensors[i].contiguous();
14221422
dbl::comm::reorder_to_bf16_for_mix_prec(tensors_contiguous[i]);
@@ -1439,7 +1439,7 @@ std::vector<at::Tensor> AtenIpexCPUDev::dil_split_with_sizes(const at::Tensor& s
14391439
std::vector<int32_t> sizes;
14401440
for (auto i = 0; i < num_splits; i++) {
14411441
auto length = split_sizes[i];
1442-
TORCH_CHECK(length >= 0,
1442+
IPEX_CHECK(length >= 0,
14431443
"split_with_sizes expects split_sizes have only non-negative ",
14441444
"entries, but got split_sizes=", split_sizes);
14451445
sizes.push_back((int32_t)length);

torch_ipex/csrc/cpu/FusionOPs.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,7 @@ at::Tensor AtenIpexJITDev::dil_linear_fuse_relu(
149149
const at::Tensor& self,
150150
const at::Tensor& weight,
151151
const at::Tensor& bias) {
152-
TORCH_CHECK(self.dim() >= 2,
152+
IPEX_CHECK(self.dim() >= 2,
153153
"dil_linear: input needs to has dim at least 2, input dim ", self.dim());
154154
auto input_contiguous = self.contiguous();
155155
auto weight_contiguous = weight.contiguous();

torch_ipex/csrc/utils.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,4 +23,18 @@ bool check_tensor_own_whole_storage(const at::Tensor& tensor);
2323
bool check_tensor_own_shade_context(const at::Tensor& tensor);
2424
bool check_aten_dil_shape_info(const at::Tensor& ipex_tensor, const dil::tensor &dil_tensor);
2525

26+
// A light-weight TORCH_CHECK that does not collect any backtrace info
27+
#if defined(_DEBUG)
28+
#define IPEX_CHECK(cond, ...) \
29+
if (!(cond)) { \
30+
throw std::runtime_error( \
31+
c10::detail::if_empty_then( \
32+
c10::str(__VA_ARGS__), \
33+
"Expected " #cond " to be true, but got false.")); \
34+
}
35+
#else
36+
// quick path of IPEX_CHECK without reporting message
37+
#define IPEX_CHECK(cond, ...) \
38+
if (!(cond)) { throw std::exception(); }
39+
#endif
2640
} // namespace torch_ipex

0 commit comments

Comments (0)