diff --git a/extension/llm/custom_ops/op_sdpa.cpp b/extension/llm/custom_ops/op_sdpa.cpp index d23572d8d04..f0a7775e803 100644 --- a/extension/llm/custom_ops/op_sdpa.cpp +++ b/extension/llm/custom_ops/op_sdpa.cpp @@ -594,46 +594,46 @@ bool validate_flash_attention_args( const Tensor& key, const Tensor& value, const optional& attn_mask) { - ET_LOG_MSG_AND_RETURN_IF_FALSE(query.dim() == 4, "query must be a 4D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE(key.dim() == 4, "key must be a 4D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE(value.dim() == 4, "value must be a 4D tensor"); + ET_CHECK_OR_RETURN_FALSE(query.dim() == 4, "query must be a 4D tensor"); + ET_CHECK_OR_RETURN_FALSE(key.dim() == 4, "key must be a 4D tensor"); + ET_CHECK_OR_RETURN_FALSE(value.dim() == 4, "value must be a 4D tensor"); // Sizes - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( (query.size(3) == value.size(3)) && (key.size(3) == value.size(3)), "scaled_dot_product_attention_flash_attention: Q/K/V should have the same head size"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( (query.scalar_type() == ScalarType::Float), "Query must be Float type"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( (query.scalar_type() == key.scalar_type()) && (query.scalar_type() == value.scalar_type()), "Key and Value must have the same data type as Query"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( !attn_mask.has_value() || attn_mask.value().dim() == 2, "Attention mask must be a 2D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( !attn_mask.has_value() || attn_mask.value().scalar_type() == query.scalar_type(), "Attention mask must be a 2D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( is_contiguous_dim_order(query.dim_order().data(), query.dim()), "key cache must be in contiguous dim order"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( is_contiguous_dim_order(key.dim_order().data(), key.dim()), "value cache must be in contiguous dim order"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( is_contiguous_dim_order(value.dim_order().data(), value.dim()), "value cache must be in contiguous dim order"); if (attn_mask.has_value()) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( is_contiguous_dim_order( attn_mask.value().dim_order().data(), attn_mask.value().dim()), "value cache must be in contiguous dim order"); @@ -647,21 +647,19 @@ bool validate_cache_params( const Tensor& v_cache, int64_t start_pos, int64_t seq_length) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( - k_cache.dim() == 4, "kcache must be a 4D tensor"); + ET_CHECK_OR_RETURN_FALSE(k_cache.dim() == 4, "kcache must be a 4D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( - v_cache.dim() == 4, "v_cache must be a 4D tensor"); + ET_CHECK_OR_RETURN_FALSE(v_cache.dim() == 4, "v_cache must be a 4D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( start_pos < k_cache.size(1), "start_pos must be less than key cache at dim 1"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( start_pos < v_cache.size(1), "start_pos must be less than value cache at dim 1"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( (start_pos + seq_length) <= k_cache.size(1), "start_post + seq_length must be less than max seq length supported by key cache." 
"start pos: %" PRId64 ", seq_length: %" PRId64 @@ -671,7 +669,7 @@ bool validate_cache_params( seq_length, k_cache.size(1)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( (start_pos + seq_length) <= v_cache.size(1), "start_post + seq_length must be less than max seq length supported by key cache." "start pos: %" PRId64 ", seq_length: %" PRId64 @@ -682,11 +680,11 @@ bool validate_cache_params( v_cache.size(1)); // Make sure they are in contiguous dim order - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( is_contiguous_dim_order(k_cache.dim_order().data(), k_cache.dim()), "key cache must be in contiguous dim order"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( is_contiguous_dim_order(v_cache.dim_order().data(), v_cache.dim()), "value cache must be in contiguous dim order"); diff --git a/extension/llm/custom_ops/op_update_cache.cpp b/extension/llm/custom_ops/op_update_cache.cpp index bbc0190dab1..323b7a65ddb 100644 --- a/extension/llm/custom_ops/op_update_cache.cpp +++ b/extension/llm/custom_ops/op_update_cache.cpp @@ -25,17 +25,17 @@ bool validate_cache_params( const Tensor& quantized_cache, int64_t start_pos, int64_t seq_length) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( quantized_cache.dim() == 4, "quantized cache must be a 4D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( quantized_value.dim() == 4, "quantized_value must be a 4D tensor"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( start_pos < quantized_cache.size(1), "start_pos must be less than cache size at dim 1"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( (start_pos + seq_length) <= quantized_cache.size(1), "start_post + seq_length must be less than max seq length supported by cache." "start pos: %" PRId64 ", seq_length: %" PRId64 @@ -46,12 +46,12 @@ bool validate_cache_params( quantized_cache.size(1)); // Make sure they are in contiguous dim order - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( is_contiguous_dim_order( quantized_cache.dim_order().data(), quantized_cache.dim()), "quantized cache must be in contiguous dim order"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( is_contiguous_dim_order( quantized_value.dim_order().data(), quantized_value.dim()), "quantized value must be in contiguous dim order"); diff --git a/kernels/optimized/cpu/op_bmm.cpp b/kernels/optimized/cpu/op_bmm.cpp index 21ae7dfca90..5e7fa1dd839 100644 --- a/kernels/optimized/cpu/op_bmm.cpp +++ b/kernels/optimized/cpu/op_bmm.cpp @@ -31,39 +31,38 @@ namespace { // Verifies that the parameters are valid. 
bool check_bmm_out_args(const Tensor& self, const Tensor& mat2, Tensor& out) { // Ensure dimensions is 3 for all input and out - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( self.dim() == mat2.dim(), "self.dim() %zd != mat2.dim() %zd", self.dim(), mat2.dim()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( self.dim() == out.dim(), "self.dim() %zd != out.dim() %zd", self.dim(), out.dim()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( - self.dim() == 3, "self.dim() %zd != 3", self.dim()); + ET_CHECK_OR_RETURN_FALSE(self.dim() == 3, "self.dim() %zd != 3", self.dim()); // Ensure batch larger than or equals to 0 - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( self.size(0) >= 0, "self.size(0) %zd < 0", self.size(0)); // Ensure batches are the same - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( self.size(0) == mat2.size(0), "self.size(0) %zd != mat2.size(0) %zd", self.size(0), mat2.size(0)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( self.size(0) == out.size(0), "self.size(0) %zd != out.size(0) %zd", self.size(0), out.size(0)); // Ensure the out size is compatible with input tensors - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( mat2.size(2) == out.size(2), "mat2.size(2) %zd != out.size(2) %zd", mat2.size(2), out.size(2)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( self.size(1) == out.size(1), "self.size(1) %zd != out.size(1) %zd", self.size(1), diff --git a/kernels/portable/cpu/op_convolution_backward.cpp b/kernels/portable/cpu/op_convolution_backward.cpp index 7884ea0c44c..cd635cda8f9 100644 --- a/kernels/portable/cpu/op_convolution_backward.cpp +++ b/kernels/portable/cpu/op_convolution_backward.cpp @@ -38,9 +38,9 @@ bool check_convolution_backward_args( Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( transposed == false, "Transposed Convolution Backward not supported yet"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( weight.dim() == 4, "Only 2D Convolution Backward supported for now"); ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(weight, input)); @@ -58,7 +58,7 @@ bool check_convolution_backward_args( ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(grad_bias, input)); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( check_convolution_args( input, weight, @@ -89,7 +89,7 @@ bool check_convolution_backward_args( ET_LOG_AND_RETURN_IF_FALSE( output_size_is_valid({output_sizes, output_ndim}, input.dim() - 2)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( grad_output.dim() == input.dim(), "grad_output should have same number of dimensions as input"); diff --git a/kernels/portable/cpu/op_linear_scratch_example.cpp b/kernels/portable/cpu/op_linear_scratch_example.cpp index b217e9ad942..eae2417fe32 100644 --- a/kernels/portable/cpu/op_linear_scratch_example.cpp +++ b/kernels/portable/cpu/op_linear_scratch_example.cpp @@ -40,13 +40,13 @@ bool check_linear_scratch_example_args( const optional& bias, Tensor& out, Tensor& scratch) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( input.size(1) == weight.size(1), "Unexpected weight size 1"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( scratch.size(0) == input.size(0), "Unexpected scratch size 0"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( scratch.size(1) == weight.size(0), "Unexpected scratch size 1"); return true; diff --git a/kernels/portable/cpu/op_repeat.cpp 
b/kernels/portable/cpu/op_repeat.cpp index 8b64eefde31..dc9a7232152 100644 --- a/kernels/portable/cpu/op_repeat.cpp +++ b/kernels/portable/cpu/op_repeat.cpp @@ -23,7 +23,7 @@ bool calculate_output_size( Tensor::SizesType* out_sizes_ptr) { ET_LOG_AND_RETURN_IF_FALSE(repeats.size() < kTensorDimensionLimit); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( repeats.size() >= self_sizes.size(), "Repeats vector size is %zu must be >= self_sizes %zu.", repeats.size(), diff --git a/kernels/portable/cpu/op_repeat_interleave.cpp b/kernels/portable/cpu/op_repeat_interleave.cpp index c8a84e8c748..61c9fbfdb82 100644 --- a/kernels/portable/cpu/op_repeat_interleave.cpp +++ b/kernels/portable/cpu/op_repeat_interleave.cpp @@ -18,12 +18,12 @@ bool check_repeat_interleave_args( int64_t output_size_value, int64_t repeats_sum, Tensor& out) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( repeats.scalar_type() == ScalarType::Int || repeats.scalar_type() == ScalarType::Long, "repeats must be int or long"); - ET_LOG_MSG_AND_RETURN_IF_FALSE(repeats.dim() == 1, "repeats must be 1D"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE(repeats.dim() == 1, "repeats must be 1D"); + ET_CHECK_OR_RETURN_FALSE( output_size_value == repeats_sum, "output_size, if provided, must be equal to repeats.sum()"); ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(repeats, out)); @@ -31,13 +31,13 @@ bool check_repeat_interleave_args( if (repeats.scalar_type() == ScalarType::Long) { const int64_t* const repeats_data = repeats.const_data_ptr(); for (size_t i = 0; i < repeats.numel(); ++i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( repeats_data[i] >= 0, "repeats cannot be negative"); } } else { const int32_t* const repeats_data = repeats.const_data_ptr(); for (size_t i = 0; i < repeats.numel(); ++i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( repeats_data[i] >= 0, "repeats cannot be negative"); } } diff --git a/kernels/portable/cpu/op_topk.cpp b/kernels/portable/cpu/op_topk.cpp index 987e974bbf5..e6ba9afef2c 100644 --- a/kernels/portable/cpu/op_topk.cpp +++ b/kernels/portable/cpu/op_topk.cpp @@ -28,7 +28,7 @@ bool check_topk_args( if (dim < 0) { dim += nonzero_dim(in); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( k >= 0 && k <= nonempty_size(in, dim), "selected index k out of range"); return true; } diff --git a/kernels/portable/cpu/util/activation_ops_util.cpp b/kernels/portable/cpu/util/activation_ops_util.cpp index 908758a2e36..70be6367c76 100644 --- a/kernels/portable/cpu/util/activation_ops_util.cpp +++ b/kernels/portable/cpu/util/activation_ops_util.cpp @@ -17,7 +17,7 @@ namespace executor { bool check_gelu_args(const Tensor& in, string_view approximate, Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); ET_LOG_AND_RETURN_IF_FALSE(in.scalar_type() != ScalarType::Bool); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( approximate == "tanh" || approximate == "none", "Invalid approximation format: %.*s for gelu", static_cast(approximate.length()), @@ -32,7 +32,7 @@ bool check_glu_args(const Tensor& in, int64_t dim, Tensor& out) { const size_t non_negative_dim = dim < 0 ? 
dim + in.dim() : dim; const size_t dim_size = in.size(non_negative_dim); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( dim_size % 2 == 0, "Halving dimension must be even, but dimension %zd is size %zd", non_negative_dim, @@ -40,7 +40,7 @@ bool check_glu_args(const Tensor& in, int64_t dim, Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(tensor_is_floating_type(out)); ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_rank(in, out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out.size(non_negative_dim) == dim_size / 2, "output tensor must have half the size of the input tensor along the specified dimension."); @@ -73,7 +73,7 @@ bool check_log_softmax_args( int64_t dim, bool half_to_float, Tensor& out) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( !half_to_float, "half to float conversion is not supported on CPU"); ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); diff --git a/kernels/portable/cpu/util/advanced_index_util.cpp b/kernels/portable/cpu/util/advanced_index_util.cpp index cc205df0e43..68faa192b44 100644 --- a/kernels/portable/cpu/util/advanced_index_util.cpp +++ b/kernels/portable/cpu/util/advanced_index_util.cpp @@ -24,7 +24,7 @@ bool check_indices_dtypes(TensorOptList indices) { if (indices[i].has_value()) { const Tensor& index = indices[i].value(); ScalarType ix_type = index.scalar_type(); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( ix_type == ScalarType::Long || ix_type == ScalarType::Int || ix_type == ScalarType::Byte || ix_type == ScalarType::Bool, "Index tensors should be Long, Int, Byte or Bool"); @@ -47,7 +47,7 @@ bool check_mask_indices(const Tensor& in, TensorOptList indices) { if (indices[i].has_value()) { const Tensor& index = indices[i].value(); if (is_mask_index(index)) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( index.dim() > 0, "Zero-dimensional mask index not allowed"); for (auto j = 0; j < index.dim(); j++) { if (index.size(j) != in.size(in_i + j)) { @@ -156,7 +156,7 @@ int64_t query_integral_index( bool check_index_args(const Tensor& in, TensorOptList indices, Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); ET_LOG_AND_RETURN_IF_FALSE(check_indices_dtypes(indices)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( indices.size() <= in.dim(), "Indexing too many dimensions"); ET_LOG_AND_RETURN_IF_FALSE(check_mask_indices(in, indices)); return true; @@ -197,8 +197,7 @@ bool get_indices_broadcast_shape( } else if (rev_ix_sizes[0] == 1) { rev_ix_sizes[0] = len; } else if (len != 1 && rev_ix_sizes[0] != len) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( - false, "Broadcast of mask index failed."); + ET_CHECK_OR_RETURN_FALSE(false, "Broadcast of mask index failed."); } } else { for (size_t j = 0; j < index.dim(); j++) { @@ -209,7 +208,7 @@ bool get_indices_broadcast_shape( } else if (rev_ix_sizes[j] == 1) { rev_ix_sizes[j] = rev_j_size; } else if (rev_j_size != 1 && rev_ix_sizes[j] != rev_j_size) { - ET_LOG_MSG_AND_RETURN_IF_FALSE(false, "Broadcast of index failed."); + ET_CHECK_OR_RETURN_FALSE(false, "Broadcast of index failed."); } } } @@ -290,11 +289,11 @@ bool get_index_out_target_size( size_t num_null_indices = get_num_null_indices(indices); size_t num_indexed_dims = get_num_indexed_dims(indices); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( num_null_indices + num_indexed_dims <= in.dim(), "Indexing too many dimensions"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + 
ET_CHECK_OR_RETURN_FALSE( in.dim() + broadcast_ndim - num_indexed_dims <= kTensorDimensionLimit, "Out tensor would exceed number of allowed dimensions"); @@ -441,7 +440,7 @@ bool get_in_coord( if (index_val < 0) { index_val += in.size(i); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( index_val >= 0 && index_val < in.size(i), "Index %" PRId64 " is out of bounds for input dimension %zd with size %zd.", diff --git a/kernels/portable/cpu/util/copy_ops_util.cpp b/kernels/portable/cpu/util/copy_ops_util.cpp index 78b66b05f22..f0e1d8b30d2 100644 --- a/kernels/portable/cpu/util/copy_ops_util.cpp +++ b/kernels/portable/cpu/util/copy_ops_util.cpp @@ -44,16 +44,16 @@ bool check_as_strided_copy_args( optional storage_offset, Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( size.size() == stride.size(), "mismatch in length of strides and shape"); for (const auto& val : stride) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( val >= 0, "as_strided: Negative strides are not supported at the moment"); } int64_t offset = storage_offset.has_value() ? storage_offset.value() : 0; - ET_LOG_MSG_AND_RETURN_IF_FALSE(offset >= 0, "Negative storage offset"); + ET_CHECK_OR_RETURN_FALSE(offset >= 0, "Negative storage offset"); // Check that the requested storage is within bounds of input storage size_t storage_size_bytes = @@ -63,7 +63,7 @@ bool check_as_strided_copy_args( return true; } size_t new_storage_size_bytes = in.nbytes(); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( storage_size_bytes + storage_offset_bytes <= new_storage_size_bytes, "Requiring a storage size of %zd are out of bounds for storage of size %zd", storage_size_bytes + storage_offset_bytes, @@ -159,17 +159,17 @@ bool check_expand_copy_args( Tensor& out) { (void)out; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( implicit == false, "This operator is not implemented for when implicit == true."); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( expand_sizes.size() >= input.sizes().size(), "The number of sizes provided (%zu) must at least be equal to the number of dimensions in the tensor (%zu)", expand_sizes.size(), input.sizes().size()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( expand_sizes.size() <= kTensorDimensionLimit, "The number of expanded dims (%zu) exceeds the configured maximum (%zu). Increase this limit.", expand_sizes.size(), @@ -198,7 +198,7 @@ bool get_expand_copy_out_target_size( // -1 can use for replacing any corresponding dimension output_sizes[j] = self_sizes[i]; } else if (self_sizes[i] != 1) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( expand_sizes[j] == self_sizes[i], "The expanded size of the tensor (%zu) must match the existing size (%zu) at non-singleton dimension %zu.", (size_t)expand_sizes[j], @@ -211,7 +211,7 @@ bool get_expand_copy_out_target_size( while (j > 0) { --j; output_sizes[j] = expand_sizes[j]; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( expand_sizes[j] >= 0, "The expanded size of the tensor (%zu) isn't allowed in a leading, non-existing dimension %zu", (size_t)expand_sizes[j], @@ -241,7 +241,7 @@ bool check_permute_copy_args(const Tensor& in, IntArrayRef dims, Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(dim < kTensorDimensionLimit && dim >= 0); // Check that the dimension hasn't been seen previously. 
- ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( dim_exist[dim] == false, "duplicate dims are not allowed."); dim_exist[dim] = true; @@ -251,13 +251,13 @@ bool check_permute_copy_args(const Tensor& in, IntArrayRef dims, Tensor& out) { } bool check_unbind_copy_args(const Tensor& in, int64_t dim, TensorList out) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( in.dim() > 0, "in must have at least one dimension; saw %zd", in.dim()); ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(dim, in.dim())); const ssize_t dim_size = in.size(dim); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( dim_size == out.size(), "out tensorlist's length %zd must equal unbind dim %" PRId64 " size = %zd.", @@ -268,7 +268,7 @@ bool check_unbind_copy_args(const Tensor& in, int64_t dim, TensorList out) { // Validate each output. for (size_t i = 0; i < out.size(); ++i) { // All output dtypes must be the same. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out[i].scalar_type() == out[0].scalar_type(), "out[%zu] dtype %" PRId8 " != out[0] dtype %" PRId8, i, @@ -276,7 +276,7 @@ bool check_unbind_copy_args(const Tensor& in, int64_t dim, TensorList out) { static_cast(out[0].scalar_type())); // output tensor must have # of dims = in.dim() -1 - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out[i].dim() == (in.dim() - 1), "out[%zu] dim %zd != in dim %zd", i, @@ -286,7 +286,7 @@ bool check_unbind_copy_args(const Tensor& in, int64_t dim, TensorList out) { // Check the shape of the output. for (ssize_t d = 0, out_d = 0; d < in.dim(); ++d) { if (d != dim) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out[i].size(out_d) == in.size(d), "out[%zu].size(%zd) %zd != in.size(%zd) %zd", i, @@ -421,19 +421,19 @@ bool check_split_with_sizes_copy_args( ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(in, 1)); ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( split_sizes.size() == out.size(), "Number of split sizes must match the number of output tensors"); int64_t sum = 0; for (int i = 0; i < split_sizes.size(); i++) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( split_sizes[i] >= 0, "All split sizes must be non negative."); sum += split_sizes[i]; } const ssize_t dim_size = in.size(dim); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( sum == dim_size, "Sum of split sizes does not match input size at given dim"); @@ -506,7 +506,7 @@ bool check_squeeze_copy_dims_args( if (i != j) { const int64_t dim_temp = dims[j] < 0 ? 
dims[j] + nonzero_dim(in) : dims[j]; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( dim != dim_temp, "dim %" PRId64 " appears multiple times in dims!", dim); @@ -612,22 +612,22 @@ bool check_split_copy_args( int64_t split_size, int64_t dim, TensorList out) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( input.dim() > 0, "input must have at least one dimension; saw %zd", input.dim()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( dim >= 0 && dim < input.dim(), "dim %" PRId64 " out of range [0,%zd)", dim, input.dim()); const ssize_t dim_size = input.size(dim); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( split_size >= 0, "split_size %" PRId64 " must be non-negative", split_size); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( split_size > 0 || dim_size == 0, "split_size is zero but input.size(%" PRId64 ") %zd is non-zero", dim, @@ -646,7 +646,7 @@ bool check_split_copy_args( // Note that this also handles the case where split_size == 0, avoiding a // division by zero in the other branch. When dim_size == 0 && split_size == // 0, core PyTorch expects 1 output element. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out.size() == 1, "Unexpected out.size() %zu: should be 1 because split_size %" PRId64 " >= input.size(%" PRId64 ") %zd", @@ -657,7 +657,7 @@ bool check_split_copy_args( remainder = dim_size; } else { int64_t expected_out_len = (dim_size + split_size - 1) / split_size; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out.size() == expected_out_len, "Unexpected out.size() %zu: ceil(input.size(%" PRId64 ")=%zd" @@ -676,7 +676,7 @@ bool check_split_copy_args( // Validate each output. for (size_t i = 0; i < out.size(); ++i) { // All output dtypes must be the same. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out[i].scalar_type() == out[0].scalar_type(), "out[%zu] dtype %" PRId8 " != out[0] dtype %" PRId8, i, @@ -684,7 +684,7 @@ bool check_split_copy_args( static_cast(out[0].scalar_type())); // All outputs must have the same number of dimensions as the input. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out[i].dim() == input.dim(), "out[%zu] dim %zd != input dim %zd", i, @@ -698,7 +698,7 @@ bool check_split_copy_args( if (i < out.size() - 1) { // All outputs except the final one: split dimension should be // split_size. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out[i].size(d) == split_size, "out[%zu].size(%zd) %zd != split_size %" PRId64, i, @@ -708,7 +708,7 @@ bool check_split_copy_args( } else { // The final output: split dimension should be the remainder of // split_size. 
- ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out[i].size(d) == remainder, "out[%zu].size(%zd) %zd != remainder %" PRId64, i, @@ -811,7 +811,7 @@ bool check_unsqueeze_copy_args( } if (d < dim_normalized) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( input.size(d) == out.size(d), "input.size(%zu) %zd != out.size(%zu) %zd | dim = %" PRId64, d, @@ -820,7 +820,7 @@ bool check_unsqueeze_copy_args( out.size(d), dim); } else if (d > dim_normalized) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( input.size(d - 1) == out.size(d), "input.size(%zu) %zd != out.size(%zu) %zd | dim = %" PRId64, d - 1, @@ -829,7 +829,7 @@ bool check_unsqueeze_copy_args( out.size(d), dim); } else { // d == dim - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out.size(d) == 1, "out.size(%zu) %zd shall equal 1 | dim = %" PRId64, d, @@ -848,7 +848,7 @@ bool check_view_copy_args( ET_LOG_AND_RETURN_IF_FALSE(size_int64_t.size() == out.sizes().size()); // The input and out shall share same dtype and numel - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( self.numel() == out.numel(), "self.numel() %zd != out.numel() %zd", self.numel(), @@ -860,7 +860,7 @@ bool check_view_copy_args( for (int i = 0; i < size_int64_t.size(); i++) { // If this value is -1 it implies that this dimension is inferred. if (size_int64_t[i] == -1) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( !size_inferred, "Multiple dimensions cannot be inferred."); size_inferred = true; } @@ -888,7 +888,7 @@ bool get_view_copy_target_size( out_numels_without_minus_1 = out_numels_without_minus_1 * size_int64_t[i]; } else { // TODO(kimishpatel): Add test to hit this line - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( minus_1_dim == -1, "At most one view copy dim can be -1."); minus_1_dim = i; } diff --git a/kernels/portable/cpu/util/distance_util.cpp b/kernels/portable/cpu/util/distance_util.cpp index f8dc2f71216..21a111d2c47 100644 --- a/kernels/portable/cpu/util/distance_util.cpp +++ b/kernels/portable/cpu/util/distance_util.cpp @@ -14,8 +14,7 @@ namespace executor { bool check_pdist_args(const Tensor& in, double p, const Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(in, 2)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( - p >= 0, "pdist only supports non-negative p values"); + ET_CHECK_OR_RETURN_FALSE(p >= 0, "pdist only supports non-negative p values"); return true; } @@ -40,11 +39,10 @@ bool check_cdist_args( ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_greater_or_equal_to(x2, 2)); ET_LOG_AND_RETURN_IF_FALSE( tensors_have_same_size_at_dims(x1, x1.dim() - 1, x2, x2.dim() - 1)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( - p >= 0, "cdist only supports non-negative p values"); + ET_CHECK_OR_RETURN_FALSE(p >= 0, "cdist only supports non-negative p values"); if (compute_mode.has_value()) { int64_t mode = compute_mode.value(); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( mode >= 0 && mode <= 2, "possible modes: 0, 1, 2, but was: %" PRId64, mode); diff --git a/kernels/portable/cpu/util/index_util.cpp b/kernels/portable/cpu/util/index_util.cpp index fb54980bb47..909b00db3aa 100644 --- a/kernels/portable/cpu/util/index_util.cpp +++ b/kernels/portable/cpu/util/index_util.cpp @@ -20,11 +20,11 @@ bool check_gather_args( Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + 
ET_CHECK_OR_RETURN_FALSE( index.scalar_type() == ScalarType::Long, "Expected dypte int64 for index"); if (index.numel() != 0) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( nonzero_dim(in) == nonzero_dim(index), "self and index should have the same dimensionality when index is not empty " "except for the case when one has dimension 0 and the other has dimension 1"); @@ -37,7 +37,7 @@ bool check_gather_args( for (size_t d = 0; d < nonzero_dim(in); ++d) { if (d != dim) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( nonempty_size(index, d) <= nonempty_size(in, d), "size of dimension %zd of index should be smaller than the size of that dimension of input if dimension %zd != dim %zd", d, @@ -47,7 +47,7 @@ bool check_gather_args( } const long* index_data = index.const_data_ptr(); for (size_t i = 0; i < index.numel(); ++i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( index_data[i] >= 0 && index_data[i] < nonempty_size(in, dim), "Index is out of bounds for dimension %zd with size %zd", (size_t)dim, @@ -64,12 +64,12 @@ bool check_index_select_args( Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); dim = dim < 0 ? dim + nonzero_dim(in) : dim; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( nonempty_size(in, dim) > 0, "index_select: Indexing axis dim should be positive"); ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( index.scalar_type() == ScalarType::Long || index.scalar_type() == ScalarType::Int, "Expected index to have type of Long or Int, but found %s", @@ -77,7 +77,7 @@ bool check_index_select_args( ET_LOG_AND_RETURN_IF_FALSE(tensor_has_rank_smaller_or_equal_to(index, 1)); if (index.dim() > 0 && in.dim() == 0) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( index.numel() == 1, "index_select: Index to scalar must have exactly 1 value"); } @@ -85,7 +85,7 @@ bool check_index_select_args( if (index.scalar_type() == ScalarType::Long) { const int64_t* const index_ptr = index.const_data_ptr(); for (size_t i = 0; i < index.numel(); ++i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( index_ptr[i] >= 0 && index_ptr[i] < nonempty_size(in, dim), "index[%zu] = %" PRId64 " is out of range [0, %zd)", i, @@ -95,7 +95,7 @@ bool check_index_select_args( } else { const int32_t* const index_ptr = index.const_data_ptr(); for (size_t i = 0; i < index.numel(); ++i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( index_ptr[i] >= 0 && index_ptr[i] < nonempty_size(in, dim), "index[%zu] = %" PRId32 " is out of range [0, %zd)", i, @@ -126,12 +126,12 @@ void get_index_select_out_target_size( bool check_nonzero_args(const Tensor& in, const Tensor& out) { (void)in; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out.scalar_type() == ScalarType::Long, "Expected out to be a Long tensor but received %" PRId8, static_cast(out.scalar_type())); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out.dim() == 2, "Expected out to be a 2d tensor received %zd", ssize_t(out.dim())); @@ -147,7 +147,7 @@ bool check_scatter_add_args( Tensor& out) { ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(self, out)); ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(self, src)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( index.scalar_type() == ScalarType::Long, "Expected dypte int64 for index"); ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(self, dim)); @@ -156,7 +156,7 @@ bool 
check_scatter_add_args( return true; } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( nonzero_dim(self) == nonzero_dim(src) && nonzero_dim(self) == nonzero_dim(index), "self, index and src should have same number of dimensions."); @@ -167,12 +167,12 @@ bool check_scatter_add_args( } for (size_t d = 0; d < nonzero_dim(self); ++d) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( nonempty_size(index, d) <= nonempty_size(src, d), "size of dimension %zd of index should be smaller than the size of that dimension of src", d); if (d != dim) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( nonempty_size(index, d) <= nonempty_size(self, d), "size of dimension %zd of index should be smaller than the size of that dimension of self if dimension %zd != dim %zd", d, @@ -182,7 +182,7 @@ bool check_scatter_add_args( } const long* index_data = index.const_data_ptr(); for (size_t i = 0; i < index.numel(); ++i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( index_data[i] >= 0 && index_data[i] < nonempty_size(self, dim), "Index is out of bounds for dimension %zd with size %zd", (size_t)dim, @@ -228,7 +228,7 @@ bool check_select_scatter_args( ET_LOG_AND_RETURN_IF_FALSE(dim_is_valid(dim, in.dim())); // The index shall be valid in the given dimenson - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( index >= 0 && index < in.size(dim), "index %" PRId64 " out of range [-%zd,%zd) at in.size( %" PRId64 ")", index, @@ -239,7 +239,7 @@ bool check_select_scatter_args( // The src.dim() shall be one lower than in.dim() since src needs to fit // into the selected data on one dim of input // https://pytorch.org/docs/stable/generated/torch.select_scatter.html - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( in.dim() == src.dim() + 1, "in.dim() %zd != src.dim() + 1 %zd", in.dim(), diff --git a/kernels/portable/cpu/util/kernel_ops_util.cpp b/kernels/portable/cpu/util/kernel_ops_util.cpp index 2e267b57715..1e851ccb1ef 100644 --- a/kernels/portable/cpu/util/kernel_ops_util.cpp +++ b/kernels/portable/cpu/util/kernel_ops_util.cpp @@ -26,14 +26,14 @@ bool param_array_is_valid( bool allow_empty) { auto size = array.size(); if (allow_empty) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( size == 0 || size == 1 || size == length, "Expected %s to have size 0, 1 or %zu but got %zd", name, length, size); } else { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( size == 1 || size == length, "Expected %s to have size 1 or %zu but got %zd", name, @@ -126,7 +126,7 @@ bool output_padding_is_valid( const int64_t op_i = val_at(output_padding, i); const int64_t s_i = val_at(stride, i); const int64_t d_i = val_at(dilation, i); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( op_i < s_i || op_i < d_i, "output padding must be smaller than either stride or dilation"); } @@ -246,12 +246,12 @@ void calculate_kernel_output_sizes( } bool check_arange_args(double start, double end, double step, Tensor& out) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out.dim() == 1, "out should be a 1-d tensor, but got a %zu-d tensor", out.dim()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( (step > 0 && (end >= start)) || (step < 0 && (end <= start)), "upper bound and larger bound inconsistent with step sign"); @@ -272,7 +272,7 @@ bool check_avg_pool2d_args( ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(in)); 
ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( (in.dim() == 3 && in.size(0) > 0 && in.size(1) > 0 && in.size(2) > 0) || (in.dim() == 4 && in.size(1) > 0 && in.size(2) > 0 && in.size(3) > 0), "Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input"); @@ -285,7 +285,7 @@ bool check_avg_pool2d_args( padding, kernel_size, /*kernel_ndim=*/2, /*enforce_half_kernel=*/true)); if (divisor_override.has_value()) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( divisor_override.value() != 0, "divisor_override must be non-zero, but found %" PRId64, divisor_override.value()); @@ -334,7 +334,7 @@ bool check_convolution_args( tensor_is_default_or_channels_last_dim_order(weight)); ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( in.dim() == 3 || in.dim() == 4, "Expect input tensor to be 3-D or 4-D, but got, %zu.", static_cast(in.dim())); @@ -343,7 +343,7 @@ bool check_convolution_args( if (bias.has_value()) { ET_LOG_AND_RETURN_IF_FALSE(tensor_is_rank(bias.value(), 1)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( bias.value().size(0) == transposed ? groups * weight.size(1) : weight.size(0), "bias length must equal number of output channels, but got %zd", @@ -369,14 +369,14 @@ bool check_convolution_args( output_padding_is_valid(output_padding, stride, dilation, kernel_ndim)); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( weight.size(0) >= groups, "Given groups=%" PRId64 ", expected weight to be at least %" PRId64 " at dimension 0, but got weight.size(0) = %zd instead", groups, groups, weight.size(0)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( weight.size(0) % groups == 0, "Given groups=%" PRId64 ", expected weight to be divisible by %" PRId64 " at dimension 0, but got weight.size(0) = %zd instead", @@ -385,7 +385,7 @@ bool check_convolution_args( weight.size(0)); if (!transposed) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( in.size(1) == groups * weight.size(1), "Given groups=%" PRId64 " and weight.size(1) = %zd, expected input to have %" PRId64 @@ -395,7 +395,7 @@ bool check_convolution_args( groups * weight.size(1), in.size(1)); } else { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( in.size(1) == weight.size(0), "input channels must match weight.size(0) in transposed convolution"); } @@ -472,7 +472,7 @@ bool check_max_pool2d_with_indices_args( Tensor& out, Tensor& indices) { ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( indices.scalar_type() == ScalarType::Long, "Expected indices to have type of Long, but found %s", toString(indices.scalar_type())); @@ -480,7 +480,7 @@ bool check_max_pool2d_with_indices_args( ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(in)); ET_LOG_AND_RETURN_IF_FALSE(tensor_is_default_or_channels_last_dim_order(out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( (in.dim() == 3 && in.size(0) > 0 && in.size(1) > 0 && in.size(2) > 0) || (in.dim() == 4 && in.size(1) > 0 && in.size(2) > 0 && in.size(3) > 0), "Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input"); @@ -543,10 +543,10 @@ bool check_constant_pad_args( ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_rank(in, out)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( 
pad.size() % 2 == 0, "Padding array must be a multiple of 2"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( pad.size() / 2 <= in.dim(), "Padding array contains too many elements"); return true; @@ -578,13 +578,13 @@ bool check_embedding_args( const Tensor& indices, const Tensor& out) { // Ensure weight is 2-D. It could be empty. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( weight.dim() == 2, "weight.dim() %zd != 2", weight.dim()); // Ensure out is k+1 dimension tensor where k is the indices.dim() // out's first k dimension shall be same as indices, and the last dim shall // equal weight's last dim - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out.dim() == indices.dim() + 1, "out.dim() %zd != indices.dim() %zd + 1", out.dim(), diff --git a/kernels/portable/cpu/util/normalization_ops_util.cpp b/kernels/portable/cpu/util/normalization_ops_util.cpp index 684417f448a..9f3ce5cc112 100644 --- a/kernels/portable/cpu/util/normalization_ops_util.cpp +++ b/kernels/portable/cpu/util/normalization_ops_util.cpp @@ -81,15 +81,15 @@ bool check_layer_norm_args( Tensor& mean_out, Tensor& rstd_out) { size_t ndim = normalized_shape.size(); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( ndim >= 1, "Expected normalized_shape to be at least 1-dimensional, i.e., containing at least one element."); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( in.dim() >= ndim, "Expected input tensor to have rank >= the length of normalized_shape."); size_t shift = in.dim() - ndim; for (size_t d = 0; d < ndim; ++d) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( in.size(d + shift) == normalized_shape[d], "Expected normalized_shape to match the sizes of input's rightmost dimensions."); } @@ -144,16 +144,16 @@ bool check_group_norm_args( ET_LOG_AND_RETURN_IF_FALSE(in.size(0) == N); ET_LOG_AND_RETURN_IF_FALSE(in.size(1) == C); ET_LOG_AND_RETURN_IF_FALSE(in.numel() == N * C * HxW); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( group > 0, "Expected number of groups to be greater than 0"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( C % group == 0, "Expected number of channels in input to be divisible by number of groups"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( !weight.has_value() || (weight.value().dim() == 1 && weight.value().size(0) == C), "Expected weight to be a vector of size equal to the number of channels in input"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( !bias.has_value() || (bias.value().dim() == 1 && bias.value().size(0) == C), "Expected bias to be a vector of size equal to the number of channels in input"); diff --git a/kernels/portable/cpu/util/reduce_util.cpp b/kernels/portable/cpu/util/reduce_util.cpp index 65140fc6643..fb6ac202f44 100644 --- a/kernels/portable/cpu/util/reduce_util.cpp +++ b/kernels/portable/cpu/util/reduce_util.cpp @@ -51,7 +51,7 @@ ET_NODISCARD bool check_dim_list_is_valid( ET_LOG_AND_RETURN_IF_FALSE( non_neg_d < kTensorDimensionLimit && non_neg_d >= 0); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( dim_exist[non_neg_d] == false, "dim %zd appears multiple times in the list of dims", non_neg_d); diff --git a/kernels/portable/cpu/util/repeat_util.cpp b/kernels/portable/cpu/util/repeat_util.cpp index d373a86c16c..bcb7a7ae0f9 100644 --- a/kernels/portable/cpu/util/repeat_util.cpp +++ b/kernels/portable/cpu/util/repeat_util.cpp @@ -25,7 +25,7 @@ bool check_repeat_args( executorch::aten::ArrayRef repeats, Tensor& out) 
{ // Ensure the self tensors list is non-empty. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( repeats.size() >= self.dim(), "Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor"); @@ -34,11 +34,11 @@ bool check_repeat_args( for (auto repeat : repeats) { all_non_negative = all_non_negative && (repeat >= 0); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( all_non_negative, "Trying to create tensor with negative dimension"); /// Check if out.size() is legal. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out.dim() == repeats.size(), "The dimension of out shall equal size of repeats, but now is %zd and %zd", out.dim(), @@ -47,7 +47,7 @@ bool check_repeat_args( // Right now we only support the tensors whose dimension is no greater than // kTensorDimensionLimit. Only check out tensor because the number of // dimension of out tensor shall have more than or equal to self tensor - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( out.dim() <= kTensorDimensionLimit, "The dimension of input and output should not be larger than %zd", kTensorDimensionLimit); @@ -66,7 +66,7 @@ bool check_repeat_args( reformat_self_size[out.dim() - 1 - i] = self.size(self.dim() - 1 - i); } for (size_t i = 0; i < repeats.size(); i++) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( reformat_self_size[i] * repeats[i] == out.size(i), "Expect out size at dimension %zu is %" PRId64 ", but now is %zd", i, diff --git a/kernels/portable/cpu/util/slice_util.cpp b/kernels/portable/cpu/util/slice_util.cpp index a948a370de2..e6444bd074a 100644 --- a/kernels/portable/cpu/util/slice_util.cpp +++ b/kernels/portable/cpu/util/slice_util.cpp @@ -24,7 +24,7 @@ bool check_narrow_copy_args( ET_LOG_AND_RETURN_IF_FALSE(in.dim() > 0); ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); - ET_LOG_MSG_AND_RETURN_IF_FALSE(lenth >= 0, "lenth must be non-negative"); + ET_CHECK_OR_RETURN_FALSE(lenth >= 0, "lenth must be non-negative"); ET_LOG_AND_RETURN_IF_FALSE(start >= -in.size(dim)); ET_LOG_AND_RETURN_IF_FALSE(start <= in.size(dim)); if (start < 0) { @@ -56,8 +56,7 @@ bool check_slice_copy_args( ET_LOG_AND_RETURN_IF_FALSE(in.dim() > 0); ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out)); ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim)); - ET_LOG_MSG_AND_RETURN_IF_FALSE( - step > 0, "slice step must be greater than zero"); + ET_CHECK_OR_RETURN_FALSE(step > 0, "slice step must be greater than zero"); return true; } @@ -89,8 +88,7 @@ bool check_slice_scatter_args( ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_rank(input, src)); // Check step. 
Step must be greater than zero - ET_LOG_MSG_AND_RETURN_IF_FALSE( - step > 0, "slice step must be greater than zero"); + ET_CHECK_OR_RETURN_FALSE(step > 0, "slice step must be greater than zero"); // The size of src tensor should follow these rules: // - src.size(i) shall equal to input.size(i) if i != dim, @@ -100,7 +98,7 @@ bool check_slice_scatter_args( ET_LOG_AND_RETURN_IF_FALSE( tensors_have_same_size_at_dims(input, d, src, d)); } else { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( src.size(d) == num_values, "input.size(%zu) %zd != num_values %" PRId64 " | dim = %" PRId64 ")", d, diff --git a/kernels/prim_ops/et_view.cpp b/kernels/prim_ops/et_view.cpp index 66aa9ac87e2..0f041dae00f 100644 --- a/kernels/prim_ops/et_view.cpp +++ b/kernels/prim_ops/et_view.cpp @@ -38,13 +38,13 @@ bool get_view_target_size( int64_t numel_without_minus_1 = 1; for (int i = 0; i < dim; i++) { if (size[i] == -1) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( minus1_dim == -1, "At most one view dim can be -1."); minus1_dim = i; } else { // The size[i] must be non-negative now, but we check size[i] >= -1 // in case code is reordered in the future. - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( size[i] >= -1, "Negative sizes are not allowed."); numel_without_minus_1 *= size[i]; @@ -56,7 +56,7 @@ bool get_view_target_size( } } if (minus1_dim >= 0) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( n_zero == 0, "Cannot infer dimension size if there is a zero dim."); out_size[minus1_dim] = self.numel() / numel_without_minus_1; } diff --git a/kernels/quantized/cpu/op_mixed_linear.cpp b/kernels/quantized/cpu/op_mixed_linear.cpp index d09d0bdd5e1..c97ed2cb7c9 100644 --- a/kernels/quantized/cpu/op_mixed_linear.cpp +++ b/kernels/quantized/cpu/op_mixed_linear.cpp @@ -36,13 +36,13 @@ bool check_quantized_mixed_linear_args( ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, weight_scales)); if (dtype.has_value()) { ET_LOG_AND_RETURN_IF_FALSE(out.scalar_type() == dtype.value()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( dtype.value() == ScalarType::Float || dtype.value() == ScalarType::Half, "dtype must be Float or Half"); } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( weight.scalar_type() == ScalarType::Char, "weight dtype must be int8"); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( in.scalar_type() == ScalarType::Float || in.scalar_type() == ScalarType::Half, "input dtype must be Float or Half"); @@ -55,7 +55,7 @@ bool check_quantized_mixed_linear_args( } // Support for non-null zero points is not implemented yet. 
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       !opt_weight_zero_points.has_value(), "zero points not supported yet.");
   return true;
 }
diff --git a/kernels/quantized/cpu/op_mixed_mm.cpp b/kernels/quantized/cpu/op_mixed_mm.cpp
index 044e110bf5c..564de74dfde 100644
--- a/kernels/quantized/cpu/op_mixed_mm.cpp
+++ b/kernels/quantized/cpu/op_mixed_mm.cpp
@@ -31,9 +31,9 @@ bool check_quantized_mixed_mm_args(
       tensors_have_same_size_at_dims(weight_scales, 0, weight, 0));
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, weight_scales, out));

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       weight.scalar_type() == ScalarType::Char, "weight dtype must be int8");
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       in.scalar_type() == ScalarType::Float ||
           in.scalar_type() == ScalarType::Half,
       "input dtype must be Float or Half");
@@ -46,7 +46,7 @@ bool check_quantized_mixed_mm_args(
   }

   // Support for non-null zero points is not implemented yet.
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       !opt_weight_zero_points.has_value(), "zero points not supported yet.");
   return true;
 }
diff --git a/runtime/core/error.h b/runtime/core/error.h
index cdf6303a650..7fbd92b7c08 100644
--- a/runtime/core/error.h
+++ b/runtime/core/error.h
@@ -126,6 +126,22 @@ using ::executorch::runtime::error_code_t;
     } \
   }

+/**
+ * A convenience macro to be used in utility functions that check whether input
+ * tensor(s) are valid, which are expected to return a boolean. Checks whether
+ * `cond` is true; if not, log the failed check with `message` and return false.
+ *
+ * @param[in] cond the condition to check
+ * @param[in] message an additional message to log with `cond`
+ */
+#define ET_CHECK_OR_RETURN_FALSE(cond__, message__, ...) \
+  { \
+    if (!(cond__)) { \
+      ET_LOG(Error, "Check failed (%s): " message__, #cond__, ##__VA_ARGS__); \
+      return false; \
+    } \
+  }
+
 /**
  * If error__ is not Error::Ok, optionally log a message and return the error
  * from the current function, which must be of return type
diff --git a/runtime/core/exec_aten/util/tensor_util.h b/runtime/core/exec_aten/util/tensor_util.h
index e16fe63e2a2..d7917e37b19 100644
--- a/runtime/core/exec_aten/util/tensor_util.h
+++ b/runtime/core/exec_aten/util/tensor_util.h
@@ -332,35 +332,22 @@
   })

 /**
+ * DEPRECATED: Please use ET_CHECK_OR_RETURN_FALSE instead and provide
+ * an informative message. (For example, the values of any variables used in
+ * `cond` would not be reported automatically by this macro.)
+ *
  * A convenience macro to be used in utility functions that check whether input
  * tensor(s) are valid, which are expected to return a boolean. Checks whether
  * `cond` is true; if not, log the failed check and return false.
  *
  * @param[in] cond the condition to check
  */
-#define ET_LOG_AND_RETURN_IF_FALSE(cond) \
-  do { \
-    if (!(cond)) { \
-      ET_LOG(Error, "Check failed (%s): ", #cond); \
-      return false; \
-    } \
-  } while (false)
+#define ET_LOG_AND_RETURN_IF_FALSE(cond) ET_CHECK_OR_RETURN_FALSE(cond, "")

 /**
- * A convenience macro to be used in utility functions that check whether input
- * tensor(s) are valid, which are expected to return a boolean. Checks whether
- * `cond` is true; if not, log the failed check with `message` and return false.
- *
- * @param[in] cond the condition to check
- * @param[in] message an additional message to log with `cond`
+ * DEPRECATED: Please use ET_CHECK_OR_RETURN_FALSE instead.
  */
-#define ET_LOG_MSG_AND_RETURN_IF_FALSE(cond, message, ...)
\ - do { \ - if (!(cond)) { \ - ET_LOG(Error, "Check failed (%s): " message, #cond, ##__VA_ARGS__); \ - return false; \ - } \ - } while (false) +#define ET_LOG_MSG_AND_RETURN_IF_FALSE ET_CHECK_OR_RETURN_FALSE /** * If `cond` is false, log `cond` and return from the kernel with a failure @@ -419,7 +406,7 @@ namespace runtime { * upper_bound - 1, inclusive. */ inline bool dim_is_valid(int64_t dim, int64_t upper_bound) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( dim >= -upper_bound && dim < upper_bound, "Dimension %" PRId64 " is out of range. Dimension should be between %" PRId64 " and %" PRId64 @@ -456,7 +443,7 @@ inline ssize_t nonempty_size( inline bool tensor_can_cast_to( executorch::aten::Tensor a, executorch::aten::ScalarType dtype) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( torch::executor::canCast(a.scalar_type(), dtype), "Tensor of dtype %s cannot cast to dtype %s", torch::executor::toString(a.scalar_type()), @@ -466,7 +453,7 @@ inline bool tensor_can_cast_to( } inline bool tensor_is_bool_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( t.scalar_type() == executorch::aten::ScalarType::Bool, "Expected to find bool type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -477,7 +464,7 @@ inline bool tensor_is_bool_type(executorch::aten::Tensor t) { inline bool tensor_is_type( executorch::aten::Tensor t, executorch::aten::ScalarType dtype) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( t.scalar_type() == dtype, "Expected to find %s type, but tensor has type %s", torch::executor::toString(dtype), @@ -489,7 +476,7 @@ inline bool tensor_is_type( inline bool tensor_is_integral_type( executorch::aten::Tensor t, bool includeBool = false) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( torch::executor::isIntegralType(t.scalar_type(), includeBool), "Expected to find a integral type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -498,7 +485,7 @@ inline bool tensor_is_integral_type( } inline bool tensor_is_floating_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( torch::executor::isFloatingType(t.scalar_type()), "Expected to find a floating type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -507,7 +494,7 @@ inline bool tensor_is_floating_type(executorch::aten::Tensor t) { } inline bool tensor_is_real_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( torch::executor::isRealType(t.scalar_type()), "Expected to find a real type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -516,7 +503,7 @@ inline bool tensor_is_real_type(executorch::aten::Tensor t) { } inline bool tensor_is_realh_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( torch::executor::isRealHType(t.scalar_type()), "Expected to find a real type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -525,7 +512,7 @@ inline bool tensor_is_realh_type(executorch::aten::Tensor t) { } inline bool tensor_is_realhbf16_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( executorch::runtime::isRealHBF16Type(t.scalar_type()), "Expected to find a real type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -534,7 +521,7 @@ inline bool tensor_is_realhbf16_type(executorch::aten::Tensor t) { } inline bool 
tensor_is_realhb_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( torch::executor::isRealHBType(t.scalar_type()), "Expected to find a real type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -543,7 +530,7 @@ inline bool tensor_is_realhb_type(executorch::aten::Tensor t) { } inline bool tensor_is_realhbbf16_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( executorch::runtime::isRealHBBF16Type(t.scalar_type()), "Expected to find a real type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -552,7 +539,7 @@ inline bool tensor_is_realhbbf16_type(executorch::aten::Tensor t) { } inline bool tensor_is_complex_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( torch::executor::isComplexType(t.scalar_type()), "Expected to find a complex type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -561,7 +548,7 @@ inline bool tensor_is_complex_type(executorch::aten::Tensor t) { } inline bool tensor_is_bits_type(executorch::aten::Tensor t) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( torch::executor::isBitsType(t.scalar_type()), "Expected to find a bits type, but tensor has type %s", torch::executor::toString(t.scalar_type())); @@ -572,7 +559,7 @@ inline bool tensor_is_bits_type(executorch::aten::Tensor t) { inline bool tensors_have_same_dtype( executorch::aten::Tensor a, executorch::aten::Tensor b) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( a.scalar_type() == b.scalar_type(), ET_TENSOR_CHECK_PREFIX__ ": dtype={%s, %s}", torch::executor::toString(a.scalar_type()), @@ -584,7 +571,7 @@ inline bool tensors_have_same_dtype( executorch::aten::Tensor a, executorch::aten::Tensor b, executorch::aten::Tensor c) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( a.scalar_type() == b.scalar_type() && b.scalar_type() == c.scalar_type(), ET_TENSOR_CHECK_PREFIX__ ": dtype={%s, %s, %s}", torch::executor::toString(a.scalar_type()), @@ -594,7 +581,7 @@ inline bool tensors_have_same_dtype( } inline bool tensor_is_rank(executorch::aten::Tensor t, size_t rank) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( t.dim() == rank, "Expected tensor.dim() to be %zu, but got %zu", static_cast(rank), @@ -606,7 +593,7 @@ inline bool tensor_is_rank(executorch::aten::Tensor t, size_t rank) { inline bool tensor_has_rank_greater_or_equal_to( executorch::aten::Tensor t, size_t rank) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( t.dim() >= rank, "Expected tensor.dim() to be >= %zu, but got %zu", static_cast(rank), @@ -618,7 +605,7 @@ inline bool tensor_has_rank_greater_or_equal_to( inline bool tensor_has_rank_smaller_or_equal_to( executorch::aten::Tensor t, size_t rank) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( t.dim() <= rank, "Expected tensor.dim() to be <= %zu, but got %zu", static_cast(rank), @@ -629,12 +616,12 @@ inline bool tensor_has_rank_smaller_or_equal_to( inline bool tensor_has_dim(executorch::aten::Tensor t, int64_t d) { if (t.dim() == 0) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( d == 0 || d == -1, "dim must be 0 or -1 for 0-dim tensor, got %" PRId64, d); } else { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( d > 0 ? 
d < t.dim() : t.dim() + d >= 0, "%zu-dim tensor does not have dim at index %zu", static_cast(t.dim()), @@ -660,7 +647,7 @@ tensor_dim_has_index(executorch::aten::Tensor t, int64_t d, int64_t ix) { // Dimension must have been already checked by tensor_has_dim ET_CHECK(d >= 0 && d < t.dim()); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( ix >= -t.size(d) && ix < t.size(d), "index %" PRId64 " out of range [-%zu,%zu) at dimension %" PRId64 ")", ix, @@ -675,17 +662,17 @@ inline bool tensors_have_same_size_at_dims( size_t dim_a, executorch::aten::Tensor b, size_t dim_b) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( dim_a < a.dim(), "Cannot retrieve dim %zu from tensor with dim %zu", static_cast(dim_a), static_cast(a.dim())); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( dim_b < b.dim(), "Cannot retrieve dim %zu from tensor with dim %zu", static_cast(dim_b), static_cast(b.dim())); - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( a.size(dim_a) == b.size(dim_b), ET_TENSOR_CHECK_PREFIX__ ": a.size(%zu) = %zu does not match b.size(%zu) = %zu", @@ -860,13 +847,13 @@ inline bool tensor_is_contiguous(executorch::aten::Tensor t) { if (strides.size() == 0) { return true; } - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( strides[strides.size() - 1] == 1, "Tensor is not contiguous; the stride of the last dimension must be 1, " "but got %zu", static_cast(strides[strides.size() - 1])); for (int i = strides.size() - 1; i > 0; --i) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( strides[i - 1] == strides[i] * sizes[i], "Tensor is not contiguous; the stride of dim %zu should be equal to " "strides[%zu] * sizes[%zu] = %zu, but found %zu", @@ -882,7 +869,7 @@ inline bool tensor_is_contiguous(executorch::aten::Tensor t) { inline bool tensors_have_same_rank( executorch::aten::Tensor a, executorch::aten::Tensor b) { - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( a.dim() == b.dim(), ET_TENSOR_CHECK_PREFIX__ ": rank={%zd, %zd}", ssize_t(a.dim()), diff --git a/runtime/core/exec_aten/util/tensor_util_aten.cpp b/runtime/core/exec_aten/util/tensor_util_aten.cpp index d768f66d05f..4df273d4dbb 100644 --- a/runtime/core/exec_aten/util/tensor_util_aten.cpp +++ b/runtime/core/exec_aten/util/tensor_util_aten.cpp @@ -35,7 +35,7 @@ Error get_dim_order( bool tensor_has_valid_dim_order(at::Tensor t) { executorch::aten::DimOrderType dim_order[kTensorDimensionLimit]; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( get_dim_order(t, dim_order, t.dim()) == Error::Ok, "Failed to retrieve dim order from tensor!"); @@ -55,7 +55,7 @@ bool tensor_has_valid_dim_order(at::Tensor t) { inline bool tensor_is_default_or_channels_last_dim_order(at::Tensor t) { executorch::aten::DimOrderType dim_order[kTensorDimensionLimit]; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( get_dim_order(t, dim_order, t.dim()) == Error::Ok, "Failed to retrieve dim order from tensor!"); @@ -86,7 +86,7 @@ bool tensors_have_same_dim_order( executorch::aten::DimOrderType first_dim_order[kTensorDimensionLimit]; executorch::aten::DimOrderType other_dim_order[kTensorDimensionLimit]; - ET_LOG_MSG_AND_RETURN_IF_FALSE( + ET_CHECK_OR_RETURN_FALSE( get_dim_order(tensor_list[0], first_dim_order, tensor_list[0].dim()) == Error::Ok, "Failed to retrieve dim order from 1st input tensor!"); @@ -97,7 +97,7 @@ bool tensors_have_same_dim_order( is_channels_last_dim_order(first_dim_order, tensor_list[0].dim()); for (size_t i = 1; i < tensor_list.size(); 
        ++i) {
-    ET_LOG_MSG_AND_RETURN_IF_FALSE(
+    ET_CHECK_OR_RETURN_FALSE(
         get_dim_order(tensor_list[i], other_dim_order, tensor_list[i].dim()) ==
             Error::Ok,
         "Failed to retrieve dim order from %zd-th input tensor!",
@@ -109,7 +109,7 @@ bool tensors_have_same_dim_order(
         is_channels_last_dim_order(other_dim_order, tensor_list[i].dim());
   }
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       all_contiguous || all_channels_last,
       "%zd input tensors have different dim orders",
       tensor_list.size());
diff --git a/runtime/core/exec_aten/util/tensor_util_portable.cpp b/runtime/core/exec_aten/util/tensor_util_portable.cpp
index 3350445db73..c1cbcfb6064 100644
--- a/runtime/core/exec_aten/util/tensor_util_portable.cpp
+++ b/runtime/core/exec_aten/util/tensor_util_portable.cpp
@@ -125,7 +125,7 @@ bool tensors_have_same_dim_order(
       tensor_list[i].dim_order().size());
   }
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
      all_contiguous || all_channels_last,
      "%zd input tensors have different dim orders",
      tensor_list.size());
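
Usage sketch (illustrative, not part of this diff): the new ET_CHECK_OR_RETURN_FALSE macro added in runtime/core/error.h takes a condition, a printf-style format string, and optional format arguments; when the condition is false it logs "Check failed (<stringified condition>): <message>" and returns false from the enclosing function. The helper below shows the intended call pattern in the style of the validation functions touched by this diff; the function name, the particular checks, and the include paths are assumptions for illustration, not code from this PR.

#include <executorch/runtime/core/error.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>

using executorch::aten::Tensor;

// Hypothetical validation helper: returns true if `a` and `b` are valid
// matmul operands, otherwise logs a descriptive error and returns false.
bool check_matmul_args(const Tensor& a, const Tensor& b) {
  // Condition first, then a printf-style message carrying the offending
  // values, mirroring the call sites converted in this diff.
  ET_CHECK_OR_RETURN_FALSE(a.dim() == 2, "a must be 2D; saw %zd dims", a.dim());
  ET_CHECK_OR_RETURN_FALSE(b.dim() == 2, "b must be 2D; saw %zd dims", b.dim());
  ET_CHECK_OR_RETURN_FALSE(
      a.size(1) == b.size(0),
      "a.size(1) %zd != b.size(0) %zd",
      a.size(1),
      b.size(0));
  return true;
}

Because the diff redefines ET_LOG_AND_RETURN_IF_FALSE(cond) as ET_CHECK_OR_RETURN_FALSE(cond, "") and aliases ET_LOG_MSG_AND_RETURN_IF_FALSE to the new macro, existing call sites keep compiling; the deprecation comments in tensor_util.h steer new code toward the new name together with an informative, value-carrying message.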