diff --git a/kernels/portable/cpu/util/normalization_ops_util.cpp b/kernels/portable/cpu/util/normalization_ops_util.cpp
index 4adcf02b303..dedab427ae7 100644
--- a/kernels/portable/cpu/util/normalization_ops_util.cpp
+++ b/kernels/portable/cpu/util/normalization_ops_util.cpp
@@ -7,6 +7,7 @@
  */
 
 #include <cstring>
+#include <array>
 
 #include <executorch/kernels/portable/cpu/util/normalization_ops_util.h>
 #include <executorch/runtime/core/exec_aten/util/tensor_util.h>
@@ -92,6 +93,11 @@ bool check_layer_norm_args(
       ", ndim = %zu",
       in.dim(),
       ndim);
+  ET_CHECK_OR_RETURN_FALSE(
+      ndim <= kTensorDimensionLimit,
+      "Expected normalized shape to have at most %zu dimensions but it had %zu",
+      kTensorDimensionLimit,
+      ndim);
   size_t shift = in.dim() - ndim;
   for (const auto d : c10::irange(ndim)) {
     ET_CHECK_OR_RETURN_FALSE(
@@ -103,7 +109,7 @@
         d,
         normalized_shape[d]);
   }
-  executorch::aten::SizesType shape[ndim];
+  std::array<executorch::aten::SizesType, kTensorDimensionLimit> shape;
   for (const auto i : c10::irange(ndim)) {
     shape[i] = static_cast<executorch::aten::SizesType>(normalized_shape[i]);
   }
@@ -111,12 +117,12 @@
   if (weight.has_value()) {
     ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, weight.value()));
     ET_LOG_AND_RETURN_IF_FALSE(
-        tensor_has_expected_size(weight.value(), {shape, ndim}));
+        tensor_has_expected_size(weight.value(), {shape.data(), ndim}));
   }
   if (bias.has_value()) {
     ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, bias.value()));
     ET_LOG_AND_RETURN_IF_FALSE(
-        tensor_has_expected_size(bias.value(), {shape, ndim}));
+        tensor_has_expected_size(bias.value(), {shape.data(), ndim}));
   }
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, mean_out));