@@ -7,6 +7,7 @@
  */

 #include <c10/util/irange.h>
+#include <array>
 #include <cstring>

 #include <executorch/kernels/portable/cpu/util/normalization_ops_util.h>
@@ -92,6 +93,11 @@ bool check_layer_norm_args(
       ", ndim = %zu",
       in.dim(),
       ndim);
+  ET_CHECK_OR_RETURN_FALSE(
+      ndim <= kTensorDimensionLimit,
+      "Expected normalized shape to have at most %zu dimensions but it had %zu",
+      kTensorDimensionLimit,
+      ndim);
   size_t shift = in.dim() - ndim;
   for (const auto d : c10::irange(ndim)) {
     ET_CHECK_OR_RETURN_FALSE(
@@ -103,20 +109,20 @@ bool check_layer_norm_args(
         d,
         normalized_shape[d]);
   }
-  executorch::aten::SizesType shape[ndim];
+  std::array<executorch::aten::SizesType, kTensorDimensionLimit> shape;
   for (const auto i : c10::irange(ndim)) {
     shape[i] = static_cast<executorch::aten::SizesType>(normalized_shape[i]);
   }

   if (weight.has_value()) {
     ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, weight.value()));
     ET_LOG_AND_RETURN_IF_FALSE(
-        tensor_has_expected_size(weight.value(), {shape, ndim}));
+        tensor_has_expected_size(weight.value(), {shape.data(), ndim}));
   }
   if (bias.has_value()) {
     ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, bias.value()));
     ET_LOG_AND_RETURN_IF_FALSE(
-        tensor_has_expected_size(bias.value(), {shape, ndim}));
+        tensor_has_expected_size(bias.value(), {shape.data(), ndim}));
   }
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, mean_out));
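For context, here is a minimal standalone sketch of the pattern this change applies: the C-style runtime-sized array (`executorch::aten::SizesType shape[ndim]`) is a variable-length array, a compiler extension rather than standard C++, so the fix swaps in a fixed-capacity `std::array` sized by the dimension limit and adds an explicit bound check before writing into it. `kMaxDims` and `copy_shape` below are hypothetical names for illustration, not ExecuTorch APIs.

```cpp
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-in for kTensorDimensionLimit; the real constant lives in the
// ExecuTorch runtime.
constexpr size_t kMaxDims = 16;

// Hypothetical helper mirroring the diff: copy a runtime-sized shape into
// a fixed-capacity buffer, failing gracefully instead of overflowing.
bool copy_shape(const int64_t* src, size_t ndim) {
  // The bound check the diff adds via ET_CHECK_OR_RETURN_FALSE: without it,
  // the fixed-size buffer below could be indexed out of range.
  if (ndim > kMaxDims) {
    std::fprintf(stderr, "ndim %zu exceeds limit %zu\n", ndim, kMaxDims);
    return false;
  }
  // Was `int32_t shape[ndim];` in spirit: a VLA, which is non-standard C++.
  std::array<int32_t, kMaxDims> shape;
  for (size_t i = 0; i < ndim; ++i) {
    shape[i] = static_cast<int32_t>(src[i]);
  }
  // Callers that took `{shape, ndim}` now need `{shape.data(), ndim}`,
  // since std::array does not implicitly decay to a pointer there.
  for (size_t i = 0; i < ndim; ++i) {
    std::printf("dim[%zu] = %d\n", i, shape[i]);
  }
  return true;
}

int main() {
  const int64_t sizes[] = {2, 3, 4};
  return copy_shape(sizes, 3) ? 0 : 1;
}
```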