From bb5561bd086c0fc7d8143ece69cf4676c2c5e956 Mon Sep 17 00:00:00 2001
From: "cmadhira@cadence.com" <cmadhira@cadence.com>
Date: Wed, 29 Jan 2025 14:41:47 +0530
Subject: [PATCH] removed malloc and used allocate_temp to allocate memory in
 native layer norm

---
 .../fusion_g3/operators/op_native_layer_norm.cpp | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/backends/cadence/fusion_g3/operators/op_native_layer_norm.cpp b/backends/cadence/fusion_g3/operators/op_native_layer_norm.cpp
index b4f076e8100..09c7c00fd2c 100644
--- a/backends/cadence/fusion_g3/operators/op_native_layer_norm.cpp
+++ b/backends/cadence/fusion_g3/operators/op_native_layer_norm.cpp
@@ -225,7 +225,10 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_layer_norm_out(
     if (weight.has_value()) {
       weight_data = weight.value().mutable_data_ptr<float>();
     } else {
-      weight_data = (float*)malloc(num_elm * sizeof(float));
+      executorch::runtime::Result<void*> temp_mem_weight =
+          ctx.allocate_temp(num_elm * sizeof(float));
+      weight_data = (float*)(temp_mem_weight.get());
+
       for (int i = 0; i < num_elm; i++) {
         weight_data[i] = 1;
       }
@@ -234,7 +237,10 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_layer_norm_out(
     if (bias.has_value()) {
       bias_data = bias.value().mutable_data_ptr<float>();
     } else {
-      bias_data = (float*)malloc(num_elm * sizeof(float));
+      executorch::runtime::Result<void*> temp_mem_bias =
+          ctx.allocate_temp(num_elm * sizeof(float));
+      bias_data = (float*)(temp_mem_bias.get());
+
       for (int i = 0; i < num_elm; i++) {
         bias_data[i] = 0;
       }
@@ -255,12 +261,6 @@ std::tuple<Tensor&, Tensor&, Tensor&> native_layer_norm_out(
         bias_data,
         (float)eps);
 
-    if (!bias.has_value()) {
-      free(bias_data);
-    }
-    if (!weight.has_value()) {
-      free(weight_data);
-    }
   } else {
     ET_KERNEL_CHECK(
         ctx,
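
Note: the hunks above call temp_mem_weight.get() / temp_mem_bias.get() without
first checking whether allocate_temp() succeeded. Below is a minimal sketch of
what a checked version of the weight branch could look like, assuming the
ExecuTorch runtime's Result<void*> API (ok()/get()), the ET_KERNEL_CHECK macro,
and a MemoryAllocationFailed error code as they exist in the main repository;
ret_val standing for the kernel's tuple return value is likewise an assumption
here, not part of this patch.

    // Illustrative sketch only, not the patch's code: allocate scratch memory
    // for the default weights and bail out of the kernel if the temp
    // allocator is exhausted, instead of dereferencing an invalid Result.
    executorch::runtime::Result<void*> temp_mem_weight =
        ctx.allocate_temp(num_elm * sizeof(float));
    ET_KERNEL_CHECK(
        ctx, temp_mem_weight.ok(), MemoryAllocationFailed, ret_val);
    weight_data = (float*)(temp_mem_weight.get());
    for (int i = 0; i < num_elm; i++) {
      weight_data[i] = 1; // default affine weight when none is supplied
    }

Unlike the removed malloc/free pairing, memory obtained from allocate_temp()
is owned by the runtime's temp allocator and reclaimed after the kernel runs,
which is why the patch can also drop the free() calls in the last hunk.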