@@ -16,8 +16,8 @@ using exec_aten::ScalarType;
 using executorch::runtime::KernelRuntimeContext;
 using executorch::runtime::kTensorDimensionLimit;
 using executorch::runtime::resize_tensor;
-using executorch::runtime::tensors_have_same_dim_order;
 using executorch::runtime::tensor_is_default_dim_order;
+using executorch::runtime::tensors_have_same_dim_order;
 using torch::executor::check_bmm_args;
 using torch::executor::Error;
 using torch::executor::get_bmm_out_target_size;
@@ -78,16 +78,16 @@ Tensor& bmm_out(
   WORD32 out_stride = p;

   WORD32* __restrict__ tmp =
-      (WORD32* __restrict__)kernels::allocate_temp_memory(
-          ctx, (batch_size * m * p) * sizeof(float));
+      (WORD32* __restrict__)kernels::allocate_temp_memory(
+          ctx, (batch_size * m * p) * sizeof(float));

   ET_KERNEL_CHECK(ctx, tmp != nullptr, MemoryAllocationFailed, out);

   tmp[batch_size * m * p] = {0};

   WORD32* __restrict__ p_o =
-      (WORD32* __restrict__)kernels::allocate_temp_memory(
-          ctx, (batch_size * m * p) * sizeof(WORD32));
+      (WORD32* __restrict__)kernels::allocate_temp_memory(
+          ctx, (batch_size * m * p) * sizeof(WORD32));

   ET_KERNEL_CHECK(ctx, p_o != nullptr, MemoryAllocationFailed, out);
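
For reference, a minimal sketch of the scratch-buffer pattern this hunk touches: the operator asks the kernel runtime context for temporary memory through the backend's kernels::allocate_temp_memory helper and bails out via ET_KERNEL_CHECK when the allocation fails. The header path, the example_op_out function, and the exact allocate_temp_memory signature below are assumptions for illustration, not taken from this commit.

#include <cstddef>
#include <cstdint>

#include <executorch/runtime/kernel/kernel_includes.h>

using executorch::runtime::KernelRuntimeContext;

namespace kernels {
// Assumed signature, mirroring how the diff calls it: returns nullptr
// when the context cannot provide scratch memory of the requested size.
void* allocate_temp_memory(KernelRuntimeContext& ctx, size_t size);
} // namespace kernels

exec_aten::Tensor& example_op_out(
    KernelRuntimeContext& ctx,
    const exec_aten::Tensor& in,
    exec_aten::Tensor& out) {
  const size_t n = static_cast<size_t>(in.numel());

  // Scratch buffer sized to the workload; its lifetime is tied to the
  // kernel context, so there is no explicit free here.
  int32_t* tmp = static_cast<int32_t*>(
      kernels::allocate_temp_memory(ctx, n * sizeof(int32_t)));

  // Abort the kernel and return `out` unchanged if allocation failed,
  // matching the ET_KERNEL_CHECK(..., MemoryAllocationFailed, out)
  // calls in bmm_out above.
  ET_KERNEL_CHECK(ctx, tmp != nullptr, MemoryAllocationFailed, out);

  // ... use tmp as workspace, then write the result into `out` ...
  return out;
}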