@@ -713,6 +713,9 @@ Error Method::resolve_operator(
   }
   TensorMeta* meta = allocator->allocateList<TensorMeta>(n_args);
   if (meta == nullptr) {
+    if (allocator == memory_manager_->temp_allocator()) {
+      memory_manager_->temp_allocator()->reset();
+    }
     return Error::MemoryAllocationFailed;
   }
 
@@ -726,6 +729,9 @@ Error Method::resolve_operator(
     executorch::aten::DimOrderType* dim_order_ptr =
         allocator->allocateList<executorch::aten::DimOrderType>(tensor.dim());
     if (dim_order_ptr == nullptr) {
+      if (allocator == memory_manager_->temp_allocator()) {
+        memory_manager_->temp_allocator()->reset();
+      }
       return Error::MemoryAllocationFailed;
     }
     size_t size = tensor.dim();
@@ -751,9 +757,18 @@ Error Method::resolve_operator(
         "Missing operator: [%" ET_PRIssize_t "] %s",
         static_cast<ssize_t>(op_index),
         operator_name);
+    if (allocator == memory_manager_->temp_allocator()) {
+      memory_manager_->temp_allocator()->reset();
+    }
     return op_function.error();
   }
   kernels[kernel_index] = op_function.get();
+
+  // If we used the temp allocator here, reset it.
+  if (allocator == memory_manager_->temp_allocator()) {
+    memory_manager_->temp_allocator()->reset();
+  }
+
   return Error::Ok;
 }
 
@@ -1547,6 +1562,9 @@ Error Method::execute() {
         i);
   }
   ET_LOG(Debug, "Executing method: %s.", method_meta().name());
+  if (temp_allocator_ != nullptr) {
+    temp_allocator_->reset();
+  }
 
   // Chains are executed sequentially today, but future async designs may
   // branch and run many in parallel or out of order.
0 commit comments