@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -490,15 +490,6 @@ TfLiteStatus MicroAllocator::FinishModelAllocation(
   TF_LITE_ENSURE_STATUS(AllocateScratchBufferHandles(
       scratch_buffer_handles, scratch_buffer_request_count_));
 
-  // Allocate buffers for variable tensors.
-  for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size();
-       subgraph_idx++) {
-    const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx);
-    TFLITE_DCHECK(subgraph != nullptr);
-    TF_LITE_ENSURE_STATUS(AllocateVariables(
-        subgraph, subgraph_allocations[subgraph_idx].tensors));
-  }
-
   // Plan all subgraphs and scratch buffers together.
   TF_LITE_ENSURE_STATUS(CommitStaticMemoryPlan(model, subgraph_allocations,
                                                *scratch_buffer_handles));
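The hunk above moves behavior rather than deleting it: the same per-subgraph loop reappears inside CommitStaticMemoryPlan (final hunk below), where the offline planner offsets are in scope. A minimal, self-contained C++ sketch of why the ordering matters follows; the names, the -1 sentinel standing in for kOnlinePlannedBuffer, and the byte counts are all illustrative assumptions, not TFLM code.

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed sentinel: "this tensor has no offline offset; plan it online."
  constexpr int32_t kOnlinePlannedSketch = -1;
  // Hypothetical model: three variable tensors; 0 and 2 carry offline
  // offsets, 1 does not.
  const int32_t offline_offsets[] = {0, kOnlinePlannedSketch, 256};
  const size_t var_bytes[] = {128, 64, 128};

  size_t old_order_bytes = 0;  // allocate first, read offsets later
  size_t new_order_bytes = 0;  // read offsets first, then allocate
  for (int i = 0; i < 3; ++i) {
    old_order_bytes += var_bytes[i];  // old code allocated unconditionally
    if (offline_offsets[i] == kOnlinePlannedSketch) {
      new_order_bytes += var_bytes[i];  // new code skips planned tensors
    }
  }
  std::printf("persistent bytes, old order: %zu, new order: %zu\n",
              old_order_bytes, new_order_bytes);
  return 0;
}

With the old call order, the two offline-planned tensors receive persistent buffers even though the plan already reserves arena space for them; consulting the offsets first avoids that.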
@@ -754,23 +745,27 @@ TfLiteStatus MicroAllocator::AllocateTfLiteEvalTensors(
   return kTfLiteOk;
 }
 
-TfLiteStatus MicroAllocator::AllocateVariables(const SubGraph* subgraph,
-                                               TfLiteEvalTensor* eval_tensors) {
+TfLiteStatus MicroAllocator::AllocateVariables(
+    const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors,
+    const int32_t* offline_planner_offsets) {
   for (size_t i = 0; i < subgraph->tensors()->size(); ++i) {
     auto* tensor = subgraph->tensors()->Get(i);
     if (tensor->is_variable()) {
-      size_t buffer_size;
-      TF_LITE_ENSURE_STATUS(
-          TfLiteEvalTensorByteLength(&eval_tensors[i], &buffer_size));
+      if (offline_planner_offsets == nullptr ||
+          offline_planner_offsets[i] == kOnlinePlannedBuffer) {
+        size_t buffer_size;
+        TF_LITE_ENSURE_STATUS(
+            TfLiteEvalTensorByteLength(&eval_tensors[i], &buffer_size));
 
-      eval_tensors[i].data.data =
-          persistent_buffer_allocator_->AllocatePersistentBuffer(
-              buffer_size, MicroArenaBufferAlignment());
+        eval_tensors[i].data.data =
+            persistent_buffer_allocator_->AllocatePersistentBuffer(
+                buffer_size, MicroArenaBufferAlignment());
 
-      if (eval_tensors[i].data.data == nullptr) {
-        MicroPrintf("Failed to allocate variable tensor of size %d",
-                    buffer_size);
-        return kTfLiteError;
+        if (eval_tensors[i].data.data == nullptr) {
+          MicroPrintf("Failed to allocate variable tensor of size %d",
+                      buffer_size);
+          return kTfLiteError;
+        }
       }
     }
   }
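Taken on its own, the new predicate reads: fall back to a persistent-arena allocation only when the model has no offline plan at all (offline_planner_offsets == nullptr) or when this tensor's entry is the online-planned sentinel. Below is a standalone C++ sketch of that contract, with malloc standing in for the persistent arena, a MiniTensor struct standing in for TfLiteEvalTensor, and -1 assumed as the sentinel value; none of it is the TFLM API.

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

// Assumed sentinel meaning "no offline offset; plan this buffer online".
constexpr int32_t kOnlinePlannedBufferSketch = -1;

struct MiniTensor {
  bool is_variable;
  size_t bytes;
  void* data = nullptr;
};

// Allocate backing memory only for variable tensors that the offline plan
// did not already place; a nullptr offsets array means "no offline plan",
// which reproduces the old unconditional behavior.
bool AllocateVariablesSketch(std::vector<MiniTensor>& tensors,
                             const int32_t* offline_offsets) {
  for (size_t i = 0; i < tensors.size(); ++i) {
    if (!tensors[i].is_variable) continue;
    if (offline_offsets == nullptr ||
        offline_offsets[i] == kOnlinePlannedBufferSketch) {
      tensors[i].data = std::malloc(tensors[i].bytes);  // arena stand-in
      if (tensors[i].data == nullptr) return false;
    }
    // Otherwise the tensor keeps the arena offset assigned offline.
  }
  return true;
}

int main() {
  std::vector<MiniTensor> tensors = {{true, 16}, {false, 8}, {true, 32}};
  const int32_t offsets[] = {128, kOnlinePlannedBufferSketch,
                             kOnlinePlannedBufferSketch};
  AllocateVariablesSketch(tensors, offsets);
  // Tensor 0 is offline-planned (offset 128): no new buffer. Tensor 2 has
  // the sentinel, so it gets one.
  std::printf("t0=%p t2=%p\n", tensors[0].data, tensors[2].data);
  for (MiniTensor& t : tensors) std::free(t.data);
  return 0;
}

One nit the move carries over unchanged: buffer_size is a size_t formatted with %d. On the 32-bit targets this code typically runs on the widths coincide, but a cast to int would be strictly correct.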
@@ -819,6 +814,17 @@ TfLiteStatus MicroAllocator::CommitStaticMemoryPlan(
   const int32_t* offline_planner_offsets = nullptr;
   TF_LITE_ENSURE_STATUS(
       builder.GetOfflinePlannedOffsets(&offline_planner_offsets));
+
+  // We allocate buffers for variable tensors here since the offline planner
+  // offsets are conveniently available at this point.
+  for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size();
+       subgraph_idx++) {
+    const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx);
+    TFLITE_DCHECK(subgraph != nullptr);
+    TF_LITE_ENSURE_STATUS(AllocateVariables(
+        subgraph, allocations[subgraph_idx].tensors, offline_planner_offsets));
+  }
+
   TF_LITE_ENSURE_STATUS(
       builder.InitializeAllocationInfo(offline_planner_offsets, allocations));
 
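Net effect on CommitStaticMemoryPlan: a small pipeline reordering in which the offline offsets are fetched once, variable tensors are allocated with those offsets in hand, and the allocation info is then built from the same array. A toy C++ sketch of that control flow, where every *Sketch name is a stand-in rather than the real builder API:

#include <cstdint>
#include <cstdio>

// nullptr would mean the model carries no offline memory plan.
const int32_t* GetOfflinePlannedOffsetsSketch() {
  static const int32_t offsets[] = {0, -1, 64};  // -1: assumed sentinel
  return offsets;
}

void AllocateVariablesSketch(const int32_t* offsets) {
  std::puts(offsets ? "allocate only variables without an offline offset"
                    : "allocate every variable (no offline plan)");
}

void InitializeAllocationInfoSketch(const int32_t* offsets) {
  std::printf("build allocation info from offsets at %p\n",
              static_cast<const void*>(offsets));
}

int main() {
  // Order matters: offsets first, then variable allocation, then the rest
  // of the planning pipeline, mirroring the hunk above.
  const int32_t* offsets = GetOfflinePlannedOffsetsSketch();
  AllocateVariablesSketch(offsets);
  InitializeAllocationInfoSketch(offsets);
  return 0;
}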