/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/extension/flat_tensor/serialize/serialize.h>

#include <executorch/extension/flat_tensor/serialize/scalar_type_generated.h>
#include <executorch/extension/flat_tensor/serialize/schema_generated.h>

#include <cstddef>
#include <fstream>
#include <map>
#include <ostream>
#include <string>
#include <vector>

namespace executorch {
namespace extension {
namespace flat_tensor {

namespace {

// Returns the number of padding bytes needed to round `offset` up to the
// next multiple of `alignment`.
size_t padding_required(size_t offset, size_t alignment) {
  size_t remainder = offset % alignment;
  if (remainder != 0) {
    return alignment - remainder;
  }
  return 0;
}

// Returns `input_size` rounded up to the next whole multiple of `alignment`.
size_t aligned_size(size_t input_size, size_t alignment) {
  return input_size + padding_required(input_size, alignment);
}
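
// Illustrative arithmetic for the helpers above (an informal example, not
// part of the original code): with a 16-byte alignment,
// padding_required(10, 16) == 6 and aligned_size(10, 16) == 16, while
// padding_required(32, 16) == 0 and aligned_size(32, 16) == 32.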

// Writes `num_bytes` zero bytes to `out`; used to emit padding.
void serialize_nulls(std::ostream& out, size_t num_bytes) {
  for (size_t i = 0; i < num_bytes; i++) {
    out.write("\0", 1);
  }
}

} // namespace

ET_EXPERIMENTAL runtime::Error save_ptd(
    const std::string& path,
    const std::map<std::string, exec_aten::Tensor>& tensor_map,
    const size_t tensor_alignment) {
  // The .ptd format is little-endian; bail out on big-endian hosts.
  int n = 1;
  if (*reinterpret_cast<char*>(&n) != 1) {
    return runtime::Error::NotSupported;
  }
  // Open the file in binary mode so padding and tensor bytes are written
  // verbatim (text mode would mangle them on some platforms).
  std::ofstream file;
  file.open(path, std::ios::binary);
  if (!file.is_open()) {
    return runtime::Error::AccessFailed;
  }
  runtime::Error e = save_ptd(file, tensor_map, tensor_alignment);
  file.close();
  return e;
}

ET_EXPERIMENTAL runtime::Error save_ptd(
    std::ostream& out,
    const std::map<std::string, exec_aten::Tensor>& tensor_map,
    const size_t tensor_alignment) {
  // The .ptd format is little-endian; bail out on big-endian hosts.
  int n = 1;
  if (*reinterpret_cast<char*>(&n) != 1) {
    return runtime::Error::NotSupported;
  }
  // Create the flatbuffer describing the tensors.
  flatbuffers::FlatBufferBuilder builder;

  std::vector<flatbuffers::Offset<::flat_tensor::TensorMetadata>> tensors;
  std::vector<flatbuffers::Offset<::flat_tensor::DataSegment>> buffers;

  // Serialize the tensor metadata, accumulating each tensor's offset into
  // the (single) data segment as we go.
  size_t total_segment_size = 0;
  for (const auto& [name, tensor] : tensor_map) {
    auto name_offset = builder.CreateString(name);
    auto tensor_metadata = ::flat_tensor::CreateTensorMetadata(
        builder,
        name_offset,
        static_cast<executorch_flatbuffer::ScalarType>(tensor.scalar_type()),
        builder.CreateVector(tensor.sizes().data(), tensor.sizes().size()),
        builder.CreateVector(
            tensor.dim_order().data(), tensor.dim_order().size()),
        0, // segment index
        total_segment_size); // offset into the segment
    tensors.push_back(tensor_metadata);
    // Precalculate the size of the data blob, padding each tensor up to
    // `tensor_alignment`.
    total_segment_size += aligned_size(tensor.nbytes(), tensor_alignment);
  }
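
  // A worked example of the offset bookkeeping above (illustrative only):
  // with tensor_alignment == 16 and two tensors of 10 and 20 bytes, the
  // first is recorded at offset 0 and padded to 16 bytes, the second lands
  // at offset 16 and is padded to 32 bytes, so total_segment_size ends at 48.
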
  // All tensor data lives in a single data segment.
  buffers.push_back(
      ::flat_tensor::CreateDataSegment(builder, 0, total_segment_size));

  auto flat_tensor = ::flat_tensor::CreateFlatTensor(
      builder,
      internal::VERSION,
      tensor_alignment,
      builder.CreateVector(tensors),
      builder.CreateVector(buffers));
  builder.Finish(flat_tensor); // The flatbuffer is complete at this point.

  // Calculate padded sizes for the header and the flatbuffer so that the
  // segment data starts on a `tensor_alignment` boundary.
  auto padded_flatbuffer_size =
      aligned_size(builder.GetSize(), tensor_alignment);
  auto padded_header_size =
      aligned_size(internal::HEADER_EXPECTED_LENGTH, tensor_alignment);

  // Serialize the header: magic, header length, then the four offset/size
  // fields.
  out.write(internal::HEADER_MAGIC, sizeof(internal::HEADER_MAGIC));
  out.write(
      reinterpret_cast<const char*>(&internal::HEADER_EXPECTED_LENGTH),
      sizeof(internal::HEADER_EXPECTED_LENGTH));

  internal::FlatTensorHeader header = {
      padded_header_size, // offset to the flatbuffer
      builder.GetSize(), // flatbuffer size
      padded_header_size + padded_flatbuffer_size, // offset to segment data
      total_segment_size // segment data size
  };
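
  // For reference, the resulting on-disk layout (a sketch inferred from the
  // writes in this function; field widths depend on the FlatTensorHeader
  // definition):
  //
  //   [HEADER_MAGIC][HEADER_EXPECTED_LENGTH]
  //   [flatbuffer_offset][flatbuffer_size]
  //   [segment_base_offset][segment_data_size]
  //   [padding to tensor_alignment]
  //   [flatbuffer][padding to tensor_alignment]
  //   [tensor 0 bytes][padding][tensor 1 bytes][padding]...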

  // Write the header fields one at a time rather than dumping the struct,
  // so that compiler-inserted struct padding never leaks into the file.
  out.write(
      reinterpret_cast<const char*>(&header.flatbuffer_offset),
      sizeof(header.flatbuffer_offset));
  out.write(
      reinterpret_cast<const char*>(&header.flatbuffer_size),
      sizeof(header.flatbuffer_size));
  out.write(
      reinterpret_cast<const char*>(&header.segment_base_offset),
      sizeof(header.segment_base_offset));
  out.write(
      reinterpret_cast<const char*>(&header.segment_data_size),
      sizeof(header.segment_data_size));

  // Pad the header out to `tensor_alignment`.
  serialize_nulls(
      out,
      padding_required(internal::HEADER_EXPECTED_LENGTH, tensor_alignment));

  // Serialize the flatbuffer, then pad it out to `tensor_alignment`.
  out.write(
      reinterpret_cast<const char*>(builder.GetBufferPointer()),
      builder.GetSize());
  serialize_nulls(out, padding_required(builder.GetSize(), tensor_alignment));

  // Serialize the segment: each tensor's bytes followed by its padding.
  // std::map iterates in sorted key order, so this loop visits the tensors
  // in the same order as the metadata loop above and the precomputed
  // offsets line up with the data written here.
  for (const auto& [name, tensor] : tensor_map) {
    out.write(
        reinterpret_cast<const char*>(tensor.data_ptr()), tensor.nbytes());
    serialize_nulls(out, padding_required(tensor.nbytes(), tensor_alignment));
  }
  return runtime::Error::Ok;
}

} // namespace flat_tensor
} // namespace extension
} // namespace executorch
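
// A minimal usage sketch (illustrative only; assumes `w` is an existing
// exec_aten::Tensor and that 16 bytes is an acceptable tensor alignment):
//
//   std::map<std::string, exec_aten::Tensor> tensor_map = {{"fc.weight", w}};
//   auto err = executorch::extension::flat_tensor::save_ptd(
//       "model.ptd", tensor_map, 16);
//   if (err != executorch::runtime::Error::Ok) {
//     // handle the failure
//   }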