Skip to content

Commit fc6046c

Browse files
JacobSzwejbka authored and facebook-github-bot committed
add cpp serializer for flat tensor (ptd) (#7841)
add cpp serializer for flat tensor (ptd) (#7841)
Summary: Leverage the flatbuffer builder APIs to generate a .ptd on device. This will be used by training for checkpointing. No other use cases for generating a .ptd on device exist right now, so I didn't try to make this more easily extensible — e.g. by introducing a C++ equivalent of a cord, or by anticipating how this might integrate with delegates. Later, if we add support for delegates owning the weights under ET training, we can revisit this. Differential Revision: D67992901
1 parent 1f1a96f commit fc6046c

File tree

7 files changed

+389
-1
lines changed

7 files changed

+389
-1
lines changed

extension/flat_tensor/serialize/flat_tensor_header.h

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,21 @@ struct FlatTensorHeader {
3838
// @lint-ignore CLANGTIDY facebook-hte-CArray
3939
static constexpr char kMagic[kMagicSize] = {'F', 'H', '0', '1'};
4040

/// The expected length of the header, in bytes: the 4-byte magic, the
/// 4-byte header length, and four 8-byte fields.
static constexpr uint32_t kHeaderExpectedLength =
    4 // Header magic
    + 4 // Header length
    + 8 // Flatbuffer offset
    + 8 // Flatbuffer data size
    + 8 // Segment base offset
    + 8; // Data size
4156
/**
4257
* Look for and parse a FlatTensorHeader in the provided data.
4358
*
Lines changed: 176 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,176 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#include <executorch/extension/flat_tensor/serialize/serialize.h>
10+
11+
#include <executorch/extension/flat_tensor/serialize/flat_tensor_header.h>
12+
#include <executorch/extension/flat_tensor/serialize/scalar_type_generated.h>
13+
#include <executorch/extension/flat_tensor/serialize/schema_generated.h>
14+
15+
#include <fstream>
16+
#include <string>
17+
18+
namespace executorch {
19+
namespace extension {
20+
namespace flat_tensor {
21+
22+
namespace {
23+
// Number of pad bytes needed to round `offset` up to a multiple of
// `alignment`; zero when `offset` is already aligned.
size_t padding_required(size_t offset, size_t alignment) {
  const size_t remainder = offset % alignment;
  return remainder == 0 ? 0 : alignment - remainder;
}
31+
32+
// Rounds `input_size` up to the next whole multiple of `alignment`.
size_t aligned_size(size_t input_size, size_t alignment) {
  return ((input_size + alignment - 1) / alignment) * alignment;
}
36+
37+
// Writes `num_bytes` zero bytes to `out`, one byte at a time.
void write_nulls(std::ostream& out, size_t num_bytes) {
  while (num_bytes > 0) {
    out.put('\0');
    --num_bytes;
  }
}
42+
} // namespace
43+
44+
runtime::Error save_ptd(
    const std::string& path,
    const std::map<std::string, exec_aten::Tensor>& tensor_map,
    const size_t tensor_alignment) {
  // Open in binary mode: .ptd is a binary format, and on platforms that
  // distinguish text and binary streams (Windows) the default text mode
  // would translate newline bytes and corrupt the payload.
  std::ofstream file(path, std::ios::binary);
  if (!file.is_open()) {
    // Previously an open failure was silently ignored and the write to a
    // failed stream reported success.
    return runtime::Error::AccessFailed;
  }
  const runtime::Error e = save_ptd(file, tensor_map, tensor_alignment);
  file.close();
  return e;
}
55+
56+
runtime::Error save_ptd(
    std::ostream& out,
    const std::map<std::string, exec_aten::Tensor>& tensor_map,
    const size_t tensor_alignment) {
  // Assert the system is little endian. Since we are sending the data over
  // the wire, we need to ensure that the data is always in the same format.
  // For now we only support little endian.
  const int endian_probe = 1;
  if (*reinterpret_cast<const char*>(&endian_probe) != 1) {
    ET_LOG(Error, "Cannot save_ptd on big endian system");
    return runtime::Error::NotSupported;
  }
  // Create flatbuffer describing every tensor plus the single data segment.
  flatbuffers::FlatBufferBuilder builder;

  std::vector<flatbuffers::Offset<::flat_tensor_flatbuffer::TensorMetadata>>
      tensors;
  std::vector<flatbuffers::Offset<::flat_tensor_flatbuffer::DataSegment>>
      buffers;

  // Write the tensor metadata, accumulating each tensor's byte offset into
  // the one segment as we go.
  size_t total_segment_size = 0;
  size_t remaining = tensor_map.size();
  for (const auto& [name, tensor] : tensor_map) {
    auto name_offset = builder.CreateString(name);
    // Write the tensor metadata.
    auto tensor_metadata = ::flat_tensor_flatbuffer::CreateTensorMetadata(
        builder,
        name_offset,
        static_cast<executorch_flatbuffer::ScalarType>(tensor.scalar_type()),
        builder.CreateVector(tensor.sizes().data(), tensor.sizes().size()),
        builder.CreateVector(
            tensor.dim_order().data(), tensor.dim_order().size()),
        0, // Segment index: every tensor lives in the single segment below.
        total_segment_size); // Byte offset of this tensor within the segment.

    tensors.push_back(tensor_metadata);
    // Don't pad the last entry; the segment ends at the real data size.
    if (remaining != 1) {
      // Precalculate the size of the data blob.
      total_segment_size += aligned_size(tensor.nbytes(), tensor_alignment);
    } else {
      total_segment_size += tensor.nbytes();
    }
    remaining--;
  }
  // Only have one segment; all tensor data is stored back to back in it.
  buffers.push_back(::flat_tensor_flatbuffer::CreateDataSegment(
      builder, 0, total_segment_size));

  auto flat_tensor = CreateFlatTensor(
      builder,
      kSchemaVersion,
      tensor_alignment,
      builder.CreateVector(tensors),
      builder.CreateVector(buffers));
  builder.Finish(flat_tensor); // Our flatbuffer is created now.

  // On-disk layout: [header][pad][flatbuffer][pad][segment data].
  const auto padded_flatbuffer_size =
      aligned_size(builder.GetSize(), tensor_alignment);
  const auto padded_header_size =
      aligned_size(FlatTensorHeader::kHeaderExpectedLength, tensor_alignment);

  // Write header: magic, then the expected header length.
  out.write(FlatTensorHeader::kMagic, sizeof(FlatTensorHeader::kMagic));
  out.write(
      reinterpret_cast<const char*>(&FlatTensorHeader::kHeaderExpectedLength),
      sizeof(FlatTensorHeader::kHeaderExpectedLength));

  FlatTensorHeader header = {
      padded_header_size, // Offset to flatbuffer
      builder.GetSize(), // flatbuffer size
      padded_header_size + padded_flatbuffer_size, // offset to segments
      total_segment_size // segment data size
  };

  // Fields are written one by one so that struct padding never leaks into
  // the serialized bytes.
  out.write(
      reinterpret_cast<const char*>(&header.flatbuffer_offset),
      sizeof(header.flatbuffer_offset));
  out.write(
      reinterpret_cast<const char*>(&header.flatbuffer_size),
      sizeof(header.flatbuffer_size));
  out.write(
      reinterpret_cast<const char*>(&header.segment_base_offset),
      sizeof(header.segment_base_offset));
  out.write(
      reinterpret_cast<const char*>(&header.segment_data_size),
      sizeof(header.segment_data_size));

  // Write header padding
  write_nulls(
      out,
      padding_required(
          FlatTensorHeader::kHeaderExpectedLength, tensor_alignment));

  // Write flatbuffer
  out.write(
      reinterpret_cast<const char*>(builder.GetBufferPointer()),
      builder.GetSize());

  // Write flatbuffer padding
  write_nulls(out, padding_required(builder.GetSize(), tensor_alignment));

  // Write segment: tensor data, each entry but the last padded to alignment.
  remaining = tensor_map.size();
  for (const auto& [name, tensor] : tensor_map) {
    out.write(
        reinterpret_cast<const char*>(tensor.data_ptr()), tensor.nbytes());
    // Don't pad last entry.
    if (remaining != 1) {
      write_nulls(out, padding_required(tensor.nbytes(), tensor_alignment));
    }
    remaining--;
  }
  // Surface stream failures (full disk, broken pipe) instead of silently
  // reporting success after a failed write.
  return out.good() ? runtime::Error::Ok : runtime::Error::Internal;
}
173+
174+
} // namespace flat_tensor
175+
} // namespace extension
176+
} // namespace executorch
Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,53 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#pragma once

#include <executorch/runtime/core/exec_aten/exec_aten.h>

#include <cstdint>
#include <map>
#include <ostream>
#include <string>

namespace executorch {
namespace extension {
namespace flat_tensor {

/**
 * Schema version of the .ptd format. Should be kept in sync with the
 * `version` field written by serialize.py.
 */
constexpr uint32_t kSchemaVersion = 0;

/**
 * Creates a .ptd from the given tensor map and writes it to a file.
 *
 * @param path The file path to save the .ptd to.
 * @param tensor_map The map of tensor names to tensors to save.
 * @param tensor_alignment The bytes tensor data should be aligned to.
 * @return An error if the data could not be saved. Error::Ok for success.
 */
ET_EXPERIMENTAL runtime::Error save_ptd(
    const std::string& path,
    const std::map<std::string, exec_aten::Tensor>& tensor_map,
    const size_t tensor_alignment);

/**
 * Creates a .ptd from the given tensor map and writes it to a stream.
 *
 * @param out The stream to write the .ptd data to.
 * @param tensor_map The map of tensor names to tensors to save.
 * @param tensor_alignment The bytes tensor data should be aligned to.
 * @return An error if the data could not be saved. Error::Ok for success.
 */
ET_EXPERIMENTAL runtime::Error save_ptd(
    std::ostream& out,
    const std::map<std::string, exec_aten::Tensor>& tensor_map,
    const size_t tensor_alignment);

} // namespace flat_tensor
} // namespace extension
} // namespace executorch

extension/flat_tensor/serialize/serialize.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -266,7 +266,7 @@ def serialize(
266266
# Create FlatTensor, which describes of the contents of the file and
267267
# points to all the data segments. It will be serialized to flatbuffer.
268268
flat_tensor = FlatTensor(
269-
version=0,
269+
version=0, # Keep in sync with c++ version number in serialize.h
270270
tensor_alignment=self.config.tensor_alignment,
271271
tensors=flat_tensor_metadata,
272272
segments=[DataSegment(offset=0, size=len(flat_tensor_data))],

extension/flat_tensor/serialize/targets.bzl

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,3 +42,18 @@ def define_common_targets():
4242
visibility = ["//executorch/..."],
4343
exported_deps = ["//executorch/runtime/core:core"],
4444
)
45+
46+
runtime.cxx_library(
    name = "serialize_cpp",
    srcs = ["serialize.cpp"],
    exported_headers = ["serialize.h"],
    deps = [
        ":flat_tensor_header",
        ":generated_headers",
        "//executorch/runtime/core/exec_aten:lib",
    ],
    exported_external_deps = ["flatbuffers-api"],
    visibility = ["//executorch/..."],
)

extension/flat_tensor/test/targets.bzl

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,3 +16,16 @@ def define_common_targets():
1616
"//executorch/extension/flat_tensor/serialize:flat_tensor_header",
1717
],
1818
)
19+
20+
runtime.cxx_test(
    name = "serialize_cpp_test",
    srcs = ["test_serialize.cpp"],
    deps = [
        "//executorch/extension/flat_tensor/serialize:flat_tensor_header",
        "//executorch/extension/flat_tensor/serialize:generated_headers",
        "//executorch/extension/flat_tensor/serialize:serialize_cpp",
        "//executorch/extension/tensor:tensor",
    ],
)

0 commit comments

Comments (0)