Skip to content

Commit a2012d9

Browse files
JacobSzwejbka authored and facebook-github-bot committed
add cpp serializer for flat tensor (ptd) (#7841)
Summary: Leverage the flatbuffer builder apis to generate a .ptd on device. This will be used by training for checkpointing. No other use cases really exist for generating a .ptd on device right now, so I didnt worry about making this more easily extensible by coming up with a cpp equivalent of a cord or by trying to think ahead of how this might integrate with delegates. Later if we add support for delegates owning the weights under ET training we can revisit this. Differential Revision: D67992901
1 parent 5ee5f2f commit a2012d9

File tree

6 files changed

+369
-0
lines changed

6 files changed

+369
-0
lines changed

extension/flat_tensor/serialize/flat_tensor_header.h

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,21 @@ struct FlatTensorHeader {
3838
// @lint-ignore CLANGTIDY facebook-hte-CArray
3939
static constexpr char kMagic[kMagicSize] = {'F', 'H', '0', '1'};
4040

41+
/// The expected length of the header, in bytes (4 + 4 + 8 + 8 + 8 + 8 = 40).
/// NOTE(review): presumably must stay in sync with the Python serializer's
/// header layout — confirm against the exporter.
static constexpr uint32_t kHeaderExpectedLength =
    // Header magic
    4
    // Header length
    + 4
    // Flatbuffer offset
    + 8
    // Flatbuffer data size
    + 8
    // Segment base offset
    + 8
    // Data size
    + 8;
55+
4156
/**
4257
* Look for and parse a FlatTensorHeader in the provided data.
4358
*
Lines changed: 160 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,160 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#include <executorch/extension/flat_tensor/serialize/flat_tensor_header.h>
10+
#include <executorch/extension/flat_tensor/serialize/scalar_type_generated.h>
11+
#include <executorch/extension/flat_tensor/serialize/schema_generated.h>
12+
#include <executorch/extension/flat_tensor/serialize/serialize.h>
13+
14+
#include <fstream>
15+
#include <string>
16+
17+
namespace executorch {
18+
namespace extension {
19+
namespace flat_tensor {
20+
21+
namespace {
// Returns the number of padding bytes required to bring `offset` up to the
// next whole multiple of `alignment`. Returns 0 if already aligned.
// Precondition: alignment != 0 (otherwise the modulo below divides by zero).
size_t padding_required(size_t offset, size_t alignment) {
  size_t remainder = offset % alignment;
  if (remainder != 0) {
    return alignment - remainder;
  }
  return 0;
}

// Returns `input_size` padded up to the next whole multiple of `alignment`.
size_t aligned_size(size_t input_size, size_t alignment) {
  return input_size + padding_required(input_size, alignment);
}

// Writes `num_bytes` zero bytes to `out`, used to pad sections of the output
// to the requested alignment. Writes in fixed-size chunks rather than one
// byte at a time to avoid a stream-write call per byte.
void write_nulls(std::ostream& out, size_t num_bytes) {
  constexpr size_t kChunkSize = 64;
  const char zeros[kChunkSize] = {};
  while (num_bytes > 0) {
    const size_t chunk = num_bytes < kChunkSize ? num_bytes : kChunkSize;
    out.write(zeros, chunk);
    num_bytes -= chunk;
  }
}
} // namespace
42+
43+
/**
 * Creates a .ptd file at `path` from the given tensor map.
 *
 * @param path The file path to save the .ptd to.
 * @param tensor_map The map of tensor names to tensors to save.
 * @param tensor_alignment The bytes tensor data should be aligned to.
 * @return An error if the data could not be saved. Error::Ok for success.
 */
runtime::Error save_ptd(
    const std::string& path,
    const std::map<std::string, exec_aten::Tensor>& tensor_map,
    const size_t tensor_alignment) {
  // Create File. Open in binary mode: .ptd is a binary format, and text mode
  // would corrupt the data on platforms that translate line endings.
  std::ofstream file;
  file.open(path, std::ios::binary | std::ios::trunc);
  if (!file.is_open()) {
    ET_LOG(Error, "Failed to open %s for writing", path.c_str());
    return runtime::Error::AccessFailed;
  }
  runtime::Error e = save_ptd(file, tensor_map, tensor_alignment);
  file.close();
  return e;
}
54+
55+
/**
 * Serializes the tensor map to `out` in .ptd format:
 * [header | flatbuffer metadata | raw tensor data segment], with the
 * flatbuffer and each tensor's data padded out to `tensor_alignment` bytes.
 *
 * @param out The stream to write the .ptd data to.
 * @param tensor_map The map of tensor names to tensors to save.
 * @param tensor_alignment The bytes tensor data should be aligned to.
 * @return An error if the data could not be saved. Error::Ok for success.
 */
runtime::Error save_ptd(
    std::ostream& out,
    const std::map<std::string, exec_aten::Tensor>& tensor_map,
    const size_t tensor_alignment) {
  // Assert the system is little endian. Since we are sending the data over
  // the wire, we need to ensure that the data is always in the same format.
  // For now we only support little endian.
  int n = 1;
  if (*reinterpret_cast<const char*>(&n) != 1) {
    ET_LOG(Error, "Cannot save_ptd on big endian system");
    return runtime::Error::NotSupported;
  }
  // Create flatbuffer
  flatbuffers::FlatBufferBuilder builder;

  std::vector<flatbuffers::Offset<::flat_tensor::TensorMetadata>> tensors;
  std::vector<flatbuffers::Offset<::flat_tensor::DataSegment>> buffers;

  // Write the tensors. `total_segment_size` doubles as the running offset of
  // the current tensor inside the single data segment.
  size_t total_segment_size = 0;
  for (const auto& [name, tensor] : tensor_map) {
    auto name_offset = builder.CreateString(name);
    // Write the tensor metadata.
    auto tensor_metadata = ::flat_tensor::CreateTensorMetadata(
        builder,
        name_offset,
        static_cast<executorch_flatbuffer::ScalarType>(tensor.scalar_type()),
        builder.CreateVector(tensor.sizes().data(), tensor.sizes().size()),
        builder.CreateVector(
            tensor.dim_order().data(), tensor.dim_order().size()),
        0, // segment index
        total_segment_size);
    tensors.push_back(tensor_metadata);
    // Precalculate the size of the data blob.
    total_segment_size += aligned_size(tensor.nbytes(), tensor_alignment);
  }
  // Only have one segment
  buffers.push_back(
      ::flat_tensor::CreateDataSegment(builder, 0, total_segment_size));

  auto flat_tensor = CreateFlatTensor(
      builder,
      kSchemaVersion,
      tensor_alignment,
      builder.CreateVector(tensors),
      builder.CreateVector(buffers));
  builder.Finish(flat_tensor); // Our flatbuffer is created now.

  // Calculate flatbuffer padding.
  auto padded_flatbuffer_size =
      aligned_size(builder.GetSize(), tensor_alignment);
  auto padded_header_size =
      aligned_size(FlatTensorHeader::kHeaderExpectedLength, tensor_alignment);

  // Write header
  out.write(FlatTensorHeader::kMagic, sizeof(FlatTensorHeader::kMagic));
  out.write(
      reinterpret_cast<const char*>(&FlatTensorHeader::kHeaderExpectedLength),
      sizeof(FlatTensorHeader::kHeaderExpectedLength));

  FlatTensorHeader header = {
      padded_header_size, // Offset to flatbuffer
      builder.GetSize(), // flatbuffer size
      padded_header_size + padded_flatbuffer_size, // offset to segments
      total_segment_size // segment data size
  };

  // Write the header fields one at a time (rather than dumping the struct)
  // so the on-disk layout does not depend on struct padding.
  out.write(
      reinterpret_cast<const char*>(&header.flatbuffer_offset),
      sizeof(header.flatbuffer_offset));
  out.write(
      reinterpret_cast<const char*>(&header.flatbuffer_size),
      sizeof(header.flatbuffer_size));
  out.write(
      reinterpret_cast<const char*>(&header.segment_base_offset),
      sizeof(header.segment_base_offset));
  out.write(
      reinterpret_cast<const char*>(&header.segment_data_size),
      sizeof(header.segment_data_size));

  // Write header padding
  write_nulls(
      out,
      padding_required(
          FlatTensorHeader::kHeaderExpectedLength, tensor_alignment));

  // Write flatbuffer
  out.write(
      reinterpret_cast<const char*>(builder.GetBufferPointer()),
      builder.GetSize());

  // Write flatbuffer padding
  write_nulls(out, padding_required(builder.GetSize(), tensor_alignment));

  // Write segment: buffers + tensor padding
  for (const auto& [name, tensor] : tensor_map) {
    out.write(
        reinterpret_cast<const char*>(tensor.data_ptr()), tensor.nbytes());
    write_nulls(out, padding_required(tensor.nbytes(), tensor_alignment));
  }
  return runtime::Error::Ok;
}
157+
158+
} // namespace flat_tensor
159+
} // namespace extension
160+
} // namespace executorch
Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#pragma once
10+
11+
#include <executorch/runtime/core/exec_aten/exec_aten.h>
12+
13+
#include <map>
14+
#include <string>
15+
16+
namespace executorch {
namespace extension {
namespace flat_tensor {

/// Schema version of the .ptd format. Should be kept in sync with Python.
constexpr uint32_t kSchemaVersion = 0;

/**
 * Creates a .ptd from the given tensor map.
 *
 * @param path The file path to save the .ptd to.
 * @param tensor_map The map of tensor names to tensors to save.
 * @param tensor_alignment The bytes tensor data should be aligned to. Must be
 *     nonzero (the serializer computes `offset % tensor_alignment`).
 *     NOTE(review): presumably also expected to be a power of two — confirm.
 * @return An error if the data could not be saved. Error::Ok for success.
 */
ET_EXPERIMENTAL runtime::Error save_ptd(
    const std::string& path,
    const std::map<std::string, exec_aten::Tensor>& tensor_map,
    const size_t tensor_alignment);

/**
 * Creates a .ptd from the given tensor map, writing to an arbitrary stream.
 *
 * @param out The stream to write the .ptd data to.
 * @param tensor_map The map of tensor names to tensors to save.
 * @param tensor_alignment The bytes tensor data should be aligned to. Must be
 *     nonzero (the serializer computes `offset % tensor_alignment`).
 *     NOTE(review): presumably also expected to be a power of two — confirm.
 * @return An error if the data could not be saved. Error::Ok for success.
 */
ET_EXPERIMENTAL runtime::Error save_ptd(
    std::ostream& out,
    const std::map<std::string, exec_aten::Tensor>& tensor_map,
    const size_t tensor_alignment);

} // namespace flat_tensor
} // namespace extension
} // namespace executorch

extension/flat_tensor/serialize/targets.bzl

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,3 +42,18 @@ def define_common_targets():
4242
visibility = ["//executorch/..."],
4343
exported_deps = ["//executorch/runtime/core:core"],
4444
)
45+
46+
runtime.cxx_library(
47+
name = "serialize_cpp",
48+
srcs = ["serialize.cpp"],
49+
deps = [
50+
":flat_tensor_header",
51+
":generated_headers",
52+
"//executorch/runtime/core/exec_aten:lib",
53+
],
54+
exported_headers = ["serialize.h"],
55+
visibility = [
56+
"//executorch/...",
57+
],
58+
exported_external_deps = ["flatbuffers-api"],
59+
)

extension/flat_tensor/test/targets.bzl

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,3 +16,16 @@ def define_common_targets():
1616
"//executorch/extension/flat_tensor/serialize:flat_tensor_header",
1717
],
1818
)
19+
20+
runtime.cxx_test(
21+
name = "serialize_cpp_test",
22+
srcs = [
23+
"test_serialize.cpp",
24+
],
25+
deps = [
26+
"//executorch/extension/flat_tensor/serialize:serialize_cpp",
27+
"//executorch/extension/flat_tensor/serialize:generated_headers",
28+
"//executorch/extension/flat_tensor/serialize:flat_tensor_header",
29+
"//executorch/extension/tensor:tensor",
30+
],
31+
)
Lines changed: 115 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,115 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#include <executorch/extension/flat_tensor/serialize/flat_tensor_header.h>
#include <executorch/extension/flat_tensor/serialize/scalar_type_generated.h>
#include <executorch/extension/flat_tensor/serialize/schema_generated.h>
#include <executorch/extension/flat_tensor/serialize/serialize.h>

#include <gtest/gtest.h>

#include <executorch/extension/tensor/tensor_ptr.h>
#include <executorch/runtime/core/result.h>
#include <executorch/runtime/platform/runtime.h>

#include <cstdint>
#include <cstring>
#include <map>
#include <sstream>
22+
23+
using namespace ::testing;
24+
using executorch::runtime::Error;
25+
using executorch::runtime::Result;
26+
27+
// Test fixture for the .ptd (flat tensor) C++ serializer.
class FlatTensorSerializeTest : public ::testing::Test {
 protected:
  void SetUp() override {
    // Since these tests cause ET_LOG to be called, the PAL must be initialized
    // first.
    executorch::runtime::runtime_init();
  }
};
35+
36+
// Serializes a two-tensor map and checks the header, flatbuffer metadata, and
// data segment byte-for-byte against the expected .ptd layout.
TEST_F(FlatTensorSerializeTest, ValidFlatTensorSerialized) {
  const size_t kTensorAlignment = 16;
  std::map<std::string, exec_aten::Tensor> flat_tensor_map;

  float linear_weight = 3.14f;
  auto weight = executorch::extension::make_tensor_ptr({1}, &linear_weight);

  float linear_bias = 2.0f;
  auto bias = executorch::extension::make_tensor_ptr({1}, &linear_bias);

  flat_tensor_map.insert({"linear.weight", *weight.get()});
  flat_tensor_map.insert({"linear.bias", *bias.get()});

  std::ostringstream buf;
  auto err = executorch::extension::flat_tensor::save_ptd(
      buf, flat_tensor_map, kTensorAlignment);
  ASSERT_EQ(err, Error::Ok);
  auto x = buf.str();
  const char* byte_buffer = x.c_str();

  // Read little-endian scalars out of the buffer with memcpy. Dereferencing a
  // reinterpret_cast'ed pointer (e.g. `*(uint32_t*)(byte_buffer + 4)`) is
  // undefined behavior: the offsets are not guaranteed to be suitably aligned
  // and the access violates strict aliasing.
  auto read_u32 = [byte_buffer](size_t offset) {
    uint32_t value = 0;
    std::memcpy(&value, byte_buffer + offset, sizeof(value));
    return value;
  };
  auto read_u64 = [byte_buffer](size_t offset) {
    uint64_t value = 0;
    std::memcpy(&value, byte_buffer + offset, sizeof(value));
    return value;
  };
  auto read_f32 = [](const char* p) {
    float value = 0;
    std::memcpy(&value, p, sizeof(value));
    return value;
  };

  // Check Magic
  ASSERT_EQ(byte_buffer[0], 'F');
  ASSERT_EQ(byte_buffer[1], 'H');
  ASSERT_EQ(byte_buffer[2], '0');
  ASSERT_EQ(byte_buffer[3], '1');

  // Check Header
  ASSERT_EQ(
      read_u32(4),
      executorch::extension::FlatTensorHeader::kHeaderExpectedLength);
  // Header is 40 bytes + 8 bytes of padding today, and then the flatbuffer
  // starts.
  ASSERT_EQ(read_u64(8), 48);
  // This is fragile, and depends on the schema, the builder, and the padding
  // needed.
  ASSERT_EQ(read_u64(16), 224);
  // Segment offset, depends on the padded header and flatbuffer sizes.
  const uint64_t segment_offset = 48 + 224;
  ASSERT_EQ(read_u64(24), segment_offset);
  // Segment total size, 8 bytes of data (2 floats), 24 bytes of padding.
  ASSERT_EQ(read_u64(32), 32);

  // Check Flatbuffer
  auto flat_tensor = ::flat_tensor::GetFlatTensor(byte_buffer + 48);

  ASSERT_EQ(
      flat_tensor->version(),
      executorch::extension::flat_tensor::kSchemaVersion);
  ASSERT_EQ(flat_tensor->tensor_alignment(), 16);
  ASSERT_EQ(flat_tensor->tensors()->size(), 2);
  ASSERT_EQ(flat_tensor->segments()->size(), 1);

  // std::map iterates in key order, so "linear.bias" is serialized first.
  auto tensor0 = flat_tensor->tensors()->Get(0);
  ASSERT_EQ(strcmp(tensor0->fully_qualified_name()->c_str(), "linear.bias"), 0);
  ASSERT_EQ(tensor0->scalar_type(), executorch_flatbuffer::ScalarType::FLOAT);
  ASSERT_EQ(tensor0->sizes()->size(), 1);
  ASSERT_EQ(tensor0->segment_index(), 0);
  ASSERT_EQ(tensor0->offset(), 0);

  auto tensor1 = flat_tensor->tensors()->Get(1);
  ASSERT_EQ(
      strcmp(tensor1->fully_qualified_name()->c_str(), "linear.weight"), 0);
  ASSERT_EQ(tensor1->scalar_type(), executorch_flatbuffer::ScalarType::FLOAT);
  ASSERT_EQ(tensor1->sizes()->size(), 1);
  ASSERT_EQ(tensor1->segment_index(), 0);
  ASSERT_EQ(tensor1->offset(), 16);

  // Test Segments
  auto segment = flat_tensor->segments()->Get(0);

  ASSERT_EQ(segment->offset(), 0);
  ASSERT_EQ(segment->size(), 32);
  const char* data = byte_buffer + segment_offset;
  ASSERT_EQ(read_f32(data + 0), linear_bias);
  ASSERT_EQ(read_f32(data + 16), linear_weight);
}

0 commit comments

Comments
 (0)