Skip to content

Commit 59b5e90

Browse files
larryliu0820 and facebook-github-bot
authored and committed
Enable selective build in OSS
Differential Revision: D76380110
1 parent c6c3616 commit 59b5e90

File tree

4 files changed

+320
-10
lines changed

4 files changed

+320
-10
lines changed

codegen/tools/gen_oplist.py

Lines changed: 6 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,12 @@
2121
from ..parse import strip_et_fields
2222

2323

24+
from executorch.codegen.tools.selective_build import ( # type: ignore[import-not-found]
25+
_get_io_metadata_for_program_operators,
26+
_get_program_from_buffer,
27+
_get_program_operators,
28+
_IOMetaData,
29+
)
2430
from torchgen.gen import LineLoader, parse_native_yaml_struct
2531
from torchgen.selective_build.operator import SelectiveBuildOperator
2632
from torchgen.selective_build.selector import merge_et_kernel_metadata
@@ -86,10 +92,6 @@ class KernelType(IntEnum):
8692

8793

8894
def _get_operators(model_file: str) -> List[str]:
89-
from executorch.codegen.tools.selective_build import ( # type: ignore[import-not-found]
90-
_get_program_from_buffer,
91-
_get_program_operators,
92-
)
9395

9496
print("Processing model file: ", model_file)
9597
with open(model_file, "rb") as f:
@@ -103,12 +105,6 @@ def _get_operators(model_file: str) -> List[str]:
103105

104106
def _get_kernel_metadata_for_model(model_file: str) -> Dict[str, List[str]]:
105107

106-
from executorch.codegen.tools.selective_build import ( # type: ignore[import-not-found]
107-
_get_io_metadata_for_program_operators,
108-
_get_program_from_buffer,
109-
_IOMetaData,
110-
)
111-
112108
with open(model_file, "rb") as f:
113109
buf = f.read()
114110

codegen/tools/selective_build.cpp

Lines changed: 269 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,269 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#include <pybind11/pybind11.h>
10+
#include <pybind11/stl.h>
11+
12+
#include <executorch/runtime/platform/assert.h>
13+
#include <executorch/schema/program_generated.h>
14+
15+
namespace py = pybind11;
16+
17+
namespace torch {
18+
namespace executor {
19+
20+
namespace {
21+
22+
// Metadata for kernel call io variables.
23+
// dtype and dim_order will exist only if corresponding variable is Tensor.
24+
struct IOMetaData {
25+
int kernel_type;
26+
int dtype;
27+
std::vector<unsigned int> dim_order;
28+
29+
// Create tensor metadata. It records tensor's dtype and dim order.
30+
explicit IOMetaData(const executorch_flatbuffer::Tensor* t)
31+
: kernel_type(
32+
static_cast<int>(executorch_flatbuffer::KernelTypes::Tensor)),
33+
dtype(static_cast<int>(t->scalar_type())) {
34+
for (size_t i = 0; i < t->dim_order()->size(); i++) {
35+
dim_order.push_back(static_cast<unsigned int>(t->dim_order()->Get(i)));
36+
}
37+
}
38+
39+
// Create metadata for non-tensor variable.
40+
explicit IOMetaData(executorch_flatbuffer::KernelTypes type)
41+
: kernel_type(static_cast<int>(type)) {
42+
ET_CHECK(
43+
type != executorch_flatbuffer::KernelTypes::Tensor &&
44+
type != executorch_flatbuffer::KernelTypes::TensorList &&
45+
type != executorch_flatbuffer::KernelTypes::OptionalTensorList);
46+
}
47+
};
48+
49+
struct KernelIOMetaDataComparsion {
50+
bool operator()(
51+
const std::vector<IOMetaData>& lhs,
52+
const std::vector<IOMetaData>& rhs) const {
53+
if (lhs.size() != rhs.size()) {
54+
return lhs.size() < rhs.size();
55+
}
56+
for (size_t i = 0; i < lhs.size(); i++) {
57+
if (lhs[i].kernel_type != rhs[i].kernel_type) {
58+
return lhs[i].kernel_type < rhs[i].kernel_type;
59+
}
60+
if (lhs[i].kernel_type !=
61+
static_cast<int>(executorch_flatbuffer::KernelTypes::Tensor)) {
62+
continue;
63+
}
64+
if (lhs[i].dtype != rhs[i].dtype) {
65+
return lhs[i].dtype < rhs[i].dtype;
66+
}
67+
if (lhs[i].dim_order != rhs[i].dim_order) {
68+
return lhs[i].dim_order < rhs[i].dim_order;
69+
}
70+
}
71+
return false;
72+
}
73+
};
74+
75+
using KernelIOMetadata = std::vector<IOMetaData>;
76+
77+
using OpIOMetaData = std::set<KernelIOMetadata, KernelIOMetaDataComparsion>;
78+
79+
std::vector<std::string> get_operators_from_execution_plan(
80+
const executorch_flatbuffer::ExecutionPlan& plan) {
81+
std::vector<std::string> op_names;
82+
for (const executorch_flatbuffer::Operator* op : *plan.operators()) {
83+
if (op->overload()->str().empty()) {
84+
op_names.push_back(op->name()->str());
85+
} else {
86+
op_names.push_back(op->name()->str() + "." + op->overload()->str());
87+
}
88+
}
89+
return op_names;
90+
}
91+
92+
std::map<std::string, OpIOMetaData>
93+
get_kernel_tensor_metadatas_from_execution_plan(
94+
const executorch_flatbuffer::ExecutionPlan* plan) {
95+
std::map<std::string, OpIOMetaData> op_io_metadata;
96+
for (const executorch_flatbuffer::Chain* chain : *plan->chains()) {
97+
for (const executorch_flatbuffer::Instruction* inst :
98+
*chain->instructions()) {
99+
if (inst->instr_args_type() ==
100+
executorch_flatbuffer::InstructionArguments::KernelCall) {
101+
const executorch_flatbuffer::KernelCall* kernel_call =
102+
inst->instr_args_as_KernelCall();
103+
const executorch_flatbuffer::Operator* op =
104+
plan->operators()->Get(kernel_call->op_index());
105+
std::string op_overload_name = op->name()->str();
106+
if (op->overload()->size()) {
107+
op_overload_name += "." + op->overload()->str();
108+
}
109+
110+
// create an empty entry if current kernel is not in the map.
111+
if (op_io_metadata.count(op_overload_name) == 0) {
112+
op_io_metadata.insert(
113+
std::make_pair(op_overload_name, OpIOMetaData()));
114+
}
115+
116+
// go through IOs of this operator and collect tensor metadatas.
117+
KernelIOMetadata kernel_io_metadata;
118+
for (int arg_id : *kernel_call->args()) {
119+
const executorch_flatbuffer::EValue* arg =
120+
plan->values()->Get(arg_id);
121+
if (arg->val_type() == executorch_flatbuffer::KernelTypes::Tensor) {
122+
kernel_io_metadata.push_back(IOMetaData(arg->val_as_Tensor()));
123+
} else if (
124+
arg->val_type() ==
125+
executorch_flatbuffer::KernelTypes::TensorList) {
126+
if (arg->val_as_TensorList()->items()->size() == 0) {
127+
// treat empty tensor list as null type since we can not get
128+
// metadata from it.
129+
kernel_io_metadata.push_back(
130+
IOMetaData(executorch_flatbuffer::KernelTypes::Null));
131+
} else {
132+
// all eles in TensorList are tensor and share same tensor
133+
// metadata. use the metadata of first element as the metadata for
134+
// whole list.
135+
const executorch_flatbuffer::Tensor* tensor_arg =
136+
plan->values()
137+
->Get(arg->val_as_TensorList()->items()->Get(0))
138+
->val_as_Tensor();
139+
kernel_io_metadata.push_back(IOMetaData(tensor_arg));
140+
}
141+
} else if (
142+
arg->val_type() ==
143+
executorch_flatbuffer::KernelTypes::OptionalTensorList) {
144+
// all eles in OptionalTensorList are either tensor or null, and all
145+
// tensors share same metadata. Use the metadata of first tensor
146+
// element as the metadata for whole list. If no tensor exists (e.g.
147+
// each element is None), treat the whole list as a single null
148+
// element.
149+
const executorch_flatbuffer::OptionalTensorList* opt_tensor_list =
150+
arg->val_as_OptionalTensorList();
151+
152+
// Find one non-null tensor
153+
bool found_tensor_element = false;
154+
for (size_t i = 0; i < opt_tensor_list->items()->size(); i++) {
155+
// We now adopt both index == -1 and actually serialize a null
156+
// type EValue to represent a null data.
157+
if (opt_tensor_list->items()->Get(i) != -1 &&
158+
plan->values()
159+
->Get(opt_tensor_list->items()->Get(i))
160+
->val_type() ==
161+
executorch_flatbuffer::KernelTypes::Tensor) {
162+
const executorch_flatbuffer::Tensor* tensor_arg =
163+
plan->values()
164+
->Get(opt_tensor_list->items()->Get(i))
165+
->val_as_Tensor();
166+
kernel_io_metadata.push_back(IOMetaData(tensor_arg));
167+
found_tensor_element = true;
168+
break;
169+
}
170+
}
171+
if (!found_tensor_element) {
172+
kernel_io_metadata.push_back(
173+
IOMetaData(executorch_flatbuffer::KernelTypes::Null));
174+
}
175+
} else {
176+
kernel_io_metadata.push_back(IOMetaData(arg->val_type()));
177+
}
178+
}
179+
op_io_metadata[op_overload_name].insert(kernel_io_metadata);
180+
}
181+
}
182+
}
183+
return op_io_metadata;
184+
}
185+
} // namespace
186+
187+
// Parse the serialized Program rooted in `buffer`.
// NOTE(review): the returned Program aliases `buffer`'s storage and is bound
// with return_value_policy::reference — the Python caller must keep the
// bytes object alive while the Program is in use; confirm callers do so.
const executorch_flatbuffer::Program* _get_program_from_buffer(
    const py::bytes& buffer) {
  const auto contents = buffer.cast<std::string_view>();
  return executorch_flatbuffer::GetProgram(contents.data());
}
192+
193+
// Return the names ("name" or "name.overload") of every operator referenced
// by any execution plan in `program`, concatenated in plan order.
py::list _get_program_operators(const executorch_flatbuffer::Program* program) {
  std::vector<std::string> all_ops;
  for (const executorch_flatbuffer::ExecutionPlan* plan :
       *program->execution_plan()) {
    std::vector<std::string> plan_ops = get_operators_from_execution_plan(*plan);
    // Inserting an empty range is a no-op, so no emptiness check is needed.
    all_ops.insert(all_ops.end(), plan_ops.begin(), plan_ops.end());
  }
  return py::cast(all_ops);
}
204+
205+
// expose IO metadatas for all operators in given program
206+
py::dict _get_io_metadata_for_program_operators(
207+
const executorch_flatbuffer::Program* program) {
208+
const auto& plans = *program->execution_plan();
209+
std::map<std::string, OpIOMetaData> program_op_io_metadata;
210+
211+
// aggregrate op metadata from different execution plan.
212+
for (const executorch_flatbuffer::ExecutionPlan* plan : plans) {
213+
std::map<std::string, OpIOMetaData> plan_op_io_metadata =
214+
get_kernel_tensor_metadatas_from_execution_plan(plan);
215+
216+
for (const auto& op_io_metadata : plan_op_io_metadata) {
217+
std::string op_name = op_io_metadata.first;
218+
if (program_op_io_metadata.count(op_name) == 0) {
219+
program_op_io_metadata.insert(std::make_pair(op_name, OpIOMetaData()));
220+
}
221+
program_op_io_metadata[op_name].insert(
222+
plan_op_io_metadata[op_name].begin(),
223+
plan_op_io_metadata[op_name].end());
224+
}
225+
}
226+
227+
// convert program_op_io_metadata to py data structure.
228+
py::dict py_program_op_io_metadata;
229+
for (const auto& op_io_meta : program_op_io_metadata) {
230+
py::set py_op_io_meta;
231+
for (const auto& io_metas : op_io_meta.second) {
232+
py::list py_io_metadatas;
233+
for (const auto& io_metadata : io_metas) {
234+
py_io_metadatas.append(io_metadata);
235+
}
236+
py_op_io_meta.add(py::tuple(py_io_metadatas));
237+
}
238+
py_program_op_io_metadata[op_io_meta.first.data()] = py_op_io_meta;
239+
}
240+
241+
return py_program_op_io_metadata;
242+
}
243+
244+
PYBIND11_MODULE(EXECUTORCH_PYTHON_MODULE_NAME, m) {
  // Opaque handle to a parsed flatbuffer Program; instances are only created
  // by _get_program_from_buffer and passed back into the functions below.
  py::class_<executorch_flatbuffer::Program>(m, "_Program");

  m.def(
      "_get_program_from_buffer",
      &_get_program_from_buffer,
      py::return_value_policy::reference,
      // The Program points into the bytes buffer passed as argument 1; keep
      // that buffer alive as long as the returned Program (index 0) is
      // referenced from Python. Without this, `reference` alone lets the
      // bytes object be collected while the Program still points into it.
      py::keep_alive<0, 1>());

  m.def(
      "_get_program_operators",
      &_get_program_operators,
      py::return_value_policy::copy);

  m.def(
      "_get_io_metadata_for_program_operators",
      &_get_io_metadata_for_program_operators,
      py::return_value_policy::copy);

  py::class_<IOMetaData>(m, "_IOMetaData")
      .def_readwrite("kernel_type", &IOMetaData::kernel_type)
      .def_readwrite("dtype", &IOMetaData::dtype)
      .def_readwrite("dim_order", &IOMetaData::dim_order);
}
267+
268+
} // namespace executor
269+
} // namespace torch

codegen/tools/selective_build.pyi

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
from typing import Any, Dict, List
8+
9+
class _Program:
    """Opaque handle to a parsed ExecuTorch program.

    Created by :func:`_get_program_from_buffer` (see selective_build.cpp);
    not constructible or inspectable from Python.
    """

    ...

class _IOMetaData:
    """Metadata for one input/output variable of a kernel call.

    dtype and dim_order are only meaningful when the variable is a tensor
    (see the IOMetaData struct in selective_build.cpp).
    """

    @property
    def kernel_type(self) -> int: ...
    @property
    def dtype(self) -> int: ...
    @property
    def dim_order(self) -> List[int]: ...

def _get_program_from_buffer(buffer: bytes) -> _Program: ...
def _get_program_operators(program: _Program) -> List[str]: ...
def _get_io_metadata_for_program_operators(
    program: _Program,
) -> Dict[str, Any]: ...

codegen/tools/targets.bzl

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -155,6 +155,28 @@ def define_common_targets(is_fbcode = False):
155155
_is_external_target = True,
156156
)
157157

158+
if not runtime.is_oss:
159+
runtime.cxx_python_extension(
160+
name = "selective_build",
161+
srcs = [
162+
"selective_build.cpp",
163+
],
164+
base_module = "executorch.codegen.tools",
165+
types = ["selective_build.pyi"],
166+
preprocessor_flags = [
167+
"-DEXECUTORCH_PYTHON_MODULE_NAME=selective_build",
168+
],
169+
deps = [
170+
"//executorch/runtime/core:core",
171+
"//executorch/schema:program",
172+
],
173+
external_deps = [
174+
"pybind11",
175+
],
176+
use_static_deps = True,
177+
visibility = ["//executorch/codegen/..."],
178+
)
179+
158180
# TODO(larryliu0820): This is a hack to only run these two on fbcode. These targets depends on exir which is only available in fbcode.
159181
if not runtime.is_oss and is_fbcode:
160182
runtime.python_binary(

0 commit comments

Comments
 (0)