Skip to content

Commit 883346c

Browse files
committed
Add skeleton code
1 parent ec35f56 commit 883346c

File tree

11 files changed

+772
-1
lines changed

11 files changed

+772
-1
lines changed

CMakeLists.txt

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -603,6 +603,12 @@ endif()
603603

604604
if(EXECUTORCH_BUILD_CORTEX_M)
  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/cortex_m)
  # Fixed typo: was "coretex_m_backend", which matches no target and would
  # silently drop the backend from the aggregate list.
  # NOTE(review): assumed the target defined under backends/cortex_m is named
  # cortex_m_backend to match the directory — confirm against that CMakeLists.
  list(APPEND _executorch_backends cortex_m_backend)
endif()

if(EXECUTORCH_BUILD_AOTI)
  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/aoti)
  list(APPEND _executorch_backends aoti_backend)
endif()
607613

608614
if(EXECUTORCH_BUILD_DEVTOOLS)

backends/aoti/CMakeLists.txt

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#
# Build AOTI backend for runtime.
#
# ### Editing this file ###
#
# This file should be formatted with
# ~~~
# cmake-format -i CMakeLists.txt
# ~~~
# It should also be cmake-lint clean.
#
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

# Source root directory for executorch.
if(NOT EXECUTORCH_ROOT)
  set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../..)
endif()

include(${EXECUTORCH_ROOT}/build/Utils.cmake)

# The legacy FindCUDA module is deprecated; FindCUDAToolkit (CMake >= 3.17)
# provides imported targets such as CUDA::cudart that carry their own include
# directories and link requirements. REQUIRED turns a missing toolkit into a
# clear configure-time error instead of empty variables and a confusing link
# failure later. NOTE(review): confirm the project's cmake_minimum_required
# is >= 3.17.
find_package(CUDAToolkit REQUIRED)

set(_common_include_directories ${EXECUTORCH_ROOT}/..)

set(_aoti_sources runtime/AotiBackend.cpp)

add_library(aoti_backend STATIC ${_aoti_sources})
target_include_directories(
  aoti_backend PUBLIC ${_common_include_directories}
)

target_compile_options(aoti_backend PUBLIC -fexceptions -frtti)
# Prefer the target property over a hand-rolled -fPIC flag; CMake then emits
# the correct PIC flag for every compiler.
set_target_properties(aoti_backend PROPERTIES POSITION_INDEPENDENT_CODE ON)
# CUDA::cudart carries the CUDA include directories and runtime library;
# the aoti_backend.py lowering patches the generated .so to depend on
# libcudart, so linking the runtime here keeps the two in agreement.
target_link_libraries(aoti_backend PUBLIC extension_tensor CUDA::cudart)
executorch_target_link_options_shared_lib(aoti_backend)

install(
  TARGETS aoti_backend
  EXPORT ExecuTorchTargets
  DESTINATION lib
  INCLUDES
  DESTINATION ${_common_include_directories}
)

backends/aoti/README.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
## Experimental AOTI backend
2+
Proceed with caution. This is an experimental backend that is not yet ready for production use.

backends/aoti/aoti_backend.py

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
import copy
8+
9+
from subprocess import check_call
10+
from typing import final, List
11+
12+
import torch
13+
from executorch.exir.backend.backend_details import (
14+
BackendDetails,
15+
ExportedProgram,
16+
PreprocessResult,
17+
)
18+
from executorch.exir.backend.compile_spec_schema import CompileSpec
19+
20+
21+
@final
class AotiBackend(BackendDetails):
    """Experimental backend that lowers a partitioned edge program through
    torch inductor's ahead-of-time compiler and returns the compiled shared
    object's bytes as the delegate payload."""

    @staticmethod
    def preprocess(
        edge_program: ExportedProgram,
        compile_specs: List[CompileSpec],
    ) -> PreprocessResult:
        """AOT-compile ``edge_program`` and package the resulting .so.

        Args:
            edge_program: program whose ``graph_module`` and
                ``example_inputs`` drive the inductor AOT compile.
            compile_specs: currently unused.

        Returns:
            PreprocessResult whose processed bytes are the patched shared
            object read from disk.
        """
        print("entering the lowerable parts in AotiBackend.preprocess....")

        print("here", edge_program.example_inputs)
        # Deep-copy so AOT compilation cannot mutate the caller's program.
        copy_edge_program = copy.deepcopy(edge_program)
        graph_module = copy_edge_program.graph_module
        args, kwargs = copy_edge_program.example_inputs
        so_path = torch._inductor.aot_compile(graph_module, args, kwargs, options={})  # type: ignore[arg-type]
        print(so_path)
        # Strip the heavyweight libtorch dependencies from the generated
        # shared object, keeping only the CUDA runtime. List-form check_call
        # avoids shell=True, so the path is handled safely even if it
        # contains spaces or shell metacharacters.
        check_call(
            [
                "patchelf",
                "--remove-needed", "libtorch.so",
                "--remove-needed", "libtorch_cuda.so",
                "--remove-needed", "libc10_cuda.so",
                "--remove-needed", "libtorch_cpu.so",
                "--add-needed", "libcudart.so",
                so_path,
            ]
        )

        with open(so_path, "rb") as f:
            data = f.read()
        return PreprocessResult(data)

backends/aoti/aoti_partitioner.py

Lines changed: 74 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,74 @@
1+
# Copyright (c) Meta Platforms, Inc. and affiliates.
2+
# All rights reserved.
3+
#
4+
# This source code is licensed under the BSD-style license found in the
5+
# LICENSE file in the root directory of this source tree.
6+
7+
# pyre-unsafe
8+
9+
from typing import cast, final, List
10+
11+
import torch
12+
from executorch.backends.aoti.aoti_backend import AotiBackend # usort: skip
13+
from executorch.exir.backend.compile_spec_schema import CompileSpec
14+
from executorch.exir.backend.partitioner import (
15+
DelegationSpec,
16+
Partitioner,
17+
PartitionResult,
18+
)
19+
from executorch.exir.backend.utils import tag_constant_data
20+
from executorch.exir.dialects._ops import ops as exir_ops
21+
from torch.export.exported_program import ExportedProgram
22+
from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
23+
24+
from torch.fx.passes.operator_support import OperatorSupportBase
25+
26+
27+
class AOTISupportedOperators(OperatorSupportBase):
    """Operator filter deciding which nodes the AOTI partitioner may claim."""

    def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
        # Only a small allow-list of call_function ops is delegated for now.
        if node.op != "call_function":
            return False
        allowed_targets = (
            exir_ops.edge.aten.add.Tensor,
            exir_ops.edge.aten._to_copy.default,
        )
        return node.target in allowed_targets

    def is_node_supported_custom(self, node: torch.fx.Node) -> bool:
        # mean.dim is supported only when keepdim (third positional arg,
        # defaulting to False) is truthy.
        if node.target == exir_ops.edge.aten.mean.dim:
            keep_dim = node.args[2] if len(node.args) > 2 else False
            return cast(bool, keep_dim)
        # var.correction is supported only when the keepdim kwarg is truthy.
        if node.target == exir_ops.edge.aten.var.correction:
            return cast(bool, node.kwargs.get("keepdim", False))
        return True
44+
45+
46+
@final
class AotiPartitioner(Partitioner):
    """Partitioner that tags AOTI-supported subgraphs for delegation."""

    def __init__(self, compile_spec: List[CompileSpec]) -> None:
        self.delegation_spec = DelegationSpec(AotiBackend.__name__, compile_spec)
        print(self.delegation_spec)

    def partition(self, exported_program: ExportedProgram) -> PartitionResult:
        # Run the CapabilityBasedPartitioner to return the largest possible
        # subgraphs containing the nodes with the tags
        # logger.info("AotiPartitioner::partition")
        partition_tags = {}

        proposer = CapabilityBasedPartitioner(
            exported_program.graph_module,
            AOTISupportedOperators(),
            allows_single_node_partition=True,
        )
        for proposed in proposer.propose_partitions():
            tag = f"tag{proposed.id}"
            for fx_node in proposed.nodes:
                fx_node.meta["delegation_tag"] = tag
                partition_tags[tag] = self.delegation_spec

        tag_constant_data(exported_program)

        return PartitionResult(
            tagged_exported_program=exported_program, partition_tags=partition_tags
        )

0 commit comments

Comments
 (0)