
Commit 9ab662c

Initial skeleton codes for Exynos Backend

Signed-off-by: jiseong.oh <[email protected]>
1 parent 53555b3

File tree: 4 files changed (+262, −0 lines)

backends/samsung/enn_preprocess.py

Lines changed: 58 additions & 0 deletions
@@ -0,0 +1,58 @@

# Copyright (c) 2025 Samsung Electronics Co. LTD
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import logging
from typing import Dict, final, List

import torch
from executorch.exir.backend.backend_details import (
    BackendDetails,
    CompileSpec,
    PreprocessResult,
)

from executorch.exir.passes import PassManager

from torch.export.exported_program import ExportedProgram


@final
class EnnBackend(BackendDetails):
    @staticmethod
    def preprocess(
        edge_program: ExportedProgram,
        compile_specs: List[CompileSpec],
    ) -> PreprocessResult:
        # 1. Converter init: the pass pipeline is still empty in this skeleton.
        enn_preprocess_passes = PassManager(passes=[])

        # 2. Make ENN graph: run the (currently empty) pass pipeline.
        pass_result = enn_preprocess_passes(edge_program.graph_module)

        # 3. Node visitors: placeholder registry; no op is accepted yet.
        node_visitors = []

        vals_to_ids: Dict[torch.fx.Node, int] = {}
        for node in pass_result.graph_module.graph.nodes:
            if node.op == "call_function":
                logging.warning(f"Visiting: {node}, {node.target.__name__}")
                if node.target.__name__ in node_visitors:
                    pass
                else:
                    raise RuntimeError(
                        f"{node.target.__name__} is not supported in ENN Delegate"
                    )
            elif node.op in [
                "get_attr",
                "placeholder",
                "output",
            ]:
                continue
            else:
                raise RuntimeError(f"{node.op} is not supported in ENN Delegate")

        # 4. Compile graph: emit an empty payload for now. PreprocessResult
        # expects bytes, so use b"" rather than 0.
        return PreprocessResult(processed_bytes=b"", debug_handle_map={})
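
The node_visitors registry above is an empty placeholder, so every call_function node currently raises. Below is a hypothetical sketch of the registry shape a later commit might fill in; NodeVisitor and get_node_visitors are assumptions for illustration, not part of this commit:

from typing import Dict

import torch


class NodeVisitor:
    """One visitor per ATen operator; serializes a node into the ENN graph."""

    target: str  # e.g. "aten.add.Tensor"

    def define_node(
        self, node: torch.fx.Node, vals_to_ids: Dict[torch.fx.Node, int]
    ) -> None:
        raise NotImplementedError


def get_node_visitors() -> Dict[str, NodeVisitor]:
    # Returning a name -> visitor map would let the membership check in
    # preprocess() accept supported ops instead of rejecting everything.
    return {}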
Lines changed: 91 additions & 0 deletions
@@ -0,0 +1,91 @@

# Copyright (c) 2025 Samsung Electronics Co. LTD
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import logging
from typing import Any, Dict, List

import torch
from executorch.backends.samsung.enn_preprocess import EnnBackend

from executorch.backends.samsung.utils.utils import get_compile_spec
from executorch.exir.backend.backend_details import CompileSpec
from executorch.exir.backend.canonical_partitioners.pattern_op_partitioner import (
    generate_partitions_from_list_of_nodes,
)
from executorch.exir.backend.partitioner import (
    DelegationSpec,
    Partitioner,
    PartitionResult,
)
from executorch.exir.backend.utils import tag_constant_data
from torch.fx.passes.infra.partitioner import Partition
from torch.fx.passes.operator_support import OperatorSupportBase


class EnnOperatorSupport(OperatorSupportBase):

    def __init__(
        self,
        edge_program: torch.export.ExportedProgram,
        compile_specs: List[CompileSpec],
    ):
        self.edge_program = edge_program
        self.option_spec = get_compile_spec(
            compile_specs, "Exynos compile", required=True
        )

    def is_node_supported(self, _, node: torch.fx.Node) -> bool:
        # Only call_function nodes are candidates; the op-support table is
        # still empty in this skeleton, so no node is accepted yet.
        if node.op != "call_function":
            return False
        return False

    def __del__(self):
        pass


class EnnPartitioner(Partitioner):
    def __init__(self, compile_specs: List[CompileSpec]):
        # TODO(anyone): Add meaningful initialization
        self.delegation_spec = DelegationSpec(EnnBackend.__name__, compile_specs)
        self.partition_tags: Dict[str, DelegationSpec] = {}
        self.compile_specs = compile_specs

    def generate_partitions(
        self, edge_program: torch.export.ExportedProgram
    ) -> List[Any]:
        # Build the op-support checker here; partition() deletes it afterwards.
        self.op_support_checker = EnnOperatorSupport(edge_program, self.compile_specs)
        return generate_partitions_from_list_of_nodes(
            edge_program.graph_module,
            op_support=self.op_support_checker,
        )

    def tag_nodes(self, partitions: List[Partition]) -> Dict[str, DelegationSpec]:
        partition_tags: Dict[str, DelegationSpec] = {}
        for partition in partitions:
            # Add delegation tags
            for node in partition.nodes:
                delegation_tag = f"enn_{partition.id}"
                node.meta["delegation_tag"] = delegation_tag
                partition_tags[delegation_tag] = self.delegation_spec
        return partition_tags

    # override
    def partition(self, edge_program: torch.export.ExportedProgram) -> PartitionResult:
        partitions = self.generate_partitions(edge_program)
        logging.info(f"Found {len(partitions)} subgraphs to partition and lower.")
        if len(partitions) != 0:
            self.partition_tags = self.tag_nodes(partitions)
            tag_constant_data(edge_program)
        del self.op_support_checker
        return PartitionResult(
            tagged_exported_program=edge_program, partition_tags=self.partition_tags
        )
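
For context, here is a minimal sketch of how this partitioner would plug into the standard ExecuTorch lowering flow. It assumes EnnPartitioner is importable from the module above and is not verified end-to-end; since EnnOperatorSupport accepts no ops yet, to_backend() finds zero partitions and leaves the graph untouched:

import torch
from executorch.backends.samsung.serialization.compile_options import (
    gen_samsung_backend_compile_spec,
)
from executorch.exir import to_edge


class AddModule(torch.nn.Module):
    def forward(self, x, y):
        return x + y


prog = torch.export.export(AddModule(), (torch.randn(2), torch.randn(2)))
specs = [gen_samsung_backend_compile_spec("E9955")]
# With the empty op-support table, no subgraph is tagged for delegation.
edge = to_edge(prog).to_backend(EnnPartitioner(specs))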
backends/samsung/utils/export_utils.py

Lines changed: 29 additions & 0 deletions
@@ -0,0 +1,29 @@

# Copyright (c) 2025 Samsung Electronics Co. LTD
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from typing import List, Optional, Tuple

import executorch.exir as exir
import torch
from executorch.exir import EdgeCompileConfig
from executorch.exir.backend.backend_details import CompileSpec
from executorch.exir.pass_manager import PassType
from executorch.exir.program._program import (
    to_edge_transform_and_lower,
)


def to_edge_transform_and_lower_to_enn(
    module: torch.nn.Module,
    inputs: Tuple[torch.Tensor],
    custom_pass_config: Optional[List[PassType]] = None,
    compile_specs: Optional[List[CompileSpec]] = None,
) -> exir.EdgeProgramManager:
    assert compile_specs is not None, "For now, we must deliver compile specs"
    prog = torch.export.export(module, inputs)
    # TODO(anyone): thread custom_pass_config and an EnnPartitioner built
    # from compile_specs into the call below so the graph actually lowers.
    return to_edge_transform_and_lower(
        prog,
        compile_config=EdgeCompileConfig(),
    )
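
A minimal usage sketch of this helper, under the same assumptions as above; the chipset string mirrors the example in aot_compiler.py below, and SmallModel is a made-up placeholder:

import torch
from executorch.backends.samsung.serialization.compile_options import (
    gen_samsung_backend_compile_spec,
)
from executorch.backends.samsung.utils.export_utils import (
    to_edge_transform_and_lower_to_enn,
)


class SmallModel(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x)


edge = to_edge_transform_and_lower_to_enn(
    SmallModel().eval(),
    (torch.randn(1, 8),),
    compile_specs=[gen_samsung_backend_compile_spec("E9955")],
)
exec_prog = edge.to_executorch()  # ready to save as a .pte file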

examples/samsung/aot_compiler.py

Lines changed: 84 additions & 0 deletions
@@ -0,0 +1,84 @@

# Copyright (c) 2025 Samsung Electronics Co. LTD
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import logging

from executorch.backends.samsung.serialization.compile_options import (
    gen_samsung_backend_compile_spec,
)
from executorch.backends.samsung.utils.export_utils import (
    to_edge_transform_and_lower_to_enn,
)
from executorch.exir import ExecutorchBackendConfig
from executorch.extension.export_util.utils import save_pte_program

from ..models import MODEL_NAME_TO_MODEL
from ..models.model_factory import EagerModelFactory

FORMAT = "[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT)

SUPPORT_MODEL_NAMES = []

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "-c",
        "--chipset",
        required=True,
        help="Samsung chipset, e.g. E9955",
        type=str,
    )
    parser.add_argument(
        "-m",
        "--model_name",
        required=True,
        help=f"Model name. Valid ones: {SUPPORT_MODEL_NAMES}",
    )
    # args.quantize is read below, so the flag must be declared here.
    parser.add_argument(
        "-q",
        "--quantize",
        action="store_true",
        help="Quantize the model (not implemented yet)",
    )
    parser.add_argument("-o", "--output_dir", default=".", help="output directory")

    args = parser.parse_args()

    if args.model_name not in SUPPORT_MODEL_NAMES and args.quantize:
        raise RuntimeError(
            f"Model {args.model_name} is not a valid name or is not quantizable "
            "right now. Please contact the executorch team if you want to learn "
            "why, or how to support quantization for the requested model. "
            f"Available models are {SUPPORT_MODEL_NAMES}."
        )

    model, example_inputs, dynamic_shapes, _ = EagerModelFactory.create_model(
        *MODEL_NAME_TO_MODEL[args.model_name]
    )
    assert (
        dynamic_shapes is None
    ), "enn backend doesn't support dynamic shapes currently."

    model = model.eval()

    if args.quantize:
        raise NotImplementedError("Quantizer is under development...")
    else:
        # TODO(anyone): Remove this check once the quantizer works, or
        # validate it another way.
        # raise AssertionError("Only support s8/fp16/s16")
        pass

    # logging.info(f"Exported graph:\n{edge.exported_program().graph}")
    compile_specs = [gen_samsung_backend_compile_spec(args.chipset)]
    edge = to_edge_transform_and_lower_to_enn(
        model, example_inputs, compile_specs=compile_specs
    )

    # Save
    exec_prog = edge.to_executorch(
        config=ExecutorchBackendConfig(extract_delegate_segments=True)
    )

    quant_tag = "q8" if args.quantize else "fp32"
    model_name = f"{args.model_name}_exynos_{quant_tag}"
    save_pte_program(exec_prog, model_name, args.output_dir)
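
Once model names are registered in SUPPORT_MODEL_NAMES, the intended invocation would look roughly like `python -m examples.samsung.aot_compiler -c E9955 -m <model_name> -o ./out` (the module path is assumed from the file's location; the model name is a placeholder), producing `<model_name>_exynos_fp32.pte` in the output directory.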
