
Commit 407b23a

chong-chen01 and Jiseong-oh authored and committed
Initialize common ops and support ic3/resnet models
Initialize the ops in Inception v3 and ResNet, and keep the necessary code in aot_compiler. Use the aot_compiler to generate ic3, for example:

```
python -m executorch.examples.samsung.aot_compiler --chipset e9955 -m ic3
```

For the on-device test, more information can be found in the README in the same directory as aot_compiler.py.

Co-authored-by: chong-chen <[email protected]>
1 parent 96ea729 commit 407b23a

21 files changed: +894, -18 lines

backends/samsung/builders/__init__.py

Lines changed: 37 additions & 1 deletion
```diff
@@ -4,8 +4,44 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-from . import node_visitor
+from . import (
+    node_visitor,
+    op_add,
+    op_avg_pool2d,
+    op_batch_norm,
+    op_cat,
+    op_clamp,
+    op_conv2d,
+    op_getitem,
+    op_hardtanh,
+    op_linear,
+    op_max_pool2d,
+    op_mean_dim,
+    op_mul,
+    op_permute,
+    op_relu,
+    op_reshape,
+    op_select,
+    op_unsqueeze,
+)
 
 __all__ = [
     node_visitor,
+    op_add,
+    op_avg_pool2d,
+    op_batch_norm,
+    op_cat,
+    op_clamp,
+    op_conv2d,
+    op_getitem,
+    op_hardtanh,
+    op_linear,
+    op_max_pool2d,
+    op_mean_dim,
+    op_mul,
+    op_permute,
+    op_relu,
+    op_reshape,
+    op_select,
+    op_unsqueeze,
 ]
```
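Importing each `op_*` module is what triggers its `@register_node_visitor` decorator, so the import list above is load-bearing rather than cosmetic. A minimal sketch of that pattern, assuming the registry is a plain dict keyed by each visitor's `target` string (the actual decorator lives in node_visitor.py and may differ):

```python
# Hypothetical sketch of register_node_visitor; the real implementation
# in node_visitor.py may differ.
_node_visitor_registry = {}  # target string -> visitor class


def register_node_visitor(visitor_cls):
    # Key the registry by the op target, e.g. "aten.add.Tensor",
    # so the backend can look up a visitor per FX node.
    _node_visitor_registry[visitor_cls.target] = visitor_cls
    return visitor_cls
```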

backends/samsung/builders/node_visitor.py

Lines changed: 5 additions & 0 deletions
```diff
@@ -13,6 +13,7 @@
     get_tensor_type,
 )
 from executorch.backends.samsung.serialization.enn_graph_schema import EnnGraph
+from executorch.backends.transforms.utils import is_param_node
 from torch.export import ExportedProgram
 
 
@@ -52,6 +53,10 @@ def define_tensor(
         data_type = get_map_dtype(tensor.dtype)
 
         const_data = None
+        if is_param_node(self.exported_program, node):
+            if swap_nc_for_weights:
+                tensor = torch.swapdims(tensor, 0, 1)
+            const_data = tensor.contiguous().detach().numpy()
 
         dims = [1] if len(tensor.size()) == 0 else list(tensor.size())
 
```
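For reference, `torch.swapdims(t, 0, 1)` returns a view with the first two dimensions exchanged, and `.contiguous()` materializes that layout before the NumPy conversion. A small standalone check:

```python
import torch

# e.g. a conv-style weight of shape (out_channels, in_channels, kH, kW)
w = torch.randn(8, 4, 3, 3)
swapped = torch.swapdims(w, 0, 1)                   # view, shape (4, 8, 3, 3)
const_data = swapped.contiguous().detach().numpy()  # materialized copy
print(const_data.shape)                             # (4, 8, 3, 3)
```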

backends/samsung/builders/op_add.py

Lines changed: 36 additions & 0 deletions
```diff
@@ -0,0 +1,36 @@
+# Copyright (c) 2025 Samsung Electronics Co. LTD
+# All rights reserved
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+from typing import Dict
+
+import torch
+from executorch.backends.samsung.builders.node_visitor import (
+    NodeVisitor,
+    register_node_visitor,
+)
+from executorch.backends.samsung.serialization.enn_graph_schema import EnnGraph
+
+
+@register_node_visitor
+class AddVisitor(NodeVisitor):
+    target = "aten.add.Tensor"
+
+    def __init__(self, *args) -> None:
+        super().__init__(*args)
+
+    def define_node(
+        self,
+        node: torch.fx.Node,
+        enn_graph: EnnGraph,
+        vals_to_ids: Dict[torch.Tensor, int],
+    ) -> None:
+        input1 = node.args[0]
+        input_id_1 = self.define_tensor(input1, enn_graph, vals_to_ids)
+        input2 = node.args[1]
+        input_id_2 = self.define_tensor(input2, enn_graph, vals_to_ids)
+
+        output_id = self.define_tensor(node, enn_graph, vals_to_ids)
+
+        enn_graph.define_op(node.name, "ELTSUM", [input_id_1, input_id_2], [output_id])
```
backends/samsung/builders/op_avg_pool2d.py

Lines changed: 68 additions & 0 deletions
```diff
@@ -0,0 +1,68 @@
+# Copyright (c) 2025 Samsung Electronics Co. LTD
+# All rights reserved
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+from typing import cast, Dict, List
+
+import torch
+from executorch.backends.samsung.builders.node_visitor import (
+    NodeVisitor,
+    register_node_visitor,
+)
+from executorch.backends.samsung.serialization.enn_graph_schema import EnnGraph
+
+
+@register_node_visitor
+class AvgPool2dVisitor(NodeVisitor):
+    target = "aten.avg_pool2d.default"
+
+    def __init__(self, *args) -> None:
+        super().__init__(*args)
+
+    def define_node(
+        self,
+        node: torch.fx.Node,
+        enn_graph: EnnGraph,
+        vals_to_ids: Dict[torch.Tensor, int],
+    ) -> None:
+        input = node.args[0]
+        input_id = self.define_tensor(input, enn_graph, vals_to_ids)
+
+        kernel_size = cast(List[int], node.args[1])
+        if len(kernel_size) == 1:
+            kernel_size = kernel_size * 2
+
+        stride = cast(List[int], node.args[2]) if len(node.args) > 2 else kernel_size
+        if len(stride) == 1:
+            stride = stride * 2
+
+        padding = cast(List[int], node.args[3]) if len(node.args) > 3 else [0, 0]
+        if len(padding) == 1:
+            padding = padding * 2
+        explicit_padding = [padding[0], padding[1], padding[0], padding[1]]
+
+        params = {}
+        params["kernel_h"] = kernel_size[0]
+        params["kernel_w"] = kernel_size[1]
+        params["stride_h"] = stride[0]
+        params["stride_w"] = stride[1]
+        params["padding"] = "EXPLICIT"
+        params["explicit_padding"] = explicit_padding
+
+        if len(node.args) > 4:
+            ceil_mode = cast(bool, node.args[4])
+            assert not ceil_mode, "ceil_mode = True is not supported."
+
+        if len(node.args) > 5:
+            params["count_include_pad"] = cast(bool, node.args[5])
+        else:
+            params["count_include_pad"] = True
+
+        if len(node.args) > 6:
+            divisor_override = cast(int, node.args[6])
+            assert divisor_override == kernel_size[0] * kernel_size[1], "divisor_override must equal the pooling region size."
+
+        output_id = self.define_tensor(node, enn_graph, vals_to_ids)
+
+        enn_graph.define_op(node.name, "AVGPOOL2D", [input_id], [output_id], params)
```
backends/samsung/builders/op_batch_norm.py

Lines changed: 56 additions & 0 deletions
```diff
@@ -0,0 +1,56 @@
+# Copyright (c) 2025 Samsung Electronics Co. LTD
+# All rights reserved
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+from typing import Dict
+
+import torch
+from executorch.backends.samsung.builders.node_visitor import (
+    NodeVisitor,
+    register_node_visitor,
+)
+from executorch.backends.samsung.serialization.enn_graph_schema import EnnGraph
+
+
+@register_node_visitor
+class BatchNormVisitor(NodeVisitor):
+    target = "aten._native_batch_norm_legit_no_training.default"
+
+    def __init__(self, *args) -> None:
+        super().__init__(*args)
+
+    def define_node(
+        self,
+        node: torch.fx.Node,
+        enn_graph: EnnGraph,
+        vals_to_ids: Dict[torch.Tensor, int],
+    ) -> None:
+        all_input_tensors = []
+        input = node.args[0]
+        input_id = self.define_tensor(input, enn_graph, vals_to_ids)
+        all_input_tensors.append(input_id)
+
+        weight_node, bias_node, mean_node, var_node = (
+            node.args[1],
+            node.args[2],
+            node.args[3],
+            node.args[4],
+        )
+        weight_id = self.define_tensor(weight_node, enn_graph, vals_to_ids)
+        all_input_tensors.append(weight_id)
+        bias_id = self.define_tensor(bias_node, enn_graph, vals_to_ids)
+        all_input_tensors.append(bias_id)
+        mean_id = self.define_tensor(mean_node, enn_graph, vals_to_ids)
+        all_input_tensors.append(mean_id)
+        var_id = self.define_tensor(var_node, enn_graph, vals_to_ids)
+        all_input_tensors.append(var_id)
+
+        eps = node.args[-1]
+        params = {"epsilon": eps}
+
+        output_id = self.define_tensor(node, enn_graph, vals_to_ids, output_idx=0)
+
+        enn_graph.define_op(
+            node.name, "BatchNormalization", all_input_tensors, [output_id], params
+        )
```
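The arg layout of `aten._native_batch_norm_legit_no_training.default` is `(input, weight, bias, running_mean, running_var, momentum, eps)`, which is why `eps` is read from `node.args[-1]`. A hedged way to produce such a node, assuming eval-mode export decomposes BatchNorm to this op in current PyTorch:

```python
import torch

m = torch.nn.BatchNorm2d(8).eval()  # eval mode: running stats are frozen
ep = torch.export.export(m, (torch.randn(1, 8, 4, 4),))
# Expect an aten._native_batch_norm_legit_no_training.default node whose
# last positional argument is eps (1e-5 by default).
print(ep.graph_module.graph)
```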
backends/samsung/builders/op_cat.py

Lines changed: 42 additions & 0 deletions
```diff
@@ -0,0 +1,42 @@
+# Copyright (c) 2025 Samsung Electronics Co. LTD
+# All rights reserved
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+from typing import cast, Dict, List
+
+import torch
+from executorch.backends.samsung.builders.node_visitor import (
+    NodeVisitor,
+    register_node_visitor,
+)
+from executorch.backends.samsung.serialization.enn_graph_schema import EnnGraph
+from executorch.backends.transforms import get_shape
+
+
+@register_node_visitor
+class CatVisitor(NodeVisitor):
+    target = "aten.cat.default"
+
+    def __init__(self, *args):
+        super().__init__(*args)
+
+    def define_node(
+        self,
+        node: torch.fx.Node,
+        enn_graph: EnnGraph,
+        vals_to_ids: Dict[torch.Tensor, int],
+    ) -> None:
+        tensors = cast(List[torch.fx.Node], node.args[0])
+        input_tensor_ids = []
+
+        for in_tensor in tensors:
+            input_id = self.define_tensor(in_tensor, enn_graph, vals_to_ids)
+            input_tensor_ids.append(input_id)
+
+        in_shape = get_shape(node)
+        axis = cast(int, node.args[1]) % len(in_shape) if len(node.args) >= 2 else 0
+        params = {"axis": axis}
+
+        output_id = self.define_tensor(node, enn_graph, vals_to_ids)
+        enn_graph.define_op(node.name, "CONCAT", input_tensor_ids, [output_id], params)
```
backends/samsung/builders/op_clamp.py

Lines changed: 46 additions & 0 deletions
```diff
@@ -0,0 +1,46 @@
+# Copyright (c) 2025 Samsung Electronics Co. LTD
+# All rights reserved
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+from typing import cast, Dict
+
+import torch
+from executorch.backends.samsung.builders.node_visitor import (
+    NodeVisitor,
+    register_node_visitor,
+)
+from executorch.backends.samsung.serialization.enn_graph_schema import EnnGraph
+
+
+@register_node_visitor
+class ClampVisitor(NodeVisitor):
+    target = "aten.clamp.default"
+
+    def __init__(self, *args) -> None:
+        super().__init__(*args)
+
+    def define_node(
+        self,
+        node: torch.fx.Node,
+        enn_graph: EnnGraph,
+        vals_to_ids: Dict[torch.Tensor, int],
+    ) -> None:
+        input = node.args[0]
+        input_id = self.define_tensor(input, enn_graph, vals_to_ids)
+
+        # Default values for the lower and upper bounds
+        output_min = torch.finfo(torch.float32).min
+        output_max = torch.finfo(torch.float32).max
+        if node.args[1] is not None:
+            output_min = cast(float, node.args[1])
+        if len(node.args) > 2 and node.args[2] is not None:
+            output_max = cast(float, node.args[2])
+
+        params = {
+            "minimum": output_min,
+            "maximum": output_max,
+        }
+        output_id = self.define_tensor(node, enn_graph, vals_to_ids)
+
+        enn_graph.define_op(node.name, "CLIP", [input_id], [output_id], params)
```
