
Commit eda67ae

Jiseong-oh, Chen03Zhao, and sangsoo.Ko committed
Add more example code for other quantized models
Models covered: dlv3 / edsr / iv3 / iv4 / mv3 / resnet50 / vit / w2l

Co-authored-by: chen.zhao <[email protected]>
Co-authored-by: sangsoo.Ko <[email protected]>
1 parent f9a1d79 commit eda67ae

24 files changed: +1652 additions, −91 deletions
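The example scripts themselves are not part of this diff; the abbreviations presumably expand to DeepLabV3, EDSR, Inception v3/v4, MobileNetV3, ResNet-50, ViT, and Wav2Letter. For orientation only, here is a minimal sketch of the PT2E quantize-then-export flow such example code typically follows; the `EnnQuantizer` name and its usage are assumptions, not shown in this commit:

```python
import torch
import torchvision.models as models
from torch.export import export
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e

# Export an eager model (here mv3 = MobileNetV3) to an ATen-level graph.
model = models.mobilenet_v3_small().eval()
sample_inputs = (torch.randn(1, 3, 224, 224),)
graph_module = export(model, sample_inputs).module()

# PT2E quantization: prepare -> calibrate -> convert.
# `EnnQuantizer` is a hypothetical stand-in for the backend's quantizer.
# quantizer = EnnQuantizer()
# prepared = prepare_pt2e(graph_module, quantizer)
# prepared(*sample_inputs)            # calibration run
# quantized = convert_pt2e(prepared)  # quantized graph, ready to lower
```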

backends/samsung/_passes/conv1d_to_conv2d.py

Lines changed: 0 additions & 4 deletions

@@ -12,10 +12,6 @@
 
 
 class Conv1dToConv2d(ExportPass):
-    """
-    Convert conv1d to conv2d because exynos doesn't support conv1d
-    """
-
     def __init__(self, edge_program: ExportedProgram):
         super().__init__()
         self.edge_program = edge_program
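The deleted docstring was still the best one-line summary of the pass: Exynos has no conv1d, so the pass rewrites it as conv2d over a dummy spatial dimension. A quick numerical check of that equivalence:

```python
import torch
import torch.nn.functional as F

# conv1d over (N, C, L) equals conv2d over (N, C, 1, L) with the kernel
# reshaped from (O, I, K) to (O, I, 1, K), which is what the pass exploits.
x = torch.randn(1, 8, 32)
w = torch.randn(16, 8, 3)
out_1d = F.conv1d(x, w)
out_2d = F.conv2d(x.unsqueeze(2), w.unsqueeze(2)).squeeze(2)
torch.testing.assert_close(out_1d, out_2d)
```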

backends/samsung/_passes/fuse_conv_act.py

Lines changed: 0 additions & 1 deletion

@@ -17,7 +17,6 @@ def map_hardtan_relux(tanhnode: torch.fx.node.Node) -> Optional[str]:
     assert (
         tanhnode.target == exir_ops.edge.aten.hardtanh.default
     ), "Must be a hardtanh node"
-    # Not support ReLU1 now
     if not tanhnode.args[1] == 0.0:
         return None
     if tanhnode.args[2] == 6.0:
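For context on the bounds check: `hardtanh` with bounds (0, 6) is exactly ReLU6, which is why the pass only maps nodes whose lower bound is 0.0:

```python
import torch
import torch.nn.functional as F

# hardtanh clamped to [0, 6] coincides with relu6, so a conv followed by
# such a hardtanh can be fused as conv + ReLU6.
x = torch.randn(100)
torch.testing.assert_close(F.hardtanh(x, 0.0, 6.0), F.relu6(x))
```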

backends/samsung/builders/__init__.py

Lines changed: 2 additions & 0 deletions

@@ -22,6 +22,7 @@
     op_getitem,
     op_hardswish,
     op_hardtanh,
+    op_hardsigmoid,
     op_layer_norm,
     op_leaky_relu,
     op_linear,
@@ -67,6 +68,7 @@
     op_getitem,
     op_hardswish,
     op_hardtanh,
+    op_hardsigmoid,
     op_layer_norm,
     op_leaky_relu,
     op_linear,
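Listing `op_hardsigmoid` here matters because importing the module is what runs its `@register_node_visitor` decorator. A sketch of the registration pattern this implies; the registry internals are an assumption, not shown in this commit:

```python
# Assumed shape of the registration mechanism: the decorator keys each
# visitor class by its `target` string so the backend can look it up later.
_registry: dict = {}

def register_node_visitor(visitor_cls):
    _registry[visitor_cls.target] = visitor_cls
    return visitor_cls
```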

backends/samsung/builders/op_avg_pool2d.py

Lines changed: 4 additions & 9 deletions

@@ -54,8 +54,7 @@ def define_node(
 
         if len(node.args) > 4:
             ceil_mode = cast(bool, node.args[4])
-            if ceil_mode:
-                raise AssertionError("Not support ceil_mode = True.")
+            assert not ceil_mode, "Not support ceil_mode = True."
 
         if len(node.args) > 5:
             params["count_include_pad"] = cast(bool, node.args[5])
@@ -64,12 +63,8 @@
 
         if len(node.args) > 6:
             divisor_override = cast(int, node.args[6])
-            if divisor_override != kernel_size[0] * kernel_size[1]:
-                raise AssertionError(
-                    "Not supported divisor_override which is not equal to pooling region."
-                )
-
+            assert (
+                divisor_override == kernel_size[0] * kernel_size[1]
+            ), "Not supported divisor_override which is not equal to pooling region."
         output_id = self.define_tensor(node, enn_graph, vals_to_ids)
-        vals_to_ids[node] = output_id
-
         enn_graph.define_op(node.name, "AVGPOOL2D", [input_id], [output_id], params)
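The tightened `divisor_override` check allows only a divisor equal to the pooling region, i.e. the value average pooling divides by anyway, so supported graphs are numerically unaffected:

```python
import torch
import torch.nn.functional as F

# With no padding, the default divisor is kH * kW (here 4), so an
# override equal to the pooling region changes nothing.
x = torch.randn(1, 3, 8, 8)
default = F.avg_pool2d(x, kernel_size=2)
overridden = F.avg_pool2d(x, kernel_size=2, divisor_override=4)
torch.testing.assert_close(default, overridden)
```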

backends/samsung/builders/op_clamp.py

Lines changed: 1 addition & 3 deletions

@@ -39,10 +39,8 @@ def define_node(
         if len(node.args) > 2 and node.args[2] is not None:
             output_max = cast(float, node.args[2])
 
-        params = {}
+        params = {"minimum": output_min, "maximum": output_max}
         self._update_params_qdtype(node, params)
-        params["minimum"] = output_min
-        params["maximum"] = output_max
 
         output_id = self.define_tensor(node, enn_graph, vals_to_ids)
 
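The `is not None` guard mirrors `aten.clamp`'s semantics, where either bound may be absent:

```python
import torch

# Either bound of clamp may be None; the builder only casts the ones
# that are actually present in node.args.
x = torch.tensor([-2.0, 0.5, 3.0])
print(torch.clamp(x, min=0.0))            # tensor([0.0000, 0.5000, 3.0000])
print(torch.clamp(x, min=-1.0, max=1.0))  # tensor([-1.0000, 0.5000, 1.0000])
```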
backends/samsung/builders/op_hardsigmoid.py

Lines changed: 35 additions & 0 deletions

@@ -0,0 +1,35 @@
+# Copyright (c) 2025 Samsung Electronics Co. LTD
+# All rights reserved
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Dict
+
+import torch
+from executorch.backends.samsung.builders.node_visitor import (
+    NodeVisitor,
+    register_node_visitor,
+)
+from executorch.backends.samsung.serialization.enn_graph_schema import EnnGraph
+
+
+@register_node_visitor
+class HardSigmoidVisitor(NodeVisitor):
+    target = "aten.hardsigmoid.default"
+
+    def __init__(self, *args) -> None:
+        super().__init__(*args)
+
+    def define_node(
+        self,
+        node: torch.fx.Node,
+        enn_graph: EnnGraph,
+        vals_to_ids: Dict[torch.Tensor, int],
+    ) -> None:
+        input = node.args[0]
+        input_id = self.define_tensor(input, enn_graph, vals_to_ids)
+        output_id = self.define_tensor(node, enn_graph, vals_to_ids)
+        params = {}
+        self._update_params_qdtype(node, params)
+        enn_graph.define_op(node.name, "HardSigmoid", [input_id], [output_id], params)
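For reference, the op this new visitor serializes: PyTorch defines hard sigmoid as the piecewise-linear `relu6(x + 3) / 6`, equivalently a clamp:

```python
import torch
import torch.nn.functional as F

# hardsigmoid(x) = clamp((x + 3) / 6, 0, 1) -- a cheap piecewise-linear
# approximation of sigmoid, common in quantized mobile models.
x = torch.randn(100)
torch.testing.assert_close(F.hardsigmoid(x), torch.clamp((x + 3) / 6, 0.0, 1.0))
```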

backends/samsung/builders/op_hardtanh.py

Lines changed: 6 additions & 11 deletions

@@ -29,18 +29,13 @@ def define_node(
     ) -> None:
         input = node.args[0]
         input_id = self.define_tensor(input, enn_graph, vals_to_ids)
-        params = {}
-        self._update_params_qdtype(node, params)
+
         # default value of output_min and output_max
-        output_min = -1
-        output_max = 1
-        if len(node.args) > 1:
-            output_min = cast(float, node.args[1])
-        if len(node.args) > 2:
-            output_max = cast(float, node.args[2])
-
-        params["minimum"] = output_min
-        params["maximum"] = output_max
+        output_min = cast(float, node.args[1]) if len(node.args) > 1 else -1
+        output_max = cast(float, node.args[2]) if len(node.args) > 2 else 1
+
+        params = {"minimum": output_min, "maximum": output_max}
+        self._update_params_qdtype(node, params)
 
         output_id = self.define_tensor(node, enn_graph, vals_to_ids)
 
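The -1/1 fallbacks match `hardtanh`'s own defaults, so a bare `aten.hardtanh` node with no explicit bounds lowers correctly:

```python
import torch
import torch.nn.functional as F

# With no explicit bounds, hardtanh clamps to [-1, 1] -- exactly the
# defaults the builder falls back to.
x = torch.randn(100)
torch.testing.assert_close(F.hardtanh(x), torch.clamp(x, -1.0, 1.0))
```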
backends/samsung/builders/op_layer_norm.py

Lines changed: 1 addition & 4 deletions

@@ -44,10 +44,7 @@ def define_node(
             bias_id = self.define_tensor(bias_node, enn_graph, vals_to_ids)
             all_input_tensors.append(bias_id)
 
-        if len(node.args) > 4:
-            epsilon = node.args[4]
-        else:
-            epsilon = 1e-5
+        epsilon = node.args[4] if len(node.args) > 4 else 1e-5
         params = {"epsilon": epsilon}
         self._update_params_qdtype(node, params)
         output_id = self.define_tensor(node, enn_graph, vals_to_ids)
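The 1e-5 fallback matches torch's own layer-norm default eps, so omitting `node.args[4]` keeps numerics identical to eager mode:

```python
import torch
import torch.nn.functional as F

# Layer norm with the default eps = 1e-5, written out by hand over the
# last dimension (biased variance, no affine parameters).
x = torch.randn(2, 4, 8)
mean = x.mean(-1, keepdim=True)
var = x.var(-1, keepdim=True, unbiased=False)
manual = (x - mean) / torch.sqrt(var + 1e-5)
torch.testing.assert_close(F.layer_norm(x, (8,)), manual)
```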

backends/samsung/builders/op_squeeze.py

Lines changed: 0 additions & 23 deletions

@@ -32,29 +32,6 @@ def define_node(
 
         # output
         output_id = self.define_tensor(node, enn_graph, vals_to_ids)
-        params = {}
-        params["new_shape"] = [*node.meta["val"].shape]
-        enn_graph.define_op(node.name, "RESHAPE", [input_id], [output_id], params)
-
-
-@register_node_visitor
-class UnsqueezeVisitor(NodeVisitor):
-    target = "aten.unsqueeze_copy.default"
-
-    def __init__(self, *args) -> None:
-        super().__init__(*args)
 
-    def define_node(
-        self,
-        node: torch.fx.Node,
-        enn_graph: EnnGraph,
-        vals_to_ids: Dict[torch.Tensor, int],
-    ) -> None:
-        input = node.args[0]
-        input_id = self.define_tensor(input, enn_graph, vals_to_ids)
-
-        # output
-        output_id = self.define_tensor(node, enn_graph, vals_to_ids)
         params = {"new_shape": [*node.meta["val"].shape]}
-
         enn_graph.define_op(node.name, "RESHAPE", [input_id], [output_id], params)

backends/samsung/builders/op_unsqueeze.py

Lines changed: 2 additions & 1 deletion

@@ -31,4 +31,5 @@ def define_node(
 
         output_id = self.define_tensor(node, enn_graph, vals_to_ids)
 
-        enn_graph.define_op(node.name, "RESHAPE", [input_id], [output_id])
+        params = {"new_shape": [*node.meta["val"].shape]}
+        enn_graph.define_op(node.name, "RESHAPE", [input_id], [output_id], params)
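Both squeeze and unsqueeze lower to a single RESHAPE whose `new_shape` is read from `node.meta["val"].shape`, which is exact because neither op moves data:

```python
import torch

# squeeze/unsqueeze only change the shape metadata, so replacing them
# with a reshape to the recorded output shape is lossless.
x = torch.randn(1, 3, 1, 5)
torch.testing.assert_close(x.squeeze(2), x.reshape(1, 3, 5))
torch.testing.assert_close(x.unsqueeze(0), x.reshape(1, 1, 3, 1, 5))
```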
