Skip to content

Commit 080c7a4

Browse files
authored
Merge pull request #1074 from NikolasMarkou/upsample_optimizer
Feature: Added optimization step to remove upsample layers with all ones in scale
2 parents 29708c4 + b74e96d commit 080c7a4

File tree

3 files changed

+111
-0
lines changed

3 files changed

+111
-0
lines changed

tests/test_optimizers.py

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1177,6 +1177,64 @@ def test_cast_back_to_back_non_const_mixed_types(self):
11771177
self.run_and_compare(["res", "res2", "res3"], {"u": np.random.randn(1, 2, 3).astype(np.float32)}, model_proto,
11781178
"Cast", 5)
11791179

1180+
@check_opset_max_version(8, "until opset 8 scales is in attributes")
1181+
def test_upsample_all_ones_removed(self):
1182+
shape = (1, 1, 32, 32)
1183+
node1 = helper.make_node(
1184+
op_type="Upsample",
1185+
inputs=["X"],
1186+
outputs=["Y"],
1187+
scales=[1., 1., 1., 1.],
1188+
name="upsample1")
1189+
1190+
graph = helper.make_graph(
1191+
[node1],
1192+
"test_upsample_all_ones",
1193+
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
1194+
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, shape)],
1195+
)
1196+
1197+
model_proto = self.make_model(graph, producer_name="onnx-tests")
1198+
1199+
self.run_and_compare(
1200+
["Y"],
1201+
{"X": np.random.randn(*shape).astype(np.float32)},
1202+
model_proto,
1203+
"Upsample",
1204+
0)
1205+
1206+
@check_opset_min_version(9, ">= 9 scales is in input[1]")
1207+
@check_opset_max_version(9, "Upscale is deprecated in opsets >= 10")
1208+
def test_upsample_all_ones_removed_in_input(self):
1209+
shape = (1, 1, 32, 32)
1210+
const_tensor = helper.make_tensor(
1211+
name="S",
1212+
data_type=TensorProto.FLOAT,
1213+
dims=(1, 4),
1214+
vals=np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32))
1215+
node0 = helper.make_node("Constant", [], ["S"], value=const_tensor)
1216+
node1 = helper.make_node(
1217+
op_type="Upsample",
1218+
inputs=["X", "S"],
1219+
outputs=["Y"],
1220+
name="upsample1")
1221+
1222+
graph = helper.make_graph(
1223+
[node0, node1],
1224+
"test_upsample_all_ones",
1225+
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
1226+
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, shape)],
1227+
)
1228+
1229+
model_proto = self.make_model(graph, producer_name="onnx-tests")
1230+
1231+
self.run_and_compare(
1232+
["Y"],
1233+
{"X": np.random.randn(*shape).astype(np.float32)},
1234+
model_proto,
1235+
"Upsample",
1236+
0)
1237+
11801238

11811239
if __name__ == "__main__":
11821240
unittest_main()

tf2onnx/optimizer/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,11 +15,13 @@
1515
from .transpose_optimizer import TransposeOptimizer
1616
from .loop_optimizer import LoopOptimizer
1717
from .back_to_back_optimizer import BackToBackOptimizer
18+
from .upsample_optimizer import UpsampleOptimizer
1819
from .. import logging
1920

2021
# optimizer sequence need to be considered carefully
2122
_optimizers = OrderedDict([
2223
("optimize_transpose", TransposeOptimizer),
24+
("remove_redundant_upsample", UpsampleOptimizer),
2325
("fold_constants", ConstFoldOptimizer),
2426
("loop_optimizer", LoopOptimizer),
2527
# merge_duplication should be used after optimize_transpose
Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
"""Resize Optimizer.
2+
Replace resize operations with all ones in scale with Identity nodes
3+
"""
4+
5+
from __future__ import unicode_literals
6+
7+
import numpy as np
8+
9+
from .optimizer_base import GraphOptimizerBase
10+
11+
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,unused-variable,arguments-differ
12+
13+
14+
class UpsampleOptimizer(GraphOptimizerBase):
    """Replace Upsample nodes whose scales are all ones with Identity.

    An Upsample with unit scales produces an output identical to its
    input, so the node can be rewritten as Identity (downstream identity
    merging can then drop it entirely).
    """

    def __init__(self):  # pylint: disable=useless-super-delegation
        super(UpsampleOptimizer, self).__init__()
        # Current graph being optimized; set per call in
        # _optimize_at_current_graph_level.
        self._g = None

    def _optimize(self, graph):
        # Recurse into subgraphs via the base-class driver.
        return self._apply_optimization(
            graph,
            self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, graph):
        """Rewrite every no-op Upsample in *graph* as Identity."""
        self._g = graph
        for node in self._g.get_nodes():
            if node.type != "Upsample":
                continue
            if self._rewrite_if_unit_scales(node):
                # Lazy %-style args: only formatted when debug is enabled.
                self.logger.debug(
                    "replacing %s with Identity operation", node.name)
        return self._g

    def _rewrite_if_unit_scales(self, node):
        """Turn *node* into Identity if its scales are all 1.0.

        Returns True when the node was changed, False otherwise.
        """
        # Upsample in opset <= 8 carries scales as an attribute.
        if self._g.opset <= 8:
            scales = node.get_attr_value("scales")
            if scales and all(float(s) == 1. for s in scales):
                node.type = "Identity"
                return True
        # Upsample in opset >= 9 carries scales as input[1].
        if self._g.opset >= 9 and len(node.input) == 2:
            scales_input = node.inputs[1]
            # Guard against a missing producer; only constant scales can
            # be proven to be all ones at optimization time.
            if scales_input is not None and scales_input.is_const() and \
                    np.all(scales_input.get_tensor_value(as_list=False) == 1.):
                node.type = "Identity"
                # Identity takes a single input — drop the scales edge.
                node.input = [node.input[0]]
                return True
        return False

0 commit comments

Comments
 (0)