Skip to content

Commit e36c242

Browse files
authored
Merge pull request #459 from nbcsm/logging
refactor optimizer & refine logging
2 parents 2b3c23d + b93c773 commit e36c242

11 files changed

+134
-153
lines changed

tests/run_pretrained_models.py

Lines changed: 23 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,6 @@
1313
import sys
1414
import tarfile
1515
import time
16-
import traceback
1716
import zipfile
1817

1918
import PIL.Image
@@ -186,7 +185,7 @@ def run_onnxruntime(self, name, model_proto, inputs):
186185
"""Run test against onnxruntime backend."""
187186
import onnxruntime as rt
188187
model_path = utils.save_onnx_model(TEMP_DIR, name, inputs, model_proto, include_test_data=True)
189-
print("\t\t" + model_path)
188+
logger.info("Model saved to %s", model_path)
190189
m = rt.InferenceSession(model_path)
191190
results = m.run(self.output_names, inputs)
192191
if self.perf:
@@ -201,12 +200,13 @@ def create_onnx_file(name, model_proto, inputs, outdir):
201200
os.makedirs(outdir, exist_ok=True)
202201
model_path = os.path.join(outdir, name + ".onnx")
203202
utils.save_protobuf(model_path, model_proto)
204-
print("\tcreated", model_path)
203+
logger.info("Created %s", model_path)
205204

206205
def run_test(self, name, backend="caffe2", onnx_file=None, opset=None, extra_opset=None,
207206
perf=None, fold_const=None):
208207
"""Run complete test against backend."""
209-
print(name)
208+
logger.info("===================================")
209+
logger.info("Running %s", name)
210210
self.perf = perf
211211

212212
# get the model
@@ -216,7 +216,7 @@ def run_test(self, name, backend="caffe2", onnx_file=None, opset=None, extra_ops
216216
else:
217217
model_path = self.local
218218
dir_name = os.path.dirname(self.local)
219-
print("\tdownloaded", model_path)
219+
logger.info("Downloaded to %s", model_path)
220220

221221
input_names = list(self.input_names.keys())
222222
outputs = self.output_names
@@ -261,29 +261,24 @@ def run_test(self, name, backend="caffe2", onnx_file=None, opset=None, extra_ops
261261

262262
# run the model with tensorflow
263263
if self.skip_tensorflow:
264-
print("\ttensorflow", "SKIPPED")
264+
logger.info("TensorFlow SKIPPED")
265265
else:
266266
tf_results = self.run_tensorflow(sess, inputs)
267-
print("\ttensorflow", "OK")
267+
logger.info("TensorFlow OK")
268268
model_proto = None
269269
try:
270270
# convert model to onnx
271271
onnx_graph = self.to_onnx(sess.graph, opset=opset, extra_opset=extra_opset,
272272
shape_override=shape_override, input_names=inputs.keys())
273273
model_proto = onnx_graph.make_model("converted from tf2onnx")
274-
new_model_proto = optimizer.optimize_graph(onnx_graph).make_model("optimized")
275-
if new_model_proto:
276-
model_proto = new_model_proto
277-
else:
278-
print("\tNON-CRITICAL, optimizers are not applied successfully")
279-
print("\tto_onnx", "OK")
274+
model_proto = optimizer.optimize_graph(onnx_graph).make_model("optimized")
275+
logger.info("To_ONNX, OK")
280276
if utils.is_debug_mode():
281277
onnx_graph.dump_graph()
282278
if onnx_file:
283279
self.create_onnx_file(name, model_proto, inputs, onnx_file)
284-
except Exception as ex:
285-
tb = traceback.format_exc()
286-
print("\tto_onnx", "FAIL", ex, tb)
280+
except Exception:
281+
logger.error("To_ONNX FAIL", exc_info=1)
287282

288283
try:
289284
onnx_results = None
@@ -295,27 +290,25 @@ def run_test(self, name, backend="caffe2", onnx_file=None, opset=None, extra_ops
295290
onnx_results = self.run_onnxruntime(name, model_proto, inputs)
296291
else:
297292
raise ValueError("unknown backend")
298-
print("\trun_onnx OK")
293+
logger.info("Run_ONNX OK")
299294

300295
try:
301296
if self.skip_tensorflow:
302-
print("\tResults: skipped tensorflow")
297+
logger.info("Results: skipped tensorflow")
303298
else:
304299
if self.check_only_shape:
305300
for tf_res, onnx_res in zip(tf_results, onnx_results):
306301
np.testing.assert_array_equal(tf_res.shape, onnx_res.shape)
307302
else:
308303
for tf_res, onnx_res in zip(tf_results, onnx_results):
309304
np.testing.assert_allclose(tf_res, onnx_res, rtol=self.rtol, atol=self.atol)
310-
print("\tResults: OK")
305+
logger.info("Results: OK")
311306
return True
312-
except Exception as ex:
313-
tb = traceback.format_exc()
314-
print("\tResults", ex, tb)
307+
except Exception:
308+
logger.error("Results", exc_info=1)
315309

316-
except Exception as ex:
317-
tb = traceback.format_exc()
318-
print("\trun_onnx", "FAIL", ex, tb)
310+
except Exception:
311+
logger.error("Run_ONNX FAIL", exc_info=1)
319312

320313
return False
321314

@@ -379,7 +372,7 @@ def main():
379372
Test.target = args.target
380373
tests = tests_from_yaml(args.config)
381374
if args.list:
382-
print(sorted(tests.keys()))
375+
logger.info(sorted(tests.keys()))
383376
return 0
384377
if args.tests:
385378
test_keys = args.tests.split(",")
@@ -397,17 +390,17 @@ def main():
397390
ret = t.run_test(test, backend=args.backend, onnx_file=args.onnx_file,
398391
opset=args.opset, extra_opset=args.extra_opset, perf=args.perf,
399392
fold_const=args.fold_const)
400-
except Exception as ex:
393+
except Exception:
394+
logger.error("Failed to run %s", test, exc_info=1)
401395
ret = None
402-
tb = traceback.format_exc()
403-
print(ex, tb)
404396
finally:
405397
if not utils.is_debug_mode():
406398
utils.delete_directory(TEMP_DIR)
407399
if not ret:
408400
failed += 1
409401

410-
print("=== RESULT: {} failed of {}, backend={}".format(failed, count, args.backend))
402+
logger.info("===================================")
403+
logger.info("RESULT: %s failed of %s, backend=%s", failed, count, args.backend)
411404

412405
if args.perf:
413406
with open(args.perf, "w") as f:

tf2onnx/convert.py

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,8 @@ def main():
8585
if args.debug:
8686
utils.set_debug_mode(True)
8787

88+
logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)
89+
8890
# override unknown dimensions from -1 to 1 (aka batchsize 1) since not every runtime does
8991
# support unknown dimensions.
9092
utils.ONNX_UNKNOWN_DIMENSION = args.unknown_dim
@@ -126,16 +128,17 @@ def main():
126128

127129
model_proto = g.make_model("converted from {}".format(model_path))
128130

129-
new_model_proto = GraphUtil.optimize_model_proto(model_proto)
130-
if new_model_proto:
131-
model_proto = new_model_proto
132-
else:
133-
print("NON-CRITICAL, optimizers are not applied successfully")
131+
logger.info("")
132+
model_proto = GraphUtil.optimize_model_proto(model_proto)
134133

135134
# write onnx graph
135+
logger.info("")
136+
logger.info("Successfully converted TensorFlow model %s to ONNX", model_path)
136137
if args.output:
137138
utils.save_protobuf(args.output, model_proto)
138-
print("\nComplete successfully, the onnx model is generated at " + args.output)
139+
logger.info("ONNX model is saved at %s", args.output)
140+
else:
141+
logger.info("To export ONNX model to file, please run with `--output` option")
139142

140143

141144
if __name__ == "__main__":

tf2onnx/graph.py

Lines changed: 18 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,6 @@
1212
import collections
1313
import copy
1414
import logging
15-
import sys
16-
import traceback
1715
import six
1816
import numpy as np
1917

@@ -23,7 +21,6 @@
2321
from tf2onnx import optimizer
2422
from tf2onnx.schemas import get_schema
2523

26-
2724
logger = logging.getLogger(__name__)
2825

2926

@@ -1039,18 +1036,19 @@ def extract_sub_graph_nodes(self, outputs_name, input_checker=None, ignore_unuse
10391036

10401037
def delete_unused_nodes(self, outputs_name):
10411038
"""Delete nodes not in subgraph ending with output_names."""
1042-
if outputs_name:
1043-
# we need keep those placeholders that are used as input of Loop's body graph.
1044-
# some of them are not used in the graph, but still need be there to keep the graph complete.
1045-
related_nodes = self.extract_sub_graph_nodes(outputs_name, ignore_unused_placeholder=False)
1046-
for node in related_nodes:
1047-
attr_body_graphs = node.get_body_graphs()
1048-
if attr_body_graphs:
1049-
for _, body_graph in attr_body_graphs.items():
1050-
body_graph.delete_unused_nodes(body_graph.outputs)
1051-
self.reset_nodes(related_nodes)
1052-
else:
1053-
print("WARNING: outputs not specified, delete_unused_nodes not taking effect.")
1039+
if not outputs_name:
1040+
logger.debug("Outputs not specified, delete_unused_nodes not taking effect.")
1041+
return
1042+
1043+
# we need to keep those placeholders that are used as inputs of Loop's body graph.
1044+
# some of them are not used in the graph, but still need to be there to keep the graph complete.
1045+
related_nodes = self.extract_sub_graph_nodes(outputs_name, ignore_unused_placeholder=False)
1046+
for node in related_nodes:
1047+
attr_body_graphs = node.get_body_graphs()
1048+
if attr_body_graphs:
1049+
for _, body_graph in attr_body_graphs.items():
1050+
body_graph.delete_unused_nodes(body_graph.outputs)
1051+
self.reset_nodes(related_nodes)
10541052

10551053

10561054
class GraphUtil(object):
@@ -1066,7 +1064,7 @@ def optimize_model_proto(onnx_model_proto):
10661064
10671065
Returns:
10681066
model proto after optimization, if optimizer run successfully
1069-
or None, if exceptions happens
1067+
or onnx_model_proto, if an exception happens
10701068
"""
10711069
try:
10721070
kwargs = GraphUtil.get_onnx_model_properties(onnx_model_proto)
@@ -1080,12 +1078,10 @@ def optimize_model_proto(onnx_model_proto):
10801078
helper.set_model_props(model_proto, metadata_props)
10811079
return model_proto
10821080
except Exception:
1083-
# sometimes, onnx shape inference will fail for some reason, in this case,
1084-
# we just log the error, and skip the transpose optimizer.
1085-
type_, value_, traceback_ = sys.exc_info()
1086-
ex_ext = traceback.format_exception(type_, value_, traceback_)
1087-
print("NON-CRITICAL error in optimizer: ", ex_ext)
1088-
return None
1081+
# sometimes, onnx shape inference will fail for some reason,
1082+
# return onnx_model_proto for this case
1083+
logger.warning("Failed to optimize model proto", exc_info=1)
1084+
return onnx_model_proto
10891085

10901086
@staticmethod
10911087
def get_onnx_model_properties(onnx_model_proto):

tf2onnx/logging.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -47,15 +47,15 @@ def basicConfig(**kwargs): # pylint: disable=invalid-name, function-redefined
4747
set_tf_verbosity(_logging.getLogger().getEffectiveLevel())
4848

4949

50-
_VERBOSITY_TO_LEVEL = [INFO, VERBOSE, DEBUG]
50+
_LOG_LEVELS = [FATAL, ERROR, WARNING, INFO, VERBOSE, DEBUG]
5151

5252

53-
def get_verbosity_level(verbosity, default_level=INFO):
53+
def get_verbosity_level(verbosity, base_level=INFO):
5454
""" If verbosity is specified, return the corresponding level, otherwise, return base_level. """
5555
if verbosity is None:
56-
return default_level
57-
verbosity = min(max(0, verbosity), len(_VERBOSITY_TO_LEVEL) - 1)
58-
return _VERBOSITY_TO_LEVEL[verbosity]
56+
return base_level
57+
verbosity = min(max(0, verbosity) + _LOG_LEVELS.index(base_level), len(_LOG_LEVELS) - 1)
58+
return _LOG_LEVELS[verbosity]
5959

6060

6161
def set_level(level):
@@ -66,6 +66,9 @@ def set_level(level):
6666

6767
def set_tf_verbosity(level):
6868
""" Set TF logging verbosity."""
69+
# TF log is too verbose, adjust it
70+
level = WARNING if level >= VERBOSE else level
71+
6972
tf.logging.set_verbosity(level)
7073

7174
# TF_CPP_MIN_LOG_LEVEL:

tf2onnx/optimizer/__init__.py

Lines changed: 38 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -6,43 +6,51 @@
66
from __future__ import print_function
77
from __future__ import unicode_literals
88

9-
import sys
10-
import traceback
119
from collections import OrderedDict
10+
import copy
1211

13-
from tf2onnx.optimizer.const_fold_optimizer import ConstFoldOptimizer
14-
from tf2onnx.optimizer.identity_optimizer import IdentityOptimizer
15-
from tf2onnx.optimizer.merge_duplicated_nodes_optimizer import MergeDuplicatedNodesOptimizer
16-
from tf2onnx.optimizer.transpose_optimizer import TransposeOptimizer
17-
18-
# pylint: disable=missing-docstring, broad-except
12+
from .const_fold_optimizer import ConstFoldOptimizer
13+
from .identity_optimizer import IdentityOptimizer
14+
from .merge_duplicated_nodes_optimizer import MergeDuplicatedNodesOptimizer
15+
from .transpose_optimizer import TransposeOptimizer
16+
from .. import logging
1917

2018
# optimizer sequence need to be considered carefully
2119
_optimizers = OrderedDict([
22-
("transpose_opt", TransposeOptimizer),
23-
("fold_const", ConstFoldOptimizer),
24-
# merge_duplicated_nodes should be used after transpose_opt
25-
# for transpose_opt may have some trans nodes that can be merge
26-
("merge_duplicated_nodes", MergeDuplicatedNodesOptimizer),
27-
("identity_opt", IdentityOptimizer),
20+
("optimize_transpose", TransposeOptimizer),
21+
("fold_constants", ConstFoldOptimizer),
22+
# merge_duplication should be used after optimize_transpose
23+
# because optimize_transpose may leave some transpose nodes that can be merged
24+
("merge_duplication", MergeDuplicatedNodesOptimizer),
25+
("remove_identity", IdentityOptimizer),
2826
])
2927

3028

31-
def optimize_graph(graph):
32-
try:
33-
opts = _get_optimizers()
34-
for opt in opts.values():
35-
graph = opt().optimize(graph)
36-
37-
graph.update_proto()
38-
return graph
39-
except Exception:
40-
# degradation to non-optimized model proto
41-
type_, value_, traceback_ = sys.exc_info()
42-
ex_ext = traceback.format_exception(type_, value_, traceback_)
43-
print("NON-CRITICAL error in optimizer: ", ex_ext)
44-
return None
45-
46-
4729
def _get_optimizers():
4830
return _optimizers
31+
32+
33+
def optimize_graph(graph):
34+
""" Optimize graph, return optimized graph. No throw. """
35+
logger = logging.getLogger(__name__)
36+
logger.info("Optimizing ONNX model")
37+
38+
before = graph.dump_node_statistics()
39+
opts = _get_optimizers()
40+
for name, factory in opts.items():
41+
try:
42+
logger.verbose("Apply %s", name)
43+
current = copy.deepcopy(graph)
44+
graph = factory().optimize(current)
45+
except Exception: # pylint: disable=broad-except
46+
# if current optimizer fails, continue with other optimizers
47+
logger.warning("Failed to apply %s", name, exc_info=1)
48+
49+
after = graph.dump_node_statistics()
50+
diff = copy.deepcopy(after)
51+
diff.subtract(before)
52+
diff = ["{} {} ({}->{})".format(k, str(v) if v < 0 else '+' + str(v), before.get(k, 0), after.get(k, 0))
53+
for k, v in diff.most_common() if v != 0]
54+
logger.info("After optimization: %s", ', '.join(diff) if diff else "no change")
55+
56+
return graph

tf2onnx/optimizer/const_fold_optimizer.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,8 @@
66
for example, if the input of a transpose node is const, we can do the transpose statically instead of at runtime
77
"""
88

9-
from tf2onnx.optimizer.optimizer_base import GraphOptimizerBase
10-
from tf2onnx import utils
9+
from .. import utils
10+
from .optimizer_base import GraphOptimizerBase
1111

1212
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring
1313

0 commit comments

Comments
 (0)