
Commit 25ae1e1

Merge master

Signed-off-by: bedapisl <[email protected]>
2 parents 4a3d11b + b51df2f


51 files changed: +1313 -238 lines changed

README.md

Lines changed: 13 additions & 5 deletions
@@ -2,8 +2,8 @@
 | Build Type | OS | Python | Tensorflow | Onnx opset | Status |
 | --- | --- | --- | --- | --- | --- |
-| Unit Test - Basic | Linux, MacOS<sup>\*</sup>, Windows<sup>\*</sup> | 3.6, 3.7 | 1.12-1.15, 2.1-2.2 | 7-12 | [![Build Status](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_apis/build/status/unit_test?branchName=master)](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_build/latest?definitionId=16&branchName=master) |
-| Unit Test - Full | Linux, MacOS, Windows | 3.6, 3.7 | 1.12-1.15, 2.1-2.2 | 7-12 | [![Build Status](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_apis/build/status/unit_test-matrix?branchName=master)](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_build/latest?definitionId=18&branchName=master) | |
+| Unit Test - Basic | Linux, MacOS<sup>\*</sup>, Windows<sup>\*</sup> | 3.6, 3.7, 3.8 | 1.12-1.15, 2.1-2.3 | 7-12 | [![Build Status](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_apis/build/status/unit_test?branchName=master)](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_build/latest?definitionId=16&branchName=master) |
+| Unit Test - Full | Linux, MacOS, Windows | 3.6, 3.7, 3.8 | 1.12-1.15, 2.1-2.3 | 7-12 | [![Build Status](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_apis/build/status/unit_test-matrix?branchName=master)](https://dev.azure.com/tensorflow-onnx/tensorflow-onnx/_build/latest?definitionId=18&branchName=master) | |
 
 ## Supported Versions
 
@@ -20,7 +20,7 @@ If you want the graph to be generated with a specific opset, use ```--opset``` i
 
 We support all ```tf-1.x graphs```. To keep our test matrix manageable we test tf2onnx running on top of ```tf-1.12 and up```. tf2onnx-1.5.4 was the last version that was tested all the way back to tf-1.4.
 
-There is now ```experimental support for tf-2.x```.
+There is now ```support for tf-2.x```.
 With the exception of LSTM unit tests, all unit tests are enabled and passing.
 Unit tests that we still need to fix are marked with ```@skip_tf2```.
 GRU/LSTM's are converting but not runnable due to type/shape inference issues at runtime (working on that one).
@@ -193,6 +193,12 @@ Only valid with parameter `--saved_model`. Specifies which signature to use with
 
 Only valid with parameter `--saved_model`. If a model contains a list of concrete functions, under the function name `__call__` (as can be viewed using the command `saved_model_cli show --all`), this parameter is a 0-based integer specifying which function in that list should be converted. This parameter takes priority over `--signature_def`, which will be ignored.
 
+#### --large_model
+
+(This is experimental, valid only for TF2.x models)
+
+Only valid with parameter `--saved_model`. When set, creates a zip file containing the ONNX protobuf model and large tensor values stored externally. This allows for converting models that exceed the 2 GB protobuf limit.
+
 #### --target
 
 Some models require special handling to run on some runtimes. In particular, the model may use unsupported data types. Workarounds are activated with ```--target TARGET```. Currently supported values are listed on this [wiki](https://github.com/onnx/tensorflow-onnx/wiki/target). If your model will be run on Windows ML, you should specify the appropriate target value.
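
[Editor's note on the `--large_model` hunk above: a hypothetical invocation might look like the following; the SavedModel directory and output file names are placeholders, not part of this commit. The other flags (`--saved-model`, `--output`, `--opset`) are the same ones the example scripts added in this commit already use; only `--large_model` is new.]

python -m tf2onnx.convert --saved-model my_saved_model --large_model --output my_model.zip --opset 12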
@@ -274,7 +280,8 @@ tf2onnx.tfonnx.process_tf_graph(tf_graph,
                opset=None, custom_op_handlers=None,
                custom_rewriter=None, extra_opset=None,
                shape_override=None, inputs_as_nchw=None,
-               input_names=None, output_names=None):
+               input_names=None, output_names=None,
+               const_node_values=None):
     """Convert tensorflow graph to onnx graph.
     Args:
         tf_graph: tensorflow graph
@@ -289,11 +296,12 @@ tf2onnx.tfonnx.process_tf_graph(tf_graph,
         inputs_as_nchw: transpose inputs in list from nchw to nchw
         input_names: list of input node names in graph, input name format as node_name:port_id
         output_names: list of output node names in graph, output name format as node_name:port_id
+        const_node_values: an optional dict mapping node names to tensor values
     Return:
         onnx graph
     """
 ```
-For example in [examples/call_coverter_via_python.py]():
+For example in [examples/call_converter_via_python.py]():
 ```
 import tensorflow as tf
 import tf2onnx
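
[Editor's note: to illustrate the new `const_node_values` argument end to end, here is a minimal sketch following the pattern this commit adds to tests/backend_test_base.py. `graph_def` is assumed to be a frozen tf.GraphDef obtained elsewhere, and the input/output names are placeholders.]

import tensorflow as tf
import tf2onnx
from tf2onnx import optimizer
from tf2onnx.tf_loader import tf_reset_default_graph, tf_session
from tf2onnx.tf_utils import compress_graph_def

tf_reset_default_graph()
with tf_session() as sess:
    # Strip large constant values out of the GraphDef; returns a dict
    # mapping node names to tensor values, as process_tf_graph expects.
    const_node_values = compress_graph_def(graph_def)
    tf.import_graph_def(graph_def, name='')
    g = tf2onnx.tfonnx.process_tf_graph(sess.graph, opset=12,
                                        input_names=["input:0"],      # placeholder name
                                        output_names=["output:0"],    # placeholder name
                                        const_node_values=const_node_values)
    g = optimizer.optimize_graph(g)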

examples/benchmark_tfmodel_ort.py

Lines changed: 47 additions & 0 deletions
@@ -0,0 +1,47 @@
+"""
+The following code compares the speed of tensorflow against onnxruntime
+with a model downloaded from Tensorflow Hub.
+"""
+import time
+import numpy
+from tqdm import tqdm
+import tensorflow_hub as hub
+import onnxruntime as ort
+
+
+def generate_random_images(shape=(100, 100), n=10):
+    imgs = []
+    for i in range(n):
+        sh = (1,) + shape + (3,)
+        img = numpy.clip(numpy.abs(numpy.random.randn(*sh)), 0, 1) * 255
+        img = img.astype(numpy.float32)
+        imgs.append(img)
+    return imgs
+
+
+def measure_time(fct, imgs):
+    results = []
+    times = []
+    for img in tqdm(imgs):
+        begin = time.perf_counter()
+        result = fct(img)
+        end = time.perf_counter()
+        results.append(result)
+        times.append(end - begin)
+    return results, times
+
+
+imgs = generate_random_images()
+
+# Download model from https://tfhub.dev/captain-pool/esrgan-tf2/1
+# python -m tf2onnx.convert --saved-model esrgan --output "esrgan-tf2.onnx" --opset 12
+ort = ort.InferenceSession('esrgan-tf2.onnx')
+fct_ort = lambda img: ort.run(None, {'input_0:0': img})
+results_ort, duration_ort = measure_time(fct_ort, imgs)
+print(len(imgs), duration_ort)
+
+model = hub.load("https://tfhub.dev/captain-pool/esrgan-tf2/1")
+results_tf, duration_tf = measure_time(model, imgs)
+print(len(imgs), duration_tf)
+
+print("ratio ORT / TF", sum(duration_ort) / sum(duration_tf))

examples/end2end_tfhub.py

Lines changed: 75 additions & 0 deletions
@@ -0,0 +1,75 @@
+"""
+This example retrieves a model from tensorflowhub.
+It is converted into ONNX. Predictions are compared to
+the predictions from tensorflow to check there is no
+discrepencies. Inferencing time is also compared between
+*onnxruntime*, *tensorflow* and *tensorflow.lite*.
+"""
+from onnxruntime import InferenceSession
+import os
+import sys
+import subprocess
+import timeit
+import numpy as np
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import Input
+try:
+    import tensorflow_hub as tfhub
+except ImportError:
+    # no tensorflow_hub
+    print("tensorflow_hub not installed.")
+    sys.exit(0)
+
+########################################
+# Downloads the model.
+hub_layer = tfhub.KerasLayer(
+    "https://tfhub.dev/google/efficientnet/b0/classification/1")
+model = keras.Sequential()
+model.add(Input(shape=(224, 224, 3), dtype=tf.float32))
+model.add(hub_layer)
+print(model.summary())
+
+########################################
+# Saves the model.
+if not os.path.exists("efficientnetb0clas"):
+    os.mkdir("efficientnetb0clas")
+tf.keras.models.save_model(model, "efficientnetb0clas")
+
+input_names = [n.name for n in model.inputs]
+output_names = [n.name for n in model.outputs]
+print('inputs:', input_names)
+print('outputs:', output_names)
+
+########################################
+# Testing the model.
+input = np.random.randn(2, 224, 224, 3).astype(np.float32)
+expected = model.predict(input)
+print(expected)
+
+########################################
+# Run the command line.
+proc = subprocess.run(
+    'python -m tf2onnx.convert --saved-model efficientnetb0clas '
+    '--output efficientnetb0clas.onnx --opset 12'.split(),
+    capture_output=True)
+print(proc.returncode)
+print(proc.stdout.decode('ascii'))
+print(proc.stderr.decode('ascii'))
+
+########################################
+# Runs onnxruntime.
+session = InferenceSession("efficientnetb0clas.onnx")
+got = session.run(None, {'input_1:0': input})
+print(got[0])
+
+########################################
+# Measures the differences.
+print(np.abs(got[0] - expected).max())
+
+########################################
+# Measures processing time.
+print('tf:', timeit.timeit('model.predict(input)',
+                           number=10, globals=globals()))
+print('ort:', timeit.timeit("session.run(None, {'input_1:0': input})",
+                            number=10, globals=globals()))

examples/end2end_tfkeras.py

Lines changed: 70 additions & 0 deletions
@@ -0,0 +1,70 @@
+"""
+This example builds a simple model without training.
+It is converted into ONNX. Predictions are compared to
+the predictions from tensorflow to check there is no
+discrepencies. Inferencing time is also compared between
+*onnxruntime*, *tensorflow* and *tensorflow.lite*.
+"""
+from onnxruntime import InferenceSession
+import os
+import subprocess
+import timeit
+import numpy as np
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers, Input
+
+########################################
+# Creates the model.
+model = keras.Sequential()
+model.add(Input((4, 4)))
+model.add(layers.SimpleRNN(8))
+model.add(layers.Dense(2))
+print(model.summary())
+input_names = [n.name for n in model.inputs]
+output_names = [n.name for n in model.outputs]
+print('inputs:', input_names)
+print('outputs:', output_names)
+
+########################################
+# Training
+# ....
+# Skipped.
+
+########################################
+# Testing the model.
+input = np.random.randn(2, 4, 4).astype(np.float32)
+expected = model.predict(input)
+print(expected)
+
+########################################
+# Saves the model.
+if not os.path.exists("simple_rnn"):
+    os.mkdir("simple_rnn")
+tf.keras.models.save_model(model, "simple_rnn")
+
+########################################
+# Run the command line.
+proc = subprocess.run('python -m tf2onnx.convert --saved-model simple_rnn '
+                      '--output simple_rnn.onnx --opset 12'.split(),
+                      capture_output=True)
+print(proc.returncode)
+print(proc.stdout.decode('ascii'))
+print(proc.stderr.decode('ascii'))
+
+########################################
+# Runs onnxruntime.
+session = InferenceSession("simple_rnn.onnx")
+got = session.run(None, {'input_1:0': input})
+print(got[0])
+
+########################################
+# Measures the differences.
+print(np.abs(got[0] - expected).max())
+
+########################################
+# Measures processing time.
+print('tf:', timeit.timeit('model.predict(input)',
+                           number=100, globals=globals()))
+print('ort:', timeit.timeit("session.run(None, {'input_1:0': input})",
+                            number=100, globals=globals()))
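
[Editor's note: the feed name 'input_1:0' used above comes from the Keras-generated input tensor name; when in doubt, the names a converted model actually exposes can be read off the ONNX session. A short sketch, not part of the commit:]

from onnxruntime import InferenceSession

session = InferenceSession("simple_rnn.onnx")
# Inspect the input/output names the ONNX model actually exposes.
print([i.name for i in session.get_inputs()])
print([o.name for o in session.get_outputs()])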

tests/backend_test_base.py

Lines changed: 21 additions & 9 deletions
@@ -26,6 +26,8 @@
 from tf2onnx import optimizer
 from tf2onnx.tf_loader import tf_reset_default_graph, tf_session, tf_placeholder, from_function, freeze_session
 from tf2onnx.tf_loader import tf_optimize, is_tf2
+from tf2onnx.tf_utils import compress_graph_def
+from tf2onnx.graph import ExternalTensorStorage
 
 
 class Tf2OnnxBackendTestBase(unittest.TestCase):
@@ -72,9 +74,10 @@ def run_onnxruntime(self, model_path, inputs, output_names):
         results = m.run(output_names, inputs)
         return results
 
-    def run_backend(self, g, outputs, input_dict):
-        model_proto = g.make_model("test")
-        model_path = self.save_onnx_model(model_proto, input_dict)
+    def run_backend(self, g, outputs, input_dict, large_model=False):
+        tensor_storage = ExternalTensorStorage() if large_model else None
+        model_proto = g.make_model("test", external_tensor_storage=tensor_storage)
+        model_path = self.save_onnx_model(model_proto, input_dict, external_tensor_storage=tensor_storage)
 
         if self.config.backend == "onnxruntime":
             y = self.run_onnxruntime(model_path, input_dict, outputs)
@@ -86,7 +89,8 @@ def run_backend(self, g, outputs, input_dict):
 
     def run_test_case(self, func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-07, atol=1e-5,
                       convert_var_to_const=True, constant_fold=True, check_value=True, check_shape=True,
-                      check_dtype=True, process_args=None, onnx_feed_dict=None, graph_validator=None, as_session=False):
+                      check_dtype=True, process_args=None, onnx_feed_dict=None, graph_validator=None, as_session=False,
+                      large_model=False):
         # optional - passed to process_tf_graph
         if process_args is None:
             process_args = {}
@@ -121,7 +125,9 @@ def run_test_case(self, func, feed_dict, input_names_with_port, output_names_wit
             concrete_func = tf.function(func, input_signature=tuple(input_tensors))
             concrete_func = concrete_func.get_concrete_function()
             graph_def = from_function(concrete_func,
-                                      input_names=list(feed_dict.keys()), output_names=output_names_with_port)
+                                      input_names=list(feed_dict.keys()),
+                                      output_names=output_names_with_port,
+                                      large_model=large_model)
         else:
             #
             # use graph to execute the tensorflow func
@@ -151,6 +157,9 @@ def run_test_case(self, func, feed_dict, input_names_with_port, output_names_wit
 
         tf_reset_default_graph()
         with tf_session() as sess:
+            const_node_values = None
+            if large_model:
+                const_node_values = compress_graph_def(graph_def)
             tf.import_graph_def(graph_def, name='')
 
         if self.config.is_debug_mode:
@@ -161,9 +170,11 @@ def run_test_case(self, func, feed_dict, input_names_with_port, output_names_wit
         g = process_tf_graph(sess.graph, opset=self.config.opset,
                              input_names=list(feed_dict.keys()),
                              output_names=output_names_with_port,
-                             target=self.config.target, **process_args)
+                             target=self.config.target,
+                             const_node_values=const_node_values,
+                             **process_args)
         g = optimizer.optimize_graph(g)
-        actual = self.run_backend(g, output_names_with_port, onnx_feed_dict)
+        actual = self.run_backend(g, output_names_with_port, onnx_feed_dict, large_model)
 
         for expected_val, actual_val in zip(expected, actual):
             if check_value:
@@ -180,10 +191,11 @@ def run_test_case(self, func, feed_dict, input_names_with_port, output_names_wit
 
         return g
 
-    def save_onnx_model(self, model_proto, feed_dict, postfix=""):
+    def save_onnx_model(self, model_proto, feed_dict, postfix="", external_tensor_storage=None):
         target_path = utils.save_onnx_model(self.test_data_directory, self._testMethodName + postfix, feed_dict,
                                             model_proto, include_test_data=self.config.is_debug_mode,
-                                            as_text=self.config.is_debug_mode)
+                                            as_text=self.config.is_debug_mode,
+                                            external_tensor_storage=external_tensor_storage)
 
         self.logger.debug("create model file: %s", target_path)
         return target_path
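
[Editor's note: outside the test harness, the large-model pieces introduced here compose as follows; a minimal sketch assuming `g` is an onnx graph returned by process_tf_graph with const_node_values, as in run_test_case above.]

from tf2onnx.graph import ExternalTensorStorage

# Collect large tensor values outside the protobuf so the model stays
# under the 2 GB limit; mirrors the run_backend change above.
tensor_storage = ExternalTensorStorage()
model_proto = g.make_model("test", external_tensor_storage=tensor_storage)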
