Commit 31e694f

Merge branch 'master' into r1.8
2 parents: 05babf2 + 5d2b73c

38 files changed: +2704 additions, -506 deletions

README.md

Lines changed: 15 additions & 1 deletion
@@ -130,12 +130,14 @@ You find an end-to-end tutorial for ssd-mobilenet [here](tutorials/ConvertingSSD
 python -m tf2onnx.convert
     --saved-model SOURCE_SAVED_MODEL_PATH |
     --checkpoint SOURCE_CHECKPOINT_METAFILE_PATH |
+    --tflite SOURCE_TFLITE_PATH |
     --input | --graphdef SOURCE_GRAPHDEF_PB
     --output TARGET_ONNX_MODEL
     [--inputs GRAPH_INPUTS]
     [--outputs GRAPH_OUTPUTS]
     [--inputs-as-nchw inputs_provided_as_nchw]
     [--opset OPSET]
+    [--dequantize]
     [--tag TAG]
     [--signature_def SIGNATURE_DEF]
     [--concrete_function CONCRETE_FUNCTION]
@@ -158,6 +160,12 @@ TensorFlow model as saved_model. We expect the path to the saved_model directory
 
 TensorFlow model as checkpoint. We expect the path to the .meta file.
 
+#### --tflite
+
+(This is experimental)
+
+Convert a tflite model by providing a path to the .tflite file. Inputs/outputs do not need to be specified.
+
 #### --input or --graphdef
 
 TensorFlow model as graphdef file.
@@ -182,6 +190,12 @@ ONNX requires default values for graph inputs to be constant, while Tensorflow's
 
 By default we use opset 8 to generate the graph. By specifying ```--opset``` the user can override the default to generate a graph with the desired opset. For example ```--opset 5``` would create an onnx graph that uses only ops available in opset 5. Because older opsets in most cases have fewer ops, some models might not convert on an older opset.
 
+#### --dequantize
+
+(This is experimental, only supported for tflite)
+
+Produces a float32 model from a quantized tflite model. Detects ReLU and ReLU6 ops from quantization bounds.
+
 #### --tag
 
 Only valid with parameter `--saved-model`. Specifies the tag in the saved_model to be used. Typical value is 'serve'.
@@ -387,7 +401,7 @@ The converter needs to take care of a few things:
 
 tf2onnx starts with a frozen graph. This is because of item 3 above.
 
-### Step 2 - 1:1 convertion of the protobuf from tensorflow to onnx
+### Step 2 - 1:1 conversion of the protobuf from tensorflow to onnx
 
 tf2onnx first does a simple conversion from the TensorFlow protobuf format to the ONNX protobuf format without looking at individual ops.
 We do this so we can use the ONNX graph as internal representation and write helper functions around it.
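
The two new flags can be exercised together. A minimal sketch of a conversion command; the file names and opset value are placeholders, not taken from this commit:

    # Convert a quantized tflite model and dequantize it to float32.
    # model.tflite, model.onnx, and --opset 13 are hypothetical values.
    python -m tf2onnx.convert --tflite model.tflite --output model.onnx --opset 13 --dequantize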

ci_build/azure_pipelines/pretrained_model_test.yml

Lines changed: 10 additions & 0 deletions
@@ -1,6 +1,16 @@
 # Pre-trained model test
 
 jobs:
+- template: 'templates/job_generator.yml'
+  parameters:
+    python_versions: ['3.7']
+    tf_versions: ['2.4.1']
+    skip_tflite_tests: 'False'
+    skip_tf_tests: 'True'
+    job:
+      steps:
+      - template: 'pretrained_model_test.yml'
+
 - template: 'templates/job_generator.yml'
   parameters:
     python_versions: ['3.7']

ci_build/azure_pipelines/templates/pretrained_model_test.yml

Lines changed: 1 addition & 1 deletion
@@ -6,6 +6,6 @@ steps:
     status=0
     # TODO: fix unity model path
    # python tests/run_pretrained_models.py --backend $CI_ONNX_BACKEND --opset $CI_ONNX_OPSET --config tests/unity.yaml || status=$?
-    python tests/run_pretrained_models.py --backend $CI_ONNX_BACKEND --opset $CI_ONNX_OPSET --config tests/run_pretrained_models.yaml || status=$?
+    python tests/run_pretrained_models.py --backend $CI_ONNX_BACKEND --opset $CI_ONNX_OPSET --skip_tf_tests $CI_SKIP_TF_TESTS --skip_tflite_tests $CI_SKIP_TFLITE_TESTS --config tests/run_pretrained_models.yaml || status=$?
     exit $status
   displayName: 'Test Pre-trained Model'
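
Taken together, these two changes add a tflite-only CI job: the skip_tf_tests / skip_tflite_tests parameters of the new job appear to be forwarded to the test step as $CI_SKIP_TF_TESTS and $CI_SKIP_TFLITE_TESTS. A minimal sketch of reproducing that job locally; the backend name and opset value are assumptions, not taken from this commit:

    # Run only the tflite pretrained-model tests (backend/opset are hypothetical).
    python tests/run_pretrained_models.py --backend onnxruntime --opset 13 \
        --skip_tf_tests True --skip_tflite_tests False \
        --config tests/run_pretrained_models.yaml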

tests/ade20k.jpg

Binary file added (92.9 KB)

tests/backend_test_base.py

Lines changed: 9 additions & 5 deletions
@@ -76,12 +76,17 @@ def run_onnxcaffe2(self, onnx_graph, inputs):
     def run_onnxruntime(self, model_path, inputs, output_names):
         """Run test against onnxruntime backend."""
         import onnxruntime as rt
+        providers = ['CPUExecutionProvider']
+        if rt.get_device() == "GPU":
+            gpus = os.environ.get("CUDA_VISIBLE_DEVICES")
+            if gpus is None or len(gpus) > 1:
+                providers = ['CUDAExecutionProvider']
         opt = rt.SessionOptions()
         # in case of issues with the runtime, one can enable more logging
         # opt.log_severity_level = 0
         # opt.log_verbosity_level = 255
         # opt.enable_profiling = True
-        m = rt.InferenceSession(model_path, opt)
+        m = rt.InferenceSession(model_path, opt, providers=providers)
         results = m.run(output_names, inputs)
         return results

@@ -177,10 +182,9 @@ def freeze_and_run_tf(self, func, feed_dict, outputs, as_session, premade_placeh
             tf.import_graph_def(graph_def, name='')
         graph_def = tf_optimize(list(feed_dict.keys()), outputs, graph_def, fold_constant=constant_fold)
 
-        if True or self.config.is_debug_mode:
-            model_path = os.path.join(self.test_data_directory, self._testMethodName + "_after_tf_optimize.pb")
-            utils.save_protobuf(model_path, graph_def)
-            self.logger.debug("created file %s", model_path)
+        model_path = os.path.join(self.test_data_directory, self._testMethodName + "_after_tf_optimize.pb")
+        utils.save_protobuf(model_path, graph_def)
+        self.logger.debug("created file %s", model_path)
         return result, graph_def, initialized_tables
 
     def convert_to_tflite(self, graph_def, feed_dict, outputs):
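
The provider-selection logic in the first hunk can be tried standalone. A minimal sketch, assuming onnxruntime is installed; "model.onnx" is a placeholder path:

    # Use CUDA only on a GPU build of onnxruntime, and only when
    # CUDA_VISIBLE_DEVICES is unset or longer than one character
    # (the same length heuristic as the diff above).
    import os
    import onnxruntime as rt

    providers = ['CPUExecutionProvider']
    if rt.get_device() == "GPU":
        gpus = os.environ.get("CUDA_VISIBLE_DEVICES")
        if gpus is None or len(gpus) > 1:
            providers = ['CUDAExecutionProvider']

    sess = rt.InferenceSession("model.onnx", rt.SessionOptions(), providers=providers)
    print([inp.name for inp in sess.get_inputs()])  # sanity check: list model inputs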

tests/car.JPEG

Binary file added (29.9 KB)
