Skip to content

Commit 473a9ce

Browse files
authored
Merge pull request #1547 from xadupre/ortext
Replace onnxruntime_customops by onnxruntime_extensions
2 parents (7bafec5 + 56223c1) — commit 473a9ce

File tree

5 files changed

+29
-6
lines changed

5 files changed

+29
-6
lines changed

README.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -293,6 +293,9 @@ model_proto, external_tensor_storage = tf2onnx.convert.from_keras(model,
293293
model: the tf.keras model we want to convert
294294
input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input
295295
opset: the opset to be used for the ONNX model, default is the latest
296+
custom_ops: if a model contains ops not recognized by onnx runtime,
297+
you can tag these ops with a custom op domain so that the
298+
runtime can still open the model. Type is a dictionary `{op name: domain}`.
296299
target: list of workarounds applied to help certain platforms
297300
custom_op_handlers: dictionary of custom ops handlers
298301
custom_rewriter: list of custom graph rewriters
@@ -322,6 +325,9 @@ model_proto, external_tensor_storage = tf2onnx.convert.from_function(function,
322325
function: the tf.function we want to convert
323326
input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input
324327
opset: the opset to be used for the ONNX model, default is the latest
328+
custom_ops: if a model contains ops not recognized by onnx runtime,
329+
you can tag these ops with a custom op domain so that the
330+
runtime can still open the model. Type is a dictionary `{op name: domain}`.
325331
target: list of workarounds applied to help certain platforms
326332
custom_op_handlers: dictionary of custom ops handlers
327333
custom_rewriter: list of custom graph rewriters

ci_build/azure_pipelines/templates/setup.yml

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -25,12 +25,20 @@ steps:
2525
pip install --index-url https://test.pypi.org/simple/ ort-nightly
2626
fi
2727
28-
if [[ $CI_PLATFORM == "windows" ]] ;
28+
if [[ $CI_TF_VERSION == 2.* ]] ;
2929
then
30-
pip install -i https://test.pypi.org/simple/ onnxruntime-customops==0.0.1
31-
if [[ $CI_TF_VERSION == 2.* ]] ;
30+
pip install onnxruntime-extensions
31+
if [[ $CI_TF_VERSION == 2.3* ]] ;
3232
then
33-
pip install tensorflow-text
33+
pip install tensorflow-text==${CI_TF_VERSION}
34+
fi
35+
if [[ $CI_TF_VERSION == 2.4* ]] ;
36+
then
37+
pip install tensorflow-text==${CI_TF_VERSION}
38+
fi
39+
if [[ $CI_TF_VERSION == 2.5* ]] ;
40+
then
41+
pip install tensorflow-text>=2.5
3442
fi
3543
fi
3644

tests/backend_test_base.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ def run_onnxruntime(self, model_path, inputs, output_names, use_custom_ops=False
8080
providers = ['CUDAExecutionProvider']
8181
opt = rt.SessionOptions()
8282
if use_custom_ops:
83-
from onnxruntime_customops import get_library_path
83+
from onnxruntime_extensions import get_library_path
8484
opt.register_custom_ops_library(get_library_path())
8585
# in case of issues with the runtime, one can enable more logging
8686
# opt.log_severity_level = 0

tests/common.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -242,7 +242,7 @@ def requires_custom_ops(message=""):
242242
""" Skip until custom ops framework is on PyPI. """
243243
reason = _append_message("test needs custom ops framework", message)
244244
try:
245-
import onnxruntime_customops #pylint: disable=import-outside-toplevel,unused-import
245+
import onnxruntime_extensions #pylint: disable=import-outside-toplevel,unused-import
246246
can_import = True
247247
except ModuleNotFoundError:
248248
can_import = False

tf2onnx/convert.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -332,6 +332,9 @@ def from_keras(model, input_signature=None, opset=None, custom_ops=None, custom_
332332
model: the tf.keras model we want to convert
333333
input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input
334334
opset: the opset to be used for the ONNX model, default is the latest
335+
custom_ops: if a model contains ops not recognized by onnx runtime,
336+
you can tag these ops with a custom op domain so that the
337+
runtime can still open the model. Type is a dictionary `{op name: domain}`.
335338
target: list of workarounds applied to help certain platforms
336339
custom_op_handlers: dictionary of custom ops handlers
337340
custom_rewriter: list of custom graph rewriters
@@ -417,6 +420,9 @@ def from_function(function, input_signature=None, opset=None, custom_ops=None, c
417420
function: the tf.function we want to convert
418421
input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input
419422
opset: the opset to be used for the ONNX model, default is the latest
423+
custom_ops: if a model contains ops not recognized by onnx runtime,
424+
you can tag these ops with a custom op domain so that the
425+
runtime can still open the model. Type is a dictionary `{op name: domain}`.
420426
target: list of workarounds applied to help certain platforms
421427
custom_op_handlers: dictionary of custom ops handlers
422428
custom_rewriter: list of custom graph rewriters
@@ -478,6 +484,9 @@ def from_graph_def(graph_def, name=None, input_names=None, output_names=None, op
478484
output_names: list of output names
479485
name: A name for the graph
480486
opset: the opset to be used for the ONNX model, default is the latest
487+
custom_ops: if a model contains ops not recognized by onnx runtime,
488+
you can tag these ops with a custom op domain so that the
489+
runtime can still open the model. Type is a dictionary `{op name: domain}`.
481490
target: list of workarounds applied to help certain platforms
482491
custom_op_handlers: dictionary of custom ops handlers
483492
custom_rewriter: list of custom graph rewriters

0 commit comments

Comments (0)