
Commit 200ceed

Merge pull request #1243 from onnx/tom/opset13updates3
Set UT to test opset 13
2 parents 4b81055 + d31e72e commit 200ceed

6 files changed: +17 −7 lines.

ci_build/azure_pipelines/templates/job_generator.yml

Lines changed: 2 additions & 2 deletions

@@ -5,8 +5,8 @@ parameters:
   python_versions: ['3.7']
   tf_versions: ['']
   onnx_versions: ['']
-  onnx_opsets: ['12', '11', '10', '9', '8', '7']
-  onnx_backends: {onnxruntime: ['1.4.0']}
+  onnx_opsets: ['13', '12', '11', '10', '9', '8', '7']
+  onnx_backends: {onnxruntime: ['1.6.0']}
   job: {}
   run_setup: 'True'
   report_coverage: 'False'

ci_build/azure_pipelines/templates/unit_test.yml

Lines changed: 1 addition & 1 deletion

@@ -1,7 +1,7 @@
 # Run unit test

 parameters:
-  onnx_opsets: ['12', '11', '10', '9', '8', '7']
+  onnx_opsets: ['13', '12', '11', '10', '9', '8', '7']

 steps:
 - ${{ each onnx_opset in parameters.onnx_opsets }}:

tf2onnx/constants.py

Lines changed: 1 addition & 1 deletion

@@ -42,5 +42,5 @@
 # Mapping opset to IR version.
 # Note: opset 7 and opset 8 came out with IR3 but we need IR4 because of PlaceholderWithDefault
 OPSET_TO_IR_VERSION = {
-    1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, 7: 4, 8: 4, 9: 4, 10: 5, 11: 6, 12: 7
+    1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, 7: 4, 8: 4, 9: 4, 10: 5, 11: 6, 12: 7, 13: 7
 }

tf2onnx/graph.py

Lines changed: 3 additions & 0 deletions

@@ -1135,6 +1135,9 @@ def make_model(self, graph_doc, optimize=False, graph_name="tf2onnx", external_t
         kwargs["opset_imports"] = opsets
         model_proto = helper.make_model(graph, **kwargs)

+        utils.make_sure(self.opset in constants.OPSET_TO_IR_VERSION,
+                        "Opset %s is not supported yet. Please use a lower opset" % self.opset)
+
         # set the IR version based on opset
         try:
             model_proto.ir_version = constants.OPSET_TO_IR_VERSION.get(self.opset, model_proto.ir_version)
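The guard added here makes make_model fail early when the target opset has no entry in OPSET_TO_IR_VERSION, instead of silently falling back to a default IR version. A minimal standalone sketch of the same lookup-plus-guard pattern, assuming only the onnx helper API (set_ir_version and the graph name are illustrative, not part of this commit):

    from onnx import helper

    # Same mapping as tf2onnx/constants.py after this commit.
    OPSET_TO_IR_VERSION = {
        1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, 7: 4, 8: 4, 9: 4, 10: 5, 11: 6, 12: 7, 13: 7
    }

    def set_ir_version(model_proto, opset):
        # Fail early for unknown opsets, as the utils.make_sure call above does.
        if opset not in OPSET_TO_IR_VERSION:
            raise ValueError("Opset %s is not supported yet. Please use a lower opset" % opset)
        model_proto.ir_version = OPSET_TO_IR_VERSION[opset]
        return model_proto

    model = helper.make_model(helper.make_graph([], "g", [], []))
    print(set_ir_version(model, 13).ir_version)  # 7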

tf2onnx/onnx_opset/reduction.py

Lines changed: 5 additions & 0 deletions

@@ -63,6 +63,11 @@ def version_13(cls, ctx, node, **kwargs):
             node.set_attr("noop_with_empty_axes", 1)
             if ctx.get_dtype(node.input[1]) != onnx_pb.TensorProto.INT64:
                 ctx.insert_new_node_on_input(node, "Cast", node.input[1], to=onnx_pb.TensorProto.INT64)
+            input_shape = ctx.get_shape(node.input[1])
+            input_rank = len(input_shape) if input_shape is not None else None
+            if input_rank != 1:
+                new_shape = ctx.make_const(utils.make_name("reshape_const"), np.array([-1], np.int64))
+                ctx.insert_new_node_on_input(node, "Reshape", [node.input[1], new_shape.output[0]])
         else:
             cls.version_11(ctx, node, **kwargs)
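Context for the rank check: in ONNX opset 13, ReduceSum takes its reduction axes as a 1-D int64 input tensor rather than an attribute, so an axes value that arrives as a scalar has to be flattened before it is fed to the node. A small numpy sketch of the shape fix the inserted Reshape performs (the value 2 is just an example axis):

    import numpy as np

    axes = np.array(2, dtype=np.int64)   # scalar axis tensor, rank 0
    if axes.ndim != 1:
        axes = axes.reshape(-1)          # same effect as the inserted Reshape with shape [-1]
    print(axes.shape)                    # (1,)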

tf2onnx/onnx_opset/tensor.py

Lines changed: 5 additions & 3 deletions

@@ -1192,7 +1192,8 @@ def version_1(cls, ctx, node, **kwargs):
         # for each output we need to squeeze axis
         for n in node.output:
             op_name = utils.make_name(node.name)
-            squeeze_node = ctx.insert_new_node_on_output("Squeeze", n, name=op_name, axes=[axis])
+            squeeze_node = GraphBuilder(ctx).make_squeeze({'data': n, 'axes': [axis]}, name=op_name, return_node=True)
+            ctx.insert_node_on_output(squeeze_node, n)
             ctx.copy_shape(n, squeeze_node.output[0])
             ctx.copy_dtype(n, squeeze_node.output[0])

@@ -1637,8 +1638,9 @@ def any_version(cls, opset, ctx, node, **kwargs):
         # add valid_outputs count
         output_idx = 2 if node.type in ["NonMaxSuppressionV5"] else 1
         shape_op = ctx.make_node("Shape", inputs=[nms_output.output[0]])
-        reduce_op = ctx.make_node("ReduceSum", inputs=shape_op.output, attr={"axes": [0], "keepdims": 0})
-        ctx.make_node("Cast", inputs=[reduce_op.output[0]], attr={"to": onnx_pb.TensorProto.INT32},
+        reduce_op = GraphBuilder(ctx).make_reduce_sum(
+            {"data": shape_op.output[0], "axes": [0], "keepdims": 0, "noop_with_empty_axes": 1})
+        ctx.make_node("Cast", inputs=[reduce_op], attr={"to": onnx_pb.TensorProto.INT32},
                       outputs=[node.output[output_idx]], dtypes=dtypes[output_idx], shapes=shapes[output_idx],
                       op_name_scope=node.name)
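Both hunks route node creation through GraphBuilder because opset 13 moves the axes of Squeeze and ReduceSum from a node attribute to an input; make_squeeze and make_reduce_sum abstract over that difference for the target opset. A hedged sketch of the two node shapes involved, using only onnx.helper (the tensor names "data", "axes", "out" are made up for illustration):

    from onnx import helper

    # Opset <= 12: axes is an attribute on the node.
    squeeze_12 = helper.make_node("Squeeze", inputs=["data"], outputs=["out"], axes=[0])

    # Opset 13: axes becomes a second (int64 tensor) input, so a constant must be wired in.
    squeeze_13 = helper.make_node("Squeeze", inputs=["data", "axes"], outputs=["out"])

    print(squeeze_12.attribute[0].name)  # "axes"
    print(list(squeeze_13.input))        # ['data', 'axes']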
