Merged
5 changes: 5 additions & 0 deletions CHANGELOGS.rst
@@ -1,6 +1,11 @@
Change Logs
===========

0.6.3
+++++

* :pr:`140`: improves command line find

0.6.2
+++++

7 changes: 1 addition & 6 deletions _doc/index.rst
@@ -206,19 +206,14 @@ The function replaces dynamic dimensions defined as strings by
Older versions
++++++++++++++

* `0.6.3 <../v0.6.3/index.html>`_
* `0.6.2 <../v0.6.2/index.html>`_
* `0.6.1 <../v0.6.1/index.html>`_
* `0.6.0 <../v0.6.0/index.html>`_
* `0.5.0 <../v0.5.0/index.html>`_
* `0.4.4 <../v0.4.4/index.html>`_
* `0.4.3 <../v0.4.3/index.html>`_
* `0.4.2 <../v0.4.2/index.html>`_
* `0.4.1 <../v0.4.1/index.html>`_
* `0.4.0 <../v0.4.0/index.html>`_
* `0.3.0 <../v0.3.0/index.html>`_
* `0.2.2 <../v0.2.2/index.html>`_
* `0.2.1 <../v0.2.1/index.html>`_
* `0.2.0 <../v0.2.0/index.html>`_
* `0.1.0 <../v0.1.0/index.html>`_

The documentation was updated on:
216 changes: 216 additions & 0 deletions _unittests/ut_helpers/test_onnx_helper.py
@@ -16,10 +16,13 @@
iterator_initializer_constant,
from_array_extended,
tensor_statistics,
enumerate_results,
shadowing_names,
)


TFLOAT = TensorProto.FLOAT
TINT64 = TensorProto.INT64


class TestOnnxHelper(ExtTestCase):
@@ -251,6 +254,219 @@ def test_statistics(self):
stat = tensor_statistics(rnd)
self.assertEqual(stat["stype"], "FLOAT")

@hide_stdout()
def test_enumerate_results(self):
model = oh.make_model(
oh.make_graph(
[
oh.make_node("Unsqueeze", ["X", "zero"], ["xu1"]),
oh.make_node("Unsqueeze", ["xu1", "un"], ["xu2"]),
oh.make_node("Reshape", ["xu2", "shape1"], ["xm1"]),
oh.make_node("Reshape", ["Y", "shape2"], ["xm2c"]),
oh.make_node("Cast", ["xm2c"], ["xm2"], to=1),
oh.make_node("MatMul", ["xm1", "xm2"], ["xm"]),
oh.make_node("Reshape", ["xm", "shape3"], ["Z"]),
],
"dummy",
[oh.make_tensor_value_info("X", TFLOAT, [320, 1280])],
[oh.make_tensor_value_info("Z", TFLOAT, [3, 5, 320, 640])],
[
onh.from_array(
np.random.rand(3, 5, 1280, 640).astype(np.float32), name="Y"
),
onh.from_array(np.array([0], dtype=np.int64), name="zero"),
onh.from_array(np.array([1], dtype=np.int64), name="un"),
onh.from_array(np.array([1, 320, 1280], dtype=np.int64), name="shape1"),
onh.from_array(np.array([15, 1280, 640], dtype=np.int64), name="shape2"),
onh.from_array(np.array([3, 5, 320, 640], dtype=np.int64), name="shape3"),
],
),
opset_imports=[oh.make_opsetid("", 18)],
ir_version=9,
)
res = list(enumerate_results(model, "xu1", verbose=2))
ress = ";".join(str(r) for r in res)
self.assertEqual(
"<< xu1 - (0:Unsqueeze:) :: Unsqueeze(X, zero) -> xu1;"
">> xu1 - (1:Unsqueeze:) :: Unsqueeze(xu1, un) -> xu2",
ress,
)
self.assertEqual(2, len(list(enumerate_results(model, "shape1", verbose=2))))
self.assertEqual(2, len(list(enumerate_results(model, "X", verbose=2))))
self.assertEqual(2, len(list(enumerate_results(model, "Z", verbose=2))))

def test_enumerate_results_loop(self):
x = np.array([1, 2, 3, 4, 5]).astype(np.float32)

model = oh.make_model(
graph=oh.make_graph(
name="loop_test",
inputs=[
oh.make_tensor_value_info("trip_count", TINT64, ["a"]),
oh.make_tensor_value_info("cond", TensorProto.BOOL, [1]),
],
outputs=[oh.make_tensor_value_info("res", TFLOAT, [])],
nodes=[
oh.make_node("SequenceEmpty", [], ["seq_empty"], dtype=TFLOAT),
oh.make_node(
"Loop",
inputs=["trip_count", "cond", "seq_empty"],
outputs=["seq_res"],
body=oh.make_graph(
[
oh.make_node(
"Identity", inputs=["cond_in"], outputs=["cond_out"]
),
oh.make_node(
"Constant",
inputs=[],
outputs=["x"],
value=oh.make_tensor(
name="const_tensor_x",
data_type=TFLOAT,
dims=x.shape,
vals=x.flatten().astype(float),
),
),
oh.make_node(
"Constant",
inputs=[],
outputs=["one"],
value=oh.make_tensor(
name="const_tensor_one",
data_type=TINT64,
dims=(),
vals=[1],
),
),
oh.make_node(
"Constant",
inputs=[],
outputs=["slice_start"],
value=oh.make_tensor(
name="const_tensor_zero",
data_type=TINT64,
dims=(1,),
vals=[0],
),
),
oh.make_node(
"Add", inputs=["iter_count", "one"], outputs=["end"]
),
oh.make_node(
"Constant",
inputs=[],
outputs=["axes"],
value=oh.make_tensor(
name="const_tensor_axes",
data_type=TINT64,
dims=(1,),
vals=[0],
),
),
oh.make_node(
"Unsqueeze", inputs=["end", "axes"], outputs=["slice_end"]
),
oh.make_node(
"Slice",
inputs=["x", "slice_start", "slice_end"],
outputs=["slice_out"],
),
oh.make_node(
"SequenceInsert",
inputs=["seq_in", "slice_out"],
outputs=["seq_out"],
),
],
"loop_body",
[
oh.make_tensor_value_info("iter_count", TINT64, []),
oh.make_tensor_value_info("cond_in", TensorProto.BOOL, []),
oh.make_tensor_sequence_value_info("seq_in", TFLOAT, None),
],
[
oh.make_tensor_value_info("cond_out", TensorProto.BOOL, []),
oh.make_tensor_sequence_value_info("seq_out", TFLOAT, None),
],
),
),
oh.make_node(
"ConcatFromSequence",
inputs=["seq_res"],
outputs=["res"],
axis=0,
new_axis=0,
),
],
)
)
res = list(enumerate_results(model, "slice_start", verbose=2))
self.assertEqual(len(res), 2)

def test_shadowing_names(self):
def _mkv_(name):
value_info_proto = ValueInfoProto()
value_info_proto.name = name
return value_info_proto

model = oh.make_model(
oh.make_graph(
[
oh.make_node("ReduceSum", ["X"], ["Xred"]),
oh.make_node("Add", ["X", "two"], ["X0"]),
oh.make_node("Add", ["X0", "zero"], ["X00"]),
oh.make_node("CastLike", ["one", "Xred"], ["one_c"]),
oh.make_node("Greater", ["Xred", "one_c"], ["cond"]),
oh.make_node("Identity", ["two"], ["three"]),
oh.make_node(
"If",
["cond"],
["Z_c"],
then_branch=oh.make_graph(
[
# shadowing
oh.make_node("Constant", [], ["three"], value_floats=[2.1]),
oh.make_node("Add", ["X00", "three"], ["Y"]),
],
"then",
[],
[_mkv_("Y")],
),
else_branch=oh.make_graph(
[
# not shadowing
oh.make_node("Sub", ["X0", "three"], ["Y"]),
],
"else",
[],
[_mkv_("Y")],
),
),
oh.make_node("CastLike", ["Z_c", "X"], ["Z"]),
],
"test",
[
oh.make_tensor_value_info("X", TensorProto.FLOAT, ["N"]),
oh.make_tensor_value_info("one", TensorProto.FLOAT, ["N"]),
],
[oh.make_tensor_value_info("Z", TensorProto.UNDEFINED, ["N"])],
[
onh.from_array(np.array([0], dtype=np.float32), name="zero"),
onh.from_array(np.array([2], dtype=np.float32), name="two"),
],
),
opset_imports=[oh.make_operatorsetid("", 18)],
ir_version=10,
)
self.assertEqual(
(
{"three"},
set(),
{"cond", "Z", "X0", "Z_c", "three", "one_c", "Xred", "X00", "Y"},
),
shadowing_names(model),
)


if __name__ == "__main__":
unittest.main(verbosity=2)
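
A minimal sketch of calling the two new helpers directly (not part of the PR's diff; the import path mirrors the one used in _cmd_find further below, and "model.onnx" and "xu1" are placeholders taken from the tests above):

import onnx
from onnx_diagnostic.helpers.onnx_helper import enumerate_results, shadowing_names

# placeholder path; load without external data, as _cmd_find does
onx = onnx.load("model.onnx", load_external_data=False)

# iterate over the nodes that produce or consume a given result name
for r in enumerate_results(onx, "xu1", verbose=0):
    print(r)

# first two elements of the returned tuple, as used by _cmd_find
shadow, post_shadow = shadowing_names(onx)[:2]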
1 change: 0 additions & 1 deletion _unittests/ut_reference/test_ort_evaluator.py
@@ -261,7 +261,6 @@ def test_init_torch_bfloat16(self):

@hide_stdout()
def test_if(self):

def _mkv_(name):
value_info_proto = onnx.ValueInfoProto()
value_info_proto.name = name
2 changes: 1 addition & 1 deletion _unittests/ut_reference/test_torch_onnx_evaluator.py
@@ -1468,7 +1468,7 @@ def run(self, x, scale, bias=None):
)
expected = torch_sess.run(None, feeds)
got = torch_sess_custom.run(None, feeds)
self.assertEqualAny(expected, got)
self.assertEqualAny(expected, got, atol=1e-3)
self.assertEqual([1], LayerNormalizationOrt._shared)


7 changes: 7 additions & 0 deletions _unittests/ut_xrun_doc/test_command_lines_exe.py
@@ -38,6 +38,13 @@ def test_parser_find(self):
text = st.getvalue()
self.assertIsInstance(text, str)

def test_parser_find_v2(self):
st = StringIO()
with redirect_stdout(st):
main(["find", "-i", self.dummy_path, "-n", "node_Add_188", "--v2"])
text = st.getvalue()
self.assertIsInstance(text, str)

def test_parser_config(self):
st = StringIO()
with redirect_stdout(st):
2 changes: 1 addition & 1 deletion onnx_diagnostic/__init__.py
@@ -3,5 +3,5 @@
Functions, classes to dig into a model when this one is right, slow, wrong...
"""

__version__ = "0.6.2"
__version__ = "0.6.3"
__author__ = "Xavier Dupré"
27 changes: 24 additions & 3 deletions onnx_diagnostic/_command_lines_parser.py
@@ -191,24 +191,45 @@ def get_parser_find() -> ArgumentParser:
"--names",
type=str,
required=False,
help="names to look at comma separated values",
help="names to look at comma separated values, if 'SHADOW', "
"search for shadowing names",
)
parser.add_argument(
"-v",
"--verbose",
default=0,
type=int,
required=False,
help="verbosity",
)
parser.add_argument(
"--v2",
default=False,
action=BooleanOptionalAction,
help="use enumerate_results instead of onnx_find",
)
return parser


def _cmd_find(argv: List[Any]):
from .helpers.onnx_helper import onnx_find
from .helpers.onnx_helper import onnx_find, enumerate_results, shadowing_names

parser = get_parser_find()
args = parser.parse_args(argv[1:])
onnx_find(args.input, verbose=args.verbose, watch=set(args.names.split(",")))
if args.names == "SHADOW":
onx = onnx.load(args.input, load_external_data=False)
s, ps = shadowing_names(onx)[:2]
print(f"shadowing names: {s}")
print(f"post-shadowing names: {ps}")
elif args.v2:
onx = onnx.load(args.input, load_external_data=False)
res = list(
enumerate_results(onx, name=set(args.names.split(",")), verbose=args.verbose)
)
if not args.verbose:
print("\n".join(map(str, res)))
else:
onnx_find(args.input, verbose=args.verbose, watch=set(args.names.split(",")))


def get_parser_config() -> ArgumentParser:
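
A usage sketch for the extended find command (not part of the diff; it assumes main is the command-line entry point exposed by onnx_diagnostic._command_lines_parser, as exercised in test_command_lines_exe.py, and "model.onnx" / "node_Add_188" are placeholders):

from onnx_diagnostic._command_lines_parser import main  # assumed entry point

# default behaviour: onnx_find
main(["find", "-i", "model.onnx", "-n", "node_Add_188", "-v", "1"])

# new --v2 switch: uses enumerate_results instead of onnx_find
main(["find", "-i", "model.onnx", "-n", "node_Add_188", "--v2"])

# special name SHADOW: prints shadowing and post-shadowing names
main(["find", "-i", "model.onnx", "-n", "SHADOW"])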