|
14 | 14 | from backend_test_base import Tf2OnnxBackendTestBase
|
15 | 15 | from common import unittest_main, group_nodes_by_type
|
16 | 16 |
|
17 |
| - |
18 | 17 | # pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test
|
19 | 18 |
|
20 | 19 | class OptimizerTests(Tf2OnnxBackendTestBase):
|
@@ -423,5 +422,69 @@ def test_duplicated_need_multiple_run(self):
|
423 | 422 | op_type="Log", remaining_op_num=3)
|
424 | 423 | # Merge Duplicated Nodes Optimizer Tests End
|
425 | 424 |
|
| 425 | + # Const Fold Optimizer Tests Start |
| 426 | + |
| 427 | + def test_const_fold_trans_with_const1(self): |
| 428 | + shape = (6, 6) |
| 429 | + const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape, |
| 430 | + vals=np.random.randn(*shape).flatten().astype(np.float32)) |
| 431 | + node1 = helper.make_node("Constant", [], ["const"], value=const_tensor) |
| 432 | + node2 = helper.make_node("Transpose", ["const"], ["value1"]) |
| 433 | + node3 = helper.make_node("Add", ["value1", "X"], ["res"]) |
| 434 | + |
| 435 | + graph = helper.make_graph( |
| 436 | + [node1, node2, node3], |
| 437 | + "test_const_fold_trans_with_const1", |
| 438 | + [helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)], |
| 439 | + [helper.make_tensor_value_info("res", TensorProto.FLOAT, shape)], |
| 440 | + ) |
| 441 | + |
| 442 | + model_proto = helper.make_model(graph, producer_name="onnx-tests") |
| 443 | + self.run_transpose_compare(["res"], {"X": np.random.randn(*shape).astype(np.float32)}, |
| 444 | + model_proto, remaining_transpose_num=0) |
| 445 | + |
| 446 | + def test_const_fold_trans_with_const2(self): |
| 447 | + # need multiple optimization run |
| 448 | + shape = (6, 6) |
| 449 | + const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape, |
| 450 | + vals=np.random.randn(*shape).flatten().astype(np.float32)) |
| 451 | + node1 = helper.make_node("Constant", [], ["const"], value=const_tensor) |
| 452 | + node2 = helper.make_node("Transpose", ["const"], ["value1"]) |
| 453 | + node3 = helper.make_node("Transpose", ["value1"], ["value2"]) |
| 454 | + node4 = helper.make_node("Add", ["value2", "X"], ["res"]) |
| 455 | + |
| 456 | + graph = helper.make_graph( |
| 457 | + [node1, node2, node3, node4], |
| 458 | + "test_const_fold_trans_with_const2", |
| 459 | + [helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)], |
| 460 | + [helper.make_tensor_value_info("res", TensorProto.FLOAT, shape)], |
| 461 | + ) |
| 462 | + |
| 463 | + model_proto = helper.make_model(graph, producer_name="onnx-tests") |
| 464 | + self.run_transpose_compare(["res"], {"X": np.random.randn(*shape).astype(np.float32)}, |
| 465 | + model_proto, remaining_transpose_num=0) |
| 466 | + |
| 467 | + def test_const_fold_node_is_output(self): |
| 468 | + # need multiple optimization run |
| 469 | + shape = (6, 6) |
| 470 | + const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape, |
| 471 | + vals=np.random.randn(*shape).flatten().astype(np.float32)) |
| 472 | + node1 = helper.make_node("Constant", [], ["const"], value=const_tensor) |
| 473 | + node2 = helper.make_node("Transpose", ["const"], ["value1"]) |
| 474 | + node3 = helper.make_node("Transpose", ["value1"], ["res"]) |
| 475 | + |
| 476 | + graph = helper.make_graph( |
| 477 | + [node1, node2, node3], |
| 478 | + "test_const_fold_node_is_output", |
| 479 | + [], |
| 480 | + [helper.make_tensor_value_info("res", TensorProto.FLOAT, shape)], |
| 481 | + ) |
| 482 | + |
| 483 | + model_proto = helper.make_model(graph, producer_name="onnx-tests") |
| 484 | + self.run_transpose_compare(["res"], {}, |
| 485 | + model_proto, remaining_transpose_num=0) |
| 486 | + # Const Fold Optimizer Tests End |
| 487 | + |
| 488 | + |
# Script entry point: run the optimizer test suite via the shared runner.
if __name__ == "__main__":
    unittest_main()
|
0 commit comments