 import unittest
 from itertools import product

+import os
 import numpy as np
 import tensorflow as tf

+from tensorflow.python.ops import lookup_ops
 from backend_test_base import Tf2OnnxBackendTestBase
 # pylint reports unused-wildcard-import which is false positive, __all__ is defined in common
 from common import * # pylint: disable=wildcard-import,unused-wildcard-import
@@ -1828,6 +1830,16 @@ def test_strided_slice_dynamic_7(self):
         _ = tf.identity(x_, name=_TFOUTPUT)
         self._run_test_case([_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})

+    @check_opset_min_version(10, "Slice")
+    def test_new_axis_mask(self):
+        x_val = np.arange(5*10*10*10*10*20*30).astype("float32").reshape((5, 10, 10, 10, 10, 20, 30))
+        y_val = np.array(9, dtype=np.int32)
+        x = tf.placeholder(tf.float32, x_val.shape, name=_TFINPUT)
+        y = tf.placeholder(tf.int32, y_val.shape, name=_TFINPUT1)
+        x_ = x[tf.newaxis, 0:y, y::2, tf.newaxis, :, tf.newaxis, :y, tf.newaxis, ..., 9]
+        _ = tf.identity(x_, name=_TFOUTPUT)
+        self._run_test_case([_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
+
     @skip_caffe2_backend("fails with schema error")
     @check_opset_min_version(7, "batchnorm")
     def test_batchnorm(self):
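
For reference, the mixed slice in test_new_axis_mask above can be reproduced with plain numpy, which follows the same basic-indexing rules: each tf.newaxis inserts a size-1 axis (new_axis_mask), the ellipsis expands over the untouched middle dims, and the trailing integer index drops the last dim (shrink_axis_mask). A minimal sketch, assuming y takes its test value of 9 (illustrative only, not part of the change):

    import numpy as np

    x = np.arange(5*10*10*10*10*20*30, dtype=np.float32).reshape((5, 10, 10, 10, 10, 20, 30))
    y = 9
    sliced = x[np.newaxis, 0:y, y::2, np.newaxis, :, np.newaxis, :y, np.newaxis, ..., 9]
    print(sliced.shape)  # (1, 5, 1, 1, 10, 1, 9, 1, 10, 20)
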
@@ -2473,26 +2485,46 @@ def test_batch_to_spacend(self):
         self._run_test_case([_OUTPUT], {_INPUT: input_val})

     @check_opset_min_version(11, "BatchToSpaceND")
-    def test_batch_to_spacend_non_const(self):
-        input_x_val = np.random.random_sample([40, 3, 5, 100]).astype(np.float32) # NHWC
-        block_shape_val = np.array([2, 2]).astype(np.int64)
-        crops_val = np.array([[1, 0], [2, 1]]).astype(np.int64)
-        input_x = tf.placeholder(dtype=tf.float32, shape=input_x_val.shape, name=_TFINPUT)
-        block_shape = tf.placeholder(dtype=tf.int64, shape=block_shape_val.shape, name=_TFINPUT1)
-        crops = tf.placeholder(dtype=tf.int64, shape=crops_val.shape, name=_TFINPUT2)
-        _ = tf.batch_to_space_nd(input_x, block_shape, crops, name=_TFOUTPUT)
-        self._run_test_case([_OUTPUT], {_INPUT: input_x_val, _INPUT1: block_shape_val, _INPUT2: crops_val})
+    def test_batch_to_spacend_non_const_7d(self):
+        x_type, y_type, z_type = np.int64, np.int64, np.int64
+        # test 3D up to 7D input tensors
+        for x_shape in [[12, 4, 4], [12, 4, 8, 3], [12, 4, 8, 3, 2], [12, 4, 8, 3, 2, 3], [12, 4, 8, 3, 2, 1, 3]]:
+            # test 1D up to 2D block shapes
+            for block_shape in [[2, 3], [2]]:
+                tf.reset_default_graph()
+                # crop 1 layer at the end of each dim
+                crops = [[0, 1] for dim in block_shape]
+                y_val = np.array(block_shape).astype(y_type)
+                x_val = np.array([x + 1 for x in range(0, np.prod(x_shape))], dtype=x_type).reshape(x_shape)
+                z_val = np.array(crops).astype(z_type)
+                # x and z can be dynamic.
+                # y = block_shape cannot be dynamic without a change to the Transpose op spec
+                x = tf.placeholder(dtype=x_type, shape=x_val.shape, name=_TFINPUT)
+                y = tf.constant(dtype=y_type, value=y_val, shape=y_val.shape, name=_TFINPUT1)
+                z = tf.placeholder(dtype=z_type, shape=z_val.shape, name=_TFINPUT2)
+                _ = tf.batch_to_space_nd(x, y, z, name=_TFOUTPUT)
+                self._run_test_case([_OUTPUT], {_INPUT: x_val, _INPUT2: z_val})

     @check_opset_min_version(11, "SpaceToBatchND")
-    def test_space_to_batchnd_non_const(self):
-        input_x_val = np.random.random_sample([40, 5, 7, 66]).astype(np.float32) # NHWC
-        block_size_val = np.array([2, 2]).astype(np.int64)
-        pad_val = np.array([[0, 1], [2, 1]]).astype(np.int64)
-        input_x = tf.placeholder(dtype=tf.float32, shape=input_x_val.shape, name=_TFINPUT)
-        block_size = tf.placeholder(dtype=tf.int64, shape=block_size_val.shape, name=_TFINPUT1)
-        pad = tf.placeholder(dtype=tf.int64, shape=pad_val.shape, name=_TFINPUT2)
-        _ = tf.space_to_batch_nd(input_x, block_size, pad, name=_TFOUTPUT)
-        self._run_test_case([_OUTPUT], {_INPUT: input_x_val, _INPUT1: block_size_val, _INPUT2: pad_val})
+    def test_space_to_batchnd_non_const_7d(self):
+        x_type, y_type, z_type = np.int64, np.int64, np.int64
+        # test 3D up to 7D input tensors
+        for x_shape in [[2, 4, 4], [1, 4, 8, 3], [1, 4, 8, 3, 2], [1, 4, 8, 3, 2, 3], [1, 4, 8, 3, 2, 1, 3]]:
+            # test 1D up to 2D block shapes
+            for block_shape in [[2], [2, 2]]:
+                tf.reset_default_graph()
+                # pad 1 layer at the start and end of each dim
+                pads = [[1, 1] for dim in block_shape]
+                y_val = np.array(block_shape).astype(y_type)
+                x_val = np.array([x + 1 for x in range(0, np.prod(x_shape))], dtype=x_type).reshape(x_shape)
+                z_val = np.array(pads).astype(z_type)
+                # x and z can be dynamic.
+                # y = block_shape cannot be dynamic without a change to the Transpose op spec
+                x = tf.placeholder(dtype=x_type, shape=x_val.shape, name=_TFINPUT)
+                y = tf.constant(dtype=y_type, value=y_val, shape=y_val.shape, name=_TFINPUT1)
+                z = tf.placeholder(dtype=z_type, shape=z_val.shape, name=_TFINPUT2)
+                _ = tf.space_to_batch_nd(x, y, z, name=_TFOUTPUT)
+                self._run_test_case([_OUTPUT], {_INPUT: x_val, _INPUT2: z_val})

     @check_opset_min_version(11, "CropAndResize")
     def test_crop_and_resize_linear(self):
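
The shape bookkeeping these two tests rely on follows the documented semantics of tf.batch_to_space_nd: the batch dim is divided by prod(block_shape), each blocked spatial dim is multiplied by its block size and then cropped, and any trailing dims pass through unchanged (space_to_batch_nd is the inverse, padding instead of cropping). A minimal sketch of the expected output shape, using a hypothetical helper batch_to_space_shape (illustrative only, not part of the change):

    import numpy as np

    def batch_to_space_shape(x_shape, block_shape, crops):
        # the batch is redistributed into the blocked spatial dims
        batch = x_shape[0] // int(np.prod(block_shape))
        m = len(block_shape)
        spatial = [x_shape[1 + i] * block_shape[i] - crops[i][0] - crops[i][1] for i in range(m)]
        return [batch] + spatial + list(x_shape[1 + m:])

    # first case exercised by test_batch_to_spacend_non_const_7d above
    print(batch_to_space_shape([12, 4, 4], [2, 3], [[0, 1], [0, 1]]))  # [2, 7, 11]
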
@@ -2964,6 +2996,20 @@ def test_Conv2DBackpropInput_valid(self):
             name=_TFOUTPUT)
         self._run_test_case([_OUTPUT], {_INPUT: input_sizes_val, _INPUT1: filters_val, _INPUT2: out_backprop_val})

+    @check_opset_min_version(8, "CategoryMapper")
+    def test_hashtable_lookup(self):
+        filnm = "vocab.tmp"
+        words = ["apple", "pear", "banana", "cherry", "grape"]
+        query = np.array(['cherry'], dtype=object)
+        with open(filnm, "w") as f:
+            for word in words:
+                f.write(word + "\n")
+        query_holder = tf.placeholder(tf.string, shape=[len(query)], name=_TFINPUT)
+        hash_table = lookup_ops.index_table_from_file(filnm)
+        lookup_results = hash_table.lookup(query_holder)
+        self._run_test_case([lookup_results.name], {_INPUT: query})
+        os.remove(filnm)
+

 if __name__ == '__main__':
     unittest_main()
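
For context, lookup_ops.index_table_from_file builds a table keyed on each whole line of the vocabulary file, with the 0-based line number as the value, so the query 'cherry' above is expected to resolve to index 3; the decorator suggests the conversion maps this onto ONNX CategoryMapper. A minimal sketch of that expectation in plain Python (illustrative only, not part of the change):

    # the vocab file written by the test has one word per line
    words = ["apple", "pear", "banana", "cherry", "grape"]
    word_to_index = {word: idx for idx, word in enumerate(words)}
    print(word_to_index["cherry"])  # 3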