
Commit 3bd3081

Remove usage of numpy bool aliases for builtins
Signed-off-by: Deyu Huang <[email protected]>
1 parent 0bfdf63 commit 3bd3081
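
For context: np.bool was never a separate type, only an alias for the Python builtin bool. NumPy deprecated these builtin aliases in 1.20 and removed them in 1.24, so any remaining np.bool usage fails on current NumPy. A minimal sketch of the substitution applied throughout this commit (the sample values are illustrative only):

import numpy as np

# Old spelling: DeprecationWarning on NumPy 1.20-1.23, AttributeError on 1.24+.
# x = np.array([1, 0, 1, 1], dtype=np.bool)

# New spelling: pass the builtin bool (np.bool_ would also work).
x = np.array([1, 0, 1, 1], dtype=bool)
assert x.dtype == np.bool_  # dtype=bool still resolves to NumPy's bool_ dtype

The same mechanical replacement is applied across the test and conversion code below.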

8 files changed, 35 additions and 35 deletions
tests/test_backend.py

Lines changed: 19 additions & 19 deletions
@@ -5,7 +5,7 @@
 
 import os
 import unittest
-from distutils.version import LooseVersion
+from packaging.version import Version
 from itertools import product
 
 import numpy as np
@@ -72,7 +72,7 @@
     matrix_diag_part = tf.compat.v1.matrix_diag_part
     fake_quant_with_min_max_args = tf.quantization.fake_quant_with_min_max_args
     fake_quant_with_min_max_vars = tf.quantization.fake_quant_with_min_max_vars
-elif LooseVersion(tf.__version__) >= "1.13":
+elif Version(tf.__version__) >= Version("1.13"):
     conv2d_backprop_input = tf.compat.v1.nn.conv2d_backprop_input
     conv3d_transpose = tf.compat.v1.nn.conv3d_transpose
     multinomial = tf.compat.v1.random.multinomial
@@ -86,7 +86,7 @@
     quantize_and_dequantize = tf.compat.v1.quantization.quantize_and_dequantize
     resize_nearest_neighbor = tf.compat.v1.image.resize_nearest_neighbor
     resize_bilinear = tf.compat.v1.image.resize_bilinear
-    if LooseVersion(tf.__version__) >= "1.14":
+    if Version(tf.__version__) >= Version("1.14"):
         resize_bilinear_v2 = tf.compat.v2.image.resize
     is_nan = tf.math.is_nan
     is_inf = tf.math.is_inf
@@ -1320,8 +1320,8 @@ def func(x1):
 
     @check_onnxruntime_incompatibility("Add")
     def test_logicaland(self):
-        x_val1 = np.array([1, 0, 1, 1], dtype=np.bool).reshape((2, 2))
-        x_val2 = np.array([0, 1, 1, 1], dtype=np.bool).reshape((2, 2))
+        x_val1 = np.array([1, 0, 1, 1], dtype=bool).reshape((2, 2))
+        x_val2 = np.array([0, 1, 1, 1], dtype=bool).reshape((2, 2))
         def func(x1, x2):
             mi = tf.logical_and(x1, x2)
             return tf.identity(mi, name=_TFOUTPUT)
@@ -3505,9 +3505,9 @@ def func(x):
     def test_where_bool(self):
         x_val = np.array([1, 2, -3, 4, -5], dtype=np.float32)
         true_result = np.array([True, False, True, False, True],
-                               dtype=np.bool)
+                               dtype=bool)
         false_result = np.array([False, True, False, True, True],
-                                dtype=np.bool)
+                                dtype=bool)
         def func(x):
             picks = tf.where(x > -1, true_result, false_result)
             return tf.identity(picks, name=_TFOUTPUT)
@@ -3770,36 +3770,36 @@ def func(input_1, input_2):
         self._run_test_case(func, [_OUTPUT], {_INPUT: input_val_1, _INPUT1: input_val_2}, rtol=1e-4)
 
     def test_logical_not(self):
-        input_val = np.random.randint(0, 2, (10, 20)).astype(np.bool)
+        input_val = np.random.randint(0, 2, (10, 20)).astype(bool)
         def func(x):
             res = tf.logical_not(x)
             return tf.identity(res, name=_TFOUTPUT)
         self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
 
     def test_reduce_all(self):
-        input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
+        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
         def func(x):
             res = tf.reduce_all(input_tensor=x, keepdims=False)
             res1 = tf.reduce_all(input_tensor=x, axis=[0], keepdims=False)
             return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
         self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
 
-        input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
+        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
         def func(input_x):
             res = tf.reduce_all(input_tensor=input_x, keepdims=True)
             res1 = tf.reduce_all(input_tensor=input_x, axis=[0], keepdims=True)
             return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
         self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
 
     def test_reduce_any(self):
-        input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
+        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
         def func(x):
             res = tf.reduce_any(input_tensor=x, keepdims=False)
             res1 = tf.reduce_any(input_tensor=x, axis=[0], keepdims=False)
             return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
         self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
 
-        input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
+        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
         def func(x):
             res = tf.reduce_any(input_tensor=x, keepdims=True)
             res1 = tf.reduce_any(input_tensor=x, axis=[0], keepdims=True)
@@ -3808,14 +3808,14 @@ def func(x):
 
     @check_opset_min_version(11, "ReduceMin")
     def test_reduce_all_negative_axis(self):
-        input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
+        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
         def func(x):
             res = tf.reduce_all(input_tensor=x, keepdims=False)
             res1 = tf.reduce_all(input_tensor=x, axis=[-1], keepdims=False)
             return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
         self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
 
-        input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
+        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
         def func(input_x):
             res = tf.reduce_all(input_tensor=input_x, keepdims=True)
             res1 = tf.reduce_all(input_tensor=input_x, axis=[-1], keepdims=True)
@@ -3824,14 +3824,14 @@ def func(input_x):
 
     @check_opset_min_version(11, "ReduceSum")
     def test_reduce_any_negative_axis(self):
-        input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
+        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
         def func(x):
             res = tf.reduce_any(input_tensor=x, keepdims=False)
             res1 = tf.reduce_any(input_tensor=x, axis=[-1], keepdims=False)
             return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
         self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
 
-        input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
+        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
         def func(x):
             res = tf.reduce_any(input_tensor=x, keepdims=True)
             res1 = tf.reduce_any(input_tensor=x, axis=[-1], keepdims=True)
@@ -3841,15 +3841,15 @@ def func(x):
     @check_opset_min_version(11, "ReduceSum")
     @check_tf_min_version("1.15")
     def test_reduce_any_empty_axis(self):
-        input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
+        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
         def func(x):
             res = tf.reduce_any(input_tensor=x, keepdims=False)
             res1 = tf.reduce_any(input_tensor=x, axis=[], keepdims=False)
             return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
         self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
 
     def test_reduce_all_scalar_axis(self):
-        input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
+        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
         def func(x):
             res = tf.reduce_all(input_tensor=x, keepdims=False)
             res1 = tf.reduce_all(input_tensor=x, axis=0, keepdims=False)
@@ -3859,7 +3859,7 @@ def func(x):
     @check_opset_min_version(13, "ReduceSum")
     @check_tf_min_version("1.15")
     def test_reduce_any_nonconst_axis(self):
-        input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
+        input_val = np.random.randint(0, 2, (2, 20)).astype(bool)
         y_val = np.array([1], np.int32)
         def func(x, y):
             res = tf.reduce_any(input_tensor=x, axis=y, keepdims=False)
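
The other change in this file swaps distutils.version.LooseVersion for packaging.version.Version: distutils is deprecated (PEP 632), and a Version is not meant to be compared against a bare string, which is why the literal "1.13" is wrapped as well. A small sketch of the pattern, assuming the packaging package is installed (the "1.13" threshold simply mirrors the diff above):

import tensorflow as tf
from packaging.version import Version

# LooseVersion allowed comparing against a plain string; Version does not,
# so both sides of the comparison are parsed explicitly.
if Version(tf.__version__) >= Version("1.13"):
    conv2d_backprop_input = tf.compat.v1.nn.conv2d_backprop_input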

tests/test_onnx_shape_inference.py

Lines changed: 2 additions & 2 deletions
@@ -353,7 +353,7 @@ def test_if(self):
         sub = else_subgraph.make_node("Sub", [INPUT1, INPUT3])
         else_subgraph.add_graph_output(sub.output[0])
 
-        cond = graph.make_const("cond", np.array(True, dtype=np.bool))
+        cond = graph.make_const("cond", np.array(True, dtype=bool))
         branches = {"then_branch": then_subgraph, "else_branch": else_subgraph}
         if_node = graph.make_node("If", [cond.output[0]], branches=branches)
 
@@ -381,7 +381,7 @@ def test_loop(self):
         subgraph.add_graph_output(out.output[0])
 
         max_iter = graph.make_const("max_iter", np.array([10], dtype=np.int64))
-        cond_const = graph.make_const("cond_const", np.array([True], dtype=np.bool))
+        cond_const = graph.make_const("cond_const", np.array([True], dtype=bool))
         branches = {"body": subgraph}
         loop = graph.make_node("Loop", [max_iter.output[0], cond_const.output[0], INPUT1],
                                output_count=2, branches=branches)

tests/test_optimizers.py

Lines changed: 3 additions & 3 deletions
@@ -988,7 +988,7 @@ def _define_loop_graph(external_inputs):
 
         def _make_loop(external_inputs, outputs):
             trip_cnt = self._make_onnx_const(np.array(10, dtype=np.int64), "trip_cnt")
-            cond = self._make_onnx_const(np.array(True, dtype=np.bool), "cond")
+            cond = self._make_onnx_const(np.array(True, dtype=bool), "cond")
             sub_graph = _define_loop_graph(external_inputs)
             loop_node = helper.make_node("Loop", ["trip_cnt", "cond", "cond"], outputs,
                                          name="loop", body=sub_graph)
@@ -1779,7 +1779,7 @@ def test_identity_in_subgraph_non_graph_output(self):
             ),
         )
 
-        cond_value = np.array(True, dtype=np.bool)
+        cond_value = np.array(True, dtype=bool)
         node3 = helper.make_node(
             'Constant',
             inputs=[],
@@ -1788,7 +1788,7 @@ def test_identity_in_subgraph_non_graph_output(self):
                 name='cond_value',
                 data_type=TensorProto.BOOL,
                 dims=iter_num_value.shape,
-                vals=cond_value.flatten().astype(np.bool).tolist(),
+                vals=cond_value.flatten().astype(bool).tolist(),
             ),
         )
 
tests/test_tfjs_runner.py

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@ class TestTfjsRunner(unittest.TestCase):
     def test_tfjs_runner(self):
         float_array = np.array([[1.1, 2.2], [3.3, 4.4]], np.float32)
         int_array = np.array([[1, 2], [3, 4]], np.int32)
-        bool_array = np.array([[True, False], [True, True]], np.bool)
+        bool_array = np.array([[True, False], [True, True]], bool)
         string_array = np.array([['Hello world', ''], ['π', 'Tensor']], np.str)
         complex_array = np.array([[1 + 0.1j, 2 + 0.2j], [3 + 0.3j, 4 + 0.4j]], np.complex64)
 

tf2onnx/custom_opsets/string_ops.py

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ def version_1(cls, ctx, node, **kwargs):
             del node.attr[a]
         unsqueeze_node = GraphBuilder(ctx).make_unsqueeze({'data': node.input[1], 'axes': [0]}, return_node=True)
 
-        skip_empty_const = ctx.make_const(utils.make_name('skip_empty_const'), np.array([skip_empty], np.bool))
+        skip_empty_const = ctx.make_const(utils.make_name('skip_empty_const'), np.array([skip_empty], bool))
         ctx.replace_inputs(node, [node.input[0], unsqueeze_node.output[0], skip_empty_const.output[0]])
 
 @tf_op("StringToHashBucketFast", domain=constants.CONTRIB_OPS_DOMAIN)

tf2onnx/onnx_opset/nn.py

Lines changed: 4 additions & 4 deletions
@@ -853,7 +853,7 @@ def convert_symmetric_pads(cls, ctx, node):
         output = node.output[0]
         shape = ctx.make_node("Shape", [output]).output[0]
         dims = ctx.make_node("Split", [shape], output_count=rank).output
-        two_false = ctx.make_const(utils.make_name("two_false"), np.array([False, False], np.bool)).output[0]
+        two_false = ctx.make_const(utils.make_name("two_false"), np.array([False, False], bool)).output[0]
         inv_second = ctx.make_const(utils.make_name("inv_second"), np.array([1, -1], np.int64)).output[0]
         dec_second = ctx.make_const(utils.make_name("dec_second"), np.array([0, 1], np.int64)).output[0]
         for a in non_zero_axes:
@@ -1325,7 +1325,7 @@ def any_version_after11(cls, opset, ctx, node, **kwargs):
         g.add_graph_output(cond_out_name, TensorProto.BOOL, [])
         g.add_graph_output(squeeze_x.output[0], ctx.get_dtype(node.input[0]), [-1, -1, -1])
         trip_node = ctx.make_node("Size", [box_ind])
-        cond_const = ctx.make_const(utils.make_name("cond"), np.ones((), dtype=np.bool))
+        cond_const = ctx.make_const(utils.make_name("cond"), np.ones((), dtype=bool))
         ctx.remove_node(node.name)
         branches = {"body": g}
         inner_loop = ctx.make_node("Loop", [trip_node.output[0], cond_const.output[0]], name=node.name,
@@ -1638,7 +1638,7 @@ def version_7(cls, ctx, node, **kwargs):
         # 2: "loop" to generate mask matrix: generate col or row of matrix one by one
         g = ctx.create_new_graph_with_same_config()
         node_name = utils.make_name("const_zero_bool")
-        const_zero_bool = g.make_const(name=node_name, np_val=np.array([[0]]).astype(np.bool))
+        const_zero_bool = g.make_const(name=node_name, np_val=np.array([[0]]).astype(bool))
         g.set_dtype(const_zero_bool.output[0], onnx_pb.TensorProto.BOOL)
 
         g.add_graph_input("trip", onnx_pb.TensorProto.INT64, [])
@@ -1668,7 +1668,7 @@
         line_num = ctx.make_node(op_type="Gather", inputs=[shape.output[0], col_or_row_num_index.output[0]])
         trip_cnt = line_num.output[0]
         node_name = utils.make_name("true")
-        cond = ctx.make_const(name=node_name, np_val=np.array(1).astype(np.bool))
+        cond = ctx.make_const(name=node_name, np_val=np.array(1).astype(bool))
         col_init = one_line.output[0]
 
         branches = {"body": g}

tf2onnx/onnx_opset/tensor.py

Lines changed: 4 additions & 4 deletions
@@ -497,7 +497,7 @@ def _make_gathernd_inner_loop(ctx, params, index, dtype):
     # gather_res = gather(gather_cur, index[i])
     scope_name = utils.make_name("gathernd_inner_loop")
     trip_node = ctx.make_node("Size", [index.output[0]])
-    cond_const = ctx.make_const(utils.make_name("cond"), np.ones((), dtype=np.bool))
+    cond_const = ctx.make_const(utils.make_name("cond"), np.ones((), dtype=bool))
     trip_name = utils.make_name("i")
     cond_name = utils.make_name("cond")
     cond_out_name = utils.make_name("cond_out")
@@ -548,7 +548,7 @@ def make_gathernd(ctx, params, indices, output, scope_name, t_params, shapes, dt
 
     # outter loop for each index
     # for (int i=0; i<outter_shape; i++) inner_loop(params, flatten_indices[i])
-    cond_const = ctx.make_const(utils.make_name("cond"), np.ones((), dtype=np.bool))
+    cond_const = ctx.make_const(utils.make_name("cond"), np.ones((), dtype=bool))
     ctx.make_const(utils.make_name("dummy"), np.ones((), dtype=np.int64))
 
     # body graph creation
@@ -2830,7 +2830,7 @@ def cum_prod_of_vector(vector):
         shape = ctx.get_shape(vector)
         rank = shape[0] if shape is not None else -1
         if rank != -1:
-            lower_tri = np.tri(rank, rank, dtype=np.bool)
+            lower_tri = np.tri(rank, rank, dtype=bool)
             lower_triangular_bool = ctx.make_const(utils.make_name("lower_tri_const"), lower_tri).output[0]
         else:
             rank = ctx.make_node("Shape", [vector]).output[0]
@@ -3306,7 +3306,7 @@ def normalize():
         body_graph.add_graph_output(padded_output.output[0], ctx.get_dtype(node.input[0]), per_loop_shape)
         body_graph.add_graph_output(gap_k.output[0], TensorProto.INT64, [-1])
         # make loop
-        cond_const = ctx.make_const(utils.make_name("cond"), np.ones((), dtype=np.bool))
+        cond_const = ctx.make_const(utils.make_name("cond"), np.ones((), dtype=bool))
         branches = {"body": body_graph}
         main_loop = ctx.make_node('Loop', [total_k.output[0], cond_const.output[0]], output_count=2, branches=branches)
         # reshape output

tf2onnx/utils.py

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@
     onnx_pb.TensorProto.UINT64: np.uint64,
     onnx_pb.TensorProto.INT64: np.int64,
     onnx_pb.TensorProto.UINT64: np.uint64,
-    onnx_pb.TensorProto.BOOL: np.bool,
+    onnx_pb.TensorProto.BOOL: bool,
     onnx_pb.TensorProto.COMPLEX64: np.complex64,
    onnx_pb.TensorProto.COMPLEX128: np.complex128,
    onnx_pb.TensorProto.STRING: object,
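
A quick note on why the builtin works as a value in this dtype map: NumPy canonicalizes bool to its bool_ dtype, so downstream np.dtype(...) lookups behave exactly as before. Sketch only; the dictionary name below is hypothetical, not the actual identifier in tf2onnx/utils.py:

import numpy as np
from onnx import onnx_pb

ONNX_TO_NUMPY = {onnx_pb.TensorProto.BOOL: bool}  # hypothetical table name
assert np.dtype(ONNX_TO_NUMPY[onnx_pb.TensorProto.BOOL]) == np.dtype(np.bool_)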
