Skip to content

Commit 3e490a8

Browse files
authored
Merge pull request #452 from mindest/dev_ci_build
fix ci build errors in tf 1.13
2 parents 0de7d19 + 73a9d34 commit 3e490a8

File tree

4 files changed

+58
-10
lines changed

4 files changed

+58
-10
lines changed

tests/common.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -124,6 +124,13 @@ def _append_message(reason, message):
124124
return reason
125125

126126

127+
def check_tf_max_version(max_accepted_version, message=""):
    """Return a unittest skip decorator: skip if tf_version > max_accepted_version.

    Counterpart of check_tf_min_version — used for tests that only make
    sense on older TF versions (e.g. pre-1.13 dropout keep_prob API).

    :param max_accepted_version: highest TF version (string) the test runs on
    :param message: optional extra text appended to the skip reason
    """
    config = get_test_config()
    reason = _append_message("conversion requires tf <= {}".format(max_accepted_version), message)
    return unittest.skipIf(config.tf_version > LooseVersion(max_accepted_version), reason)
127134
def check_tf_min_version(min_required_version, message=""):
128135
""" Skip if tf_version < min_required_version """
129136
config = get_test_config()

tests/test_backend.py

Lines changed: 25 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -370,6 +370,22 @@ def test_nn_dropout(self):
370370
# here we set it False to test PlaceholderWithDefault bug: https://github.com/onnx/tensorflow-onnx/pull/446
371371
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, constant_fold=False)
372372

373+
@check_tf_min_version("1.13")
374+
def test_nn_dropout_with_rate(self):
375+
rate = tf.placeholder_with_default(0., (), "rate")
376+
x_val = np.ones([1, 24, 24, 3], dtype=np.float32)
377+
# Define a scope for reusing the variables
378+
x = tf.placeholder(tf.float32, shape=x_val.shape, name="input_1")
379+
x_ = tf.identity(x)
380+
381+
fc1 = tf.nn.dropout(x_, rate=rate)
382+
383+
_ = tf.identity(fc1, name="output")
384+
feed_dict = {"input_1:0": x_val}
385+
input_names_with_port = ["input_1:0"]
386+
output_names_with_port = ["output:0"]
387+
self.run_test_case(feed_dict, input_names_with_port, output_names_with_port, constant_fold=False)
388+
373389
def test_conv2d_with_input_transpose(self):
374390
x_shape = [2, 32, 32, 3]
375391
kernel_shape = [3, 3, 3, 3]
@@ -784,13 +800,15 @@ def test_relu(self):
784800
@skip_caffe2_backend("fails on caffe2 with dim issue")
785801
@check_onnxruntime_incompatibility("Mul")
786802
def test_leaky_relu(self):
787-
for alpha in [0.1, -0.1, 1.0, -1.0, 10.0, -10.0]:
788-
x_val = 1000 * np.random.random_sample([1000, 100]).astype(np.float32)
789-
x = tf.placeholder(tf.float32, [None] * x_val.ndim, name=_TFINPUT)
790-
x_ = tf.nn.leaky_relu(x, alpha)
791-
_ = tf.identity(x_, name=_TFOUTPUT)
792-
self._run_test_case([_OUTPUT], {_INPUT: x_val})
793-
tf.reset_default_graph()
803+
x_types = [np.float32, np.int32, np.int64]
804+
for x_type in x_types:
805+
x_val = 1000 * np.random.random_sample([1000, 100]).astype(x_type)
806+
for alpha in [0.1, -0.1, 1.0, -1.0]:
807+
x = tf.placeholder(x_val.dtype, [None] * x_val.ndim, name=_TFINPUT)
808+
x_ = tf.nn.leaky_relu(x, alpha)
809+
_ = tf.identity(x_, name=_TFOUTPUT)
810+
self._run_test_case([_OUTPUT], {_INPUT: x_val})
811+
tf.reset_default_graph()
794812

795813
@check_onnxruntime_incompatibility("Elu")
796814
def test_elu(self):

tests/test_graph.py

Lines changed: 24 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
from tf2onnx.tfonnx import process_tf_graph
2525
from tf2onnx.handler import tf_op
2626

27-
from common import get_test_config, unittest_main
27+
from common import get_test_config, unittest_main, check_tf_min_version, check_tf_max_version
2828

2929

3030
# pylint: disable=missing-docstring,unused-argument,unused-variable
@@ -152,6 +152,7 @@ def test_randomnormal(self):
152152
'RandomNormal__2:0 -> output }'
153153
self.assertEqual(expected, actual)
154154

155+
@check_tf_max_version("1.12")
155156
def test_dropout(self):
156157
with tf.Session() as sess:
157158
x1 = tf.placeholder(tf.float32, [2, 3], name="input1")
@@ -170,6 +171,28 @@ def test_dropout(self):
170171
'Add:0 -> output1 output1:0 -> output2 output2:0 -> output }'
171172
self.assertEqual(expected, actual)
172173

174+
@check_tf_min_version("1.13")
175+
def test_dropout_2(self):
176+
with tf.Session() as sess:
177+
x1 = tf.placeholder(tf.float32, [2, 3], name="input1")
178+
x2 = tf.placeholder(tf.float32, [1, 3], name="input2")
179+
prop = tf.placeholder(tf.float32, name="prob")
180+
x_ = tf.add(x1, x2)
181+
x_ = tf.nn.dropout(x_, prop)
182+
x_ = tf.identity(x_, name="output1")
183+
x_ = tf.identity(x_, name="output2")
184+
_ = tf.identity(x_, name="output")
185+
g = process_tf_graph(sess.graph, opset=self.config.opset)
186+
actual = onnx_to_graphviz(g)
187+
expected = 'digraph { "dropout/sub/x" [op_type=Const] "sub/x" [op_type=Const] ' \
188+
'prob [op_type=Placeholder shape="[]"] sub [op_type=Sub] "dropout/sub" [op_type=Sub] ' \
189+
'input2 [op_type=Placeholder shape="[1, 3]"] input1 [op_type=Placeholder shape="[2, 3]"] ' \
190+
'Add [op_type=Add] output1 [op_type=Identity] output2 [op_type=Identity] ' \
191+
'output [op_type=Identity] "sub/x":0 -> sub prob:0 -> sub "dropout/sub/x":0 -> ' \
192+
'"dropout/sub" sub:0 -> "dropout/sub" input1:0 -> Add input2:0 -> Add Add:0 -> ' \
193+
'output1 output1:0 -> output2 output2:0 -> output }'
194+
self.assertEqual(expected, actual)
195+
173196
def test_add(self):
174197
with tf.Session() as sess:
175198
x1 = tf.placeholder(tf.float32, [2, 3], name="input1")

tf2onnx/onnx_opset/math.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,8 +32,8 @@ class RealDiv(common.BroadcastOp):
3232
pass
3333

3434

35-
@tf_op(["Abs", "Ceil", "Elu", "Exp", "Floor", "Log", "LogSoftmax", "Neg", "Relu", "Sigmoid", "Sqrt", "Tanh",
36-
"Softplus", "Softsign", "Reciprocal"])
35+
@tf_op(["Abs", "Ceil", "Elu", "Exp", "Floor", "LeakyRelu", "Log", "LogSoftmax", "Neg", "Relu", "Sigmoid", "Sqrt",
36+
"Tanh", "Softplus", "Softsign", "Reciprocal"])
3737
class DirectOp:
3838
@classmethod
3939
def version_4(cls, ctx, node, **kwargs):

0 commit comments

Comments
 (0)