diff --git a/help_utils/help_utils.py b/help_utils/help_utils.py index 050fc68..100bd37 100644 --- a/help_utils/help_utils.py +++ b/help_utils/help_utils.py @@ -167,4 +167,9 @@ def np_print(ary): result = tf.reshape(result, tf.shape(tensor)) result = tf.cast(result, tf.float32) sum_ = tf.reduce_sum(result) - tf.summary.scalar('print_s/{}'.format(tensor_name), sum_) + tf_major_ver = int(tf.__version__.split(".")[0]) + tf_minor_ver = int(tf.__version__.split(".")[1]) + if(tf_major_ver==0 and tf_minor_ver<12): + tf.scalar_summary('print_s/{}'.format(tensor_name), sum_) + else: + tf.summary.scalar('print_s/{}'.format(tensor_name), sum_) diff --git a/libs/box_utils/boxes_utils.py b/libs/box_utils/boxes_utils.py index fcba223..20ae251 100644 --- a/libs/box_utils/boxes_utils.py +++ b/libs/box_utils/boxes_utils.py @@ -7,6 +7,8 @@ import tensorflow as tf from libs.box_utils.coordinate_convert import forward_convert +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) def clip_boxes_to_img_boundaries(decode_boxes, img_shape): ''' @@ -88,10 +90,12 @@ def padd_boxes_with_zeros(boxes, scores, max_num_of_boxes): zero_boxes = tf.zeros(shape=[pad_num, 4], dtype=boxes.dtype) zero_scores = tf.zeros(shape=[pad_num], dtype=scores.dtype) - - final_boxes = tf.concat([boxes, zero_boxes], axis=0) - - final_scores = tf.concat([scores, zero_scores], axis=0) + if(tf_major_ver<1): + final_boxes = tf.concat([boxes, zero_boxes], concat_dim=0) + final_scores = tf.concat([scores, zero_scores], concat_dim=0) + else: + final_boxes = tf.concat([boxes, zero_boxes], axis=0) + final_scores = tf.concat([scores, zero_scores], axis=0) return final_boxes, final_scores diff --git a/libs/box_utils/nms_rotate.py b/libs/box_utils/nms_rotate.py index af02066..a13ff58 100644 --- a/libs/box_utils/nms_rotate.py +++ b/libs/box_utils/nms_rotate.py @@ -11,6 +11,8 @@ if cfgs.ROTATE_NMS_USE_GPU: from libs.box_utils.rotate_polygon_nms import rotate_gpu_nms +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) def nms_rotate(decode_boxes, scores, iou_threshold, max_output_size, use_angle_condition=False, angle_threshold=0, use_gpu=True, gpu_id=0): @@ -29,11 +31,16 @@ def nms_rotate(decode_boxes, scores, iou_threshold, max_output_size, angle_gap_threshold=angle_threshold, use_angle_condition=use_angle_condition, device_id=gpu_id) - - keep = tf.cond( - tf.greater(tf.shape(keep)[0], max_output_size), - true_fn=lambda: tf.slice(keep, [0], [max_output_size]), - false_fn=lambda: keep) + if(tf_major_ver==1 and tf_minor_ver<2) or (tf_major_ver==0): + keep = tf.cond( + tf.greater(tf.shape(keep)[0], max_output_size), + fn1=lambda: tf.slice(keep, [0], [max_output_size]), + fn2=lambda: keep) + else: + keep = tf.cond( + tf.greater(tf.shape(keep)[0], max_output_size), + true_fn=lambda: tf.slice(keep, [0], [max_output_size]), + false_fn=lambda: keep) + else: keep = tf.py_func(nms_rotate_cpu, @@ -87,7 +94,12 @@ def nms_rotate_gpu(boxes_list, scores, iou_threshold, use_angle_condition=False, if use_angle_condition: y_c, x_c, h, w, theta = tf.unstack(boxes_list, axis=1) boxes_list = tf.transpose(tf.stack([x_c, y_c, w, h, theta])) - det_tensor = tf.concat([boxes_list, tf.expand_dims(scores, axis=1)], axis=1) + if(tf_major_ver<1): + det_tensor = tf.concat([boxes_list, tf.expand_dims(scores, axis=1)], concat_dim=1) + else: + det_tensor = tf.concat([boxes_list, tf.expand_dims(scores, axis=1)], axis=1) + + keep = tf.py_func(rotate_gpu_nms, inp=[det_tensor, iou_threshold,
device_id], Tout=tf.int64) @@ -95,7 +107,11 @@ def nms_rotate_gpu(boxes_list, scores, iou_threshold, use_angle_condition=False, else: y_c, x_c, h, w, theta = tf.unstack(boxes_list, axis=1) boxes_list = tf.transpose(tf.stack([x_c, y_c, w, h, theta])) - det_tensor = tf.concat([boxes_list, tf.expand_dims(scores, axis=1)], axis=1) + if(tf_major_ver<1): + det_tensor = tf.concat([boxes_list, tf.expand_dims(scores, axis=1)], concat_dim=1) + else: + det_tensor = tf.concat([boxes_list, tf.expand_dims(scores, axis=1)], axis=1) + keep = tf.py_func(rotate_gpu_nms, inp=[det_tensor, iou_threshold, device_id], Tout=tf.int64) diff --git a/libs/fast_rcnn/build_fast_rcnn.py b/libs/fast_rcnn/build_fast_rcnn.py index b700ae6..28b24c8 100644 --- a/libs/fast_rcnn/build_fast_rcnn.py +++ b/libs/fast_rcnn/build_fast_rcnn.py @@ -14,7 +14,8 @@ import numpy as np DEBUG = False - +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) class FastRCNN(object): def __init__(self, @@ -133,9 +134,12 @@ def get_rois(self): [self.roi_pool_kernel_size, self.roi_pool_kernel_size], stride=self.roi_pool_kernel_size) all_level_roi_list.append(level_i_rois) - - all_level_rois = tf.concat(all_level_roi_list, axis=0) - all_level_proposals = tf.concat(all_level_proposal_list, axis=0) + if(tf_major_ver<1): + all_level_rois = tf.concat(all_level_roi_list, concat_dim=0) + all_level_proposals = tf.concat(all_level_proposal_list, concat_dim=0) + else: + all_level_rois = tf.concat(all_level_roi_list, axis=0) + all_level_proposals = tf.concat(all_level_proposal_list, axis=0) return all_level_rois, all_level_proposals @@ -219,8 +223,11 @@ def fast_rcnn_minibatch(self, reference_boxes): negative_indices = tf.random_shuffle(negative_indices) negative_indices = tf.slice(negative_indices, begin=[0], size=[num_of_negatives]) - - minibatch_indices = tf.concat([positive_indices, negative_indices], axis=0) + + if(tf_major_ver<1): + minibatch_indices = tf.concat([positive_indices, negative_indices], concat_dim=0) + else: + minibatch_indices = tf.concat([positive_indices, negative_indices], axis=0) minibatch_indices = tf.random_shuffle(minibatch_indices) minibatch_reference_boxes_mattached_gtboxes = tf.gather(reference_boxes_mattached_gtboxes, @@ -259,7 +266,10 @@ def fast_rcnn_loss(self): tmp_class_weights = tf.ones(shape=[tf.shape(minibatch_encode_boxes)[0], 4], dtype=tf.float32) tmp_class_weights = tmp_class_weights * tf.expand_dims(category_list[i], axis=1) class_weights_list.append(tmp_class_weights) - class_weights = tf.concat(class_weights_list, axis=1) # [minibatch_size, num_classes*4] + if(tf_major_ver<1): + class_weights = tf.concat(class_weights_list, concat_dim=1) # [minibatch_size, num_classes*4] + else: + class_weights = tf.concat(class_weights_list, axis=1) # [minibatch_size, num_classes*4] # loss with tf.variable_scope('fast_rcnn_classification_loss'): @@ -312,10 +322,15 @@ def fast_rcnn_proposals(self, decode_boxes, scores): tmp_category = tf.gather(category, valid_indices) category_list.append(tmp_category) + if(tf_major_ver<1): + all_nms_boxes = tf.concat(after_nms_boxes, concat_dim=0) + all_nms_scores = tf.concat(after_nms_scores, concat_dim=0) + all_category = tf.concat(category_list, concat_dim=0) + else: + all_nms_boxes = tf.concat(after_nms_boxes, axis=0) + all_nms_scores = tf.concat(after_nms_scores, axis=0) + all_category = tf.concat(category_list, axis=0) - all_nms_boxes = tf.concat(after_nms_boxes, axis=0) - all_nms_scores = tf.concat(after_nms_scores, axis=0) - all_category = 
tf.concat(category_list, axis=0) all_nms_boxes = boxes_utils.clip_boxes_to_img_boundaries(all_nms_boxes, img_shape=self.img_shape) diff --git a/libs/fast_rcnn/build_fast_rcnn1.py b/libs/fast_rcnn/build_fast_rcnn1.py index 336ad60..2010536 100644 --- a/libs/fast_rcnn/build_fast_rcnn1.py +++ b/libs/fast_rcnn/build_fast_rcnn1.py @@ -16,7 +16,8 @@ from libs.configs import cfgs DEBUG = False - +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) class FastRCNN(object): def __init__(self, @@ -136,9 +137,12 @@ def get_rois(self): [self.roi_pool_kernel_size, self.roi_pool_kernel_size], stride=self.roi_pool_kernel_size) all_level_roi_list.append(level_i_rois) - - all_level_rois = tf.concat(all_level_roi_list, axis=0) - all_level_proposals = tf.concat(all_level_proposal_list, axis=0) + if(tf_major_ver<1): + all_level_rois = tf.concat(all_level_roi_list, concat_dim=0) + all_level_proposals = tf.concat(all_level_proposal_list, concat_dim=0) + else: + all_level_rois = tf.concat(all_level_roi_list, axis=0) + all_level_proposals = tf.concat(all_level_proposal_list, axis=0) return all_level_rois, all_level_proposals def fast_rcnn_net(self): @@ -240,7 +244,10 @@ def fast_rcnn_minibatch(self, reference_boxes): negative_indices = tf.random_shuffle(negative_indices) negative_indices = tf.slice(negative_indices, begin=[0], size=[num_of_negatives]) - minibatch_indices = tf.concat([positive_indices, negative_indices], axis=0) + if(tf_major_ver<1): + minibatch_indices = tf.concat([positive_indices, negative_indices], concat_dim=0) + else: + minibatch_indices = tf.concat([positive_indices, negative_indices], axis=0) minibatch_indices = tf.random_shuffle(minibatch_indices) minibatch_reference_boxes_mattached_gtboxes = tf.gather(reference_boxes_mattached_gtboxes, @@ -298,7 +305,10 @@ def fast_rcnn_loss(self): tmp_class_weights = tf.ones(shape=[tf.shape(minibatch_encode_boxes)[0], 4], dtype=tf.float32) tmp_class_weights = tmp_class_weights * tf.expand_dims(category_list[i], axis=1) class_weights_list.append(tmp_class_weights) - class_weights = tf.concat(class_weights_list, axis=1) # [minibatch_size, num_classes*4] + if(tf_major_ver<1): + class_weights = tf.concat(class_weights_list, concat_dim=1) # [minibatch_size, num_classes*4] + else: + class_weights = tf.concat(class_weights_list, axis=1) # [minibatch_size, num_classes*4] class_weights_list_rotate = [] category_list_rotate = tf.unstack(minibatch_label_one_hot, axis=1) @@ -306,7 +316,10 @@ def fast_rcnn_loss(self): tmp_class_weights_rotate = tf.ones(shape=[tf.shape(minibatch_encode_boxes_rotate)[0], 5], dtype=tf.float32) tmp_class_weights_rotate = tmp_class_weights_rotate * tf.expand_dims(category_list_rotate[i], axis=1) class_weights_list_rotate.append(tmp_class_weights_rotate) - class_weights_rotate = tf.concat(class_weights_list_rotate, axis=1) # [minibatch_size, num_classes*5] + if(tf_major_ver<1): + class_weights_rotate = tf.concat(class_weights_list_rotate, concat_dim=1) # [minibatch_size, num_classes*5] + else: + class_weights_rotate = tf.concat(class_weights_list_rotate, axis=1) # [minibatch_size, num_classes*5] # loss with tf.variable_scope('fast_rcnn_classification_loss'): @@ -371,10 +384,14 @@ def fast_rcnn_proposals(self, decode_boxes, scores): tmp_category = tf.gather(category, valid_indices) category_list.append(tmp_category) - - all_nms_boxes = tf.concat(after_nms_boxes, axis=0) - all_nms_scores = tf.concat(after_nms_scores, axis=0) - all_category = tf.concat(category_list, axis=0) + if(tf_major_ver<1): 
+ all_nms_boxes = tf.concat(after_nms_boxes, concat_dim=0) + all_nms_scores = tf.concat(after_nms_scores, concat_dim=0) + all_category = tf.concat(category_list, concat_dim=0) + else: + all_nms_boxes = tf.concat(after_nms_boxes, axis=0) + all_nms_scores = tf.concat(after_nms_scores, axis=0) + all_category = tf.concat(category_list, axis=0) all_nms_boxes = boxes_utils.clip_boxes_to_img_boundaries(all_nms_boxes, img_shape=self.img_shape) @@ -429,10 +446,14 @@ def fast_rcnn_proposals_rotate(self, decode_boxes, scores): tmp_category = tf.gather(category, valid_indices) category_list.append(tmp_category) - - all_nms_boxes = tf.concat(after_nms_boxes, axis=0) - all_nms_scores = tf.concat(after_nms_scores, axis=0) - all_category = tf.concat(category_list, axis=0) + if(tf_major_ver<1): + all_nms_boxes = tf.concat(after_nms_boxes, concat_dim=0) + all_nms_scores = tf.concat(after_nms_scores, concat_dim=0) + all_category = tf.concat(category_list, concat_dim=0) + else: + all_nms_boxes = tf.concat(after_nms_boxes, axis=0) + all_nms_scores = tf.concat(after_nms_scores, axis=0) + all_category = tf.concat(category_list, axis=0) # all_nms_boxes = boxes_utils.clip_boxes_to_img_boundaries(all_nms_boxes, # img_shape=self.img_shape) diff --git a/libs/networks/nets/vggnet16.py b/libs/networks/nets/vggnet16.py index e62b839..9ac4bd9 100644 --- a/libs/networks/nets/vggnet16.py +++ b/libs/networks/nets/vggnet16.py @@ -13,7 +13,8 @@ from libs.configs import cfgs VGG_MEAN = [103.939, 116.779, 123.68] - +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) class Vgg16: def __init__(self, vgg16_npy_path=cfgs.VGG16_WEIGHT_PATH): @@ -72,18 +73,29 @@ def build(self, rgb, rgb2gbr=False): if rgb2gbr: # Convert RGB to BGR red, green, blue = tf.split(self.color, num_or_size_splits=3, axis=3) - self.color = tf.concat([blue - VGG_MEAN[0], - green - VGG_MEAN[1], - red - VGG_MEAN[2]], axis=3) + if(tf_major_ver<1): + self.color = tf.concat([blue - VGG_MEAN[0], + green - VGG_MEAN[1], + red - VGG_MEAN[2]], concat_dim=3) + else: + self.color = tf.concat([blue - VGG_MEAN[0], + green - VGG_MEAN[1], + red - VGG_MEAN[2]], axis=3) + self.conv1_1 = self.conv_op(input_op=self.color, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1) else: blue, green, red = tf.split(self.color, num_or_size_splits=3, axis=3) - self.color = tf.concat([blue - VGG_MEAN[0], - green - VGG_MEAN[1], - red - VGG_MEAN[2]], axis=3) + if(tf_major_ver<1): + self.color = tf.concat([blue - VGG_MEAN[0], + green - VGG_MEAN[1], + red - VGG_MEAN[2]], concat_dim=3) + else: + self.color = tf.concat([blue - VGG_MEAN[0], + green - VGG_MEAN[1], + red - VGG_MEAN[2]], axis=3) self.conv1_1 = self.conv_op(input_op=self.color, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1) diff --git a/libs/networks/slim_nets/alexnet_test.py b/libs/networks/slim_nets/alexnet_test.py index 6fc9a05..f269674 100644 --- a/libs/networks/slim_nets/alexnet_test.py +++ b/libs/networks/slim_nets/alexnet_test.py @@ -22,7 +22,8 @@ from nets import alexnet slim = tf.contrib.slim - +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) class AlexnetV2Test(tf.test.TestCase): @@ -137,7 +138,10 @@ def testForward(self): with self.test_session() as sess: inputs = tf.random_uniform((batch_size, height, width, 3)) logits, _ = alexnet.alexnet_v2(inputs) - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + 
sess.run(tf.global_variables_initializer()) output = sess.run(logits) self.assertTrue(output.any()) diff --git a/libs/networks/slim_nets/inception_resnet_v2_test.py b/libs/networks/slim_nets/inception_resnet_v2_test.py index c369ed9..e413200 100644 --- a/libs/networks/slim_nets/inception_resnet_v2_test.py +++ b/libs/networks/slim_nets/inception_resnet_v2_test.py @@ -20,7 +20,8 @@ import tensorflow as tf from nets import inception - +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) class InceptionTest(tf.test.TestCase): @@ -224,7 +225,10 @@ def testUnknownBatchSize(self): self.assertListEqual(logits.get_shape().as_list(), [None, num_classes]) images = tf.random_uniform((batch_size, height, width, 3)) - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEquals(output.shape, (batch_size, num_classes)) @@ -238,7 +242,10 @@ def testEvaluation(self): num_classes, is_training=False) predictions = tf.argmax(logits, 1) - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (batch_size,)) @@ -256,7 +263,10 @@ def testTrainEvalWithReuse(self): is_training=False, reuse=True) predictions = tf.argmax(logits, 1) - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (eval_batch_size,)) diff --git a/libs/networks/slim_nets/inception_v1.py b/libs/networks/slim_nets/inception_v1.py index 4207c2a..ad66c84 100644 --- a/libs/networks/slim_nets/inception_v1.py +++ b/libs/networks/slim_nets/inception_v1.py @@ -24,7 +24,8 @@ slim = tf.contrib.slim trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev) - +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) def inception_v1_base(inputs, final_endpoint='Mixed_5c', @@ -93,7 +94,10 @@ def inception_v1_base(inputs, with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if final_endpoint == end_point: return net, end_points @@ -110,7 +114,10 @@ def inception_v1_base(inputs, with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if final_endpoint == end_point: return net, end_points @@ -132,7 +139,10 @@ def inception_v1_base(inputs, with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, 
[3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if final_endpoint == end_point: return net, end_points @@ -149,7 +159,10 @@ def inception_v1_base(inputs, with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if final_endpoint == end_point: return net, end_points @@ -166,7 +179,10 @@ def inception_v1_base(inputs, with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if final_endpoint == end_point: return net, end_points @@ -183,7 +199,10 @@ def inception_v1_base(inputs, with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if final_endpoint == end_point: return net, end_points @@ -200,7 +219,10 @@ def inception_v1_base(inputs, with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if final_endpoint == end_point: return net, end_points @@ -222,7 +244,10 @@ def inception_v1_base(inputs, with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if final_endpoint == end_point: return net, end_points @@ -239,7 +264,10 @@ def inception_v1_base(inputs, with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, 
branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if final_endpoint == end_point: return net, end_points raise ValueError('Unknown final endpoint %s' % final_endpoint) diff --git a/libs/networks/slim_nets/inception_v1_test.py b/libs/networks/slim_nets/inception_v1_test.py index 11eb14e..bce07fe 100644 --- a/libs/networks/slim_nets/inception_v1_test.py +++ b/libs/networks/slim_nets/inception_v1_test.py @@ -24,7 +24,8 @@ from nets import inception slim = tf.contrib.slim - +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) class InceptionV1Test(tf.test.TestCase): @@ -140,7 +141,10 @@ def testUnknownImageShape(self): [batch_size, num_classes]) pre_pool = end_points['Mixed_5c'] feed_dict = {inputs: input_np} - tf.global_variables_initializer().run() + if(tf_major_ver==0 and tf_minor_ver<12): + tf.initialize_all_variables().run() + else: + tf.global_variables_initializer().run() pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024]) @@ -157,7 +161,10 @@ def testUnknowBatchSize(self): images = tf.random_uniform((batch_size, height, width, 3)) with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEquals(output.shape, (batch_size, num_classes)) @@ -172,7 +179,10 @@ def testEvaluation(self): predictions = tf.argmax(logits, 1) with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (batch_size,)) @@ -189,7 +199,10 @@ def testTrainEvalWithReuse(self): predictions = tf.argmax(logits, 1) with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (eval_batch_size,)) @@ -201,7 +214,10 @@ def testLogitsNotSqueezed(self): spatial_squeeze=False) with self.test_session() as sess: - tf.global_variables_initializer().run() + if(tf_major_ver==0 and tf_minor_ver<12): + tf.initialize_all_variables().run() + else: + tf.global_variables_initializer().run() logits_out = sess.run(logits) self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes]) diff --git a/libs/networks/slim_nets/inception_v2.py b/libs/networks/slim_nets/inception_v2.py index 2651f71..97d9faa 100644 --- a/libs/networks/slim_nets/inception_v2.py +++ b/libs/networks/slim_nets/inception_v2.py @@ -24,7 +24,8 @@ slim = tf.contrib.slim trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev) - +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) def inception_v2_base(inputs, final_endpoint='Mixed_5c', @@ -145,7 +146,10 @@ def inception_v2_base(inputs, branch_3, depth(32), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + 
if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 28 x 28 x 256 @@ -175,7 +179,10 @@ def inception_v2_base(inputs, branch_3, depth(64), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 28 x 28 x 320 @@ -200,7 +207,11 @@ def inception_v2_base(inputs, with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d( net, [3, 3], stride=2, scope='MaxPool_1a_3x3') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) + end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 @@ -230,7 +241,10 @@ def inception_v2_base(inputs, branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 @@ -260,7 +274,10 @@ def inception_v2_base(inputs, branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 @@ -290,7 +307,10 @@ def inception_v2_base(inputs, branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points @@ -321,7 +341,10 @@ def inception_v2_base(inputs, branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 @@ -346,7 +369,10 @@ def inception_v2_base(inputs, with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_1a_3x3') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, 
values=[branch_0, branch_1, branch_2]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 7 x 7 x 1024 @@ -376,7 +402,10 @@ def inception_v2_base(inputs, branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points @@ -407,7 +436,10 @@ def inception_v2_base(inputs, branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points raise ValueError('Unknown final endpoint %s' % final_endpoint) diff --git a/libs/networks/slim_nets/inception_v2_test.py b/libs/networks/slim_nets/inception_v2_test.py index 397aa50..ccd707f 100644 --- a/libs/networks/slim_nets/inception_v2_test.py +++ b/libs/networks/slim_nets/inception_v2_test.py @@ -25,6 +25,8 @@ slim = tf.contrib.slim +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) class InceptionV2Test(tf.test.TestCase): @@ -192,7 +194,10 @@ def testUnknownImageShape(self): [batch_size, num_classes]) pre_pool = end_points['Mixed_5c'] feed_dict = {inputs: input_np} - tf.global_variables_initializer().run() + if(tf_major_ver==0 and tf_minor_ver<12): + tf.initialize_all_variables().run() + else: + tf.global_variables_initializer().run() pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024]) @@ -209,7 +214,10 @@ def testUnknowBatchSize(self): images = tf.random_uniform((batch_size, height, width, 3)) with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEquals(output.shape, (batch_size, num_classes)) @@ -224,7 +232,10 @@ def testEvaluation(self): predictions = tf.argmax(logits, 1) with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (batch_size,)) @@ -241,7 +252,10 @@ def testTrainEvalWithReuse(self): predictions = tf.argmax(logits, 1) with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (eval_batch_size,)) @@ -253,7 +267,10 @@ def testLogitsNotSqueezed(self): spatial_squeeze=False) with self.test_session() as sess: - tf.global_variables_initializer().run() + if(tf_major_ver==0 and tf_minor_ver<12): + 
tf.initialize_all_variables().run() + else: + tf.global_variables_initializer().run() logits_out = sess.run(logits) self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes]) diff --git a/libs/networks/slim_nets/inception_v3.py b/libs/networks/slim_nets/inception_v3.py index d64bcfd..1e4f240 100644 --- a/libs/networks/slim_nets/inception_v3.py +++ b/libs/networks/slim_nets/inception_v3.py @@ -25,6 +25,8 @@ slim = tf.contrib.slim trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev) +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) def inception_v3_base(inputs, final_endpoint='Mixed_7c', @@ -158,7 +160,10 @@ def inception_v3_base(inputs, branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(32), [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points @@ -182,7 +187,10 @@ def inception_v3_base(inputs, branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points @@ -205,7 +213,10 @@ def inception_v3_base(inputs, branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points @@ -224,7 +235,10 @@ def inception_v3_base(inputs, with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points @@ -253,7 +267,10 @@ def inception_v3_base(inputs, branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points @@ -282,7 +299,10 @@ def inception_v3_base(inputs, branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, 
branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # mixed_6: 17 x 17 x 768. @@ -310,7 +330,10 @@ def inception_v3_base(inputs, branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points @@ -339,7 +362,10 @@ def inception_v3_base(inputs, branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points @@ -361,7 +387,10 @@ def inception_v3_base(inputs, with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # mixed_9: 8 x 8 x 2048. 
@@ -371,21 +400,34 @@ def inception_v3_base(inputs, branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1') - branch_1 = tf.concat(axis=3, values=[ - slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'), - slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')]) + if(tf_major_ver<1): + branch_1 = tf.concat(concat_dim=3, values=[ + slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'), + slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')]) + else: + branch_1 = tf.concat(axis=3, values=[ + slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'), + slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')]) with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1') branch_2 = slim.conv2d( branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3') - branch_2 = tf.concat(axis=3, values=[ - slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'), - slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')]) + if(tf_major_ver<1): + branch_2 = tf.concat(concat_dim=3, values=[ + slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'), + slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')]) + else: + branch_2 = tf.concat(axis=3, values=[ + slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'), + slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')]) with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points @@ -396,21 +438,36 @@ def inception_v3_base(inputs, branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1') - branch_1 = tf.concat(axis=3, values=[ - slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'), - slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')]) + if(tf_major_ver<1): + branch_1 = tf.concat(concat_dim=3, values=[ + slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'), + slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')]) + else: + branch_1 = tf.concat(axis=3, values=[ + slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'), + slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')]) + + with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1') branch_2 = slim.conv2d( branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3') - branch_2 = tf.concat(axis=3, values=[ - slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'), - slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')]) + if(tf_major_ver<1): + branch_2 = tf.concat(concat_dim=3, values=[ + slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'), + slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')]) + else: + branch_2 = tf.concat(axis=3, values=[ + slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'), + 
slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')]) with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1') - net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points raise ValueError('Unknown final endpoint %s' % final_endpoint) diff --git a/libs/networks/slim_nets/inception_v3_test.py b/libs/networks/slim_nets/inception_v3_test.py index a6f3c95..ea476ff 100644 --- a/libs/networks/slim_nets/inception_v3_test.py +++ b/libs/networks/slim_nets/inception_v3_test.py @@ -25,6 +25,8 @@ slim = tf.contrib.slim +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) class InceptionV3Test(tf.test.TestCase): @@ -221,7 +223,10 @@ def testUnknownImageShape(self): [batch_size, num_classes]) pre_pool = end_points['Mixed_7c'] feed_dict = {inputs: input_np} - tf.global_variables_initializer().run() + if(tf_major_ver==0 and tf_minor_ver<12): + tf.initialize_all_variables().run() + else: + tf.global_variables_initializer().run() pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048]) @@ -238,7 +243,10 @@ def testUnknowBatchSize(self): images = tf.random_uniform((batch_size, height, width, 3)) with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEquals(output.shape, (batch_size, num_classes)) @@ -253,7 +261,10 @@ def testEvaluation(self): predictions = tf.argmax(logits, 1) with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (batch_size,)) @@ -271,7 +282,10 @@ def testTrainEvalWithReuse(self): predictions = tf.argmax(logits, 1) with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (eval_batch_size,)) @@ -283,7 +297,10 @@ def testLogitsNotSqueezed(self): spatial_squeeze=False) with self.test_session() as sess: - tf.global_variables_initializer().run() + if(tf_major_ver==0 and tf_minor_ver<12): + tf.initialize_all_variables().run() + else: + tf.global_variables_initializer().run() logits_out = sess.run(logits) self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes]) diff --git a/libs/networks/slim_nets/inception_v4.py b/libs/networks/slim_nets/inception_v4.py index b4f07ea..2fa3e06 100644 --- a/libs/networks/slim_nets/inception_v4.py +++ b/libs/networks/slim_nets/inception_v4.py @@ -29,7 +29,8 @@ from nets import inception_utils slim = tf.contrib.slim - +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) def block_inception_a(inputs, scope=None, 
reuse=None): """Builds Inception-A block for Inception v4 network.""" @@ -49,7 +50,10 @@ def block_inception_a(inputs, scope=None, reuse=None): with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1') - return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + return tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) def block_reduction_a(inputs, scope=None, reuse=None): @@ -69,7 +73,10 @@ def block_reduction_a(inputs, scope=None, reuse=None): with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3') - return tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) + if(tf_major_ver<1): + return tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2]) + else: + return tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) def block_inception_b(inputs, scope=None, reuse=None): @@ -93,7 +100,10 @@ def block_inception_b(inputs, scope=None, reuse=None): with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1') - return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + return tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) def block_reduction_b(inputs, scope=None, reuse=None): @@ -115,7 +125,10 @@ def block_reduction_b(inputs, scope=None, reuse=None): with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3') - return tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) + if(tf_major_ver<1): + return tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2]) + else: + return tf.concat(axis=3, values=[branch_0, branch_1, branch_2]) def block_inception_c(inputs, scope=None, reuse=None): @@ -128,20 +141,33 @@ def block_inception_c(inputs, scope=None, reuse=None): branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1') - branch_1 = tf.concat(axis=3, values=[ - slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'), - slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')]) + if(tf_major_ver<1): + branch_1 = tf.concat(concat_dim=3, values=[ + slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'), + slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')]) + else: + branch_1 = tf.concat(axis=3, values=[ + slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'), + slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')]) with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1') branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3') - branch_2 = tf.concat(axis=3, values=[ - slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'), - slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')]) + if(tf_major_ver<1): + branch_2 = tf.concat(concat_dim=3, values=[ + slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'), + slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')]) 
+ else: + branch_2 = tf.concat(axis=3, values=[ + slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'), + slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')]) with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1') - return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) + if(tf_major_ver<1): + return tf.concat(concat_dim=3, values=[branch_0, branch_1, branch_2, branch_3]) + else: + return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3]) def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None): @@ -192,7 +218,10 @@ def add_and_check_final(name, net): with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID', scope='Conv2d_0a_3x3') - net = tf.concat(axis=3, values=[branch_0, branch_1]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1]) if add_and_check_final('Mixed_3a', net): return net, end_points # 73 x 73 x 160 @@ -207,7 +236,10 @@ def add_and_check_final(name, net): branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1') branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID', scope='Conv2d_1a_3x3') - net = tf.concat(axis=3, values=[branch_0, branch_1]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1]) if add_and_check_final('Mixed_4a', net): return net, end_points # 71 x 71 x 192 @@ -218,7 +250,10 @@ def add_and_check_final(name, net): with tf.variable_scope('Branch_1'): branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3') - net = tf.concat(axis=3, values=[branch_0, branch_1]) + if(tf_major_ver<1): + net = tf.concat(concat_dim=3, values=[branch_0, branch_1]) + else: + net = tf.concat(axis=3, values=[branch_0, branch_1]) if add_and_check_final('Mixed_5a', net): return net, end_points # 35 x 35 x 384 diff --git a/libs/networks/slim_nets/inception_v4_test.py b/libs/networks/slim_nets/inception_v4_test.py index 11cffb6..3abd382 100644 --- a/libs/networks/slim_nets/inception_v4_test.py +++ b/libs/networks/slim_nets/inception_v4_test.py @@ -20,7 +20,8 @@ import tensorflow as tf from nets import inception - +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) class InceptionTest(tf.test.TestCase): @@ -175,7 +176,10 @@ def testUnknownBatchSize(self): self.assertListEqual(logits.get_shape().as_list(), [None, num_classes]) images = tf.random_uniform((batch_size, height, width, 3)) - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEquals(output.shape, (batch_size, num_classes)) @@ -189,7 +193,10 @@ def testEvaluation(self): num_classes, is_training=False) predictions = tf.argmax(logits, 1) - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (batch_size,)) @@ -207,7 +214,10 @@ def testTrainEvalWithReuse(self): is_training=False, reuse=True) predictions = tf.argmax(logits, 1) - 
sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (eval_batch_size,)) diff --git a/libs/networks/slim_nets/mobilenet_v1_test.py b/libs/networks/slim_nets/mobilenet_v1_test.py index 44e6644..9d0bc9d 100644 --- a/libs/networks/slim_nets/mobilenet_v1_test.py +++ b/libs/networks/slim_nets/mobilenet_v1_test.py @@ -24,7 +24,8 @@ from nets import mobilenet_v1 slim = tf.contrib.slim - +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) class MobilenetV1Test(tf.test.TestCase): @@ -379,7 +380,10 @@ def testUnknownImageShape(self): [batch_size, num_classes]) pre_pool = end_points['Conv2d_13_pointwise'] feed_dict = {inputs: input_np} - tf.global_variables_initializer().run() + if(tf_major_ver==0 and tf_minor_ver<12): + tf.initialize_all_variables().run() + else: + tf.global_variables_initializer().run() pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024]) @@ -396,7 +400,10 @@ def testUnknowBatchSize(self): images = tf.random_uniform((batch_size, height, width, 3)) with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEquals(output.shape, (batch_size, num_classes)) @@ -411,7 +418,10 @@ def testEvaluation(self): predictions = tf.argmax(logits, 1) with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + tf.initialize_all_variables().run() + else: + sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (batch_size,)) @@ -429,7 +439,10 @@ def testTrainEvalWithReuse(self): predictions = tf.argmax(logits, 1) with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (eval_batch_size,)) @@ -441,7 +454,10 @@ def testLogitsNotSqueezed(self): spatial_squeeze=False) with self.test_session() as sess: - tf.global_variables_initializer().run() + if(tf_major_ver==0 and tf_minor_ver<12): + tf.initialize_all_variables().run() + else: + tf.global_variables_initializer().run() logits_out = sess.run(logits) self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes]) diff --git a/libs/networks/slim_nets/overfeat_test.py b/libs/networks/slim_nets/overfeat_test.py index 446f9ac..1b241b5 100644 --- a/libs/networks/slim_nets/overfeat_test.py +++ b/libs/networks/slim_nets/overfeat_test.py @@ -22,7 +22,8 @@ from nets import overfeat slim = tf.contrib.slim - +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) class OverFeatTest(tf.test.TestCase): @@ -137,7 +138,10 @@ def testForward(self): with self.test_session() as sess: inputs = tf.random_uniform((batch_size, height, width, 3)) logits, _ = overfeat.overfeat(inputs) - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) 
output = sess.run(logits) self.assertTrue(output.any()) diff --git a/libs/networks/slim_nets/resnet_v1_test.py b/libs/networks/slim_nets/resnet_v1_test.py index 5b27a18..006cde0 100644 --- a/libs/networks/slim_nets/resnet_v1_test.py +++ b/libs/networks/slim_nets/resnet_v1_test.py @@ -25,7 +25,8 @@ from nets import resnet_v1 slim = tf.contrib.slim - +tf_major_ver = int(tf.__version__.split(".")[0]) +tf_minor_ver = int(tf.__version__.split(".")[1]) def create_test_input(batch_size, height, width, channels): """Create test input tensor. @@ -104,7 +105,10 @@ def testConv2DSameEven(self): y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1]) with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) self.assertAllClose(y1.eval(), y1_expected.eval()) self.assertAllClose(y2.eval(), y2_expected.eval()) self.assertAllClose(y3.eval(), y3_expected.eval()) @@ -145,7 +149,10 @@ def testConv2DSameOdd(self): y4_expected = y2_expected with self.test_session() as sess: - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) self.assertAllClose(y1.eval(), y1_expected.eval()) self.assertAllClose(y2.eval(), y2_expected.eval()) self.assertAllClose(y3.eval(), y3_expected.eval()) @@ -236,7 +243,10 @@ def testAtrousValuesBottleneck(self): tf.get_variable_scope().reuse_variables() # Feature extraction at the nominal network rate. expected = self._stack_blocks_nondense(inputs, blocks) - sess.run(tf.global_variables_initializer()) + if(tf_major_ver==0 and tf_minor_ver<12): + sess.run(tf.initialize_all_variables()) + else: + sess.run(tf.global_variables_initializer()) output, expected = sess.run([output, expected]) self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4) @@ -378,7 +388,10 @@ def testAtrousFullyConvolutionalValues(self): # Feature extraction at the nominal network rate. 
diff --git a/libs/networks/slim_nets/resnet_v2_test.py b/libs/networks/slim_nets/resnet_v2_test.py
index c181664..1e7551c 100644
--- a/libs/networks/slim_nets/resnet_v2_test.py
+++ b/libs/networks/slim_nets/resnet_v2_test.py
@@ -25,7 +25,8 @@ from nets import resnet_v2
 slim = tf.contrib.slim
-
+tf_major_ver = int(tf.__version__.split(".")[0])
+tf_minor_ver = int(tf.__version__.split(".")[1])

 def create_test_input(batch_size, height, width, channels):
   """Create test input tensor.

@@ -104,7 +105,10 @@ def testConv2DSameEven(self):
     y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])

     with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      if(tf_major_ver==0 and tf_minor_ver<12):
+        sess.run(tf.initialize_all_variables())
+      else:
+        sess.run(tf.global_variables_initializer())
       self.assertAllClose(y1.eval(), y1_expected.eval())
       self.assertAllClose(y2.eval(), y2_expected.eval())
       self.assertAllClose(y3.eval(), y3_expected.eval())

@@ -145,7 +149,11 @@ def testConv2DSameOdd(self):
     y4_expected = y2_expected

     with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      if(tf_major_ver==0 and tf_minor_ver<12):
+        sess.run(tf.initialize_all_variables())
+      else:
+        sess.run(tf.global_variables_initializer())
+
       self.assertAllClose(y1.eval(), y1_expected.eval())
       self.assertAllClose(y2.eval(), y2_expected.eval())
       self.assertAllClose(y3.eval(), y3_expected.eval())

@@ -236,7 +244,10 @@ def testAtrousValuesBottleneck(self):
         tf.get_variable_scope().reuse_variables()
         # Feature extraction at the nominal network rate.
         expected = self._stack_blocks_nondense(inputs, blocks)
-        sess.run(tf.global_variables_initializer())
+        if(tf_major_ver==0 and tf_minor_ver<12):
+          sess.run(tf.initialize_all_variables())
+        else:
+          sess.run(tf.global_variables_initializer())
         output, expected = sess.run([output, expected])
         self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)

@@ -380,7 +391,10 @@ def testAtrousFullyConvolutionalValues(self):
         expected, _ = self._resnet_small(inputs, None, is_training=False,
                                          global_pool=False)
-        sess.run(tf.global_variables_initializer())
+        if(tf_major_ver==0 and tf_minor_ver<12):
+          sess.run(tf.initialize_all_variables())
+        else:
+          sess.run(tf.global_variables_initializer())
         self.assertAllClose(output.eval(), expected.eval(), atol=1e-4, rtol=1e-4)

@@ -399,7 +413,10 @@ def testUnknownBatchSize(self):
                         [None, 1, 1, num_classes])
     images = create_test_input(batch, height, width, 3)
     with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      if(tf_major_ver==0 and tf_minor_ver<12):
+        sess.run(tf.initialize_all_variables())
+      else:
+        sess.run(tf.global_variables_initializer())
       output = sess.run(logits, {inputs: images.eval()})
       self.assertEqual(output.shape, (batch, 1, 1, num_classes))

@@ -415,7 +432,10 @@ def testFullyConvolutionalUnknownHeightWidth(self):
                         [batch, None, None, 32])
     images = create_test_input(batch, height, width, 3)
     with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      if(tf_major_ver==0 and tf_minor_ver<12):
+        sess.run(tf.initialize_all_variables())
+      else:
+        sess.run(tf.global_variables_initializer())
       output = sess.run(output, {inputs: images.eval()})
       self.assertEqual(output.shape, (batch, 3, 3, 32))

@@ -434,7 +454,10 @@ def testAtrousFullyConvolutionalUnknownHeightWidth(self):
                         [batch, None, None, 32])
     images = create_test_input(batch, height, width, 3)
     with self.test_session() as sess:
-      sess.run(tf.global_variables_initializer())
+      if(tf_major_ver==0 and tf_minor_ver<12):
+        sess.run(tf.initialize_all_variables())
+      else:
+        sess.run(tf.global_variables_initializer())
       output = sess.run(output, {inputs: images.eval()})
       self.assertEqual(output.shape, (batch, 9, 9, 32))
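Note: a third option for the initializer changes is to alias the missing symbols once at import time instead of branching at each call site. This monkey-patches the tf module, which is a trade-off, so it is only a sketch under the assumption that a shared compat module is imported before any of these files:

    import tensorflow as tf

    # On pre-0.12 TF the new names do not exist; point them at the old ops.
    # Newer TF is left untouched.
    if not hasattr(tf, "global_variables_initializer"):
        tf.global_variables_initializer = tf.initialize_all_variables
    if not hasattr(tf, "local_variables_initializer"):
        tf.local_variables_initializer = tf.initialize_local_variables

With this in place, the tools/eval*.py and tools/test*.py hunks further down would not need their own version checks.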
diff --git a/libs/networks/slim_nets/vgg_test.py b/libs/networks/slim_nets/vgg_test.py
index 8e383b3..51dcb9d 100644
--- a/libs/networks/slim_nets/vgg_test.py
+++ b/libs/networks/slim_nets/vgg_test.py
@@ -22,7 +22,8 @@ from nets import vgg
 slim = tf.contrib.slim
-
+tf_major_ver = int(tf.__version__.split(".")[0])
+tf_minor_ver = int(tf.__version__.split(".")[1])

 class VGGATest(tf.test.TestCase):

@@ -148,7 +149,10 @@ def testForward(self):
     with self.test_session() as sess:
       inputs = tf.random_uniform((batch_size, height, width, 3))
       logits, _ = vgg.vgg_a(inputs)
-      sess.run(tf.global_variables_initializer())
+      if(tf_major_ver==0 and tf_minor_ver<12):
+        sess.run(tf.initialize_all_variables())
+      else:
+        sess.run(tf.global_variables_initializer())
       output = sess.run(logits)
       self.assertTrue(output.any())

@@ -292,7 +296,10 @@ def testForward(self):
     with self.test_session() as sess:
       inputs = tf.random_uniform((batch_size, height, width, 3))
       logits, _ = vgg.vgg_16(inputs)
-      sess.run(tf.global_variables_initializer())
+      if(tf_major_ver==0 and tf_minor_ver<12):
+        sess.run(tf.initialize_all_variables())
+      else:
+        sess.run(tf.global_variables_initializer())
       output = sess.run(logits)
       self.assertTrue(output.any())

@@ -447,7 +454,10 @@ def testForward(self):
     with self.test_session() as sess:
       inputs = tf.random_uniform((batch_size, height, width, 3))
       logits, _ = vgg.vgg_19(inputs)
-      sess.run(tf.global_variables_initializer())
+      if(tf_major_ver==0 and tf_minor_ver<12):
+        sess.run(tf.initialize_all_variables())
+      else:
+        sess.run(tf.global_variables_initializer())
       output = sess.run(logits)
       self.assertTrue(output.any())
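Note on the build_rpn.py hunks below: the pre-1.0 branch calls tf.concat(values, concat_dim=N). If I recall the pre-1.0 signature correctly (tf.concat(concat_dim, values, name='concat')), passing the tensor list positionally while also giving concat_dim as a keyword would raise a TypeError on old TF, so the fallback may need its arguments swapped. A hedged sketch of a wrapper that keeps the call sites uniform (the helper name is mine, and the exact cut-over version should be checked against the release notes):

    import tensorflow as tf

    _TF_MAJOR = int(tf.__version__.split(".")[0])

    def concat_compat(values, axis, name="concat"):
        # TF >= 1.0 takes (values, axis); older TF expects the axis first.
        if _TF_MAJOR >= 1:
            return tf.concat(values, axis, name=name)
        return tf.concat(axis, values, name=name)

    # e.g. in build_dense_feature_pyramid: p = concat_compat(p_concat, axis=3)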
diff --git a/libs/rpn/build_rpn.py b/libs/rpn/build_rpn.py
index 81f0d4e..063c992 100644
--- a/libs/rpn/build_rpn.py
+++ b/libs/rpn/build_rpn.py
@@ -16,6 +16,8 @@ from libs.losses import losses
 from help_utils.help_utils import print_tensors

 DEBUG = True
+tf_major_ver = int(tf.__version__.split(".")[0])
+tf_minor_ver = int(tf.__version__.split(".")[1])


 class RPN(object):
@@ -140,8 +142,10 @@ def build_dense_feature_pyramid(self):
                     p_sub = tf.image.resize_nearest_neighbor(p_temp, [up_sample_shape[1], up_sample_shape[2]],
                                                              name='build_P%d/up_sample_nearest_neighbor' % layer)
                     p_concat.append(p_sub)
-
-                p = tf.concat(p_concat, axis=3)
+                if(tf_major_ver < 1):
+                    p = tf.concat(p_concat,concat_dim=3)
+                else:
+                    p = tf.concat(p_concat, axis=3)
                 p_conv = slim.conv2d(p, 256, kernel_size=[3, 3], stride=[1, 1], padding='SAME',
                                      scope='build_P%d/avoid_aliasing' % layer)
@@ -211,8 +215,10 @@ def make_anchors(self):
                                                           name='make_anchors_{}'.format(level))
                 tmp_anchors = tf.reshape(tmp_anchors, [-1, 4])
                 anchor_list.append(tmp_anchors)
-
-            all_level_anchors = tf.concat(anchor_list, axis=0)
+            if(tf_major_ver < 1):
+                all_level_anchors = tf.concat(anchor_list, concat_dim=0)
+            else:
+                all_level_anchors = tf.concat(anchor_list, axis=0)
             return all_level_anchors

     def rpn_net(self):
@@ -256,9 +262,12 @@ def rpn_net(self):
                     rpn_scores_list.append(rpn_box_scores)
                     rpn_encode_boxes_list.append(rpn_encode_boxes)
-
-                rpn_all_encode_boxes = tf.concat(rpn_encode_boxes_list, axis=0)
-                rpn_all_boxes_scores = tf.concat(rpn_scores_list, axis=0)
+                if(tf_major_ver<1):
+                    rpn_all_encode_boxes = tf.concat(rpn_encode_boxes_list, concat_dim=0)
+                    rpn_all_boxes_scores = tf.concat(rpn_scores_list, concat_dim=0)
+                else:
+                    rpn_all_encode_boxes = tf.concat(rpn_encode_boxes_list, axis=0)
+                    rpn_all_boxes_scores = tf.concat(rpn_scores_list, axis=0)

             return rpn_all_encode_boxes, rpn_all_boxes_scores
@@ -366,8 +375,10 @@ def make_minibatch(self, valid_anchors):
             negatives_indices = tf.random_shuffle(negatives_indices)
             negatives_indices = tf.slice(negatives_indices, begin=[0], size=[num_of_negatives])
-
-            minibatch_indices = tf.concat([positive_indices, negatives_indices], axis=0)
+            if(tf_major_ver<1):
+                minibatch_indices = tf.concat([positive_indices, negatives_indices], concat_dim=0)
+            else:
+                minibatch_indices = tf.concat([positive_indices, negatives_indices], axis=0)
             minibatch_indices = tf.random_shuffle(minibatch_indices)

             minibatch_anchor_matched_gtboxes = tf.gather(anchor_matched_gtboxes,
                                                          minibatch_indices)
@@ -402,15 +413,23 @@ def rpn_losses(self):
             minibatch_decode_boxes = encode_and_decode.decode_boxes(encode_boxes=minibatch_encode_boxes,
                                                                     reference_boxes=minibatch_anchors,
                                                                     scale_factors=self.scale_factors)
-
-            tf.summary.image('/positive_anchors', positive_anchors_in_img)
-            tf.summary.image('/negative_anchors', negative_anchors_in_img)
+            tf_major_ver = int(tf.__version__.split(".")[0])
+            tf_minor_ver = int(tf.__version__.split(".")[1])
+            if(tf_major_ver==0 and tf_minor_ver<12):
+                tf.image_summary('/positive_anchors', positive_anchors_in_img)
+                tf.image_summary('/negative_anchors', negative_anchors_in_img)
+            else:
+                tf.summary.image('/positive_anchors', positive_anchors_in_img)
+                tf.summary.image('/negative_anchors', negative_anchors_in_img)

             top_k_scores, top_k_indices = tf.nn.top_k(minibatch_boxes_scores[:, 1],
                                                       k=5)

             top_detections_in_img = draw_box_with_color(self.img_batch,
                                                         tf.gather(minibatch_decode_boxes, top_k_indices),
                                                         text=tf.shape(top_k_scores)[0])
-            tf.summary.image('/top_5', top_detections_in_img)
+            if(tf_major_ver==0 and tf_minor_ver<12):
+                tf.image_summary('/top_5', top_detections_in_img)
+            else:
+                tf.summary.image('/top_5', top_detections_in_img)

             # losses
             with tf.variable_scope('rpn_location_loss'):
diff --git a/tools/eval.py b/tools/eval.py
index d22dc09..fe1e961 100644
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -136,10 +136,18 @@ def eval_ship(img_num):
         fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category = fast_rcnn.fast_rcnn_predict()

         # train
-        init_op = tf.group(
-            tf.global_variables_initializer(),
-            tf.local_variables_initializer()
-        )
+        tf_major_ver = int(tf.__version__.split(".")[0])
+        tf_minor_ver = int(tf.__version__.split(".")[1])
+        if(tf_major_ver==0 and tf_minor_ver<12):
+            init_op = tf.group(
+                tf.initialize_all_variables(),
+                tf.initialize_local_variables()
+            )
+        else:
+            init_op = tf.group(
+                tf.global_variables_initializer(),
+                tf.local_variables_initializer()
+            )

         restorer, restore_ckpt = restore_model.get_restorer()
diff --git a/tools/eval1.py b/tools/eval1.py
index ebfd139..8a6e944 100644
--- a/tools/eval1.py
+++ b/tools/eval1.py
@@ -141,11 +141,20 @@ def eval_ship(img_num, mode):
         if mode == 0:
             fast_rcnn_decode_boxes_rotate = get_horizen_minAreaRectangle(fast_rcnn_decode_boxes_rotate, False)
+
         # train
-        init_op = tf.group(
-            tf.global_variables_initializer(),
-            tf.local_variables_initializer()
-        )
+        tf_major_ver = int(tf.__version__.split(".")[0])
+        tf_minor_ver = int(tf.__version__.split(".")[1])
+        if(tf_major_ver==0 and tf_minor_ver<12):
+            init_op = tf.group(
+                tf.initialize_all_variables(),
+                tf.initialize_local_variables()
+            )
+        else:
+            init_op = tf.group(
+                tf.global_variables_initializer(),
+                tf.local_variables_initializer()
+            )

         restorer, restore_ckpt = restore_model.get_restorer()
diff --git a/tools/test.py b/tools/test.py
index 5f1a91c..cf2ee31 100644
--- a/tools/test.py
+++ b/tools/test.py
@@ -111,12 +111,22 @@ def test(img_num):
                                              level=cfgs.LEVEL)

         fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category = fast_rcnn.fast_rcnn_predict()
+
+        tf_major_ver = int(tf.__version__.split(".")[0])
+        tf_minor_ver = int(tf.__version__.split(".")[1])
+        if(tf_major_ver==0 and tf_minor_ver<12):
+            # train
+            init_op = tf.group(
+                tf.initialize_all_variables(),
+                tf.initialize_local_variables()
+            )
+        else:
+            # train
+            init_op = tf.group(
+                tf.global_variables_initializer(),
+                tf.local_variables_initializer()
+            )

-        # train
-        init_op = tf.group(
-            tf.global_variables_initializer(),
-            tf.local_variables_initializer()
-        )
         restorer, restore_ckpt = restore_model.get_restorer()
diff --git a/tools/test1.py b/tools/test1.py
index 6fa33a2..2a7a0f8 100644
--- a/tools/test1.py
+++ b/tools/test1.py
@@ -113,11 +113,20 @@ def test(img_num):
         fast_rcnn_decode_boxes_rotate, fast_rcnn_score_rotate, num_of_objects_rotate, detection_category_rotate = \
             fast_rcnn.fast_rcnn_predict()

-        # train
-        init_op = tf.group(
-            tf.global_variables_initializer(),
-            tf.local_variables_initializer()
-        )
+        tf_major_ver = int(tf.__version__.split(".")[0])
+        tf_minor_ver = int(tf.__version__.split(".")[1])
+        if(tf_major_ver==0 and tf_minor_ver<12):
+            # train
+            init_op = tf.group(
+                tf.initialize_all_variables(),
+                tf.initialize_local_variables()
+            )
+        else:
+            # train
+            init_op = tf.group(
+                tf.global_variables_initializer(),
+                tf.local_variables_initializer()
+            )

         restorer, restore_ckpt = restore_model.get_restorer()
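Note: the rpn_losses() hunk above selects between tf.image_summary and tf.summary.image by version number. Feature detection on the summary module works as well and keeps the gate in one place; a minimal sketch (the function name is an assumption, not part of the patch):

    import tensorflow as tf

    def image_summary_compat(tag, tensor):
        # tf.summary.image exists from TF 0.12 on; older releases only
        # provide the deprecated tf.image_summary.
        if hasattr(tf, "summary") and hasattr(tf.summary, "image"):
            return tf.summary.image(tag, tensor)
        return tf.image_summary(tag, tensor)

    # e.g.: image_summary_compat('/positive_anchors', positive_anchors_in_img)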