Commit 27011b5

Merge pull request #52 from qxin/tl_stu
[layers] change _shape to get_shape
2 parents 5de99fd + 38ee118
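
The change is mechanical: every read of the private `_shape` attribute on a TensorFlow tensor is replaced with the public `get_shape()` accessor, which returns the same static `TensorShape` through a documented API. A minimal sketch of the two spellings, assuming TensorFlow 1.x and a hypothetical placeholder `x` (not part of this commit):

    import tensorflow as tf

    # hypothetical input tensor, used only to illustrate the two accessors
    x = tf.placeholder(tf.float32, shape=[None, 784], name='x')

    # public accessor: documented and stable across releases
    n_in = int(x.get_shape()[-1])    # 784

    # private attribute: same value here, but an implementation detail
    # n_in = int(x._shape[-1])       # may break without notice

Nothing about the computed values changes; only the spelling of the shape lookup does.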

File tree

1 file changed: +18 -18 lines changed

tensorlayer/layers.py

Lines changed: 18 additions & 18 deletions
@@ -274,7 +274,7 @@ def __init__(
         name ='input_layer'
     ):
         Layer.__init__(self, inputs=inputs, name=name)
-        print(" tensorlayer:Instantiate InputLayer %s: %s" % (self.name, inputs._shape))
+        print(" tensorlayer:Instantiate InputLayer %s: %s" % (self.name, inputs.get_shape()))
         self.outputs = inputs
         self.all_layers = []
         self.all_params = []
@@ -589,7 +589,7 @@ def __init__(
         if self.inputs.get_shape().ndims != 2:
             raise Exception("The input dimension must be rank 2, please reshape or flatten it")

-        n_in = int(self.inputs._shape[-1])
+        n_in = int(self.inputs.get_shape()[-1])
         self.n_units = n_units
         print(" tensorlayer:Instantiate DenseLayer %s: %d, %s" % (self.name, self.n_units, act.__name__))
         with tf.variable_scope(name) as vs:
@@ -937,7 +937,7 @@ def __init__(
         self.inputs = layer.outputs
         if self.inputs.get_shape().ndims != 2:
             raise Exception("The input dimension must be rank 2")
-        n_in = int(self.inputs._shape[-1])
+        n_in = int(self.inputs.get_shape()[-1])
         self.n_units = n_units
         print(" tensorlayer:Instantiate DropconnectDenseLayer %s: %d, %s" % (self.name, self.n_units, act.__name__))

@@ -1379,15 +1379,15 @@ def __init__(
     ):
         Layer.__init__(self, name=name)
         self.inputs = layer.outputs
-        if len(self.inputs._shape) == 3:
+        if len(self.inputs.get_shape()) == 3:
             if is_scale:
-                size_h = size[0] * int(self.inputs._shape[0])
-                size_w = size[1] * int(self.inputs._shape[1])
+                size_h = size[0] * int(self.inputs.get_shape()[0])
+                size_w = size[1] * int(self.inputs.get_shape()[1])
                 size = [size_h, size_w]
-        elif len(self.inputs._shape) == 4:
+        elif len(self.inputs.get_shape()) == 4:
             if is_scale:
-                size_h = size[0] * int(self.inputs._shape[1])
-                size_w = size[1] * int(self.inputs._shape[2])
+                size_h = size[0] * int(self.inputs.get_shape()[1])
+                size_w = size[1] * int(self.inputs.get_shape()[2])
                 size = [size_h, size_w]
         else:
             raise Exception("Donot support shape %s" % self.inputs.get_shape())
@@ -1443,7 +1443,7 @@ def __init__(
         if act is None:
             act = tf.identity
         with tf.variable_scope(name) as vs:
-            shape = [filter_size[0], filter_size[1], int(self.inputs._shape[-1]), n_filter]
+            shape = [filter_size[0], filter_size[1], int(self.inputs.get_shape()[-1]), n_filter]
             filters = tf.get_variable(name='filter', shape=shape, initializer=W_init, **W_init_args )
             if b_init:
                 b = tf.get_variable(name='b', shape=(n_filter), initializer=b_init, **b_init_args )
@@ -1524,7 +1524,7 @@ def Conv2d(net, n_filter=32, filter_size=(3, 3), strides=(1, 1), act = None,
         act = tf.identity
     net = Conv2dLayer(net,
                        act = act,
-                       shape = [filter_size[0], filter_size[1], int(net.outputs._shape[-1]), n_filter], # 32 features for each 5x5 patch
+                       shape = [filter_size[0], filter_size[1], int(net.outputs.get_shape()[-1]), n_filter], # 32 features for each 5x5 patch
                        strides = [1, strides[0], strides[1], 1],
                        padding = padding,
                        W_init = W_init,
@@ -1557,7 +1557,7 @@ def DeConv2d(net, n_out_channel = 32, filter_size=(3, 3),
     batch_size = tf.shape(net.outputs)[0]
     net = DeConv2dLayer(layer = net,
                        act = act,
-                       shape = [filter_size[0], filter_size[1], n_out_channel, int(net.outputs._shape[-1])],
+                       shape = [filter_size[0], filter_size[1], n_out_channel, int(net.outputs.get_shape()[-1])],
                        output_shape = [batch_size, int(out_size[0]), int(out_size[1]), n_out_channel],
                        strides = [1, strides[0], strides[1], 1],
                        padding = padding,
@@ -2949,7 +2949,7 @@ def __init__(
         Layer.__init__(self, name=name)
         self.inputs = layer.outputs
         self.outputs = flatten_reshape(self.inputs, name=name)
-        self.n_units = int(self.outputs._shape[-1])
+        self.n_units = int(self.outputs.get_shape()[-1])
         print(" tensorlayer:Instantiate FlattenLayer %s: %d" % (self.name, self.n_units))
         self.all_layers = list(layer.all_layers)
         self.all_params = list(layer.all_params)
@@ -2994,7 +2994,7 @@ def __init__(
         Layer.__init__(self, name=name)
         self.inputs = layer.outputs
         self.outputs = tf.reshape(self.inputs, shape=shape, name=name)
-        print(" tensorlayer:Instantiate ReshapeLayer %s: %s" % (self.name, self.outputs._shape))
+        print(" tensorlayer:Instantiate ReshapeLayer %s: %s" % (self.name, self.outputs.get_shape()))
         self.all_layers = list(layer.all_layers)
         self.all_params = list(layer.all_params)
         self.all_drop = dict(layer.all_drop)
@@ -3100,7 +3100,7 @@ def __init__(
         for l in layer:
             self.inputs.append(l.outputs)
         self.outputs = tf.concat(concat_dim, self.inputs, name=name) # 1.2
-        self.n_units = int(self.outputs._shape[-1])
+        self.n_units = int(self.outputs.get_shape()[-1])
         print(" tensorlayer:Instantiate ConcatLayer %s, %d" % (self.name, self.n_units))

         self.all_layers = list(layer[0].all_layers)
@@ -3150,12 +3150,12 @@ def __init__(
     ):
         Layer.__init__(self, name=name)

-        print(" tensorlayer:Instantiate ElementwiseLayer %s: %s, %s" % (self.name, layer[0].outputs._shape, combine_fn.__name__))
+        print(" tensorlayer:Instantiate ElementwiseLayer %s: %s, %s" % (self.name, layer[0].outputs.get_shape(), combine_fn.__name__))

         self.outputs = layer[0].outputs
         # print(self.outputs._shape, type(self.outputs._shape))
         for l in layer[1:]:
-            assert str(self.outputs._shape) == str(l.outputs._shape), "Hint: the input shapes should be the same. %s != %s" % (self.outputs._shape , str(l.outputs._shape))
+            assert str(self.outputs.get_shape()) == str(l.outputs.get_shape()), "Hint: the input shapes should be the same. %s != %s" % (self.outputs.get_shape() , str(l.outputs.get_shape()))
             self.outputs = combine_fn(self.outputs, l.outputs, name=name)

         self.all_layers = list(layer[0].all_layers)
@@ -3269,7 +3269,7 @@ def __init__(
         if channel_shared:
             w_shape = (1,)
         else:
-            w_shape = int(self.inputs._shape[-1])
+            w_shape = int(self.inputs.get_shape()[-1])

         # with tf.name_scope(name) as scope:
         with tf.variable_scope(name) as vs:
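
Note that `get_shape()` only reports the static shape known at graph-construction time, which is why the DeConv2d hunk above still reads the batch dimension with `tf.shape(net.outputs)[0]`: a batch size declared as `None` has no static value and must be resolved at run time. A small illustration, again assuming TensorFlow 1.x and a hypothetical placeholder (not part of this commit):

    import tensorflow as tf

    x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])

    static = x.get_shape()        # static shape [None, 28, 28, 1], known while building the graph
    channels = int(static[-1])    # 1 -- a plain Python int, usable in filter/shape lists

    dynamic = tf.shape(x)         # int32 tensor, resolved only when the graph runs
    batch = dynamic[0]            # use this when the batch dimension is None statically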
