Skip to content

Commit fcfcac5

Browse files
committed
shorten mnist example / update depthwise conv 2d
1 parent ad28732 commit fcfcac5

File tree

2 files changed: +24 additions, -21 deletions

2 files changed: +24 additions, -21 deletions

example/tutorial_mnist.py

Lines changed: 14 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -61,13 +61,13 @@ def main_test_layers(model='relu'):
6161
network = tl.layers.InputLayer(x, name='input')
6262
network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1')
6363
network = tl.layers.DenseLayer(network, n_units=800,
64-
act = tf.nn.relu, name='relu1')
64+
act=tf.nn.relu, name='relu1')
6565
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop2')
6666
network = tl.layers.DenseLayer(network, n_units=800,
67-
act = tf.nn.relu, name='relu2')
67+
act=tf.nn.relu, name='relu2')
6868
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop3')
6969
network = tl.layers.DenseLayer(network, n_units=10,
70-
act = tf.identity,
70+
act=tf.identity,
7171
name='output')
7272
elif model == 'dropconnect':
7373
network = tl.layers.InputLayer(x, name='input')
@@ -79,7 +79,7 @@ def main_test_layers(model='relu'):
7979
name='dropconnect_relu2')
8080
network = tl.layers.DropconnectDenseLayer(network, keep = 0.5,
8181
n_units=10,
82-
act = tf.identity,
82+
act=tf.identity,
8383
name='output')
8484

8585
# To print all attributes of a Layer.
@@ -295,20 +295,18 @@ def main_test_stacked_denoise_AE(model='relu'):
295295
network = tl.layers.DropoutLayer(network, keep=0.5, name='denoising1')
296296
# 1st layer
297297
network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1')
298-
network = tl.layers.DenseLayer(network, n_units=800, act = act, name=model+'1')
298+
network = tl.layers.DenseLayer(network, n_units=800, act=act, name=model+'1')
299299
x_recon1 = network.outputs
300300
recon_layer1 = tl.layers.ReconLayer(network, x_recon=x, n_units=784,
301-
act = act_recon, name='recon_layer1')
301+
act=act_recon, name='recon_layer1')
302302
# 2nd layer
303303
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop2')
304304
network = tl.layers.DenseLayer(network, n_units=800, act = act, name=model+'2')
305305
recon_layer2 = tl.layers.ReconLayer(network, x_recon=x_recon1, n_units=800,
306-
act = act_recon, name='recon_layer2')
306+
act=act_recon, name='recon_layer2')
307307
# 3rd layer
308308
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop3')
309-
network = tl.layers.DenseLayer(network, n_units=10,
310-
act = tf.identity,
311-
name='output')
309+
network = tl.layers.DenseLayer(network, 10, act=tf.identity, name='output')
312310

313311
# Define fine-tune process
314312
y = network.outputs
@@ -485,23 +483,20 @@ def main_test_cnn_layer():
485483
# pool = tf.nn.max_pool,
486484
# name ='pool2',) # output: (?, 7, 7, 64)
487485
## Simplified conv API for beginner (the same with the above layers)
488-
network = tl.layers.Conv2d(network, n_filter=32, filter_size=(5, 5), strides=(1, 1),
486+
network = tl.layers.Conv2d(network, 32, (5, 5), (1, 1),
489487
act=tf.nn.relu, padding='SAME', name='cnn1')
490-
network = tl.layers.MaxPool2d(network, filter_size=(2, 2), strides=(2, 2),
488+
network = tl.layers.MaxPool2d(network, (2, 2), (2, 2),
491489
padding='SAME', name='pool1')
492-
network = tl.layers.Conv2d(network, n_filter=64, filter_size=(5, 5), strides=(1, 1),
490+
network = tl.layers.Conv2d(network, 64, (5, 5), (1, 1),
493491
act=tf.nn.relu, padding='SAME', name='cnn2')
494-
network = tl.layers.MaxPool2d(network, filter_size=(2, 2), strides=(2, 2),
492+
network = tl.layers.MaxPool2d(network, (2, 2), (2, 2),
495493
padding='SAME', name='pool2')
496494
## end of conv
497495
network = tl.layers.FlattenLayer(network, name='flatten')
498496
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop1')
499-
network = tl.layers.DenseLayer(network, n_units=256,
500-
act = tf.nn.relu, name='relu1')
497+
network = tl.layers.DenseLayer(network, 256, act=tf.nn.relu, name='relu1')
501498
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop2')
502-
network = tl.layers.DenseLayer(network, n_units=10,
503-
act = tf.identity,
504-
name='output')
499+
network = tl.layers.DenseLayer(network, 10, act=tf.identity, name='output')
505500

506501
y = network.outputs
507502

tensorlayer/layers.py

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2489,7 +2489,7 @@ def __init__(
24892489
# n_filter = 32,
24902490
channel_multiplier = 3,
24912491
shape = (3, 3),
2492-
strides = (1, 1, 1, 1),
2492+
strides = (1, 1),
24932493
act = None,
24942494
padding='SAME',
24952495
W_init = tf.truncated_normal_initializer(stddev=0.02),
@@ -2500,10 +2500,13 @@ def __init__(
25002500
):
25012501
Layer.__init__(self, name=name)
25022502
self.inputs = layer.outputs
2503+
2504+
if act is None:
2505+
act = tf.identity
2506+
25032507
print(" [TL] DepthwiseConv2d %s: shape:%s strides:%s pad:%s act:%s" %
25042508
(self.name, str(shape), str(strides), padding, act.__name__))
25052509

2506-
assert len(strides) == 4, "len(strides) should be 4."
25072510
if act is None:
25082511
act = tf.identity
25092512

@@ -2515,6 +2518,11 @@ def __init__(
25152518

25162519
shape = [shape[0], shape[1], pre_channel, channel_multiplier]
25172520

2521+
if len(strides) == 2:
2522+
strides = [1, strides[0], strides[1], 1]
2523+
2524+
assert len(strides) == 4, "len(strides) should be 4."
2525+
25182526
with tf.variable_scope(name) as vs:
25192527
W = tf.get_variable(name='W_sepconv2d', shape=shape, initializer=W_init, **W_init_args ) # [filter_height, filter_width, in_channels, channel_multiplier]
25202528
if b_init:

Comments (0)