
Commit 1e76100

Update tutorial_mlp_dropout1.py
1 parent bba7ed8 commit 1e76100

File tree

1 file changed: +5 -6 lines changed


example/tutorial_mlp_dropout1.py

Lines changed: 5 additions & 6 deletions
@@ -17,17 +17,16 @@
 network = tl.layers.InputLayer(x, name='input')
 network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1')
 network = tl.layers.DenseLayer(network, n_units=800,
-                                act = tf.nn.relu, name='relu1')
+                                act = tf.nn.relu, name='relu1')
 network = tl.layers.DropoutLayer(network, keep=0.5, name='drop2')
 network = tl.layers.DenseLayer(network, n_units=800,
-                                act = tf.nn.relu, name='relu2')
+                                act = tf.nn.relu, name='relu2')
 network = tl.layers.DropoutLayer(network, keep=0.5, name='drop3')
 # the softmax is implemented internally in tl.cost.cross_entropy(y, y_) to
 # speed up computation, so we use identity here.
 # see tf.nn.sparse_softmax_cross_entropy_with_logits()
 network = tl.layers.DenseLayer(network, n_units=10,
-                                act = tf.identity,
-                                name='output')
+                                act = tf.identity, name='output')
 
 # define cost function and metric.
 y = network.outputs
@@ -38,8 +37,8 @@
 
 # define the optimizer
 train_params = network.all_params
-train_op = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999,
-                                  epsilon=1e-08, use_locking=False).minimize(cost, var_list=train_params)
+train_op = tf.train.AdamOptimizer(learning_rate=0.0001
+                                  ).minimize(cost, var_list=train_params)
 
 # initialize all variables in the session
 tl.layers.initialize_global_variables(sess)
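
For context, the DropoutLayers touched by this diff are controlled through feed_dict at run time. Below is a minimal sketch, not part of the commit, of the usual TensorLayer 1.x pattern: network.all_drop holds the keep-probability placeholders (0.8 / 0.5 / 0.5 above), which are fed as-is for a training step and forced to 1.0 via tl.utils.dict_to_one for evaluation. The names sess, X_batch, y_batch and acc are assumed to be defined elsewhere in the tutorial script.

# Sketch only: assumes `sess`, `X_batch`, `y_batch`, `acc`, and the
# placeholders `x`, `y_` exist as in the rest of tutorial_mlp_dropout1.py.

# training step: feed the keep probabilities, so dropout is active
feed_dict = {x: X_batch, y_: y_batch}
feed_dict.update(network.all_drop)
sess.run(train_op, feed_dict=feed_dict)

# evaluation step: set every keep probability to 1.0, so dropout is disabled
dp_dict = tl.utils.dict_to_one(network.all_drop)
feed_dict = {x: X_batch, y_: y_batch}
feed_dict.update(dp_dict)
err, ac = sess.run([cost, acc], feed_dict=feed_dict)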
