@@ -17,17 +17,16 @@
 network = tl.layers.InputLayer(x, name='input')
 network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1')
 network = tl.layers.DenseLayer(network, n_units=800,
-                               act = tf.nn.relu, name='relu1')
+                               act = tf.nn.relu, name='relu1')
 network = tl.layers.DropoutLayer(network, keep=0.5, name='drop2')
 network = tl.layers.DenseLayer(network, n_units=800,
-                               act = tf.nn.relu, name='relu2')
+                               act = tf.nn.relu, name='relu2')
 network = tl.layers.DropoutLayer(network, keep=0.5, name='drop3')
 # the softmax is implemented internally in tl.cost.cross_entropy(y, y_) to
 # speed up computation, so we use identity here.
 # see tf.nn.sparse_softmax_cross_entropy_with_logits()
 network = tl.layers.DenseLayer(network, n_units=10,
-                               act = tf.identity,
-                               name='output')
+                               act = tf.identity, name='output')

 # define cost function and metric.
 y = network.outputs
@@ -38,8 +37,8 @@

 # define the optimizer
 train_params = network.all_params
-train_op = tf.train.AdamOptimizer(learning_rate=0.0001, beta1=0.9, beta2=0.999,
-                                  epsilon=1e-08, use_locking=False).minimize(cost, var_list=train_params)
+train_op = tf.train.AdamOptimizer(learning_rate=0.0001
+                                  ).minimize(cost, var_list=train_params)

 # initialize all variables in the session
 tl.layers.initialize_global_variables(sess)
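For reference, the arguments dropped from AdamOptimizer (beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False) are TensorFlow's default values, so the shortened call trains identically. Below is a minimal sketch of how the block above is typically driven in a TensorLayer 1.x training loop: dropout is switched on by feeding network.all_drop during training and switched off with tl.utils.dict_to_one for evaluation. The data loading, placeholders, epoch count, batch size, and the cost line are assumptions based on the surrounding tutorial's conventions, not lines shown in this diff.

import tensorflow as tf
import tensorlayer as tl

sess = tf.InteractiveSession()

# Assumed setup from earlier in the tutorial (not part of this diff):
# flattened 28x28 MNIST images and integer class labels.
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y_ = tf.placeholder(tf.int64, shape=[None], name='y_')

# Network as defined in the diff above.
network = tl.layers.InputLayer(x, name='input')
network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1')
network = tl.layers.DenseLayer(network, n_units=800, act=tf.nn.relu, name='relu1')
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop2')
network = tl.layers.DenseLayer(network, n_units=800, act=tf.nn.relu, name='relu2')
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop3')
network = tl.layers.DenseLayer(network, n_units=10, act=tf.identity, name='output')

# y holds raw logits; the softmax is applied inside tl.cost.cross_entropy.
# The cost definition sits in the context lines folded out of the hunk, so
# this line is an assumption consistent with the in-code comment above.
y = network.outputs
cost = tl.cost.cross_entropy(y, y_, name='cost')

# Optimizer and variable initialization, as in the diff.
train_params = network.all_params
train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params)
tl.layers.initialize_global_variables(sess)

for epoch in range(5):  # epoch count chosen arbitrarily for the sketch
    for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size=128, shuffle=True):
        feed_dict = {x: X_batch, y_: y_batch}
        feed_dict.update(network.all_drop)   # enable dropout during training
        sess.run(train_op, feed_dict=feed_dict)

    # Disable dropout for evaluation by feeding keep probabilities of 1.0.
    dp_dict = tl.utils.dict_to_one(network.all_drop)
    feed_dict = {x: X_val, y_: y_val}
    feed_dict.update(dp_dict)
    print("epoch %d  val cost: %f" % (epoch, sess.run(cost, feed_dict=feed_dict)))

Keeping act=tf.identity on the output layer is what makes this arrangement work: y stays as raw logits, which is what tf.nn.sparse_softmax_cross_entropy_with_logits (used inside tl.cost.cross_entropy) expects.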