@@ -1,7 +1,5 @@
 import time
-
 import numpy as np
-
 import tensorflow as tf
 import tensorlayer as tl
 from tensorlayer.layers import Dense, Dropout, Input
@@ -19,7 +17,6 @@ class CustomModel(Model):
 
     def __init__(self):
         super(CustomModel, self).__init__()
-
         self.dropout1 = Dropout(keep=0.8)  #(self.innet)
         self.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784)  #(self.dropout1)
         self.dropout2 = Dropout(keep=0.8)  #(self.dense1)
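The trailing comments (`#(self.innet)`, `#(self.dropout1)`, `#(self.dense1)`) record which layer each one consumes, i.e. the layers are chained in declaration order inside `forward`. A minimal sketch of that wiring, assuming a hypothetical output layer `dense2` and an assumed use of the `foo` flag (the diff shows only the `forward(self, x, foo=None)` signature, not its body):

import tensorflow as tf
from tensorlayer.layers import Dense, Dropout
from tensorlayer.models import Model

class SketchMLP(Model):
    # Hypothetical stand-in for CustomModel; dense2 is an assumption, not part of this commit.
    def __init__(self):
        super(SketchMLP, self).__init__()
        self.dropout1 = Dropout(keep=0.8)                                  # keeps 80% of input units
        self.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784)  # 784 -> 800, ReLU
        self.dropout2 = Dropout(keep=0.8)
        self.dense2 = Dense(n_units=10, in_channels=800)                   # assumed logits layer

    def forward(self, x, foo=None):
        z = self.dropout1(x)   # each layer consumes the previous one's output,
        z = self.dense1(z)     # exactly as the trailing comments above indicate
        z = self.dropout2(z)
        out = self.dense2(z)
        if foo is not None:    # assumed: foo toggles some extra behaviour on the logits
            out = tf.nn.relu(out)
        return out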
@@ -52,27 +49,20 @@ def forward(self, x, foo=None):
 for epoch in range(n_epoch):  ## iterate the dataset n_epoch times
     start_time = time.time()
     ## iterate over the entire training set once (shuffle the data via training)
-
     for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
-
         MLP.train()  # enable dropout
-
         with tf.GradientTape() as tape:
             ## compute outputs
             _logits = MLP(X_batch, foo=1)
             ## compute loss and update model
             _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
-
         grad = tape.gradient(_loss, train_weights)
         optimizer.apply_gradients(zip(grad, train_weights))
 
     ## use training and evaluation sets to evaluate the model every print_freq epoch
     if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-
         MLP.eval()  # disable dropout
-
         print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
-
         train_loss, train_acc, n_iter = 0, 0, 0
         for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False):
             _logits = MLP(X_batch, foo=1)
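Between this hunk and the next, the diff elides the two lines that accumulate the per-batch metrics; the `train_loss / n_iter` and `train_acc / n_iter` averages printed below imply something like the following sketch, assuming argmax accuracy against integer labels:

# Assumed accumulation (elided from this commit view); the name kwarg is a guess.
train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))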
@@ -81,7 +71,6 @@ def forward(self, x, foo=None):
             n_iter += 1
         print("   train foo=1 loss: {}".format(train_loss / n_iter))
         print("   train foo=1 acc: {}".format(train_acc / n_iter))
-
         val_loss, val_acc, n_iter = 0, 0, 0
         for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False):
             _logits = MLP(X_batch, foo=1)  # is_train=False, disable dropout
@@ -90,7 +79,6 @@ def forward(self, x, foo=None):
             n_iter += 1
         print("   val foo=1 loss: {}".format(val_loss / n_iter))
         print("   val foo=1 acc: {}".format(val_acc / n_iter))
-
         val_loss, val_acc, n_iter = 0, 0, 0
         for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False):
             _logits = MLP(X_batch)  # is_train=False, disable dropout
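The commit view is truncated inside this last evaluation loop, where `MLP(X_batch)` leaves `foo` at its default `None`, so any `foo`-gated branch in `forward` is skipped. A sketch of how the loop presumably continues, mirroring the `foo=1` evaluation above (accumulation and name kwarg are assumptions):

for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False):
    _logits = MLP(X_batch)  # foo=None: extra branch in forward is skipped
    val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')  # name assumed
    val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
    n_iter += 1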