-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy path1. CNN(NIN)_cifar10_Kreas.py
More file actions
128 lines (92 loc) · 4.27 KB
/
1. CNN(NIN)_cifar10_Kreas.py
File metadata and controls
128 lines (92 loc) · 4.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
# coding: utf-8
# In[1]:
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
# In[2]:
# Training hyperparameters for the NIN (Network-in-Network) CIFAR-10 model.
batch_size = 128          # samples per gradient update
num_classes = 10          # CIFAR-10 label count; also sets the final conv's filters
epochs = 200              # full passes over the training set
data_augmentation = True  # True -> train via ImageDataGenerator real-time augmentation
# In[3]:
# The data, shuffled and split between train and test sets:
# cifar10.load_data() returns uint8 image arrays and integer class labels.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)   # expected (50000, 32, 32, 3) per Keras docs
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# In[4]:
# Convert class vectors to binary class matrices (one-hot), as required by
# the categorical_crossentropy loss used below.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# In[5]:
# NIN architecture: three "mlpconv" stacks (a spatial conv followed by two
# 1x1 convs), the first two capped by overlapping max-pooling + dropout, the
# last reduced by an 8x8 average pool (acts as global average pooling on
# 8x8 feature maps) before the softmax over the 10 classes.
_nin_layers = [
    # -- mlpconv block 1 --
    Conv2D(192, (5, 5), padding='same', data_format='channels_last',
           input_shape=x_train.shape[1:],
           activation='relu'),  # 4D input tensor: (samples, rows, cols, channels)
    Conv2D(160, (1, 1), data_format='channels_last', activation='relu'),
    Conv2D(96, (1, 1), data_format='channels_last', activation='relu'),
    MaxPooling2D(pool_size=(3, 3), padding='same', strides=2,
                 data_format='channels_last'),
    Dropout(0.5),
    # -- mlpconv block 2 --
    Conv2D(192, (5, 5), padding='same', data_format='channels_last',
           activation='relu'),
    Conv2D(192, (1, 1), data_format='channels_last', activation='relu'),
    Conv2D(192, (1, 1), data_format='channels_last', activation='relu'),
    MaxPooling2D(pool_size=(3, 3), padding='same', strides=2,
                 data_format='channels_last'),
    Dropout(0.5),
    # -- mlpconv block 3: 1x1 conv down to num_classes feature maps --
    Conv2D(192, (3, 3), padding='same', data_format='channels_last',
           activation='relu'),
    Conv2D(192, (1, 1), data_format='channels_last', activation='relu'),
    Conv2D(10, (1, 1), data_format='channels_last', activation='relu'),
    AveragePooling2D(pool_size=(8, 8), strides=1,
                     data_format='channels_last'),
    Flatten(),
    Activation('softmax'),
]

model = Sequential()
for _layer in _nin_layers:
    model.add(_layer)
# In[ ]:
# RMSprop with per-update learning-rate decay.
# NOTE(review): lr=0.1 is unusually high for RMSprop (the Keras default is
# 0.001) — confirm this trains stably rather than diverging.
opt = keras.optimizers.rmsprop(lr=0.1, decay=1e-6)
#opt = keras.optimizers.Adam(lr=0.001)
# In[8]:
# Compile and train the model.
#
# Fixes relative to the original:
#   1. The pasted source had lost all indentation, so the if/else bodies
#      below were a SyntaxError; structure restored.
#   2. With augmentation enabled, ImageDataGenerator applies featurewise/
#      samplewise centering and std-normalization to every training batch,
#      but the raw x_test was passed as validation_data — validation metrics
#      were computed on a different input distribution.  The test set is now
#      passed through datagen.standardize() first.
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

# Scale pixel values from [0, 255] to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              shuffle=True)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=True,             # set input mean to 0 over the dataset
        samplewise_center=True,              # set each sample mean to 0
        featurewise_std_normalization=True,  # divide inputs by std of the dataset
        samplewise_std_normalization=True,   # divide each input by its std
        zca_whitening=False,                 # apply ZCA whitening
        rotation_range=0,                    # randomly rotate images (degrees, 0 to 180)
        width_shift_range=4 / 32,            # random horizontal shift of up to 4 px
        height_shift_range=4 / 32,           # random vertical shift of up to 4 px
        horizontal_flip=True,                # randomly flip images horizontally
        vertical_flip=False)                 # no vertical flips
    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)
    # Apply the same normalization to the validation set that datagen.flow()
    # applies to each training batch (standardize mutates in place -> copy).
    x_test_std = datagen.standardize(x_test.copy())
    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                        steps_per_epoch=x_train.shape[0] // batch_size,
                        epochs=epochs,
                        validation_data=(x_test_std, y_test))
# In[ ]: