Commit 93501a5

add examples
1 parent 649fba4 commit 93501a5

10 files changed: +775 −0 lines changed


examples/mnist/mnist.py

Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
import os
import sys
file_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)

import candle_keras as candle

# thread optimization: pin TensorFlow's thread pools when the launcher
# exports NUM_INTRA_THREADS / NUM_INTER_THREADS
from keras import backend as K
if K.backend() == 'tensorflow' and 'NUM_INTRA_THREADS' in os.environ and 'NUM_INTER_THREADS' in os.environ:
    import tensorflow as tf
    sess = tf.Session(config=tf.ConfigProto(inter_op_parallelism_threads=int(os.environ['NUM_INTER_THREADS']),
                                            intra_op_parallelism_threads=int(os.environ['NUM_INTRA_THREADS'])))
    K.set_session(sess)


additional_definitions = None
required = None

class MNIST(candle.Benchmark):
    def set_locals(self):
        if required is not None:
            self.required = set(required)
        if additional_definitions is not None:
            self.additional_definitions = additional_definitions
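For reference, a minimal sketch of how the thread-pinning block above gets exercised: the two environment variables are expected to be set by the job script or scheduler before mnist is imported. The values below are illustrative placeholders, not part of the commit.

import os

# Hypothetical values -- a launcher would normally export these before the
# interpreter starts; they must be set before `import mnist` runs, because
# the session is configured at import time.
os.environ['NUM_INTRA_THREADS'] = '4'
os.environ['NUM_INTER_THREADS'] = '2'

import mnist  # triggers K.set_session(...) when the TensorFlow backend is active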

examples/mnist/mnist_cnn.py

Lines changed: 70 additions & 0 deletions
@@ -0,0 +1,70 @@
'''Trains a simple convnet on the MNIST dataset.

Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''

from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K

batch_size = 128
num_classes = 10
epochs = 12

# input image dimensions
img_rows, img_cols = 28, 28

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

examples/mnist/mnist_cnn_candle.py

Lines changed: 116 additions & 0 deletions
@@ -0,0 +1,116 @@
import mnist
import candle_keras as candle

from keras.callbacks import CSVLogger
from keras import backend as K

def initialize_parameters():
    mnist_common = mnist.MNIST(mnist.file_path,
                               'mnist_params.txt',
                               'keras',
                               prog='mnist_cnn',
                               desc='MNIST CNN example'
                               )

    # Initialize parameters
    gParameters = candle.initialize_parameters(mnist_common)
    csv_logger = CSVLogger('{}/params.log'.format(gParameters))

    return gParameters

def run(gParameters):
    ##########################################
    # Your DL starts here. See mnist_cnn.py  #
    ##########################################

    '''Trains a simple convnet on the MNIST dataset.

    Gets to 99.25% test accuracy after 12 epochs
    (there is still a lot of margin for parameter tuning).
    16 seconds per epoch on a GRID K520 GPU.
    '''

    # from __future__ import print_function

    import keras
    from keras.datasets import mnist
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Flatten
    from keras.layers import Conv2D, MaxPooling2D
    from keras import backend as K

    batch_size = gParameters['batch_size']
    num_classes = 10
    epochs = gParameters['epochs']

    # read from the config file; not yet wired into the hard-coded CNN below
    activation = gParameters['activation']
    optimizer = gParameters['optimizer']

    # input image dimensions
    img_rows, img_cols = 28, 28

    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3),
                     activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])

    history = model.fit(x_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    ##########################################
    # End of mnist_cnn.py ####################
    ##########################################

    return history

def main():

    gParameters = initialize_parameters()
    run(gParameters)

if __name__ == '__main__':
    main()
    try:
        K.clear_session()
    except AttributeError:
        pass
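As a usage note, run() only reads four keys from its argument, so a quick smoke test can bypass candle.initialize_parameters and pass a plain dict instead. A minimal sketch, assuming candle_keras is importable (mnist.py appends ../../common to sys.path); the parameter values are illustrative placeholders, not from the commit.

# Hypothetical quick check of mnist_cnn_candle.run() without the candle CLI machinery.
# The keys mirror mnist_params.txt; the values here are made up for a short run.
import mnist_cnn_candle

smoke_params = {
    'batch_size': 128,
    'epochs': 1,            # keep the smoke test short
    'activation': 'relu',
    'optimizer': 'rmsprop',
}
history = mnist_cnn_candle.run(smoke_params)
print(sorted(history.history.keys()))   # loss/accuracy curves recorded by model.fit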

examples/mnist/mnist_mlp.py

Lines changed: 56 additions & 0 deletions
@@ -0,0 +1,56 @@
'''Trains a simple deep NN on the MNIST dataset.

Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''

from __future__ import print_function

import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop

batch_size = 128
num_classes = 10
epochs = 20

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

examples/mnist/mnist_mlp_candle.py

Lines changed: 98 additions & 0 deletions
@@ -0,0 +1,98 @@
import mnist
import candle_keras as candle

from keras.callbacks import CSVLogger
from keras import backend as K

def initialize_parameters():
    mnist_common = mnist.MNIST(mnist.file_path,
                               'mnist_params.txt',
                               'keras',
                               prog='mnist_mlp',
                               desc='MNIST example'
                               )

    # Initialize parameters
    gParameters = candle.initialize_parameters(mnist_common)
    csv_logger = CSVLogger('{}/params.log'.format(gParameters))

    return gParameters

def run(gParameters):
    ##########################################
    # Your DL starts here. See mnist_mlp.py  #
    ##########################################
    '''Trains a simple deep NN on the MNIST dataset.

    Gets to 98.40% test accuracy after 20 epochs
    (there is *a lot* of margin for parameter tuning).
    2 seconds per epoch on a K520 GPU.
    '''

    # from __future__ import print_function

    import keras
    from keras.datasets import mnist
    from keras.models import Sequential
    from keras.layers import Dense, Dropout
    from keras.optimizers import RMSprop

    batch_size = gParameters['batch_size']
    num_classes = 10
    epochs = gParameters['epochs']

    activation = gParameters['activation']
    optimizer = gParameters['optimizer']

    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(Dense(512, activation=activation, input_shape=(784,)))
    model.add(Dropout(0.2))
    model.add(Dense(512, activation=activation))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))

    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    history = model.fit(x_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        validation_data=(x_test, y_test))
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    ##########################################
    # End of mnist_mlp.py ####################
    ##########################################
    return history

def main():
    gParameters = initialize_parameters()
    run(gParameters)

if __name__ == '__main__':
    main()
    try:
        K.clear_session()
    except AttributeError:
        pass
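One design detail worth noting: optimizer here is the plain string from mnist_params.txt (e.g. 'rmsprop'), and passing it straight to model.compile works because Keras resolves string identifiers to optimizer instances with default hyperparameters. A minimal illustration of that lookup, not part of the commit:

from keras import optimizers

# Keras maps the string identifier to an optimizer object with default settings;
# optimizer='rmsprop' in model.compile relies on the same resolution.
opt = optimizers.get('rmsprop')
print(type(opt).__name__)   # RMSprop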

examples/mnist/mnist_params.txt

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
[Global_Params]
epochs=20
batch_size=128
activation='relu'
optimizer='rmsprop'
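For orientation, this is an INI-style file with a single [Global_Params] section; candle.initialize_parameters reads it (plus any command-line overrides) into the gParameters dict consumed by run() above. A rough, hypothetical stand-in using Python's configparser, just to show the key/value mapping; this is not candle's actual parsing code.

import ast
import configparser

# Illustrative only: mimic the mapping from mnist_params.txt entries
# to the gParameters dict used by the candle examples above.
config = configparser.ConfigParser()
config.read('mnist_params.txt')

gParameters = {}
for key, raw in config['Global_Params'].items():
    try:
        gParameters[key] = ast.literal_eval(raw)   # 20 -> int, 'relu' -> str
    except (ValueError, SyntaxError):
        gParameters[key] = raw

print(gParameters)   # {'epochs': 20, 'batch_size': 128, 'activation': 'relu', 'optimizer': 'rmsprop'}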

0 commit comments

Comments
 (0)