-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathMode_Detection_CNN.py
More file actions
226 lines (174 loc) · 9.27 KB
/
Mode_Detection_CNN.py
File metadata and controls
226 lines (174 loc) · 9.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
#!/usr/bin/env python
"""
Mode Detection with Deep Neural Network.

Implemented with the TensorFlow library (version 1.6, installed with Anaconda
on Windows 10). The code reads the data files from a PostgreSQL database.
Please find 'points.csv' and 'labels.csv' on GitHub and import them into a
PostgreSQL db, or modify the code to read all the data from the csv files
directly.
"""
# ==============================================================================
__author__ = "Ali Yazdizadeh"
__date__ = "February 2018"
__email__ = "ali.yazdizadeh@mail.concordia.ca"
__python_version__ = "3.5.4"
# ==============================================================================
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
import time
# Record the wall-clock start so total runtime can be reported later.
start_time = time.time()

# Training hyper-parameters.
minibatch_size = 16
seg_size = 70
# Number of channels
num_channels = 5
# Number of classes
num_classes = 4

# Network architecture: one entry per network in the ensemble.
num_channels_ensemble = [5]
num_filters_ensemble = []
filters_size_ensemble = []
num_stride_maxpool_ensemble = []
num_stride_conv2d_ensemble = []
maxpool_size_ensemble = []
num_layers_ensemble = [5]
num_networks = len(num_layers_ensemble)

# Single 5-layer network: filter width 8 everywhere, AlexNet-like filter counts.
filters_size_ensemble.append([8, 8, 8, 8, 8])
num_filters_ensemble.append([96, 256, 384, 384, 256])
maxpool_size_ensemble.append([8, 8, 8, 8, 8])

# Stride 2 for every conv and every max-pool layer of every network.
for num_layers in num_layers_ensemble:
    num_stride_conv2d_ensemble.append([2] * num_layers)
for num_layers in num_layers_ensemble:
    num_stride_maxpool_ensemble.append([2] * num_layers)

# Conv-kernel shapes [width, in_channels, out_channels]; each layer's input
# channel count is the previous layer's filter count.
weights_ensemble = []
for i in range(len(filters_size_ensemble)):
    filters_size = filters_size_ensemble[i]
    num_filters = num_filters_ensemble[i]
    weights = []
    for index, f in enumerate(filters_size):
        in_ch = num_channels if index == 0 else num_filters[index - 1]
        weights.append([f, in_ch, num_filters[index]])
    weights_ensemble.append(weights)
def parameters_weights(num_input_channels=5):
    """Build and return the ensemble architecture hyper-parameter lists.

    Bug fix: the original appended to the *module-level* ensemble lists, so
    the module-level setup entries were duplicated on the first call and
    every subsequent call grew the lists again. All lists are now built
    fresh and locally, making the function idempotent.

    Arguments:
    num_input_channels -- number of input channels feeding the first conv
        layer (default 5, matching the module-level `num_channels`).

    Returns a 7-tuple:
    (num_layers_ensemble, filters_size_ensemble, num_filters_ensemble,
     maxpool_size_ensemble, num_stride_conv2d_ensemble,
     num_stride_maxpool_ensemble, weights_ensemble)
    """
    num_layers_ensemble = [5]
    filters_size_ensemble = [[8, 8, 8, 8, 8]]
    num_filters_ensemble = [[96, 256, 384, 384, 256]]
    maxpool_size_ensemble = [[8, 8, 8, 8, 8]]
    # Stride 2 for every conv and max-pool layer of every network.
    num_stride_conv2d_ensemble = [[2] * n for n in num_layers_ensemble]
    num_stride_maxpool_ensemble = [[2] * n for n in num_layers_ensemble]

    # Conv-kernel shapes [width, in_channels, out_channels]; each layer's
    # input channel count is the previous layer's filter count.
    weights_ensemble = []
    for filters_size, num_filters in zip(filters_size_ensemble, num_filters_ensemble):
        weights = []
        in_channels = num_input_channels
        for f, out_channels in zip(filters_size, num_filters):
            weights.append([f, in_channels, out_channels])
            in_channels = out_channels
        weights_ensemble.append(weights)
    return (num_layers_ensemble, filters_size_ensemble, num_filters_ensemble,
            maxpool_size_ensemble, num_stride_conv2d_ensemble,
            num_stride_maxpool_ensemble, weights_ensemble)
######################split data to train-test######################
def split_train_test(X_origin, Y_orig):
    """Randomly hold out 20% of the samples as a test set.

    Returns (X_train, X_test, Y_train, Y_test). `random_state=None` means
    a different split on every call.
    """
    split = train_test_split(X_origin, Y_orig, test_size=0.20, random_state=None)
    X_train_orig, X_test_orig, Y_train_orig, Y_test_orig = split
    return (X_train_orig, X_test_orig, Y_train_orig, Y_test_orig)
######################Convert labels vector to one-hot######################
def convert_to_one_hot(Y, C):
    """Expand the integer `class_label` field of Y into C-wide one-hot rows.

    Bug fix: the one-hot field width was hard-coded as '(4,)int8' even though
    the number of classes C is a parameter; the dtype now uses C, so the
    function works for any class count (unchanged behavior for C == 4).

    Arguments:
    Y -- record array with fields 'uuid', 'trip_id', 'segment_id' and an
        integer 'class_label' (values in [0, C)).
    C -- number of classes.

    Returns a new record array with the same id fields and 'class_label'
    replaced by its one-hot encoding.
    """
    onehot_dtype = [('uuid', 'S64'), ('trip_id', 'int8'), ('segment_id', 'int8'),
                    ('class_label', '({},)int8'.format(C))]
    Y_onehot = np.rec.array(np.zeros(Y.shape[0], dtype=onehot_dtype))
    Y_onehot.uuid = Y.uuid
    Y_onehot.trip_id = Y.trip_id
    Y_onehot.segment_id = Y.segment_id
    # Row i of eye(C) is the one-hot vector for label i.
    Y_onehot.class_label = np.eye(C)[Y.class_label.reshape(-1)]
    return Y_onehot
################initialize the parameters#################
def initialize_parameters(weights):
    """
    Initializes weight parameters to build a neural network with tensorflow.

    Defect fixed: the original wrote every variable into `globals()` as
    W1, W2, ... in addition to the returned dict; callers (see
    forward_propagation) only use the dict, so the variables are now stored
    in the dict directly without polluting the module namespace.

    Arguments:
    weights -- list of conv-kernel shapes, e.g. [[4, 4, 8], [2, 8, 16], ...]
        (one [filter_width, in_channels, out_channels] entry per layer).

    Returns:
    parameters -- a dictionary of tensors containing W1, W2, W3, ...
    """
    parameters = {}
    for index, layer_shape in enumerate(weights):
        name = 'W{}'.format(index + 1)
        # Xavier initialization keeps activation variance stable across layers.
        parameters[name] = tf.get_variable(name, layer_shape,
                                           initializer=tf.contrib.layers.xavier_initializer())
    return parameters
####################Forward propagation in tensorflow#########################
def forward_propagation(X, parameters, num_stride_conv2d, maxpool_size, num_stride_maxpool):
    """Build the conv/relu/maxpool stack plus the final fully connected layer.

    Defects fixed: the original stored every intermediate tensor in
    `globals()` (W1, Z1, A1, P1, ...) and duplicated the whole layer body
    in an if/else whose only difference was the layer input (X for layer 0,
    the previous pooling output otherwise). The loop now threads a local
    `layer_input` tensor through the layers instead.

    Arguments:
    X -- input tensor fed to the first conv layer.
    parameters -- dict {'W1': ..., 'W2': ...} from initialize_parameters.
    num_stride_conv2d -- per-layer conv strides.
    maxpool_size -- per-layer pooling window sizes.
    num_stride_maxpool -- per-layer pooling strides.

    Returns:
    final_Z -- logits of the fully connected output layer (num_classes wide).
    """
    num_layers = len(parameters)
    conv_outputs = []  # kept only for the diagnostic prints below
    layer_input = X
    for index in range(num_layers):
        W = parameters['W{}'.format(index + 1)]
        # CONV1D: stride from num_stride_conv2d, padding 'SAME'
        Z = tf.nn.conv1d(layer_input, filters=W,
                         stride=num_stride_conv2d[index], padding='SAME')
        # Leaky ReLU avoids dead units (small slope for negative inputs).
        A = tf.nn.leaky_relu(Z, alpha=0.02)
        # MAXPOOL: window from maxpool_size, stride from num_stride_maxpool.
        P = tf.layers.max_pooling1d(A, pool_size=maxpool_size[index],
                                    strides=num_stride_maxpool[index],
                                    padding='SAME')
        conv_outputs.append(Z)
        layer_input = P

    # FLATTEN the last pooling output, then one fully connected layer.
    flattened = tf.contrib.layers.flatten(layer_input)
    final_Z = tf.contrib.layers.fully_connected(flattened, num_classes,
                                                activation_fn=None)

    # Diagnostic prints of the tensor shapes, matching the original output.
    for Z in conv_outputs:
        print(Z)
    print(final_Z)
    print(flattened)
    return final_Z
####################Computing Cost with softmax_cross_entropy in tensorflow#########################
def compute_cost(final_Z, Y, cl_weights):
    """Compute the class-weighted softmax cross-entropy cost.

    Arguments:
    final_Z -- output of forward propagation (output of the last LINEAR unit),
        of shape (6, number of examples)
    Y -- "true" labels vector placeholder, one-hot, same shape as final_Z
    cl_weights -- class weights applied to the cross-entropy loss

    Returns:
    cost -- Tensor of the cost function
    """
    # Weighted cross-entropy; the unweighted variant would be
    # tf.nn.softmax_cross_entropy_with_logits(logits=final_Z, labels=Y).
    weighted_ce = tf.losses.softmax_cross_entropy(onehot_labels=Y,
                                                  logits=final_Z,
                                                  weights=cl_weights)
    return tf.reduce_mean(weighted_ce)