-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathFFNN3.py
More file actions
241 lines (173 loc) · 8.03 KB
/
FFNN3.py
File metadata and controls
241 lines (173 loc) · 8.03 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
from scipy.special import softmax
import numpy as np
import matplotlib.pyplot as plt
"""
Notação:
x -> input
L -> label
y -> rede output
A informação do numero de nuronios está na matriz dos pesos, de forma que o número de linhas da matriz W representa o umero de
nuronios da camada e o numero de colunas dessa matrix representa o numero de neuronios da cama anterior. No caso da primeira
camada o numero de colunas na matriz W representa a quantidade de dados no vertor de input
oraganização e fluxo da rede:
repete até que um parametro arbitrario respeite uma condição. z_1,z_2,...,z_n definem o número de camadas da rede.
z_1 = activation( (w_1 * X) + b_1 )
z_2 = actvation( (w_1 * Z_1) + b_2 )
.
.
.
z_n = actvation( (w_n * Z_n-1) + b_n )
y = actvation( (w_y * Z_n) + b_y )
loss = diferença( y, L )
atualização dos pessos (conjunto das matrizes W)
"""
class CreateModel:
    """Fully-connected feed-forward neural network trained with
    mini-batch gradient descent (backpropagation).

    Weight matrix ``layers[i]`` has shape (units of the previous layer,
    units of layer i), so a forward step is ``np.dot(data, W) + b``.
    Inputs are expected as 2-D float arrays of shape (batch, features).
    """

    def __init__(self):
        self.bias = []            # per-layer 1-D bias vectors
        self.layers = []          # per-layer 2-D weight matrices
        self.activations = []     # per-layer activation callables
        self.df_activations = []  # derivatives of those activations
        self.w_step = 0.1         # learning rate applied to weights
        self.b_step = 0.1         # learning rate applied to biases

    ### explicit method
    def add_layer(self, N_OF_NEURONS, SHAPE=False):
        """Append a dense layer with ``N_OF_NEURONS`` units.

        ``SHAPE`` is only needed for the FIRST layer and must be the
        input batch shape; its second entry (``SHAPE[1]``) gives the
        number of input features. Later layers infer their input width
        from the previous weight matrix. Weights and biases are
        initialised uniformly in [-1, 1).
        """
        if SHAPE:
            fan_in = SHAPE[1]                   # first layer: input feature count
        else:
            fan_in = self.layers[-1].shape[1]   # width of the previous layer
        self.layers.append(
            np.random.uniform(-1.0, 1.0, (fan_in, N_OF_NEURONS)).astype('float32'))
        self.bias.append(
            np.random.uniform(-1.0, 1.0, (N_OF_NEURONS,)).astype('float32'))

    ### explicit method
    def LoadNet(self, NET):
        """Restore a network; ``NET`` must be ``[LAYERS, BIAS, ACTIVATIONS]``."""
        if len(NET) == 3:
            self.layers = NET[0]
            self.bias = NET[1]
            self.activations = NET[2]
        else:
            raise Exception("NET size doesn't match")

    ### explicit method
    def add_activation(self, activation="relu"):
        """Register the activation (and its derivative) of the most
        recently added layer.

        Raises ``Exception`` for unsupported names.  BUG FIX: the
        original compared strings with ``is`` (identity), which only
        works by accident of CPython string interning; ``==``/lookup is
        the correct comparison.
        """
        supported = {
            "relu": (self.relu, self.df_relu),
            "sigmoid": (self.sigmoid, self.df_sigmoid),
            "iden": (self.iden, self.df_iden),
            "softmax": (self.Softmax, self.df_Softmax),
        }
        if activation not in supported:
            raise Exception('Non-supported activation function')
        act, d_act = supported[activation]
        self.activations.append(act)
        self.df_activations.append(d_act)

    # activation functions and their derivatives
    ### implicit method
    def Softmax(self, x):
        return softmax(x, axis=1)

    def df_Softmax(self, x):
        # BUG FIX: the original used softmax(x) (flattened default axis)
        # in one factor and softmax(x, axis=1) in the other; use the
        # row-wise axis consistently.
        s = softmax(x, axis=1)
        return s * (1.0 - s)

    def relu(self, x):
        return x * (x > 0)

    def df_relu(self, x):
        return 1 * (x > 0)

    def sigmoid(self, x):
        return 1. / (1. + np.exp(-x))

    def df_sigmoid(self, x):
        return np.exp(-x) / ((1 + np.exp(-x)) ** 2)

    def iden(self, x):
        return x

    def df_iden(self, x):
        # BUG FIX: add_activation("iden") referenced this method but it
        # was never defined, crashing with AttributeError at training
        # time. The identity's derivative is 1 everywhere.
        return np.ones_like(x, dtype='float32')

    def cost(self, d, t):
        """Mean squared error between prediction ``d`` and target ``t``."""
        return ((d - t) ** 2).sum() / len(d)

    def Predict(self, data):
        """Run a forward pass and return the network output.

        Also caches each layer's activated output in
        ``self.propagations`` (consumed by backpropagation).
        """
        self.propagations = []
        # first layer feeds on the raw input
        out = self.activations[0](np.dot(data, self.layers[0]) + self.bias[0])
        self.propagations.append(out)
        # each remaining layer feeds on the previous activated output
        for i in range(1, len(self.layers)):
            out = self.activations[i](np.dot(out, self.layers[i]) + self.bias[i])
            self.propagations.append(out)
        return out

    ### implicit method
    def forward_propagation(self, data):
        """Forward pass that additionally caches the activation
        derivatives (``self.derivatives``) needed by backpropagation."""
        self.derivatives = []
        self.propagations = []
        # first layer and its derivative w.r.t. the pre-activation Z
        Z = np.dot(data, self.layers[0]) + self.bias[0]
        self.derivatives.append(self.df_activations[0](Z))
        out = self.activations[0](Z)
        self.propagations.append(out)
        for i in range(1, len(self.layers)):
            Z = np.dot(out, self.layers[i]) + self.bias[i]
            self.derivatives.append(self.df_activations[i](Z))
            out = self.activations[i](Z)
            self.propagations.append(out)

    ### explicit method
    def back_propagation(self, data, target, batch_size):
        """One gradient-descent step over the cached forward pass.

        Computes dC/dW and dC/db for every layer (stored
        last-layer-first in ``self.dc_dw`` / ``self.dc_db``), updates
        weights and biases in place, and returns the post-update cost.
        NOTE(review): assumes at least two layers — a single-layer net
        would index ``propagations[-2]`` / ``layers[1]`` out of range.
        """
        self.dc_dw = []
        self.dc_db = []
        # output layer: elementwise product of error and activation derivative
        delta = (self.propagations[-1] - target) * self.derivatives[-1]
        self.dc_dw.append(np.dot(np.transpose(self.propagations[-2]), delta) / batch_size)
        self.dc_db.append(delta.sum(0) / batch_size)
        # hidden layers, from the second-to-last down to layer 1
        # (layer 0 is handled separately because its input is the raw data)
        for i in np.arange(len(self.layers) - 1, 1, -1):
            delta = np.dot(delta, np.transpose(self.layers[i])) * self.derivatives[i - 1]
            self.dc_dw.append(np.dot(np.transpose(self.propagations[i - 2]), delta) / batch_size)
            self.dc_db.append(delta.sum(0) / batch_size)
        # first layer: gradient w.r.t. the input data
        delta = np.dot(delta, np.transpose(self.layers[1])) * self.derivatives[0]
        self.dc_dw.append(np.dot(np.transpose(data), delta) / batch_size)
        self.dc_db.append(delta.sum(0) / batch_size)
        ### apply the updates (gradients were stored in reverse layer order)
        for i in range(len(self.layers)):
            self.layers[i] -= self.w_step * self.dc_dw[-i - 1]
            self.bias[i] -= self.b_step * self.dc_db[-i - 1]
        return self.cost(self.Predict(data), target)

    def fit(self, x, target, epochs, batch_size):
        """Train with shuffled mini-batches for ``epochs`` passes.

        ``x`` and ``target`` are the full datasets; both lengths must be
        equal and a multiple of ``batch_size``. Saves a checkpoint via
        :meth:`SaveNet` whenever a batch cost drops below 1e-4 and logs
        progress every 100 epochs.
        """
        self.x = x
        self.target = target
        d_size = len(self.x)
        if d_size % batch_size or len(self.target) % batch_size:
            raise Exception(
                'Data and Target size must be a multiple of batch_size (data_size % batch_size == 0 ) ')
        elif len(self.x) != len(self.target):
            raise Exception(
                'Data and target must be the same size')
        else:
            batches = d_size // batch_size
            for i in range(epochs):
                # reshuffle every epoch so batch composition varies
                indx = np.random.permutation(d_size)
                self.x, self.target = self.x[indx], self.target[indx]
                # split data and target into `batches` chunks of batch_size
                target_splited = np.split(self.target, batches)
                data_splited = np.split(self.x, batches)
                for j in range(batches):
                    self.forward_propagation(data_splited[j])
                    cost = self.back_propagation(data_splited[j], target_splited[j], batch_size)
                    if cost < 1.0e-4:
                        self.SaveNet(f'2dnet/2dNET_epoch{i}_{j}')
                        print(f'Saving in Epoch{i}_{j}, c={cost}')
                if ((i + 1) % 100 == 0):
                    print(f'Epoch: {i + 1} of {epochs}')
                    print(f'loss: {cost}\n')

    def SaveNet(self, NAME):
        """Serialise ``[layers, bias, activations]`` to ``<NAME>.npy``.

        BUG FIX: stored as an explicit dtype=object array — modern NumPy
        (>=1.24) refuses to build a ragged array implicitly from a
        nested list, so the original ``np.save(name, list)`` raised.
        """
        model_data = np.empty(3, dtype=object)
        model_data[0] = self.layers
        model_data[1] = self.bias
        model_data[2] = self.activations
        np.save(f'{NAME}.npy', model_data, allow_pickle=True)