# optimizer_functions.py
import numpy as np


class GradientDescent:
    def minimize(self, w, b, dW, dB, vW, vB, learning_rate=0.01):
        """Implements gradient descent to find a minimum of the cost function.

        Parameters:
        - w (numpy array): weights matrix
        - b (numpy array): bias matrix
        - dW (numpy array): gradient of the cost function w.r.t. the weights
        - dB (numpy array): gradient of the cost function w.r.t. the biases
        - vW (numpy array): optimizer state (weights); unused here, accepted so all
          optimizers share the same interface
        - vB (numpy array): optimizer state (biases); unused here, accepted so all
          optimizers share the same interface
        - learning_rate (float): learning rate used to update the parameters

        Returns:
        - w_updated (numpy array): updated weights
        - b_updated (numpy array): updated biases
        - vW, vB (numpy arrays): state, returned unchanged
        """
        # Plain gradient step: move each parameter against its gradient.
        w_updated = w - learning_rate * dW
        b_updated = b - learning_rate * dB
        return w_updated, b_updated, vW, vB
class Momentum:
    def minimize(self, w, b, dW, dB, vW, vB, learning_rate=0.01, beta=0.9):
        """Implements gradient descent with momentum to find a minimum of the cost function.

        Parameters:
        - w (numpy array): weights matrix
        - b (numpy array): bias matrix
        - dW (numpy array): gradient of the cost function w.r.t. the weights
        - dB (numpy array): gradient of the cost function w.r.t. the biases
        - vW (numpy array): optimizer state from the previous iteration (weights velocity)
        - vB (numpy array): optimizer state from the previous iteration (bias velocity)
        - learning_rate (float): learning rate used to update the parameters
        - beta (float): momentum term used to smooth the gradients

        Returns:
        - w_updated (numpy array): updated weights
        - b_updated (numpy array): updated biases
        - vW (numpy array): updated optimizer state (weights velocity)
        - vB (numpy array): updated optimizer state (bias velocity)
        """
        # Exponentially weighted moving average of the gradients.
        vW = beta * vW + (1 - beta) * dW
        vB = beta * vB + (1 - beta) * dB
        # Step in the direction of the smoothed gradient.
        w_updated = w - learning_rate * vW
        b_updated = b - learning_rate * vB
        return w_updated, b_updated, vW, vB
class RMSProp:
    def minimize(self, w, b, dW, dB, sW, sB, learning_rate=0.01, beta=0.9, epsilon=1e-07):
        """Implements gradient descent with RMSprop to find a minimum of the cost function.

        Parameters:
        - w (numpy array): weights matrix
        - b (numpy array): bias matrix
        - dW (numpy array): gradient of the cost function w.r.t. the weights
        - dB (numpy array): gradient of the cost function w.r.t. the biases
        - sW (numpy array): optimizer state from the previous iteration (weights)
        - sB (numpy array): optimizer state from the previous iteration (biases)
        - learning_rate (float): learning rate used to update the parameters
        - beta (float): decay term used to smooth the squared gradients
        - epsilon (float): small constant for numerical stability

        Returns:
        - w_updated (numpy array): updated weights
        - b_updated (numpy array): updated biases
        - sW (numpy array): updated optimizer state (weights)
        - sB (numpy array): updated optimizer state (biases)
        """
        # Exponentially weighted moving average of the squared gradients.
        sW = beta * sW + (1 - beta) * np.square(dW)
        sB = beta * sB + (1 - beta) * np.square(dB)
        # Scale each gradient by the root of its running squared-gradient average.
        w_updated = w - (learning_rate * dW) / (np.sqrt(sW) + epsilon)
        b_updated = b - (learning_rate * dB) / (np.sqrt(sB) + epsilon)
        return w_updated, b_updated, sW, sB
def Adam(w, b, dW, dB, learning_rate, beta1, beta2, epsilon):
    # Not implemented in the original module; see the hedged sketch below for one way
    # it could be written in the same class-based style as the optimizers above.
    pass
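

# ----------------------------------------------------------------------------------
# The code below is NOT part of the original module. The Adam stub above is left
# unimplemented, so this is a minimal sketch of one possible version, written in the
# same class-based style as GradientDescent/Momentum/RMSProp. The class name
# `AdamSketch`, the extra state arguments (vW, vB, sW, sB), and the iteration counter
# `t` used for bias correction are assumptions, since the original stub's signature
# does not carry optimizer state.
class AdamSketch:
    def minimize(self, w, b, dW, dB, vW, vB, sW, sB, t,
                 learning_rate=0.01, beta1=0.9, beta2=0.999, epsilon=1e-07):
        # First moment: exponentially weighted average of the gradients (as in Momentum).
        vW = beta1 * vW + (1 - beta1) * dW
        vB = beta1 * vB + (1 - beta1) * dB
        # Second moment: exponentially weighted average of the squared gradients
        # (as in RMSProp).
        sW = beta2 * sW + (1 - beta2) * np.square(dW)
        sB = beta2 * sB + (1 - beta2) * np.square(dB)
        # Bias-corrected estimates; t is the 1-based iteration count.
        vW_hat, vB_hat = vW / (1 - beta1 ** t), vB / (1 - beta1 ** t)
        sW_hat, sB_hat = sW / (1 - beta2 ** t), sB / (1 - beta2 ** t)
        # Adaptive step: smoothed gradient scaled by the root of its running magnitude.
        w_updated = w - (learning_rate * vW_hat) / (np.sqrt(sW_hat) + epsilon)
        b_updated = b - (learning_rate * vB_hat) / (np.sqrt(sB_hat) + epsilon)
        return w_updated, b_updated, vW, vB, sW, sB


# Minimal usage sketch (also not part of the original file): how a caller might
# initialize the optimizer state and run a single update step. Shapes and values
# are hypothetical.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    w, b = rng.standard_normal((3, 2)), np.zeros(2)
    dW, dB = rng.standard_normal((3, 2)), rng.standard_normal(2)

    # State is initialized to zeros with the same shapes as the parameters.
    vW, vB = np.zeros_like(w), np.zeros_like(b)
    sW, sB = np.zeros_like(w), np.zeros_like(b)

    w1, b1, vW, vB = Momentum().minimize(w, b, dW, dB, vW, vB, learning_rate=0.01)
    w2, b2, sW, sB = RMSProp().minimize(w, b, dW, dB, sW, sB, learning_rate=0.01)
    print("momentum step:", w1[0], "rmsprop step:", w2[0])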