-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathLR_miniBatch_datasetDataLoader.py
More file actions
executable file
·109 lines (82 loc) · 3.59 KB
/
LR_miniBatch_datasetDataLoader.py
File metadata and controls
executable file
·109 lines (82 loc) · 3.59 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
#!/usr/bin/env python
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
# Seed numpy's global RNG so the dataset noise below is reproducible
# (the noise is drawn with np.random, not torch, so this is the seed that matters).
np.random.seed(0)
# create noisy data
class noisyLineData(Dataset):
    """Dataset of points on the line y = slope*x + intercept plus Gaussian noise.

    Parameters:
        N         -- number of samples, evenly spaced over [xMin, xMax]
        slope     -- slope of the underlying line
        intercept -- intercept of the underlying line
        stdDev    -- standard deviation of the Gaussian noise added to y
        xMin, xMax -- endpoints of the x range (defaults keep the original -100..100)
    """
    def __init__(self, N=100, slope=2, intercept=3, stdDev=50, xMin=-100, xMax=100):
        self.x = torch.linspace(xMin, xMax, N)
        # Draw noise with numpy (so np.random.seed controls reproducibility) but
        # convert it to a float32 tensor: the original torch-tensor + float64-ndarray
        # mix silently promoted self.y to float64 while self.x stayed float32.
        noise = torch.from_numpy(np.random.normal(0, stdDev, N)).float()
        self.y = slope * self.x + intercept + noise
    def __getitem__(self, index):
        # Return a single (x, y) sample pair.
        return self.x[index], self.y[index]
    def __len__(self):
        return len(self.x)
# Build the dataset and a loader that yields 20-sample mini-batches (no shuffling).
data = noisyLineData()
trainloader = DataLoader(dataset = data, batch_size = 20)
#Global fit parameters w and b, and the prediction vector yhat
# Both start at 0.0 with requires_grad=True so autograd tracks them.
modelParams = {'w': torch.tensor(float(0),requires_grad = True),
               'b': torch.tensor(float(0),requires_grad = True) }
#Learning parameters
lr = 1e-4 #learning Rate
epochs = 4 # number of iterations
#general function which use and modify the global parameters
def forward(x, modelParams):
    """Prediction (forward) step: evaluate the linear model.

    Multiplies the input x by the weight modelParams['w'] and adds the
    bias modelParams['b'], returning the prediction yhat.
    """
    w, b = modelParams['w'], modelParams['b']
    return w * x + b
def criterion(yhat, y):
    """Mean-squared (L2) error between the prediction yhat and the data y.

    Squaring the residual weights outliers more heavily than lower powers would.
    """
    residual = yhat - y
    return (residual ** 2).mean()
def backward(loss, modelParams, lr):
    """Optimization (backward) step: one iteration of plain gradient descent.

    Computes the gradient of the loss w.r.t. every parameter tensor in
    modelParams and steps each one in the negative gradient direction,
    then clears the gradients (PyTorch accumulates them otherwise).
    """
    # Populate .grad on every tensor built with requires_grad=True.
    loss.backward()
    for param in modelParams.values():
        # Rebind .data out-of-place (not `param -= ...`): snapshots of the old
        # .data tensor held elsewhere must keep their values after this update.
        param.data = param.data - lr * param.grad.data
        # Clear the accumulated gradient ready for the next batch.
        param.grad.data.zero_()
##lists to save the parameters and errors
params = []       # [w, b, epoch] snapshots: one per epoch plus the final fit
error = []        # loss of each individual mini-batch
error_epoch = []  # full-dataset loss measured at the start of each epoch
for epoch in range(epochs):
    # Evaluate the current fit on the FULL dataset before this epoch's updates.
    yhat_total = forward(data.x, modelParams)
    error_epoch.append(float(criterion(yhat_total, data.y).data))
    # Storing .data directly is safe here only because backward() rebinds .data
    # to a new tensor rather than mutating it in place.
    params.append([modelParams['w'].data, modelParams['b'].data, epoch]) # saving data
    # mini-batch or stochastic gradient descent
    for x,y in trainloader:
        yhat = forward(x, modelParams) #major step 1/3
        loss = criterion(yhat, y) #major step 2/3
        error.append(loss.data) #saving data
        backward(loss, modelParams, lr) #major step 3/3
#saving data
# Final snapshot after training, labelled with `epochs` (== final epoch count).
params.append( [modelParams['w'].data, modelParams['b'].data, epochs] )
# NOTE: this final loss is measured on the LAST mini-batch only, not the full dataset.
error.append( criterion( yhat, y ).data )
params = np.array(params)
error = np.array(error)
#Simple display of the learning
print(error)        # per-batch losses
print(error_epoch)  # full-dataset loss per epoch
# Figure 1: data scatter with one fitted line per saved (w, b, epoch) snapshot.
plt.figure()
plt.plot(data.x.numpy(), data.y.numpy(), 'xk', label="data")
for param in params:
    plt.plot(data.x.numpy(),param[0]*data.x.numpy()+param[1], label = f'epoch {int(param[2])}')
plt.legend()
plt.title("mini-batch gradient descent with PyTorch")
plt.xlabel('x')
plt.ylabel('y')
# NOTE(review): assumes the ./figs directory already exists -- confirm before running.
plt.savefig('./figs/LR_miniBatch_datasetDataLoader.png')
# Figure 2: per-batch loss curve (this figure is not saved to disk).
plt.figure()
plt.plot(error)
plt.title("mini-batch gradient descent with PyTorch")
plt.xlabel('batch')
plt.ylabel('Loss')