## Regression in ML

- Types of Regressions in ML (a couple of these losses are sketched in NumPy after this list)
  - Huber
  - Quantile
  - Log-Cosh
  - Mean Absolute Error / L1
  - Mean Squared Error / L2
  - Mean Squared Logarithmic Error (MSLE)
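
A minimal NumPy sketch of a few of these losses, just to make the names above concrete. The function names and the Huber `delta` default are my own choices for illustration, not from any particular library:

```python
import numpy as np

def mse_loss(y_true, y_pred):
    # Mean Squared Error (L2): penalizes residuals quadratically
    return np.mean((y_true - y_pred) ** 2)

def mae_loss(y_true, y_pred):
    # Mean Absolute Error (L1): penalizes residuals linearly
    return np.mean(np.abs(y_true - y_pred))

def huber_loss(y_true, y_pred, delta=1.0):
    # Huber: quadratic for small residuals, linear for large ones
    residual = y_true - y_pred
    is_small = np.abs(residual) <= delta
    squared = 0.5 * residual ** 2
    linear = delta * (np.abs(residual) - 0.5 * delta)
    return np.mean(np.where(is_small, squared, linear))
```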

### Logistic Regression in Python

- Logistic regression is essentially a sigmoid function
- I plug the ordinary linear equation, y = mx + b, in for x
- The output is the probability of something happening
- 1 / (1 + e^(-(wx + b))), where w is my weight and b is my bias
- w and b are called my parameters
- To learn w and b I must first have a cost function
- I must calculate the derivative (gradient) of that cost and pick a learning rate; see the formulas after this list
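
For reference, these are the standard binary cross-entropy cost and its gradients; the `dw` and `db` terms in the implementation below are exactly these expressions:

```latex
J(w, b) = -\frac{1}{m} \sum_{i=1}^{m} \left[ y_i \log \hat{y}_i + (1 - y_i) \log (1 - \hat{y}_i) \right],
\qquad \hat{y}_i = \sigma(w \cdot x_i + b)

\frac{\partial J}{\partial w} = \frac{1}{m} X^{\top} (\hat{y} - y),
\qquad
\frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^{m} (\hat{y}_i - y_i)
```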

#### Implementation

```python
import numpy as np


class LogisticReg:
    def __init__(self, lear_rate=0.001, num_of_iters=1000):
        # store the hyperparameters
        self.lear_rate = lear_rate
        self.num_of_iters = num_of_iters
        # weights and bias are initialized in fit()
        self.weights = None
        self.bias = None

    def fit(self, X, y):
        '''
        Fit method that takes training samples and training labels.
        X is a numpy ndarray of size m by n,
        where m is the number of samples
        and n is the number of features for each sample.
        1. I must initialize the weights i.e. the parameters
        '''
        num_ofsamps, num_offeatures = X.shape
        self.weights = np.zeros(num_offeatures)
        self.bias = 0

        # gradient descent to iteratively update my weights
        for _ in range(self.num_of_iters):
            linear_model = np.dot(X, self.weights) + self.bias
            y_predic = self._sigmoidfunc(linear_model)

            # derivatives of the cost with respect to the weights and bias
            dw = (1 / num_ofsamps) * np.dot(X.T, (y_predic - y))
            db = (1 / num_ofsamps) * np.sum(y_predic - y)

            # update my parameters
            self.weights -= self.lear_rate * dw
            self.bias -= self.lear_rate * db

    def predict(self, X):
        '''
        Predict method: if the probability is > 0.5 predict class 1,
        otherwise predict class 0.
        '''
        # approximate my data with a linear model, then squash with the sigmoid
        linear_model = np.dot(X, self.weights) + self.bias
        y_predic = self._sigmoidfunc(linear_model)
        y_predic_class = [1 if i > 0.5 else 0 for i in y_predic]
        # return the predicted classes
        return y_predic_class

    # private helper: the sigmoid function
    def _sigmoidfunc(self, x):
        return 1 / (1 + np.exp(-x))
```
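
A quick usage sketch, assuming scikit-learn is available for a toy binary dataset and a train/test split; this driver code is my own and not part of the class above:

```python
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

# load a small binary-classification dataset and split it
data = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
    data.data, data.target, test_size=0.2, random_state=1234
)
# scaling the features first (e.g. with StandardScaler) may help convergence

model = LogisticReg(lear_rate=0.0001, num_of_iters=1000)
model.fit(X_train, y_train)
predictions = np.array(model.predict(X_test))

# fraction of test labels predicted correctly
accuracy = np.sum(predictions == y_test) / len(y_test)
print(f"accuracy: {accuracy:.3f}")
```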