@@ -15,68 +15,68 @@ def __init__(
         self.theta: np.ndarray = None

     def feature_scaling(
-        self, x: np.ndarray
+        self, features: np.ndarray
     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
-        mean = np.mean(x, axis=0)
-        std = np.std(x, axis=0)
+        mean = np.mean(features, axis=0)
+        std = np.std(features, axis=0)

         # avoid division by zero for constant features (std = 0)
         std[std == 0] = 1  # set std=1 for constant features to avoid NaN

-        x_scaled = (x - mean) / std
-        return x_scaled, mean, std
+        features_scaled = (features - mean) / std
+        return features_scaled, mean, std

-    def fit(self, x: np.ndarray, y: np.ndarray) -> None:
-        x_scaled, mean, std = self.feature_scaling(x)
-        m, n = x_scaled.shape
+    def fit(self, features: np.ndarray, target: np.ndarray) -> None:
+        features_scaled, mean, std = self.feature_scaling(features)
+        m, n = features_scaled.shape
         self.theta = np.zeros(n)  # initializing weights to zeros

         for _ in range(self.num_iterations):
-            predictions = x_scaled.dot(self.theta)
-            error = predictions - y
+            predictions = features_scaled.dot(self.theta)
+            error = predictions - target

             # computing gradient with L2 regularization
             gradient = (
-                x_scaled.T.dot(error) + self.regularization_param * self.theta
+                features_scaled.T.dot(error) + self.regularization_param * self.theta
             ) / m
             self.theta -= self.alpha * gradient  # updating weights

-    def predict(self, x: np.ndarray) -> np.ndarray:
-        x_scaled, _, _ = self.feature_scaling(x)
-        return x_scaled.dot(self.theta)
+    def predict(self, features: np.ndarray) -> np.ndarray:
+        features_scaled, _, _ = self.feature_scaling(features)
+        return features_scaled.dot(self.theta)

-    def compute_cost(self, x: np.ndarray, y: np.ndarray) -> float:
-        x_scaled, _, _ = self.feature_scaling(x)
-        m = len(y)
+    def compute_cost(self, features: np.ndarray, target: np.ndarray) -> float:
+        features_scaled, _, _ = self.feature_scaling(features)
+        m = len(target)

-        predictions = x_scaled.dot(self.theta)
-        cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + (
+        predictions = features_scaled.dot(self.theta)
+        cost = (1 / (2 * m)) * np.sum((predictions - target) ** 2) + (
             self.regularization_param / (2 * m)
         ) * np.sum(self.theta**2)
         return cost

-    def mean_absolute_error(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:
-        return np.mean(np.abs(y_true - y_pred))
+    def mean_absolute_error(self, target: np.ndarray, predictions: np.ndarray) -> float:
+        return np.mean(np.abs(target - predictions))


 # Example usage
 if __name__ == "__main__":
     data = pd.read_csv("ADRvsRating.csv")
-    x = data[["Rating"]].to_numpy()
-    y = data["ADR"].to_numpy()
-    y = (y - np.mean(y)) / np.std(y)
+    features_matrix = data[["Rating"]].to_numpy()
+    target = data["ADR"].to_numpy()
+    target = (target - np.mean(target)) / np.std(target)

     # added bias term to the feature matrix
-    x = np.c_[np.ones(x.shape[0]), x]
+    features_matrix = np.c_[np.ones(features_matrix.shape[0]), features_matrix]

     # initialize and train the ridge regression model
     model = RidgeRegression(alpha=0.01, regularization_param=0.1, num_iterations=1000)
-    model.fit(x, y)
+    model.fit(features_matrix, target)

     # predictions
-    predictions = model.predict(x)
+    predictions = model.predict(features_matrix)

     # results
     print("Optimized Weights:", model.theta)
-    print("Cost:", model.compute_cost(x, y))
-    print("Mean Absolute Error:", model.mean_absolute_error(y, predictions))
+    print("Cost:", model.compute_cost(features_matrix, target))
+    print("Mean Absolute Error:", model.mean_absolute_error(target, predictions))