@@ -47,46 +47,46 @@ def feature_scaling(
4747 scaled_features = (features - mean ) / std
4848 return scaled_features , mean , std
4949
50- def fit (self , X : np .ndarray , y : np .ndarray ) -> None :
50+ def fit (self , x : np .ndarray , y : np .ndarray ) -> None :
5151 """
5252 Fit the Ridge Regression model to the training data.
5353
54- :param X : Input features, shape (m, n)
54+ :param x : Input features, shape (m, n)
5555 :param y: Target values, shape (m,)
5656 """
57- X_scaled , mean , std = self .feature_scaling (X ) # Normalize features
58- m , n = X_scaled .shape
57+ x_scaled , mean , std = self .feature_scaling (x ) # Normalize features
58+ m , n = x_scaled .shape
5959 self .theta = np .zeros (n ) # Initialize weights to zeros
6060
61- for i in range (self .iterations ):
62- predictions = X_scaled .dot (self .theta )
61+ for _ in range (self .iterations ):
62+ predictions = x_scaled .dot (self .theta )
6363 error = predictions - y
6464
6565 # Compute gradient with L2 regularization
66- gradient = (X_scaled .T .dot (error ) + self .lambda_ * self .theta ) / m
66+ gradient = (x_scaled .T .dot (error ) + self .lambda_ * self .theta ) / m
6767 self .theta -= self .alpha * gradient # Update weights
6868
69- def predict (self , X : np .ndarray ) -> np .ndarray :
69+ def predict (self , x : np .ndarray ) -> np .ndarray :
7070 """
7171 Predict values using the trained model.
7272
73- :param X : Input features, shape (m, n)
73+ :param x : Input features, shape (m, n)
7474 :return: Predicted values, shape (m,)
7575 """
76- X_scaled , _ , _ = self .feature_scaling (X ) # Scale features using training data
77- return X_scaled .dot (self .theta )
76+ x_scaled , _ , _ = self .feature_scaling (x ) # Scale features using training data
77+ return x_scaled .dot (self .theta )
7878
79- def compute_cost (self , X : np .ndarray , y : np .ndarray ) -> float :
79+ def compute_cost (self , x : np .ndarray , y : np .ndarray ) -> float :
8080 """
8181 Compute the cost function with regularization.
8282
83- :param X : Input features, shape (m, n)
83+ :param x : Input features, shape (m, n)
8484 :param y: Target values, shape (m,)
8585 :return: Computed cost
8686 """
87- X_scaled , _ , _ = self .feature_scaling (X ) # Scale features using training data
87+ x_scaled , _ , _ = self .feature_scaling (x ) # Scale features using training data
8888 m = len (y )
89- predictions = X_scaled .dot (self .theta )
89+ predictions = x_scaled .dot (self .theta )
9090 cost = (1 / (2 * m )) * np .sum ((predictions - y ) ** 2 ) + (
9191 self .lambda_ / (2 * m )
9292 ) * np .sum (self .theta ** 2 )
@@ -106,24 +106,24 @@ def mean_absolute_error(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:
# Example usage
if __name__ == "__main__":
    # Load dataset
    data = pd.read_csv(
        "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/master/Week1/ADRvsRating.csv"
    )
    x = data[["Rating"]].to_numpy()  # Feature: Rating
    y = data["ADR"].to_numpy()  # Target: ADR
    y = (y - np.mean(y)) / np.std(y)  # Standardize the target

    # Add bias term (intercept) to the feature matrix.
    # BUG FIX: the original line referenced the old name `X`
    # (`np.ones(X.shape[0])`), which no longer exists after the rename to `x`
    # and raises NameError at runtime; use `x.shape[0]`.
    x = np.c_[np.ones(x.shape[0]), x]

    # Initialize and train the Ridge Regression model
    model = RidgeRegression(alpha=0.01, lambda_=0.1, iterations=1000)
    model.fit(x, y)

    # Predictions
    predictions = model.predict(x)

    # Results
    print("Optimized Weights:", model.theta)
    print("Cost:", model.compute_cost(x, y))
    print("Mean Absolute Error:", model.mean_absolute_error(y, predictions))
0 commit comments