@@ -47,47 +47,73 @@ def feature_scaling(
         scaled_features = (features - mean) / std
         return scaled_features, mean, std

-    def fit(self, x: np.ndarray, y: np.ndarray) -> None:
+    def fit(self, features: np.ndarray, target: np.ndarray) -> None:
         """
         Fit the Ridge Regression model to the training data.

-        :param x: Input features, shape (m, n)
-        :param y: Target values, shape (m,)
+        :param features: Input features, shape (m, n)
+        :param target: Target values, shape (m,)
+
+        Example:
+        >>> rr = RidgeRegression(alpha=0.01, lambda_=0.1, iterations=10)
+        >>> features = np.array([[1, 2], [2, 3], [4, 6]])
+        >>> target = np.array([1, 2, 3])
+        >>> rr.fit(features, target)
+        >>> rr.theta is not None
+        True
         """
-        x_scaled, mean, std = self.feature_scaling(x)  # Normalize features
-        m, n = x_scaled.shape
+        features_scaled, mean, std = self.feature_scaling(features)  # Normalize features
+        m, n = features_scaled.shape
         self.theta = np.zeros(n)  # Initialize weights to zeros

-        for _ in range(self.iterations):
-            predictions = x_scaled.dot(self.theta)
-            error = predictions - y
+        for i in range(self.iterations):
+            predictions = features_scaled.dot(self.theta)
+            error = predictions - target

             # Compute gradient with L2 regularization
-            gradient = (x_scaled.T.dot(error) + self.lambda_ * self.theta) / m
+            gradient = (features_scaled.T.dot(error) + self.lambda_ * self.theta) / m
             self.theta -= self.alpha * gradient  # Update weights

-    def predict(self, x: np.ndarray) -> np.ndarray:
+    def predict(self, features: np.ndarray) -> np.ndarray:
         """
         Predict values using the trained model.

-        :param x: Input features, shape (m, n)
+        :param features: Input features, shape (m, n)
         :return: Predicted values, shape (m,)
-        """
-        x_scaled, _, _ = self.feature_scaling(x)  # Scale features using training data
-        return x_scaled.dot(self.theta)

-    def compute_cost(self, x: np.ndarray, y: np.ndarray) -> float:
+        Example:
+        >>> rr = RidgeRegression(alpha=0.01, lambda_=0.1, iterations=10)
+        >>> features = np.array([[1, 2], [2, 3], [4, 6]])
+        >>> target = np.array([1, 2, 3])
+        >>> rr.fit(features, target)
+        >>> predictions = rr.predict(features)
+        >>> predictions.shape == target.shape
+        True
+        """
+        features_scaled, _, _ = self.feature_scaling(features)  # Scale features using training data
+        return features_scaled.dot(self.theta)
+
+    def compute_cost(self, features: np.ndarray, target: np.ndarray) -> float:
         """
         Compute the cost function with regularization.

-        :param x: Input features, shape (m, n)
-        :param y: Target values, shape (m,)
+        :param features: Input features, shape (m, n)
+        :param target: Target values, shape (m,)
         :return: Computed cost
+
+        Example:
+        >>> rr = RidgeRegression(alpha=0.01, lambda_=0.1, iterations=10)
+        >>> features = np.array([[1, 2], [2, 3], [4, 6]])
+        >>> target = np.array([1, 2, 3])
+        >>> rr.fit(features, target)
+        >>> cost = rr.compute_cost(features, target)
+        >>> isinstance(cost, float)
+        True
         """
-        x_scaled, _, _ = self.feature_scaling(x)  # Scale features using training data
-        m = len(y)
-        predictions = x_scaled.dot(self.theta)
-        cost = (1 / (2 * m)) * np.sum((predictions - y) ** 2) + (
+        features_scaled, _, _ = self.feature_scaling(features)  # Scale features using training data
+        m = len(target)
+        predictions = features_scaled.dot(self.theta)
+        cost = (1 / (2 * m)) * np.sum((predictions - target) ** 2) + (
             self.lambda_ / (2 * m)
         ) * np.sum(self.theta ** 2)
         return cost
@@ -99,6 +125,14 @@ def mean_absolute_error(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:
         :param y_true: Actual target values, shape (m,)
         :param y_pred: Predicted target values, shape (m,)
         :return: MAE
+
+        Example:
+        >>> rr = RidgeRegression(alpha=0.01, lambda_=0.1, iterations=10)
+        >>> y_true = np.array([1, 2, 3])
+        >>> y_pred = np.array([1.1, 2.1, 2.9])
+        >>> mae = rr.mean_absolute_error(y_true, y_pred)
+        >>> isinstance(mae, float)
+        True
         """
         return np.mean(np.abs(y_true - y_pred))

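For reference, a minimal end-to-end usage sketch of the class as it stands after this diff. It assumes the constructor signature shown in the doctests (`alpha`, `lambda_`, `iterations`); the module name used in the import is hypothetical.

```python
import numpy as np

# Assumes the RidgeRegression class from this diff is importable;
# the module name "ridge_regression" is an assumption.
from ridge_regression import RidgeRegression

features = np.array([[1.0, 2.0], [2.0, 3.0], [4.0, 6.0]])
target = np.array([1.0, 2.0, 3.0])

model = RidgeRegression(alpha=0.01, lambda_=0.1, iterations=1000)
model.fit(features, target)                  # gradient descent on scaled features

predictions = model.predict(features)        # scales the inputs, then applies theta
cost = model.compute_cost(features, target)  # squared-error term plus (lambda_ / (2m)) * sum(theta**2)
mae = model.mean_absolute_error(target, predictions)

print(predictions.shape, float(cost), float(mae))
```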