@@ -104,12 +104,72 @@ def cost_function(h: np.ndarray, y: np.ndarray) -> float:
 def log_likelihood(x, y, weights):
+    """
+    Compute the logistic-regression log-likelihood:
+
+        sum(y * (x @ weights) - log(1 + exp(x @ weights)))
+
+    Parameters:
+    x (ndarray): shape (n_samples, n_features) feature matrix
+    y (ndarray): shape (n_samples,) binary labels (0 or 1)
+    weights (ndarray): shape (n_features,) parameter vector
+
+    Examples:
+    >>> x = np.zeros((3, 3))
+    >>> y = np.zeros(3)
+    >>> w = np.zeros(3)
+    >>> log_likelihood(x, y, w)
+    -2.0794415416798357
+
+    >>> x = np.array([[1]])
+    >>> y = np.array([1])
+    >>> w = np.array([1])
+    >>> log_likelihood(x, y, w)
+    -0.3132616875182228
+
+    >>> x = np.zeros((2, 2))
+    >>> y = np.zeros(2)
+    >>> w = np.array([1, 2])
+    >>> log_likelihood(x, y, w)
+    -1.3862943611198906
+    """
     scores = np.dot(x, weights)
     return np.sum(y * scores - np.log(1 + np.exp(scores)))

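Side note, not part of this commit: `np.log(1 + np.exp(scores))` overflows for large positive scores. A minimal sketch of the same quantity computed stably via `np.logaddexp` (the helper name `log_likelihood_stable` is hypothetical):

```python
import numpy as np

def log_likelihood_stable(x, y, weights):
    # log(1 + exp(s)) == logaddexp(0, s), evaluated without overflow
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.logaddexp(0, scores))

# Matches the second doctest above: 1 - log(1 + e) ~= -0.3132616875182228
print(log_likelihood_stable(np.array([[1.0]]), np.array([1.0]), np.array([1.0])))
```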
-# here alpha is the learning rate, X is the feature matrix,y is the target matrix
 def logistic_reg(alpha, x, y, max_iterations=70000):
+ """
140
+ Trains a logistic regression model using gradient descent.
141
+
142
+ Parameters
143
+ ----------
144
+ alpha : float
145
+ Learning rate (step size) for gradient descent updates.
146
+ x : ndarray of shape (n_samples, n_features)
147
+ Feature matrix where each row corresponds to one training example.
148
+ y : ndarray of shape (n_samples,)
149
+ Binary target labels (0 or 1) for each training example.
150
+ max_iterations : int, optional
151
+ Maximum number of gradient descent iterations to perform (default: 70000).
152
+
153
+ Returns
154
+ -------
155
+ theta : ndarray of shape (n_features,)
156
+ Learned weight vector after optimization.
157
+
158
+ Examples:
159
+ >>> alpha = 0.001
160
+ >>> x = np.ones((2, 2))
161
+ >>> y = np.ones(2)
162
+ >>> logistic_reg(alpha, x, y, max_iterations=0)
163
+ array([0., 0.])
164
+
165
+ >>> alpha = 0
166
+ >>> x = np.ones((2, 2))
167
+ >>> y = np.ones(2)
168
+ >>> theta = logistic_reg(alpha, x, y, max_iterations=1)
169
+ loss: 0.6931471805599453
170
+ >>> theta
171
+ array([0., 0.])
172
+ """
     theta = np.zeros(x.shape[1])

     for iterations in range(max_iterations):
@@ -121,7 +181,7 @@ def logistic_reg(alpha, x, y, max_iterations=70000):
         h = sigmoid_function(z)
         j = cost_function(h, y)
         if iterations % 100 == 0:
-            print(f"loss: {j} \t ")  # printing the loss after every 100 iterations
+            print(f"loss: {j}")  # print the loss every 100 iterations
     return theta

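For context, a minimal end-to-end sketch of how the patched `logistic_reg` is meant to be used. The `sigmoid` helper and the dataset here are hypothetical, and the update step assumes the standard batch gradient rule from the unchanged lines elided between the two hunks:

```python
import numpy as np

def sigmoid(z):
    # standard logistic function
    return 1.0 / (1.0 + np.exp(-z))

# Tiny made-up dataset: first column is a bias term, second the feature
x = np.array([[1.0, 0.5], [1.0, 1.5], [1.0, 2.5], [1.0, 3.5]])
y = np.array([0.0, 0.0, 1.0, 1.0])

# Batch gradient descent, mirroring the training loop in logistic_reg
alpha, theta = 0.1, np.zeros(x.shape[1])
for _ in range(5000):
    h = sigmoid(np.dot(x, theta))
    theta -= alpha * np.dot(x.T, h - y) / y.size

print(theta)                      # learned weights (bias, slope)
print(sigmoid(np.dot(x, theta)))  # predicted probabilities per sample
```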