Skip to content

Commit 85e26c1

Browse files
author
hoodaty
committed
Revert "Altered Quadratic class"
This reverts commit 4563f22.
1 parent 0f21cdf commit 85e26c1

File tree

1 file changed

+29
-66
lines changed

1 file changed

+29
-66
lines changed

skglm/datafits/single_task.py

Lines changed: 29 additions & 66 deletions
Original file line numberDiff line numberDiff line change
@@ -27,10 +27,7 @@ class Quadratic(BaseDatafit):
2727
"""
2828

2929
def __init__(self):
30-
self.sample_weights = None
31-
32-
def set_sample_weights(self, sample_weights):
33-
self.sample_weights = sample_weights
30+
pass
3431

3532
def get_spec(self):
3633
spec = (
@@ -43,34 +40,24 @@ def params_to_dict(self):
4340

4441
def get_lipschitz(self, X, y):
4542
n_features = X.shape[1]
46-
if self.sample_weights is None:
47-
lipschitz = np.zeros(n_features, dtype=X.dtype)
48-
for j in range(n_features):
49-
lipschitz[j] = (X[:, j] ** 2).sum() / len(y)
50-
else:
51-
lipschitz = np.zeros(n_features, dtype=X.dtype)
52-
for j in range(n_features):
53-
lipschitz[j] = np.sum(self.sample_weights * (X[:, j] ** 2))/len(y)
43+
44+
lipschitz = np.zeros(n_features, dtype=X.dtype)
45+
for j in range(n_features):
46+
lipschitz[j] = (X[:, j] ** 2).sum() / len(y)
47+
5448
return lipschitz
5549

5650
def get_lipschitz_sparse(self, X_data, X_indptr, X_indices, y):
5751
n_features = len(X_indptr) - 1
58-
if self.sample_weights is None:
59-
lipschitz = np.zeros(n_features, dtype=X_data.dtype)
60-
for j in range(n_features):
61-
nrm2 = 0.
62-
for idx in range(X_indptr[j], X_indptr[j + 1]):
63-
nrm2 += X_data[idx] ** 2
64-
65-
lipschitz[j] = nrm2 / len(y)
66-
else:
67-
lipschitz = np.zeros(n_features, dtype=X_data.dtype)
68-
for j in range(n_features):
69-
nrm2 = 0.
70-
for idx in range(X_indptr[j], X_indptr[j + 1]):
71-
nrm2 += self.sample_weights[idx]*X_data[idx] ** 2
72-
73-
lipschitz[j] = nrm2 / len(y)
52+
lipschitz = np.zeros(n_features, dtype=X_data.dtype)
53+
54+
for j in range(n_features):
55+
nrm2 = 0.
56+
for idx in range(X_indptr[j], X_indptr[j + 1]):
57+
nrm2 += X_data[idx] ** 2
58+
59+
lipschitz[j] = nrm2 / len(y)
60+
7461
return lipschitz
7562

7663
def initialize(self, X, y):
@@ -88,57 +75,33 @@ def initialize_sparse(self, X_data, X_indptr, X_indices, y):
8875
self.Xty[j] = xty
8976

9077
def get_global_lipschitz(self, X, y):
91-
if self.sample_weights is None:
92-
return norm(X, ord=2) ** 2 / len(y)
93-
else:
94-
return (norm(X@self.sample_weights, axis=1, ord=2) ** 2)/len(y)
78+
return norm(X, ord=2) ** 2 / len(y)
9579

9680
def get_global_lipschitz_sparse(self, X_data, X_indptr, X_indices, y):
97-
if self.sample_weights is None:
98-
return spectral_norm(X_data, X_indptr, X_indices, len(y)) ** 2 / len(y)
99-
else:
100-
return spectral_norm(X_data@self.sample_weights, X_indptr, X_indices, len(y)) ** 2 /len(y)
81+
return spectral_norm(X_data, X_indptr, X_indices, len(y)) ** 2 / len(y)
10182

10283
def value(self, y, w, Xw):
103-
if self.sample_weights is None:
104-
return np.sum((y - Xw) ** 2) / (2 * len(Xw))
105-
else:
106-
return np.sum(self.sample_weights * (y - Xw) ** 2) / (2 * len(Xw))
84+
return np.sum((y - Xw) ** 2) / (2 * len(Xw))
10785

10886
def gradient_scalar(self, X, y, w, Xw, j):
109-
if self.sample_weights is None:
110-
return (X[:, j] @ Xw - self.Xty[j]) / len(Xw)
111-
else:
112-
return self.sample_weights * (X[:, j] @ Xw - self.Xty[j])
87+
return (X[:, j] @ Xw - self.Xty[j]) / len(Xw)
11388

11489
def gradient_scalar_sparse(self, X_data, X_indptr, X_indices, y, Xw, j):
11590
XjTXw = 0.
116-
if self.sample_weights is None:
117-
for i in range(X_indptr[j], X_indptr[j+1]):
118-
XjTXw += X_data[i] * Xw[X_indices[i]]
119-
return (XjTXw - self.Xty[j]) / len(Xw)
120-
else:
121-
XjTXw_weighted = 0.
122-
for i in range(X_indptr[j], X_indptr[j+1]):
123-
XjTXw_weighted += X_data[i] * Xw[X_indices[i]] * self.sample_weights[X_indices[i]]
124-
return (XjTXw_weighted - self.Xty[j]) / len(Xw)
91+
for i in range(X_indptr[j], X_indptr[j+1]):
92+
XjTXw += X_data[i] * Xw[X_indices[i]]
93+
return (XjTXw - self.Xty[j]) / len(Xw)
12594

126-
def full_grad_sparse(self, X_data, X_indptr, X_indices, y, Xw):
95+
def full_grad_sparse(
96+
self, X_data, X_indptr, X_indices, y, Xw):
12797
n_features = X_indptr.shape[0] - 1
12898
n_samples = y.shape[0]
12999
grad = np.zeros(n_features, dtype=Xw.dtype)
130-
if self.sample_weights is None:
131-
for j in range(n_features):
132-
XjTXw = 0.
133-
for i in range(X_indptr[j], X_indptr[j + 1]):
134-
XjTXw += X_data[i] * Xw[X_indices[i]]
135-
grad[j] = (XjTXw - self.Xty[j]) / n_samples
136-
else:
137-
for j in range(n_features):
138-
XjTXw_weighted = 0.
139-
for i in range(X_indptr[j], X_indptr[j + 1]):
140-
XjTXw_weighted += X_data[i] * Xw[X_indices[i]] * self.sample_weights[X_indices[i]]
141-
grad[j] = (XjTXw_weighted - self.Xty[j]) / n_samples
100+
for j in range(n_features):
101+
XjTXw = 0.
102+
for i in range(X_indptr[j], X_indptr[j + 1]):
103+
XjTXw += X_data[i] * Xw[X_indices[i]]
104+
grad[j] = (XjTXw - self.Xty[j]) / n_samples
142105
return grad
143106

144107
def intercept_update_step(self, y, Xw):

0 commit comments

Comments (0)