
Commit 4563f22

Author: hoodaty
Commit message: Altered Quadratic class
Parent: 3762627

1 file changed (+66, -29)


skglm/datafits/single_task.py

Lines changed: 66 additions & 29 deletions
@@ -27,7 +27,10 @@ class Quadratic(BaseDatafit):
     """

     def __init__(self):
-        pass
+        self.sample_weights = None
+
+    def set_sample_weights(self, sample_weights):
+        self.sample_weights = sample_weights

     def get_spec(self):
         spec = (
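The new setter attaches one weight per sample to the datafit before the solver uses it. A minimal usage sketch (illustrative only, not part of the diff; it assumes Quadratic is instantiated and initialized directly, and that sample_weights has shape (n_samples,), which is what the weighted value() below implies):

import numpy as np
from skglm.datafits import Quadratic

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 5))
y = rng.standard_normal(100)

datafit = Quadratic()
# One non-negative weight per sample; uniform weights here for illustration.
datafit.set_sample_weights(np.full(X.shape[0], 0.5))
datafit.initialize(X, y)  # precomputes Xty, unchanged by this commit

w = np.zeros(X.shape[1])
print(datafit.value(y, w, X @ w))  # weighted least-squares objective at w = 0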
@@ -40,24 +43,34 @@ def params_to_dict(self):

     def get_lipschitz(self, X, y):
         n_features = X.shape[1]
-
-        lipschitz = np.zeros(n_features, dtype=X.dtype)
-        for j in range(n_features):
-            lipschitz[j] = (X[:, j] ** 2).sum() / len(y)
-
+        if self.sample_weights is None:
+            lipschitz = np.zeros(n_features, dtype=X.dtype)
+            for j in range(n_features):
+                lipschitz[j] = (X[:, j] ** 2).sum() / len(y)
+        else:
+            lipschitz = np.zeros(n_features, dtype=X.dtype)
+            for j in range(n_features):
+                lipschitz[j] = np.sum(self.sample_weights * (X[:, j] ** 2)) / len(y)
         return lipschitz

     def get_lipschitz_sparse(self, X_data, X_indptr, X_indices, y):
         n_features = len(X_indptr) - 1
-        lipschitz = np.zeros(n_features, dtype=X_data.dtype)
-
-        for j in range(n_features):
-            nrm2 = 0.
-            for idx in range(X_indptr[j], X_indptr[j + 1]):
-                nrm2 += X_data[idx] ** 2
-
-            lipschitz[j] = nrm2 / len(y)
-
+        if self.sample_weights is None:
+            lipschitz = np.zeros(n_features, dtype=X_data.dtype)
+            for j in range(n_features):
+                nrm2 = 0.
+                for idx in range(X_indptr[j], X_indptr[j + 1]):
+                    nrm2 += X_data[idx] ** 2
+
+                lipschitz[j] = nrm2 / len(y)
+        else:
+            lipschitz = np.zeros(n_features, dtype=X_data.dtype)
+            for j in range(n_features):
+                nrm2 = 0.
+                for idx in range(X_indptr[j], X_indptr[j + 1]):
+                    nrm2 += self.sample_weights[idx] * X_data[idx] ** 2
+
+                lipschitz[j] = nrm2 / len(y)
         return lipschitz

     def initialize(self, X, y):
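With weights s, the datafit becomes F(w) = sum_i s_i * (y_i - (Xw)_i)^2 / (2 * n), so the coordinate-wise Lipschitz constant of its gradient is L_j = sum_i s_i * X[i, j]^2 / n, which is what the dense branch above computes in a loop. Below is a vectorized sketch of both quantities (hypothetical helpers, not the class methods). In CSC storage the sample that owns X_data[idx] is X_indices[idx], so the sparse sketch looks the per-sample weight up through X_indices:

import numpy as np

def weighted_lipschitz_dense(X, sample_weights):
    # L_j = sum_i s_i * X[i, j] ** 2 / n, vectorized over features.
    return (sample_weights[:, None] * X ** 2).sum(axis=0) / X.shape[0]

def weighted_lipschitz_sparse(X_data, X_indptr, X_indices, n_samples, sample_weights):
    # X_data[idx] belongs to row X_indices[idx] of column j, so the
    # weight lookup goes through X_indices.
    n_features = len(X_indptr) - 1
    lipschitz = np.zeros(n_features, dtype=X_data.dtype)
    for j in range(n_features):
        sl = slice(X_indptr[j], X_indptr[j + 1])
        lipschitz[j] = np.sum(sample_weights[X_indices[sl]] * X_data[sl] ** 2) / n_samples
    return lipschitz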
@@ -75,33 +88,57 @@ def initialize_sparse(self, X_data, X_indptr, X_indices, y):
             self.Xty[j] = xty

     def get_global_lipschitz(self, X, y):
-        return norm(X, ord=2) ** 2 / len(y)
+        if self.sample_weights is None:
+            return norm(X, ord=2) ** 2 / len(y)
+        else:
+            return (norm(X @ self.sample_weights, axis=1, ord=2) ** 2) / len(y)

     def get_global_lipschitz_sparse(self, X_data, X_indptr, X_indices, y):
-        return spectral_norm(X_data, X_indptr, X_indices, len(y)) ** 2 / len(y)
+        if self.sample_weights is None:
+            return spectral_norm(X_data, X_indptr, X_indices, len(y)) ** 2 / len(y)
+        else:
+            return spectral_norm(X_data @ self.sample_weights, X_indptr, X_indices, len(y)) ** 2 / len(y)

     def value(self, y, w, Xw):
-        return np.sum((y - Xw) ** 2) / (2 * len(Xw))
+        if self.sample_weights is None:
+            return np.sum((y - Xw) ** 2) / (2 * len(Xw))
+        else:
+            return np.sum(self.sample_weights * (y - Xw) ** 2) / (2 * len(Xw))

     def gradient_scalar(self, X, y, w, Xw, j):
-        return (X[:, j] @ Xw - self.Xty[j]) / len(Xw)
+        if self.sample_weights is None:
+            return (X[:, j] @ Xw - self.Xty[j]) / len(Xw)
+        else:
+            return self.sample_weights * (X[:, j] @ Xw - self.Xty[j])

     def gradient_scalar_sparse(self, X_data, X_indptr, X_indices, y, Xw, j):
         XjTXw = 0.
-        for i in range(X_indptr[j], X_indptr[j+1]):
-            XjTXw += X_data[i] * Xw[X_indices[i]]
-        return (XjTXw - self.Xty[j]) / len(Xw)
+        if self.sample_weights is None:
+            for i in range(X_indptr[j], X_indptr[j+1]):
+                XjTXw += X_data[i] * Xw[X_indices[i]]
+            return (XjTXw - self.Xty[j]) / len(Xw)
+        else:
+            XjTXw_weighted = 0.
+            for i in range(X_indptr[j], X_indptr[j+1]):
+                XjTXw_weighted += X_data[i] * Xw[X_indices[i]] * self.sample_weights[X_indices[i]]
+            return (XjTXw_weighted - self.Xty[j]) / len(Xw)

-    def full_grad_sparse(
-            self, X_data, X_indptr, X_indices, y, Xw):
+    def full_grad_sparse(self, X_data, X_indptr, X_indices, y, Xw):
         n_features = X_indptr.shape[0] - 1
         n_samples = y.shape[0]
         grad = np.zeros(n_features, dtype=Xw.dtype)
-        for j in range(n_features):
-            XjTXw = 0.
-            for i in range(X_indptr[j], X_indptr[j + 1]):
-                XjTXw += X_data[i] * Xw[X_indices[i]]
-            grad[j] = (XjTXw - self.Xty[j]) / n_samples
+        if self.sample_weights is None:
+            for j in range(n_features):
+                XjTXw = 0.
+                for i in range(X_indptr[j], X_indptr[j + 1]):
+                    XjTXw += X_data[i] * Xw[X_indices[i]]
+                grad[j] = (XjTXw - self.Xty[j]) / n_samples
+        else:
+            for j in range(n_features):
+                XjTXw_weighted = 0.
+                for i in range(X_indptr[j], X_indptr[j + 1]):
+                    XjTXw_weighted += X_data[i] * Xw[X_indices[i]] * self.sample_weights[X_indices[i]]
+                grad[j] = (XjTXw_weighted - self.Xty[j]) / n_samples
         return grad

     def intercept_update_step(self, y, Xw):
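For the global constant, one standard formulation: the gradient of the weighted datafit is X.T @ (s * (Xw - y)) / n, so its Lipschitz constant is the largest eigenvalue of X.T @ diag(s) @ X divided by n, i.e. the squared spectral norm of diag(sqrt(s)) @ X over n. A dense sketch of that formulation (a hypothetical helper, not the method in the diff):

import numpy as np
from numpy.linalg import norm

def weighted_global_lipschitz(X, sample_weights):
    # Scale each row of X by sqrt(s_i), then take the squared spectral
    # norm; this equals the top eigenvalue of X.T @ diag(s) @ X, over n.
    Xs = np.sqrt(sample_weights)[:, None] * X
    return norm(Xs, ord=2) ** 2 / X.shape[0]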

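Likewise, the weighted objective and its coordinate-wise gradient can be written directly in terms of the weighted residual, with no precomputed Xty term (initialize computes Xty without weights). A self-contained dense sketch (hypothetical helpers, not the class methods):

import numpy as np

def weighted_value(y, Xw, sample_weights):
    # F(w) = sum_i s_i * (y_i - (Xw)_i) ** 2 / (2 * n), as in value().
    return np.sum(sample_weights * (y - Xw) ** 2) / (2 * len(Xw))

def weighted_gradient_scalar(X, y, Xw, sample_weights, j):
    # dF/dw_j = sum_i s_i * X[i, j] * ((Xw)_i - y_i) / n, a scalar:
    # the weighted residual summed against column j of X.
    return X[:, j] @ (sample_weights * (Xw - y)) / len(Xw)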