Commit 0c841dd

Fixing linting
1 parent 7f24a7d commit 0c841dd

6 files changed (+36, -53 lines)

src/skmatter/decomposition/_pcov.py

Lines changed: 2 additions & 4 deletions
@@ -3,14 +3,12 @@
 
 import numpy as np
 from numpy.linalg import LinAlgError
-
-from scipy.linalg import sqrtm as MatrixSqrt
 from scipy import linalg
+from scipy.linalg import sqrtm as MatrixSqrt
 from scipy.sparse.linalg import svds
-
 from sklearn.decomposition._base import _BasePCA
-from sklearn.linear_model._base import LinearModel
 from sklearn.decomposition._pca import _infer_dimension
+from sklearn.linear_model._base import LinearModel
 from sklearn.utils import check_random_state
 from sklearn.utils._arpack import _init_arpack_v0
 from sklearn.utils.extmath import randomized_svd, stable_cumsum, svd_flip
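
For readability, here is the import header of `_pcov.py` as it reads after this hunk, reconstructed directly from the diff above (nothing beyond the hunk is assumed):

    import numpy as np
    from numpy.linalg import LinAlgError
    from scipy import linalg
    from scipy.linalg import sqrtm as MatrixSqrt
    from scipy.sparse.linalg import svds
    from sklearn.decomposition._base import _BasePCA
    from sklearn.decomposition._pca import _infer_dimension
    from sklearn.linear_model._base import LinearModel
    from sklearn.utils import check_random_state
    from sklearn.utils._arpack import _init_arpack_v0
    from sklearn.utils.extmath import randomized_svd, stable_cumsum, svd_flip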

src/skmatter/decomposition/_pcovc.py

Lines changed: 19 additions & 19 deletions
@@ -2,18 +2,18 @@
 from sklearn import clone
 from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
 from sklearn.linear_model import (
+    LogisticRegression,
+    LogisticRegressionCV,
     Perceptron,
     RidgeClassifier,
     RidgeClassifierCV,
-    LogisticRegression,
-    LogisticRegressionCV,
     SGDClassifier,
 )
 from sklearn.linear_model._base import LinearClassifierMixin
 from sklearn.svm import LinearSVC
 from sklearn.utils import check_array
-from sklearn.utils.validation import check_is_fitted, validate_data
 from sklearn.utils.multiclass import check_classification_targets, type_of_target
+from sklearn.utils.validation import check_is_fitted, validate_data
 
 from skmatter.decomposition import _BasePCov
 from skmatter.utils import check_cl_fit
@@ -218,29 +218,28 @@ def __init__(
         self.classifier = classifier
 
     def fit(self, X, Y, W=None):
-        r"""Fit the model with X and Y. Depending on the dimensions of X, calls either
-        `_fit_feature_space` or `_fit_sample_space`
+        r"""Fit the model with X and Y. Depending on the dimensions of X,
+        calls either `_fit_feature_space` or `_fit_sample_space`.
 
         Parameters
         ----------
         X : numpy.ndarray, shape (n_samples, n_features)
-            Training data, where n_samples is the number of samples and n_features is
-            the number of features.
+            Training data, where n_samples is the number of samples and
+            n_features is the number of features.
 
             It is suggested that :math:`\mathbf{X}` be centered by its column-
-            means and scaled. If features are related, the matrix should be scaled
-            to have unit variance, otherwise :math:`\mathbf{X}` should be
-            scaled so that each feature has a variance of 1 / n_features.
+            means and scaled. If features are related, the matrix should be
+            scaled to have unit variance, otherwise :math:`\mathbf{X}` should
+            be scaled so that each feature has a variance of 1 / n_features.
 
         Y : numpy.ndarray, shape (n_samples,)
             Training data, where n_samples is the number of samples.
 
         W : numpy.ndarray, shape (n_features, n_properties)
-            Classification weights, optional when classifier=`precomputed`. If not
-            passed, it is assumed that the weights will be taken from a linear classifier
-            fit between X and Y
+            Classification weights, optional when classifier=`precomputed`. If
+            not passed, it is assumed that the weights will be taken from a
+            linear classifier fit between X and Y
         """
-
         X, Y = validate_data(self, X, Y, y_numeric=False)
         check_classification_targets(Y)
         self.classes_ = np.unique(Y)
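
The reworded docstring recommends centering and scaling `X` before fitting; a minimal usage sketch of that advice follows (the dataset, the use of `StandardScaler`, and the parameter values are illustrative assumptions, not part of this change):

    # Illustrative sketch: scale X as the fit() docstring suggests, then fit PCovC.
    from sklearn.datasets import load_breast_cancer
    from sklearn.preprocessing import StandardScaler

    from skmatter.decomposition import PCovC

    X, Y = load_breast_cancer(return_X_y=True)
    X = StandardScaler().fit_transform(X)  # column-center, scale to unit variance

    pcovc = PCovC(n_components=2, mixing=0.5)
    pcovc.fit(X, Y)          # dispatches to feature- or sample-space fit by X's shape
    T = pcovc.transform(X)   # latent projection, shape (n_samples, 2)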
@@ -280,7 +279,8 @@ def fit(self, X, Y, W=None):
             # If precomputed, use default classifier to predict Y from T
             classifier = LogisticRegression()
             if W is None:
-                W = LogisticRegression().fit(X, Y).coef_.T.reshape(X.shape[1], -1)
+                W = LogisticRegression().fit(X, Y).coef_.T
+                W = W.reshape(X.shape[1], -1)
 
         Z = X @ W
 
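The line split above is purely cosmetic, but the reshape it preserves deserves a note: scikit-learn's `LogisticRegression` stores `coef_` as `(1, n_features)` for binary targets and `(n_classes, n_features)` for multiclass, so `coef_.T` followed by `reshape(X.shape[1], -1)` always yields a 2-D `(n_features, ...)` weight matrix. A small sketch (dataset and values are illustrative):

    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X, Y = make_classification(n_samples=100, n_features=5, random_state=0)
    clf = LogisticRegression().fit(X, Y)
    print(clf.coef_.shape)         # (1, 5): a single row for binary problems

    W = clf.coef_.T                # (5, 1)
    W = W.reshape(X.shape[1], -1)  # no-op here; guarantees a 2-D column layout
    print(W.shape)                 # (5, 1)
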
@@ -289,8 +289,8 @@ def fit(self, X, Y, W=None):
         else:
             self._fit_sample_space(X, Y, Z, W)
 
-        # instead of using linear regression solution, refit with the classifier
-        # and steal weights to get pxz and ptz
+        # instead of using linear regression solution, refit with the
+        # classifier and steal weights to get pxz and ptz
 
         self.classifier_ = clone(classifier).fit(X @ self.pxt_, Y)
 
@@ -404,8 +404,8 @@ def predict(self, X=None, T=None):
     def transform(self, X=None):
         """Apply dimensionality reduction to X.
 
-        ``X`` is projected on the first principal components as determined by the
-        modified PCovC distances.
+        ``X`` is projected on the first principal components as determined by
+        the modified PCovC distances.
 
         Parameters
         ----------

src/skmatter/decomposition/_pcovr.py

Lines changed: 2 additions & 3 deletions
@@ -1,9 +1,8 @@
 import numpy as np
-
-from sklearn.utils import check_array
+from sklearn.base import MultiOutputMixin, RegressorMixin
 from sklearn.linear_model import LinearRegression, Ridge, RidgeCV
+from sklearn.utils import check_array
 from sklearn.utils.validation import check_is_fitted, validate_data
-from sklearn.base import MultiOutputMixin, RegressorMixin
 
 from skmatter.decomposition import _BasePCov
 from skmatter.utils import check_lr_fit

src/skmatter/utils/_pcovc_utils.py

Lines changed: 6 additions & 5 deletions
@@ -1,8 +1,9 @@
 from copy import deepcopy
+
+import numpy as np
 from sklearn import clone
-from sklearn.utils.validation import check_is_fitted, validate_data
 from sklearn.exceptions import NotFittedError
-import numpy as np
+from sklearn.utils.validation import check_is_fitted, validate_data
 
 
 def check_cl_fit(classifier, X, y):
@@ -22,8 +23,8 @@ def check_cl_fit(classifier, X, y):
     Returns
     -------
     fitted_classifier : object
-        The fitted classifier. If input classifier was already fitted and compatible with
-        the data, returns a deep copy. Otherwise returns a newly fitted classifier.
+        The fitted classifier. If input classifier was already fitted and compatible
+        with the data, returns a deep copy. Otherwise returns a newly fitted classifier.
 
     Raises
     ------
@@ -65,4 +66,4 @@ def check_cl_fit(classifier, X, y):
     fitted_classifier = clone(classifier)
     fitted_classifier.fit(X, y)
 
-    return fitted_classifier
\ No newline at end of file
+    return fitted_classifier
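
The reflowed docstring describes a fit-or-copy contract; a minimal sketch of that pattern under the same imports (the real `check_cl_fit` also validates the data against the fitted classifier, which this sketch omits, and `fit_or_copy` is a hypothetical name):

    from copy import deepcopy

    from sklearn import clone
    from sklearn.exceptions import NotFittedError
    from sklearn.utils.validation import check_is_fitted


    def fit_or_copy(classifier, X, y):
        """Deep-copy an already-fitted classifier, otherwise clone and fit it."""
        try:
            check_is_fitted(classifier)  # raises NotFittedError if unfitted
            return deepcopy(classifier)
        except NotFittedError:
            fitted = clone(classifier)
            fitted.fit(X, y)
            return fitted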

tests/test_check_estimators.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 from sklearn.utils.estimator_checks import parametrize_with_checks
 
-from skmatter.decomposition import KernelPCovR, PCovR, PCovC
+from skmatter.decomposition import KernelPCovR, PCovC, PCovR
 from skmatter.feature_selection import CUR as fCUR
 from skmatter.feature_selection import FPS as fFPS
 from skmatter.feature_selection import PCovCUR as fPCovCUR

tests/test_pcovc.py

Lines changed: 6 additions & 21 deletions
@@ -6,7 +6,6 @@
 from sklearn.datasets import load_breast_cancer as get_dataset
 from sklearn.decomposition import PCA
 from sklearn.linear_model import LogisticRegression
-
 from sklearn.naive_bayes import GaussianNB
 from sklearn.preprocessing import StandardScaler
 from sklearn.utils.validation import check_X_y
@@ -231,18 +230,6 @@ def test_spaces_equivalent(self):
             n_components=2, mixing=alpha, tol=1e-12, space="feature"
         )
         pcovc_fs.fit(self.X, self.Y)
-
-        # if(alpha > 0.5):
-        #     print(np.isclose(
-        #         pcovc_ss.transform(self.X),
-        #         pcovc_fs.transform(self.X),
-        #         self.error_tol
-        #     ))
-
-        # failing for all alpha values
-        # so these are similar (within approximately 0.001), but not exactly the same.
-        # I think this is because transform and inverse_transform depend on Pxt and Ptx,
-        # which in turn depend on Z, which is a matrix of class likelihoods (so maybe there is some rounding problems)
         self.assertTrue(
             np.allclose(
                 pcovc_ss.inverse_transform(pcovc_ss.transform(self.X)),
@@ -301,8 +288,6 @@ def test_bad_n_components(self):
             pcovc = self.model(
                 n_components="mle", classifier=LogisticRegression(), svd_solver="full"
             )
-            # changed X[:2], Y[:2] to X[:20], Y[:20] since first two rows of classes only had class 1 as target,
-            # thus error was thrown
             pcovc.fit(self.X[:20], self.Y[:20])
         self.assertEqual(
             str(cm.exception),
@@ -401,13 +386,13 @@ def test_centering(self):
         pcovc.fit(X, self.Y)
         self.assertEqual(
             str(w[0].message),
-            "This class does not automatically center data, and your data mean is "
-            "greater than the supplied tolerance.",
+            "This class does not automatically center data, and your data "
+            "mean is greater than the supplied tolerance.",
         )
 
     def test_T_shape(self):
-        """Check that PCovC returns a latent space projection consistent with the shape
-        of the input matrix.
+        """Check that PCovC returns a latent space projection consistent with
+        the shape of the input matrix.
         """
         n_components = 5
         pcovc = self.model(n_components=n_components, tol=1e-12)
@@ -417,8 +402,8 @@
         self.assertTrue(T.shape[-1] == n_components)
 
     def test_Z_shape(self):
-        """Check that PCovC returns an evidence matrix consistent with the number of samples
-        and the number of classes.
+        """Check that PCovC returns an evidence matrix consistent with the
+        number of samples and the number of classes.
         """
         n_components = 5
         pcovc = self.model(n_components=n_components, tol=1e-12)
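
The two reworded docstrings state shape contracts for the latent projection `T` and the evidence matrix `Z`; below is a hedged, self-contained sketch of what they assert (`decision_function` as the accessor for `Z` is an assumption here, it does not appear in this diff):

    # Sketch of the shape contracts behind test_T_shape / test_Z_shape.
    from sklearn.datasets import load_breast_cancer
    from sklearn.preprocessing import StandardScaler

    from skmatter.decomposition import PCovC

    X, Y = load_breast_cancer(return_X_y=True)
    X = StandardScaler().fit_transform(X)

    pcovc = PCovC(n_components=5, tol=1e-12).fit(X, Y)
    T = pcovc.transform(X)
    assert T.shape == (X.shape[0], 5)  # projection: (n_samples, n_components)

    Z = pcovc.decision_function(X)     # evidence, cf. `Z = X @ W` in fit()
    assert Z.shape[0] == X.shape[0]    # one evidence row per sample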
