 from sklearn import clone
 from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
 from sklearn.linear_model import (
+    LogisticRegression,
+    LogisticRegressionCV,
     Perceptron,
     RidgeClassifier,
     RidgeClassifierCV,
-    LogisticRegression,
-    LogisticRegressionCV,
     SGDClassifier,
 )
 from sklearn.linear_model._base import LinearClassifierMixin
 from sklearn.svm import LinearSVC
 from sklearn.utils import check_array
-from sklearn.utils.validation import check_is_fitted, validate_data
 from sklearn.utils.multiclass import check_classification_targets, type_of_target
+from sklearn.utils.validation import check_is_fitted, validate_data
 
 from skmatter.decomposition import _BasePCov
 from skmatter.utils import check_cl_fit
@@ -218,29 +218,28 @@ def __init__(
         self.classifier = classifier
 
     def fit(self, X, Y, W=None):
-        r"""Fit the model with X and Y. Depending on the dimensions of X, calls either
-        `_fit_feature_space` or `_fit_sample_space`
+        r"""Fit the model with X and Y. Depending on the dimensions of X,
+        calls either `_fit_feature_space` or `_fit_sample_space`.
 
         Parameters
         ----------
         X : numpy.ndarray, shape (n_samples, n_features)
-            Training data, where n_samples is the number of samples and n_features is
-            the number of features.
+            Training data, where n_samples is the number of samples and
+            n_features is the number of features.
 
             It is suggested that :math:`\mathbf{X}` be centered by its column-
-            means and scaled. If features are related, the matrix should be scaled
-            to have unit variance, otherwise :math:`\mathbf{X}` should be
-            scaled so that each feature has a variance of 1 / n_features.
+            means and scaled. If features are related, the matrix should be
+            scaled to have unit variance, otherwise :math:`\mathbf{X}` should
+            be scaled so that each feature has a variance of 1 / n_features.
 
         Y : numpy.ndarray, shape (n_samples,)
             Training data, where n_samples is the number of samples.
 
         W : numpy.ndarray, shape (n_features, n_properties)
-            Classification weights, optional when classifier=`precomputed`. If not
-            passed, it is assumed that the weights will be taken from a linear classifier
-            fit between X and Y
+            Classification weights, optional when classifier=`precomputed`. If
+            not passed, it is assumed that the weights will be taken from a
+            linear classifier fit between X and Y.
         """
-
         X, Y = validate_data(self, X, Y, y_numeric=False)
         check_classification_targets(Y)
         self.classes_ = np.unique(Y)
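
For context, here is a minimal usage sketch of this `fit` path. The constructor arguments (`mixing`, `n_components`) are assumptions based on the sibling `PCovR` API, not confirmed by this diff:

```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

from skmatter.decomposition import PCovC  # assumed import path for the class in this diff

X, Y = load_iris(return_X_y=True)
X = (X - X.mean(axis=0)) / X.std(axis=0)  # center and scale, per the docstring's suggestion

# mixing/n_components values are illustrative only
pcovc = PCovC(mixing=0.5, n_components=2, classifier=LogisticRegression())
pcovc.fit(X, Y)  # W is inferred from a linear classifier fit between X and Y
```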
@@ -280,7 +279,8 @@ def fit(self, X, Y, W=None):
             # If precomputed, use default classifier to predict Y from T
             classifier = LogisticRegression()
             if W is None:
-                W = LogisticRegression().fit(X, Y).coef_.T.reshape(X.shape[1], -1)
+                W = LogisticRegression().fit(X, Y).coef_.T
+                W = W.reshape(X.shape[1], -1)
 
         Z = X @ W
 
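
Splitting the reshape onto its own line keeps the behavior identical: `coef_` has shape `(1, n_features)` for binary targets and `(n_classes, n_features)` for multiclass, so the transpose-plus-reshape always yields an `(n_features, n_properties)` matrix. A quick shape check:

```python
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, Y = make_classification(n_samples=100, n_features=5, n_informative=3,
                           n_classes=3, random_state=0)
clf = LogisticRegression().fit(X, Y)

print(clf.coef_.shape)  # (3, 5): one weight row per class
W = clf.coef_.T.reshape(X.shape[1], -1)
print(W.shape)          # (5, 3): (n_features, n_properties), as fit() expects
```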
@@ -289,8 +289,8 @@ def fit(self, X, Y, W=None):
         else:
             self._fit_sample_space(X, Y, Z, W)
 
-        # instead of using linear regression solution, refit with the classifier
-        # and steal weights to get pxz and ptz
+        # instead of using linear regression solution, refit with the
+        # classifier and steal weights to get pxz and ptz
 
         self.classifier_ = clone(classifier).fit(X @ self.pxt_, Y)
 
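
The `clone`-then-refit keeps the user-supplied estimator untouched while yielding classification weights in the latent space. A sketch of the pattern follows; the `ptz`/`pxz` derivation is an assumption read off the comment above, not code visible in this diff:

```python
from sklearn.base import clone

def steal_weights(classifier, X, Y, pxt):
    # Fit an unfitted copy on the projected data T = X @ pxt; `classifier` is unmodified.
    clf = clone(classifier).fit(X @ pxt, Y)
    ptz = clf.coef_.T  # assumed latent-space (T -> Z) weights
    pxz = pxt @ ptz    # assumed composite (X -> Z) projector
    return clf, ptz, pxz
```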
@@ -404,8 +404,8 @@ def predict(self, X=None, T=None):
     def transform(self, X=None):
         """Apply dimensionality reduction to X.
 
-        ``X`` is projected on the first principal components as determined by the
-        modified PCovC distances.
+        ``X`` is projected on the first principal components as determined by
+        the modified PCovC distances.
 
         Parameters
         ----------
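
Continuing the usage sketch from the `fit` hunk above (same assumed API):

```python
T = pcovc.transform(X)       # projection onto the PCovC components, shape (n_samples, n_components)
Y_pred = pcovc.predict(T=T)  # predict() accepts either X or T, per the signature in this hunk
```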