@@ -39,7 +39,7 @@ class KernelPCovR(_BasePCA, LinearModel):
39
39
Parameters
40
40
----------
41
41
mixing : float, default=0.5
42
- mixing parameter, as described in PCovR as :math:`{\\ alpha}`
42
+ mixing parameter, as described in PCovR as :math:`{\alpha}`
43
43
n_components : int, float or str, default=None
44
44
Number of components to keep.
45
45
if n_components is not set all components are kept::
@@ -64,7 +64,7 @@ class KernelPCovR(_BasePCA, LinearModel):
64
64
run randomized SVD by the method of Halko et al.
65
65
regressor : {instance of `sklearn.kernel_ridge.KernelRidge`, `precomputed`, None}, default=None
66
66
The regressor to use for computing
67
- the property predictions :math:`\\ hat{\ \mathbf{Y}}`.
67
+ the property predictions :math:`\hat{\mathbf{Y}}`.
68
68
A pre-fitted regressor may be provided.
69
69
If the regressor is not `None`, its kernel parameters
70
70
(`kernel`, `gamma`, `degree`, `coef0`, and `kernel_params`)
@@ -112,17 +112,17 @@ class KernelPCovR(_BasePCA, LinearModel):
112
112
pseudo-inverse of the latent-space projection, which
113
113
can be used to construct projectors from latent-space
114
114
pkt_: numpy.ndarray of size :math:`({n_{samples}, n_{components}})`
115
- the projector, or weights, from the input kernel :math:`\\ mathbf{K}`
116
- to the latent-space projection :math:`\\ mathbf{T}`
115
+ the projector, or weights, from the input kernel :math:`\mathbf{K}`
116
+ to the latent-space projection :math:`\mathbf{T}`
117
117
pky_: numpy.ndarray of size :math:`({n_{samples}, n_{properties}})`
118
- the projector, or weights, from the input kernel :math:`\\ mathbf{K}`
119
- to the properties :math:`\\ mathbf{Y}`
118
+ the projector, or weights, from the input kernel :math:`\mathbf{K}`
119
+ to the properties :math:`\mathbf{Y}`
120
120
pty_: numpy.ndarray of size :math:`({n_{components}, n_{properties}})`
121
121
the projector, or weights, from the latent-space projection
122
- :math:`\\ mathbf{T}` to the properties :math:`\ \mathbf{Y}`
122
+ :math:`\mathbf{T}` to the properties :math:`\mathbf{Y}`
123
123
ptx_: numpy.ndarray of size :math:`({n_{components}, n_{features}})`
124
124
the projector, or weights, from the latent-space projection
125
- :math:`\\ mathbf{T}` to the feature matrix :math:`\ \mathbf{X}`
125
+ :math:`\mathbf{T}` to the feature matrix :math:`\mathbf{X}`
126
126
X_fit_: numpy.ndarray of shape (n_samples, n_features)
127
127
The data used to fit the model. This attribute is used to build kernels
128
128
from new data.
@@ -160,7 +160,7 @@ class KernelPCovR(_BasePCA, LinearModel):
160
160
[ 1.11923584, -1.04798016],
161
161
[-1.5635827 , 1.11078662]])
162
162
>>> round(kpcovr.score(X, Y), 5)
163
- -0.52039
163
+ np.float64(-0.52039)
164
164
""" # NoQa: E501
165
165
166
166
def __init__ (
@@ -246,15 +246,15 @@ def fit(self, X, Y, W=None):
246
246
247
247
It is suggested that :math:`\mathbf{X}` be centered by its column-
248
248
means and scaled. If features are related, the matrix should be scaled
249
- to have unit variance, otherwise :math:`\\ mathbf{X}` should be
249
+ to have unit variance, otherwise :math:`\mathbf{X}` should be
250
250
scaled so that each feature has a variance of 1 / n_features.
251
251
Y : numpy.ndarray, shape (n_samples, n_properties)
252
252
Training data, where n_samples is the number of samples and
253
253
n_properties is the number of properties
254
254
255
- It is suggested that :math:`\\ mathbf{X}` be centered by its column-
255
+ It is suggested that :math:`\mathbf{X}` be centered by its column-
256
256
means and scaled. If features are related, the matrix should be scaled
257
- to have unit variance, otherwise :math:`\\ mathbf{Y}` should be
257
+ to have unit variance, otherwise :math:`\mathbf{Y}` should be
258
258
scaled so that each feature has a variance of 1 / n_features.
259
259
W : numpy.ndarray, shape (n_samples, n_properties)
260
260
Regression weights, optional when regressor=`precomputed`. If not
@@ -420,7 +420,7 @@ def inverse_transform(self, T):
420
420
r"""Transform input data back to its original space.
421
421
422
422
.. math::
423
- \mathbf{\\ hat{X}} = \mathbf{T} \mathbf{P}_{TX}
423
+ \mathbf{\hat{X}} = \mathbf{T} \mathbf{P}_{TX}
424
424
= \mathbf{K} \mathbf{P}_{KT} \mathbf{P}_{TX}
425
425
426
426
Similar to KPCA, the original features are not always recoverable,
0 commit comments