
Commit 77a8fb3

Update docs
1 parent 5ed941f commit 77a8fb3

33 files changed, with 1,633 additions and 42 deletions

adapt/feature_based/_cdan.py

Lines changed: 3 additions & 2 deletions

@@ -34,8 +34,9 @@ def _get_default_classifier(name=None, state=None):
 @make_insert_doc(["encoder"])
 class CDAN(BaseAdaptDeep):
     """
-    CDAN (Conditional Adversarial Domain Adaptation) is an
-    unsupervised domain adaptation method on the model of the
+    CDAN: Conditional Adversarial Domain Adaptation
+
+    CDAN is an unsupervised domain adaptation method on the model of the
     :ref:`DANN <adapt.feature_based.DANN>`. In CDAN the discriminator
     is conditioned on the prediction of the task network for
     source and target data. This should, in theory, focus the
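For readers of this docstring: the conditioning mentioned above is typically realized as the multilinear map between encoder features and task predictions, f(x) ⊗ g(x). A minimal NumPy sketch of that map (shapes and names are illustrative, not the library's internals):

import numpy as np

# Illustrative shapes: batch of 32 samples, 16 encoder features, 3 classes.
rng = np.random.default_rng(0)
features = rng.normal(size=(32, 16))           # encoder output f(x)
predictions = rng.dirichlet(np.ones(3), 32)    # task softmax g(x)

# Conditioning: feed the discriminator the flattened outer product
# f(x) (x) g(x) instead of the features alone.
disc_input = np.einsum("bi,bj->bij", features, predictions).reshape(32, -1)
print(disc_input.shape)  # (32, 48)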

adapt/feature_based/_mcd.py

Lines changed: 4 additions & 2 deletions

@@ -14,8 +14,10 @@
 @make_insert_doc(["encoder", "task"])
 class MCD(BaseAdaptDeep):
     """
-    MCD: Maximum Classifier Discrepancy is a feature-based domain adaptation
-    method originally introduced for unsupervised classification DA.
+    MCD: Maximum Classifier Discrepancy
+
+    MCD is a feature-based domain adaptation method originally introduced
+    for unsupervised classification DA.
 
     The goal of MCD is to find a new representation of the input features which
     minimizes the discrepancy between the source and target domains
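For context, the discrepancy MCD works with is the L1 distance between the outputs of two task classifiers on target data; a hedged standalone sketch of that quantity (not the library's code):

import numpy as np

def classifier_discrepancy(pred_1, pred_2):
    # L1 discrepancy between two classifiers' target predictions:
    # d(p1, p2) = E[ |p1 - p2| ], as in the MCD paper.
    return np.mean(np.abs(pred_1 - pred_2))

# In the adversarial game, the two task heads are trained to maximize
# this value on target data while the encoder is trained to minimize it.
rng = np.random.default_rng(0)
p1, p2 = rng.dirichlet(np.ones(3), 100), rng.dirichlet(np.ones(3), 100)
print(classifier_discrepancy(p1, p2))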

adapt/feature_based/_mdd.py

Lines changed: 4 additions & 2 deletions

@@ -14,8 +14,10 @@
 @make_insert_doc(["encoder", "task"])
 class MDD(BaseAdaptDeep):
     """
-    MDD: Margin Disparity Discrepancy is a feature-based domain adaptation
-    method originally introduced for unsupervised classification DA.
+    MDD: Margin Disparity Discrepancy
+
+    MDD is a feature-based domain adaptation method originally introduced
+    for unsupervised classification DA.
 
     The goal of MDD is to find a new representation of the input features which
     minimizes the disparity discrepancy between the source and target domains
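One common reading of the disparity term: an auxiliary adversarial head is scored against the main head's pseudo-labels, with the cross-entropy scaled by a margin factor. A simplified sketch of the source-side term only (names and the default margin are illustrative assumptions):

import numpy as np

def source_disparity(pred_main, pred_adv, gamma=4.):
    # Cross-entropy of the adversarial head against the main head's
    # pseudo-labels, scaled by the margin factor gamma (assumed form).
    pseudo = pred_main.argmax(axis=1)
    idx = np.arange(len(pseudo))
    return gamma * -np.log(pred_adv[idx, pseudo] + 1e-7).mean()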

adapt/feature_based/_tca.py

Lines changed: 1 addition & 1 deletion

@@ -50,7 +50,7 @@ class TCA(BaseAdaptEstimator):
 
     References
     ----------
-    .. [1] `[1] <https://www.cse.ust.hk/~qyang/Docs/2009/TCA.pdf>` S. J. Pan, \
+    .. [1] `[1] <https://www.cse.ust.hk/~qyang/Docs/2009/TCA.pdf>`_ S. J. Pan, \
     I. W. Tsang, J. T. Kwok and Q. Yang. "Domain Adaptation via Transfer Component \
     Analysis". In IEEE transactions on neural networks 2010
     """

adapt/feature_based/_wdgrl.py

Lines changed: 3 additions & 2 deletions

@@ -13,8 +13,9 @@
 @make_insert_doc(["encoder", "task", "discriminator"])
 class WDGRL(BaseAdaptDeep):
     """
-    WDGRL (Wasserstein Distance Guided Representation Learning) is an
-    unsupervised domain adaptation method on the model of the
+    WDGRL: Wasserstein Distance Guided Representation Learning
+
+    WDGRL is an unsupervised domain adaptation method on the model of the
     :ref:`DANN <adapt.feature_based.DANN>`. In WDGRL the discriminator
     is used to approximate the Wasserstein distance between the
     source and target encoded distributions in the spirit of WGAN.
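For reference, the critic-based distance mentioned above is the usual Kantorovich-Rubinstein estimate; a minimal sketch (illustrative names, not the library's internals):

import numpy as np

def wasserstein_estimate(critic_src, critic_tgt):
    # W ~ E[critic(z_s)] - E[critic(z_t)], valid when the critic is kept
    # (approximately) 1-Lipschitz, e.g. via a gradient penalty.
    return np.mean(critic_src) - np.mean(critic_tgt)

# The critic is trained to maximize this estimate; the encoder is trained
# to minimize it, pulling the two encoded distributions together.
rng = np.random.default_rng(0)
print(wasserstein_estimate(rng.normal(1., 1., 64), rng.normal(0., 1., 64)))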

adapt/instance_based/_balancedweighting.py

Lines changed: 2 additions & 2 deletions

@@ -10,7 +10,7 @@ class BalancedWeighting(BaseAdaptEstimator):
     """
     BW : Balanced Weighting
 
-    Fit the estimator on source and target labeled data
+    Fit the estimator :math:`h` on source and target labeled data
     according to the modified loss:
 
     .. math::
@@ -26,7 +26,7 @@ class BalancedWeighting(BaseAdaptEstimator):
 
     Parameters
     ----------
-    gamma : float
+    gamma : float (default=0.5)
        ratio between 0 and 1 corresponding to the importance
        given to the target labeled data. When `ratio=1`, the
        estimator is only fitted on target data. `ratio=0.5`
adapt/instance_based/_iwc.py

Lines changed: 1 addition & 1 deletion

@@ -28,7 +28,7 @@ class IWC(BaseAdaptEstimator):
 
     .. math::
 
-        w(x) = \frac{1}{P(x \in Source)} - 1
+        w(x) = \\frac{1}{P(x \in Source)} - 1
 
     Parameters
     ----------
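The weight formula above can be read off a probabilistic domain classifier; a minimal sketch with synthetic data and illustrative names:

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
Xs = rng.normal(0., 1., (300, 4))   # source samples
Xt = rng.normal(.5, 1., (300, 4))   # target samples

# Domain classifier: label 1 for source, 0 for target.
X = np.vstack([Xs, Xt])
d = np.concatenate([np.ones(len(Xs)), np.zeros(len(Xt))])
clf = LogisticRegression().fit(X, d)

# w(x) = 1 / P(x in Source) - 1, evaluated on the source samples.
p_src = clf.predict_proba(Xs)[:, 1]   # column 1 <-> class label 1
weights = 1. / p_src - 1.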

adapt/instance_based/_iwn.py

Lines changed: 5 additions & 4 deletions

@@ -47,11 +47,12 @@ def weighted_mmd(Xs, Xt, weights, gamma=1.):
     return mxx + myy -2*mxy
 
 
-@make_insert_doc(["estimator", "weighter"], supervised=True)
+@make_insert_doc(["estimator", "weighter"])
 class IWN(BaseAdaptDeep):
     """
-    IWN : Importance Weighting Network is an instance-based method for
-    unsupervised domain adaptation<.
+    IWN : Importance Weighting Network
+
+    IWN is an instance-based method for unsupervised domain adaptation.
 
     The goal of IWN is to reweight the source instances in order to
     minimize the Maximum Mean Discrepancy (MMD) between the reweighted
@@ -107,7 +108,7 @@ class IWN(BaseAdaptDeep):
 
     References
     ----------
-    .. [1] `[1] <https://arxiv.org/pdf/2209.04215.pdff>`_ A. de Mathelin, F. Deheeger, \
+    .. [1] `[1] <https://arxiv.org/pdf/2209.04215.pdf>`_ A. de Mathelin, F. Deheeger, \
     M. Mougeot and N. Vayatis "Fast and Accurate Importance Weighting for \
     Correcting Sample Bias" In ECML-PKDD, 2022.
     """

adapt/instance_based/_rulsif.py

Lines changed: 10 additions & 5 deletions

@@ -15,6 +15,8 @@
 
 EPS = np.finfo(float).eps
 
+
+@make_insert_doc()
 class RULSIF(BaseAdaptEstimator):
     """
     RULSIF: Relative Unconstrained Least-Squares Importance Fitting
@@ -45,15 +47,18 @@ class RULSIF(BaseAdaptEstimator):
 
     .. math::
 
-        \max_{\theta } \frac{1}{2} \\theta^T H \\theta - h^T \\theta +
-        \frac{\\lambda}{2} \\theta^T \\theta
+        \max_{\\theta } \\frac{1}{2} \\theta^T H \\theta - h^T \\theta +
+        \\frac{\\lambda}{2} \\theta^T \\theta
 
     where :
 
     .. math::
 
-        H_{ll'} = \\frac{\\alpha}{n_s} \sum_{x_i \\in X_S} K(x_i, x_l) K(x_i, x_l') + \\frac{1-\\alpha}{n_t} \\sum_{x_i \\in X_T} K(x_i, x_l) K(x_i, x_l') \\
-        h_{l}= \\frac{1}{n_T} \sum_{x_i \\in X_T} K(x_i, x_l)
+        H_{kl} = \\frac{\\alpha}{n_s} \sum_{x_i \\in X_S} K(x_i, x_k) K(x_i, x_l) + \\frac{1-\\alpha}{n_T} \\sum_{x_i \\in X_T} K(x_i, x_k) K(x_i, x_l)
+
+    .. math::
+
+        h_{k} = \\frac{1}{n_T} \sum_{x_i \\in X_T} K(x_i, x_k)
 
     The above OP is solved by the closed form expression
 
@@ -68,7 +73,7 @@ class RULSIF(BaseAdaptEstimator):
 
     .. math::
 
-        J = -\\frac{\\alpha}{2|X_S|} \\sum_{x \\in X_S} w(x)^2 - \frac{1-\\alpha}{2|X_T|} \\sum_{x \in X_T} w(x)^2
+        J = -\\frac{\\alpha}{2|X_S|} \\sum_{x \\in X_S} w(x)^2 - \\frac{1-\\alpha}{2|X_T|} \\sum_{x \in X_T} w(x)^2
 
     Finally, an estimator is fitted using the reweighted labeled source instances.
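The two formulas above translate almost line for line into NumPy; a minimal sketch assuming an RBF kernel and the source samples as centers (the library may subsample the centers and tune the kernel parameters):

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.default_rng(0)
Xs = rng.normal(0., 1., (200, 3))   # source samples, also the centers x_l
Xt = rng.normal(.3, 1., (200, 3))   # target samples
alpha, lam = 0.1, 1.

Ks = rbf_kernel(Xs, Xs)   # K(x_i, x_l) for x_i in X_S
Kt = rbf_kernel(Xt, Xs)   # K(x_i, x_l) for x_i in X_T
H = alpha * Ks.T @ Ks / len(Xs) + (1 - alpha) * Kt.T @ Kt / len(Xt)
h = Kt.mean(axis=0)

# Closed form: theta = (H + lambda I)^{-1} h, then w(x) = sum_l theta_l K(x, x_l).
theta = np.linalg.solve(H + lam * np.eye(len(Xs)), h)
weights = Ks @ theta      # relative density ratio evaluated at X_S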

adapt/instance_based/_ulsif.py

Lines changed: 8 additions & 5 deletions

@@ -44,22 +44,25 @@ class ULSIF(BaseAdaptEstimator):
 
     .. math::
 
-        \max_{\theta } \frac{1}{2} \\theta^T H \\theta - h^T \\theta +
-        \frac{\\lambda}{2} \\theta^T \\theta
+        \max_{\\theta } \\frac{1}{2} \\theta^T H \\theta - h^T \\theta +
+        \\frac{\\lambda}{2} \\theta^T \\theta
 
     where :
 
     .. math::
 
-        H_{ll'} = \\frac{1}{n_s} \sum_{x_i \\in X_S} K(x_i, x_l) K(x_i, x_l') \\
-        h_{l}= \\frac{1}{n_T} \sum_{x_i \\in X_T} K(x_i, x_l)
+        H_{kl} = \\frac{1}{n_s} \sum_{x_i \\in X_S} K(x_i, x_k) K(x_i, x_l)
+
+    .. math::
+
+        h_{k} = \\frac{1}{n_T} \sum_{x_i \\in X_T} K(x_i, x_k)
 
 
     The above OP is solved by the closed form expression
 
     .. math::
 
-        \hat{\\theta} = (H+\\lambda I_{n_s})^{(-1)} h
+        \\hat{\\theta} = (H+\\lambda I_{n_s})^{(-1)} h
 
     Furthermore the method admits a leave one out cross validation score that has a closed form expression
     and can be used to select the appropriate parameters of the kernel function :math:`K` (typically, the parameter
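The H and h above are the alpha = 1 specialization of the RULSIF construction sketched earlier, so the closed form reduces to a ridge-style solve; a minimal sketch assuming precomputed kernel matrices Ks (source vs. centers) and Kt (target vs. centers):

import numpy as np

def ulsif_solve(Ks, Kt, lam=1.):
    # \hat{theta} = (H + lambda I)^{-1} h, with H = Ks^T Ks / n_s
    # and h the mean kernel evaluation over the target samples.
    H = Ks.T @ Ks / Ks.shape[0]
    h = Kt.mean(axis=0)
    return np.linalg.solve(H + lam * np.eye(H.shape[1]), h)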
