docs/dalib/benchmarks/re_identification.rst (1 addition, 1 deletion)
@@ -16,7 +16,7 @@ We adopt cross dataset setting (another one is cross camera setting). The model
For a fair comparison, our model is trained with standard cross entropy loss and triplet loss. We adopt modified resnet architecture from `Mutual Mean-Teaching: Pseudo Label Refinery for Unsupervised Domain Adaptation on Person Re-identification (ICLR 2020) <https://arxiv.org/pdf/2001.01526.pdf>`_.
-As we are given unlabelled samples from target domain, we can utilize clustering algorithms to produce pseudo labels on target domain and then use them as supervision signals to perform self-training. This simple method turns out to be a strong baseline. We use ``Baseline_Cluster`` to represent this baseline in our results.
+As we are given unlabeled samples from target domain, we can utilize clustering algorithms to produce pseudo labels on target domain and then use them as supervision signals to perform self-training. This simple method turns out to be a strong baseline. We use ``Baseline_Cluster`` to represent this baseline in our results.
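The changed paragraph describes clustering-based pseudo-labeling followed by self-training. As a rough illustration of that idea (not the library's actual `Baseline_Cluster` implementation), the sketch below clusters target-domain features with DBSCAN, treats cluster ids as pseudo identities, and fine-tunes with the same cross entropy + triplet objective mentioned above. The function names, the DBSCAN parameters, and the `backbone`/`classifier` modules are illustrative assumptions.

```python
import torch
import torch.nn.functional as F
from sklearn.cluster import DBSCAN


def make_pseudo_labels(features: torch.Tensor, eps: float = 0.6, min_samples: int = 4):
    """Cluster L2-normalized target features; cluster ids become pseudo identities.

    Samples that DBSCAN marks as noise (label -1) are dropped for this round of
    self-training. eps / min_samples are illustrative values, not the settings
    used for the reported results.
    """
    feats = F.normalize(features, dim=1).cpu().numpy()
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(feats)
    keep = labels != -1
    return torch.as_tensor(labels[keep], dtype=torch.long), torch.as_tensor(keep)


def batch_hard_triplet(embeddings: torch.Tensor, labels: torch.Tensor, margin: float = 0.3):
    """Batch-hard triplet loss: hardest positive and hardest negative per anchor."""
    dist = torch.cdist(embeddings, embeddings)                    # (B, B) pairwise distances
    same = labels.unsqueeze(0) == labels.unsqueeze(1)             # (B, B) same-identity mask
    hardest_pos = (dist * same.float()).max(dim=1).values
    hardest_neg = dist.masked_fill(same, float("inf")).min(dim=1).values
    return F.relu(hardest_pos - hardest_neg + margin).mean()


def self_training_step(backbone, classifier, images, pseudo_labels, optimizer):
    """One optimization step on pseudo-labeled target images, reusing the
    cross entropy + triplet objective from source-domain training."""
    embeddings = backbone(images)      # (B, D) re-ID features
    logits = classifier(embeddings)    # (B, num_clusters) pseudo-identity logits
    loss = F.cross_entropy(logits, pseudo_labels) \
        + batch_hard_triplet(embeddings, pseudo_labels)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```

In practice the feature extraction and re-clustering would be repeated every few epochs, since the pseudo labels improve as the model adapts to the target domain.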
- [Learning Without Forgetting (LWF, ECCV 2016)](https://arxiv.org/abs/1606.09282)
- [Bi-tuning of Pre-trained Representations (Bi-Tuning)](https://arxiv.org/abs/2011.06182?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+arxiv%2FQSXk+%28ExcitingAds%21+cs+updates+on+arXiv.org%29)
## Experiment and Results
@@ -77,52 +78,52 @@ If you use these methods in your research, please consider citing.
```
@inproceedings{LWF,
    author={Zhizhong Li and Derek Hoiem},
    title={Learning without Forgetting},
    booktitle={ECCV},
    year={2016},
}

@inproceedings{L2SP,
    title={Explicit inductive bias for transfer learning with convolutional networks},
    author={Xuhong, LI and Grandvalet, Yves and Davoine, Franck},
    booktitle={ICML},
    year={2018},
}

@inproceedings{BSS,
    title={Catastrophic forgetting meets negative transfer: Batch spectral shrinkage for safe transfer learning},
    author={Chen, Xinyang and Wang, Sinan and Fu, Bo and Long, Mingsheng and Wang, Jianmin},
    booktitle={NeurIPS},
    year={2019}
}

@inproceedings{DELTA,
    title={Delta: Deep learning transfer using feature map with attention for convolutional networks},
    author={Li, Xingjian and Xiong, Haoyi and Wang, Hanchao and Rao, Yuxuan and Liu, Liping and Chen, Zeyu and Huan, Jun},
    booktitle={ICLR},
    year={2019}
}

@inproceedings{StocNorm,
    title={Stochastic Normalization},
    author={Kou, Zhi and You, Kaichao and Long, Mingsheng and Wang, Jianmin},
    booktitle={NeurIPS},
    year={2020}
}

@inproceedings{CoTuning,
    title={Co-Tuning for Transfer Learning},
    author={You, Kaichao and Kou, Zhi and Long, Mingsheng and Wang, Jianmin},
    booktitle={NeurIPS},
    year={2020}
}

@article{BiTuning,
    title={Bi-tuning of Pre-trained Representations},
    author={Zhong, Jincheng and Wang, Ximei and Kou, Zhi and Wang, Jianmin and Long, Mingsheng},
    journal={arXiv preprint arXiv:2011.06182},
    year={2020}
}
```
0 commit comments