Skip to content

Commit b78bab2

Browse files
authored
Release v0.2.0
2 parents c5bf348 + 44b0aec commit b78bab2

File tree

113 files changed

+2340
-1163
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

113 files changed

+2340
-1163
lines changed

.dev_scripts/gather_models.py

Lines changed: 265 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,265 @@
1+
# Copyright (c) OpenMMLab. All rights reserved.
2+
import argparse
3+
import glob
4+
import json
5+
import os.path as osp
6+
import shutil
7+
import subprocess
8+
from collections import OrderedDict
9+
10+
import mmcv
11+
import torch
12+
import yaml
13+
14+
15+
def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
    """Serialize ``data`` to YAML while preserving ``OrderedDict`` key order.

    A throwaway subclass of ``Dumper`` is taught to emit ``OrderedDict``
    as a plain YAML mapping in insertion order, so the generated metafiles
    keep a stable, human-chosen field order.
    """

    class _OrderedDumper(Dumper):
        pass

    def _represent_ordered_dict(dumper, mapping):
        # Emit items in insertion order under the default mapping tag.
        return dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, mapping.items())

    _OrderedDumper.add_representer(OrderedDict, _represent_ordered_dict)
    return yaml.dump(data, stream, _OrderedDumper, **kwds)
26+
27+
28+
def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it.

    Removes the optimizer state and any EMA weights to shrink the file,
    saves the result, then renames it to ``<stem>-<sha8>.pth`` where
    ``sha8`` is the first 8 hex characters of the file's SHA-256 digest.

    Args:
        in_file (str): Path of the trained checkpoint to read.
        out_file (str): Target path; a trailing ``.pth`` suffix is replaced
            by ``-<sha8>.pth`` in the final name.

    Returns:
        str: Path of the renamed, published checkpoint.
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']

    # remove ema state_dict
    for key in list(checkpoint['state_dict']):
        if key.startswith('ema_'):
            checkpoint['state_dict'].pop(key)

    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    # NOTE(review): lexicographic version comparison is fragile (e.g. '1.10'
    # compares below '1.6'), but a miss only skips the legacy-format flag and
    # the checkpoint stays loadable either way.
    if torch.__version__ >= '1.6':
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)

    # Hash in-process instead of shelling out to `sha256sum`, which is
    # unportable (Linux coreutils only) and forks a process needlessly.
    import hashlib
    sha = hashlib.sha256()
    with open(out_file, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            sha.update(chunk)

    # `rstrip('.pth')` strips *characters* ('.', 'p', 't', 'h'), not the
    # suffix, and could eat trailing letters of the stem; cut it explicitly.
    stem = out_file[:-len('.pth')] if out_file.endswith('.pth') else out_file
    final_file = '{}-{}.pth'.format(stem, sha.hexdigest()[:8])
    # Rename synchronously: the previous `subprocess.Popen(['mv', ...])` did
    # not wait, so callers could race against the rename and miss the file.
    shutil.move(out_file, final_file)
    return final_file
49+
50+
51+
def get_final_epoch(config):
    """Return the configured number of training epochs for ``config``.

    Args:
        config (str): Config file path relative to ``./configs``.

    Returns:
        int: ``runner.max_epochs`` from the parsed config.
    """
    return mmcv.Config.fromfile('./configs/' + config).runner.max_epochs
54+
55+
56+
def get_best_epoch(exp_dir):
    """Locate the best checkpoint saved in ``exp_dir``.

    Picks the lexicographically last ``best_*.pth`` file; the naming scheme
    embeds the epoch as the final underscore-separated field, e.g.
    ``best_mAP_epoch_12.pth``.

    Args:
        exp_dir (str): Experiment directory to search.

    Returns:
        tuple[str, int]: Checkpoint file name and its epoch number.

    Raises:
        IndexError: If no ``best_*.pth`` file exists in ``exp_dir``.
    """
    candidates = sorted(glob.glob(osp.join(exp_dir, 'best_*.pth')))
    best_epoch_full_path = candidates[-1]
    # Use osp.split instead of splitting on '/' so the parsing also works
    # on platforms whose path separator differs (consistent with the rest
    # of this file, which uses osp.split throughout).
    best_epoch_model_path = osp.split(best_epoch_full_path)[-1]
    best_epoch = best_epoch_model_path.split('_')[-1].split('.')[0]
    return best_epoch_model_path, int(best_epoch)
62+
63+
64+
def get_real_epoch(config):
    """Return the effective number of training epochs for ``config``.

    ``RepeatDataset`` replays the underlying dataset ``times`` times per
    nominal epoch, so the real epoch count is scaled accordingly.
    """
    cfg = mmcv.Config.fromfile('./configs/' + config)
    multiplier = (
        cfg.data.train.times
        if cfg.data.train.type == 'RepeatDataset' else 1)
    return cfg.runner.max_epochs * multiplier
70+
71+
72+
def get_final_results(log_json_path, epoch, results_lut):
    """Collect memory usage and evaluation metrics for a given epoch.

    Scans a json-per-line training log, picking the GPU memory from the
    training entries of ``epoch`` and every metric listed in ``results_lut``
    from the matching validation entry.

    Args:
        log_json_path (str): Path to the ``*.log.json`` file.
        epoch (int): Epoch whose entries should be collected.
        results_lut (list[str]): Metric keys to look up in the val entry.

    Returns:
        dict | None: Collected results, or None when the log contains no
        matching entry (e.g. training has not reached ``epoch`` yet).
    """
    result_dict = dict()
    with open(log_json_path, 'r') as f:
        # Iterate lazily instead of materializing readlines().
        for line in f:
            log_line = json.loads(line)
            if 'mode' not in log_line:
                continue

            if log_line['mode'] == 'train' and log_line['epoch'] == epoch:
                result_dict['memory'] = log_line['memory']

            if log_line['mode'] == 'val' and log_line['epoch'] == epoch:
                result_dict.update({
                    key: log_line[key]
                    for key in results_lut if key in log_line
                })
    # Return None (not an empty dict) so the caller's `is None` check for
    # unfinished experiments actually triggers.
    return result_dict or None
89+
90+
91+
def get_dataset_name(config):
    """Map the config's ``dataset_type`` to a human-readable dataset name."""
    # If there are more dataset, add here.
    name_map = {
        'HRSCDataset': 'HRSC 2016',
        'SARDataset': 'SAR',
        'DOTADataset': 'DOTA v1.0',
    }
    dataset_type = mmcv.Config.fromfile('./configs/' + config).dataset_type
    return name_map[dataset_type]
97+
98+
99+
def convert_model_info_to_pwc(model_infos):
    """Convert gathered model infos to paperswithcode metafile entries.

    Args:
        model_infos (list[dict]): Entries produced by ``main`` with at least
            the keys ``config``, ``results`` and ``model_path``.

    Returns:
        dict[str, list[OrderedDict]]: Metafile records grouped by the
        config's folder name.
    """
    pwc_files = {}
    for model in model_infos:
        cfg_folder_name = osp.split(model['config'])[-2]
        pwc_model_info = OrderedDict()
        pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
        pwc_model_info['In Collection'] = 'Please fill in Collection name'
        pwc_model_info['Config'] = osp.join('configs', model['config'])

        # get metadata
        memory = round(model['results']['memory'] / 1024, 1)
        epochs = get_real_epoch(model['config'])
        meta_data = OrderedDict()
        meta_data['Training Memory (GB)'] = memory
        meta_data['Epochs'] = epochs
        pwc_model_info['Metadata'] = meta_data

        # get dataset name
        dataset_name = get_dataset_name(model['config'])

        # get results
        results = []
        # if there are more metrics, add here.
        if 'mAP' in model['results']:
            metric = round(model['results']['mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Object Detection',
                    Dataset=dataset_name,
                    Metrics={'box AP': metric}))
        pwc_model_info['Results'] = results

        # `rstrip('.py')` strips *characters* (any of '.', 'p', 'y'), which
        # corrupts config stems ending in those letters (e.g. '_copy.py'
        # becomes '_co'); remove the suffix explicitly instead.
        config_stem = model['config']
        if config_stem.endswith('.py'):
            config_stem = config_stem[:-len('.py')]
        link_string = 'https://download.openmmlab.com/mmrotate/v0.1.0/'
        link_string += '{}/{}'.format(config_stem,
                                      osp.split(model['model_path'])[-1])
        pwc_model_info['Weights'] = link_string
        # Group entries by config folder; setdefault replaces the if/else.
        pwc_files.setdefault(cfg_folder_name, []).append(pwc_model_info)
    return pwc_files
140+
141+
142+
def parse_args():
    """Build the CLI parser and parse the current command line.

    Returns:
        argparse.Namespace: Parsed arguments with ``root`` (str),
        ``out`` (str) and ``best`` (bool) attributes.
    """
    parser = argparse.ArgumentParser(description='Gather benchmarked models')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        'out', type=str, help='output path of gathered models to be stored')
    parser.add_argument(
        '--best',
        action='store_true',
        help='whether to gather the best model.')
    return parser.parse_args()
157+
158+
159+
def main():
    """Gather trained checkpoints, publish them and emit metafiles.

    Workflow:
      1. Find every config under ``./configs`` that has a matching
         experiment directory under ``args.root``.
      2. For each, pick the final (or ``--best``) checkpoint and parse its
         performance from the newest ``*.log.json``.
      3. Publish each checkpoint (stripped + sha-renamed) into ``args.out``
         together with its logs and config, then dump ``model_info.json``
         and per-folder paperswithcode ``*_metafile.yml`` files.
    """
    args = parse_args()
    models_root = args.root
    models_out = args.out
    mmcv.mkdir_or_exist(models_out)

    # find all models in the root directory to be gathered
    raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True))

    # filter configs that is not trained in the experiments dir
    used_configs = []
    for raw_config in raw_configs:
        if osp.exists(osp.join(models_root, raw_config)):
            used_configs.append(raw_config)
    print(f'Find {len(used_configs)} models to be gathered')

    # find final_ckpt and log file for trained each config
    # and parse the best performance
    model_infos = []
    for used_config in used_configs:
        exp_dir = osp.join(models_root, used_config)
        # check whether the exps is finished
        if args.best is True:
            final_model, final_epoch = get_best_epoch(exp_dir)
        else:
            final_epoch = get_final_epoch(used_config)
            final_model = 'epoch_{}.pth'.format(final_epoch)

        model_path = osp.join(exp_dir, final_model)
        # skip if the model is still training
        if not osp.exists(model_path):
            continue

        # get the latest logs (lexicographic max of timestamped file names)
        log_json_path = list(
            sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]
        log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1]
        cfg = mmcv.Config.fromfile('./configs/' + used_config)
        results_lut = cfg.evaluation.metric
        if not isinstance(results_lut, list):
            results_lut = [results_lut]
        model_performance = get_final_results(log_json_path, final_epoch,
                                              results_lut)

        # NOTE(review): get_final_results returns an (possibly empty) dict,
        # so this `is None` guard never fires as written — confirm intent.
        if model_performance is None:
            continue

        # the log file name stem doubles as the model's timestamp
        model_time = osp.split(log_txt_path)[-1].split('.')[0]
        model_infos.append(
            dict(
                config=used_config,
                results=model_performance,
                epochs=final_epoch,
                model_time=model_time,
                final_model=final_model,
                log_json_path=osp.split(log_json_path)[-1]))

    # publish model for each checkpoint
    publish_model_infos = []
    for model in model_infos:
        # NOTE(review): rstrip('.py') strips characters, not the suffix —
        # config stems ending in '.', 'p' or 'y' would be truncated.
        model_publish_dir = osp.join(models_out, model['config'].rstrip('.py'))
        mmcv.mkdir_or_exist(model_publish_dir)

        model_name = osp.split(model['config'])[-1].split('.')[0]

        model_name += '_' + model['model_time']
        publish_model_path = osp.join(model_publish_dir, model_name)
        trained_model_path = osp.join(models_root, model['config'],
                                      model['final_model'])

        # convert model (strips optimizer/EMA state, appends sha suffix)
        final_model_path = process_checkpoint(trained_model_path,
                                              publish_model_path)

        # copy log
        shutil.copy(
            osp.join(models_root, model['config'], model['log_json_path']),
            osp.join(model_publish_dir, f'{model_name}.log.json'))
        # NOTE(review): rstrip('.json') has the same character-set hazard.
        shutil.copy(
            osp.join(models_root, model['config'],
                     model['log_json_path'].rstrip('.json')),
            osp.join(model_publish_dir, f'{model_name}.log'))

        # copy config to guarantee reproducibility
        config_path = model['config']
        config_path = osp.join(
            'configs',
            config_path) if 'configs' not in config_path else config_path
        target_config_path = osp.split(config_path)[-1]
        shutil.copy(config_path, osp.join(model_publish_dir,
                                          target_config_path))

        model['model_path'] = final_model_path
        publish_model_infos.append(model)

    models = dict(models=publish_model_infos)
    print(f'Totally gathered {len(publish_model_infos)} models')
    mmcv.dump(models, osp.join(models_out, 'model_info.json'))

    # one paperswithcode metafile per config folder
    pwc_files = convert_model_info_to_pwc(publish_model_infos)
    for name in pwc_files:
        with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
            ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')
262+
263+
264+
if __name__ == '__main__':
    # Entry point when executed as a script.
    main()

.gitignore

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,9 @@ instance/
6666
.scrapy
6767

6868
# Sphinx documentation
69-
docs/_build/
69+
docs/en/_build/
70+
docs/zh_cn/_build/
71+
src
7072

7173
# PyBuilder
7274
target/

README.md

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -64,14 +64,10 @@ https://user-images.githubusercontent.com/10410257/154433305-416d129b-60c8-44c7-
6464

6565
## Changelog
6666

67-
**0.1.1** was released in 14/3/2022:
67+
**0.2.0** was released in 30/3/2022:
6868

69-
- Add [colab tutorial](demo/MMRotate_Tutorial.ipynb) for beginners (#66)
70-
- Support [huge image inference](deom/huge_image_demo.py) (#34)
71-
- Support HRSC Dataset (#96)
72-
- Support mixed precision training (#72)
73-
- Add inference speed statistics [tool](tools/analysis_tools/benchmark.py) (#86)
74-
- Add confusion matrix analysis [tool](tools/analysis_tools/confusion_matrix.py) (#93)
69+
- Support Circular Smooth Label (CSL, ECCV'20) (#153)
70+
- Add [browse_dataset](tools/misc/browse_dataset.py) tool (#98)
7571

7672
Please refer to [changelog.md](docs/en/changelog.md) for details and release history.
7773

@@ -104,6 +100,7 @@ A summary can be found in the [Model Zoo](docs/en/model_zoo.md) page.
104100
* [x] [Rotated RepPoints-OBB](configs/rotated_reppoints/README.md) (ICCV'2019)
105101
* [x] [RoI Transformer](configs/roi_trans/README.md) (CVPR'2019)
106102
* [x] [Gliding Vertex](configs/gliding_vertex/README.md) (TPAMI'2020)
103+
* [x] [CSL](configs/csl/README.md) (ECCV'2020)
107104
* [x] [R<sup>3</sup>Det](configs/r3det/README.md) (AAAI'2021)
108105
* [x] [S<sup>2</sup>A-Net](configs/s2anet/README.md) (TGRS'2021)
109106
* [x] [ReDet](configs/redet/README.md) (CVPR'2021)

README_zh-CN.md

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -61,14 +61,10 @@ https://user-images.githubusercontent.com/10410257/154433305-416d129b-60c8-44c7-
6161

6262
## 更新日志
6363

64-
最新的 **0.1.1** 版本已经在 2022.03.14 发布:
64+
最新的 **0.2.0** 版本已经在 2022.03.30 发布:
6565

66-
- 为初学者添加了 [Colab 教程](demo/MMRotate_Tutorial.ipynb)
67-
- 支持了[大图推理](deom/huge_image_demo.py)
68-
- 支持了 HRSC 遥感数据集
69-
- 支持了混合精度训练
70-
- 添加了推理速度[统计工具](tools/analysis_tools/benchmark.py)
71-
- 添加了混淆矩阵[分析工具](tools/analysis_tools/confusion_matrix.py).
66+
- 支持了 Circular Smooth Label (CSL, ECCV'20) 模型 (#153)
67+
- 增加了[数据集浏览工具](tools/misc/browse_dataset.py) (#98)
7268

7369
如果想了解更多版本更新细节和历史信息,请阅读[更新日志](docs/en/changelog.md)
7470

@@ -100,6 +96,7 @@ MMRotate 也提供了其他更详细的教程:
10096
* [x] [Rotated RepPoints-OBB](configs/rotated_reppoints/README.md) (ICCV'2019)
10197
* [x] [RoI Transformer](configs/roi_trans/README.md) (CVPR'2019)
10298
* [x] [Gliding Vertex](configs/gliding_vertex/README.md) (TPAMI'2020)
99+
* [x] [CSL](configs/csl/README.md) (ECCV'2020)
103100
* [x] [R<sup>3</sup>Det](configs/r3det/README.md) (AAAI'2021)
104101
* [x] [S<sup>2</sup>A-Net](configs/s2anet/README.md) (TGRS'2021)
105102
* [x] [ReDet](configs/redet/README.md) (CVPR'2021)

configs/csl/README.md

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
# CSL
2+
> [Arbitrary-Oriented Object Detection with Circular Smooth Label](https://link.springer.com/chapter/10.1007/978-3-030-58598-3_40)
3+
4+
<!-- [ALGORITHM] -->
5+
## Abstract
6+
7+
<div align=center>
8+
<img src="https://raw.githubusercontent.com/zytx121/image-host/main/imgs/csl.jpg" width="800"/>
9+
</div>
10+
11+
Arbitrary-oriented object detection has recently attracted increasing attention in vision for their importance
12+
in aerial imagery, scene text, and face etc. In this paper, we show that existing regression-based rotation detectors
13+
suffer the problem of discontinuous boundaries, which is directly caused by angular periodicity or corner ordering.
14+
By a careful study, we find the root cause is that the ideal predictions are beyond the defined range. We design a
15+
new rotation detection baseline, to address the boundary problem by transforming angular prediction from a regression
16+
problem to a classification task with little accuracy loss, whereby high-precision angle classification is devised in
17+
contrast to previous works using coarse-granularity in rotation detection. We also propose a circular smooth label (CSL)
18+
technique to handle the periodicity of the angle and increase the error tolerance to adjacent angles. We further
19+
introduce four window functions in CSL and explore the effect of different window radius sizes on detection performance.
20+
Extensive experiments and visual analysis on two large-scale public datasets for aerial images i.e. DOTA, HRSC2016,
21+
as well as scene text dataset ICDAR2015 and MLT, show the effectiveness of our approach.
22+
23+
## Results and models
24+
25+
DOTA1.0
26+
27+
| Backbone | mAP | Angle | Window func. | Omega | lr schd | Mem (GB) | Inf Time (fps) | Aug | Batch Size | Configs | Download |
28+
|:------------:|:----------:|:-----------:|:-----------:|:-----------:|:---------:|:---------:|:---------:|:---------:|:---------:|:---------:|:-------------:|
29+
| ResNet50 (1024,1024,200) | 68.42 | le90 | - | - | 1x | 3.38 | 17.8 | - | 2 | [rotated_retinanet_obb_r50_fpn_1x_dota_le90](./rotated_retinanet_obb_r50_fpn_1x_dota_le90.py) | [model](https://download.openmmlab.com/mmrotate/v0.1.0/rotated_retinanet/rotated_retinanet_obb_r50_fpn_1x_dota_le90/rotated_retinanet_obb_r50_fpn_1x_dota_le90-c0097bc4.pth) &#124; [log](https://download.openmmlab.com/mmrotate/v0.1.0/rotated_retinanet/rotated_retinanet_obb_r50_fpn_1x_dota_le90/rotated_retinanet_obb_r50_fpn_1x_dota_le90_20220128_130740.log.json)
30+
| ResNet50 (1024,1024,200) | 68.79 | le90 | - | - | 1x | 2.36 | 25.9 | - | 2 | [rotated_retinanet_obb_r50_fpn_fp16_1x_dota_le90](./rotated_retinanet_obb_r50_fpn_fp16_1x_dota_le90.py) | [model](https://download.openmmlab.com/mmrotate/v0.1.0/rotated_retinanet/rotated_retinanet_obb_r50_fpn_fp16_1x_dota_le90/rotated_retinanet_obb_r50_fpn_fp16_1x_dota_le90-01de71b5.pth) &#124; [log](https://download.openmmlab.com/mmrotate/v0.1.0/rotated_retinanet/rotated_retinanet_obb_r50_fpn_fp16_1x_dota_le90/rotated_retinanet_obb_r50_fpn_fp16_1x_dota_le90_20220303_183714.log.json)
31+
| ResNet50 (1024,1024,200) | 69.51 | le90 | Gaussian | 4 | 1x | 2.60 | 24.0 | - | 2 | [rotated_retinanet_obb_csl_gaussian_r50_fpn_fp16_1x_dota_le90](./rotated_retinanet_obb_csl_gaussian_r50_fpn_fp16_1x_dota_le90.py) | [model](https://download.openmmlab.com/mmrotate/v0.1.0/csl/rotated_retinanet_obb_csl_gaussian_r50_fpn_fp16_1x_dota_le90/rotated_retinanet_obb_csl_gaussian_r50_fpn_fp16_1x_dota_le90-b4271aed.pth) &#124; [log](https://download.openmmlab.com/mmrotate/v0.1.0/csl/rotated_retinanet_obb_csl_gaussian_r50_fpn_fp16_1x_dota_le90/rotated_retinanet_obb_csl_gaussian_r50_fpn_fp16_1x_dota_le90_20220321_010033.log.json)
32+
33+
34+
## Citation
35+
```
36+
@inproceedings{yang2020arbitrary,
37+
title={Arbitrary-Oriented Object Detection with Circular Smooth Label},
38+
author={Yang, Xue and Yan, Junchi},
39+
booktitle={European Conference on Computer Vision},
40+
pages={677--694},
41+
year={2020}
42+
}
43+
```

0 commit comments

Comments
 (0)