# Copyright (c) OpenMMLab. All rights reserved.
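"""Gather trained checkpoints, strip them for release and generate
Papers with Code metafiles.

A hypothetical invocation (the script and directory names are only
illustrative):

    python gather_models.py work_dirs/ published_models/ --best
"""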
import argparse
import glob
import json
import os.path as osp
import shutil
import subprocess
from collections import OrderedDict

import mmcv
import torch
import yaml


def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
    """yaml.dump variant that keeps the key order of OrderedDict mappings."""

    class OrderedDumper(Dumper):
        pass

    def _dict_representer(dumper, data):
        return dumper.represent_mapping(
            yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())

    OrderedDumper.add_representer(OrderedDict, _dict_representer)
    return yaml.dump(data, stream, OrderedDumper, **kwds)


def process_checkpoint(in_file, out_file):
    """Remove training-only states from a checkpoint and save it with a
    sha256 suffix in its name."""
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']

    # remove ema state_dict
    for key in list(checkpoint['state_dict']):
        if key.startswith('ema_'):
            checkpoint['state_dict'].pop(key)

    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    # compare versions numerically; a plain string comparison would sort
    # '1.10' before '1.6'
    if mmcv.digit_version(torch.__version__) >= mmcv.digit_version('1.6.0'):
        # keep the legacy format so older torch versions can load the file
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # str.rstrip('.pth') strips characters, not the suffix, so drop the
    # extension explicitly before appending the hash
    out_file_name = out_file[:-4] if out_file.endswith('.pth') else out_file
    final_file = out_file_name + '-{}.pth'.format(sha[:8])
    # shutil.move waits for the rename to finish, unlike a fire-and-forget
    # subprocess.Popen(['mv', ...])
    shutil.move(out_file, final_file)
    return final_file


def get_final_epoch(config):
    """Read the final training epoch from the config."""
    cfg = mmcv.Config.fromfile('./configs/' + config)
    return cfg.runner.max_epochs


def get_best_epoch(exp_dir):
    """Find the latest 'best_*.pth' checkpoint and parse its epoch number."""
    best_epoch_full_path = list(
        sorted(glob.glob(osp.join(exp_dir, 'best_*.pth'))))[-1]
    best_epoch_model_path = osp.split(best_epoch_full_path)[-1]
    best_epoch = best_epoch_model_path.split('_')[-1].split('.')[0]
    return best_epoch_model_path, int(best_epoch)


def get_real_epoch(config):
    """Get the actual number of passes over the data.

    With RepeatDataset, one nominal epoch covers the dataset `times` times.
    """
    cfg = mmcv.Config.fromfile('./configs/' + config)
    epoch = cfg.runner.max_epochs
    if cfg.data.train.type == 'RepeatDataset':
        epoch *= cfg.data.train.times
    return epoch


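# Each line in a *.log.json file is one JSON record. A val record that
# get_final_results parses looks roughly like this (keys depend on the
# configured metrics; values are illustrative):
#   {"mode": "val", "epoch": 12, "mAP": 0.684}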
def get_final_results(log_json_path, epoch, results_lut):
    """Collect the training memory and the val metrics of the given epoch."""
    result_dict = dict()
    with open(log_json_path, 'r') as f:
        for line in f:
            log_line = json.loads(line)
            if 'mode' not in log_line.keys():
                continue

            if log_line['mode'] == 'train' and log_line['epoch'] == epoch:
                result_dict['memory'] = log_line['memory']

            if log_line['mode'] == 'val' and log_line['epoch'] == epoch:
                result_dict.update({
                    key: log_line[key]
                    for key in results_lut if key in log_line
                })
    return result_dict


def get_dataset_name(config):
    """Map the dataset type in the config to a human-readable name."""
    # If more datasets are supported, add them here.
    name_map = dict(
        HRSCDataset='HRSC 2016', SARDataset='SAR', DOTADataset='DOTA v1.0')
    cfg = mmcv.Config.fromfile('./configs/' + config)
    return name_map[cfg.dataset_type]


def convert_model_info_to_pwc(model_infos):
    """Convert gathered model infos to Papers with Code metafile entries."""
    pwc_files = {}
    for model in model_infos:
        cfg_folder_name = osp.split(model['config'])[0]
        pwc_model_info = OrderedDict()
        pwc_model_info['Name'] = osp.split(model['config'])[-1].split('.')[0]
        pwc_model_info['In Collection'] = 'Please fill in Collection name'
        pwc_model_info['Config'] = osp.join('configs', model['config'])

        # get metadata
        memory = round(model['results']['memory'] / 1024, 1)
        epochs = get_real_epoch(model['config'])
        meta_data = OrderedDict()
        meta_data['Training Memory (GB)'] = memory
        meta_data['Epochs'] = epochs
        pwc_model_info['Metadata'] = meta_data

        # get dataset name
        dataset_name = get_dataset_name(model['config'])

        # get results
        results = []
        # if more metrics are reported, add them here.
        if 'mAP' in model['results']:
            metric = round(model['results']['mAP'] * 100, 1)
            results.append(
                OrderedDict(
                    Task='Object Detection',
                    Dataset=dataset_name,
                    Metrics={'box AP': metric}))
        pwc_model_info['Results'] = results

        # use osp.splitext to drop the '.py' suffix; str.rstrip('.py')
        # would strip any trailing 'p', 'y' or '.' characters instead
        link_string = 'https://download.openmmlab.com/mmrotate/v0.1.0/'
        link_string += '{}/{}'.format(
            osp.splitext(model['config'])[0],
            osp.split(model['model_path'])[-1])
        pwc_model_info['Weights'] = link_string
        pwc_files.setdefault(cfg_folder_name, []).append(pwc_model_info)
    return pwc_files
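
# A generated metafile entry looks roughly like this (all values are
# illustrative):
#
# - Name: rotated_retinanet_obb_r50_fpn_1x_dota_le90
#   In Collection: Please fill in Collection name
#   Config: configs/rotated_retinanet/rotated_retinanet_obb_r50_fpn_1x_dota_le90.py
#   Metadata:
#     Training Memory (GB): 3.4
#     Epochs: 12
#   Results:
#   - Task: Object Detection
#     Dataset: DOTA v1.0
#     Metrics:
#       box AP: 68.4
#   Weights: https://download.openmmlab.com/mmrotate/v0.1.0/...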


def parse_args():
    parser = argparse.ArgumentParser(description='Gather benchmarked models')
    parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    parser.add_argument(
        'out', type=str, help='output path of gathered models to be stored')
    parser.add_argument(
        '--best',
        action='store_true',
        help='whether to gather the best checkpoint of each experiment')

    args = parser.parse_args()
    return args


def main():
    args = parse_args()
    models_root = args.root
    models_out = args.out
    mmcv.mkdir_or_exist(models_out)

    # find all models in the root directory to be gathered
    raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True))

    # filter out configs that were not trained in the experiments dir
    used_configs = []
    for raw_config in raw_configs:
        if osp.exists(osp.join(models_root, raw_config)):
            used_configs.append(raw_config)
    print(f'Found {len(used_configs)} models to be gathered')

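    # Each experiment dir mirrors its config path under ./configs, e.g.
    # (names illustrative):
    #   <root>/rotated_retinanet/rotated_retinanet_obb_r50_fpn_1x_dota_le90.py/
    #     epoch_12.pth, 20220101_000000.log, 20220101_000000.log.json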
    # find the final checkpoint and the log files of each trained config,
    # then parse the final performance
    model_infos = []
    for used_config in used_configs:
        exp_dir = osp.join(models_root, used_config)
        # check whether the experiment is finished
        if args.best:
            final_model, final_epoch = get_best_epoch(exp_dir)
        else:
            final_epoch = get_final_epoch(used_config)
            final_model = 'epoch_{}.pth'.format(final_epoch)

        model_path = osp.join(exp_dir, final_model)
        # skip if the model is still training
        if not osp.exists(model_path):
            continue

        # get the latest logs
        log_json_path = list(
            sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]
        log_txt_path = list(sorted(glob.glob(osp.join(exp_dir, '*.log'))))[-1]
        cfg = mmcv.Config.fromfile('./configs/' + used_config)
        results_lut = cfg.evaluation.metric
        if not isinstance(results_lut, list):
            results_lut = [results_lut]
        model_performance = get_final_results(log_json_path, final_epoch,
                                              results_lut)

        # get_final_results returns an empty dict, not None, when nothing
        # was parsed, so test for emptiness
        if not model_performance:
            continue

        model_time = osp.split(log_txt_path)[-1].split('.')[0]
        model_infos.append(
            dict(
                config=used_config,
                results=model_performance,
                epochs=final_epoch,
                model_time=model_time,
                final_model=final_model,
                log_json_path=osp.split(log_json_path)[-1]))

    # publish model for each checkpoint
    publish_model_infos = []
    for model in model_infos:
        # drop the '.py' suffix with osp.splitext; str.rstrip('.py') would
        # strip any trailing 'p', 'y' or '.' characters instead
        model_publish_dir = osp.join(models_out,
                                     osp.splitext(model['config'])[0])
        mmcv.mkdir_or_exist(model_publish_dir)

        model_name = osp.split(model['config'])[-1].split('.')[0]

        model_name += '_' + model['model_time']
        publish_model_path = osp.join(model_publish_dir, model_name)
        trained_model_path = osp.join(models_root, model['config'],
                                      model['final_model'])

        # convert model
        final_model_path = process_checkpoint(trained_model_path,
                                              publish_model_path)

        # copy log
        shutil.copy(
            osp.join(models_root, model['config'], model['log_json_path']),
            osp.join(model_publish_dir, f'{model_name}.log.json'))
        shutil.copy(
            osp.join(models_root, model['config'],
                     osp.splitext(model['log_json_path'])[0]),
            osp.join(model_publish_dir, f'{model_name}.log'))

        # copy config to guarantee reproducibility
        config_path = model['config']
        config_path = osp.join(
            'configs',
            config_path) if 'configs' not in config_path else config_path
        target_config_path = osp.split(config_path)[-1]
        shutil.copy(config_path, osp.join(model_publish_dir,
                                          target_config_path))

        model['model_path'] = final_model_path
        publish_model_infos.append(model)

    models = dict(models=publish_model_infos)
    print(f'Gathered {len(publish_model_infos)} models in total')
    mmcv.dump(models, osp.join(models_out, 'model_info.json'))

    pwc_files = convert_model_info_to_pwc(publish_model_infos)
    for name in pwc_files:
        with open(osp.join(models_out, name + '_metafile.yml'), 'w') as f:
            ordered_yaml_dump(pwc_files[name], f, encoding='utf-8')


if __name__ == '__main__':
    main()