 sys.path.append('.')
 import utils
 
-
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 
@@ -26,7 +25,6 @@ def main(args):
     print(args)
     print(f'Calc Transferabilities of {args.arch} on {args.data}')
 
-
     try:
         features = np.load(os.path.join(logger.get_save_dir(), 'features.npy'))
         predictions = np.load(os.path.join(logger.get_save_dir(), 'preds.npy'))
@@ -37,21 +35,25 @@ def main(args):
         data_transform = utils.get_transform(resizing=args.resizing)
         print("data_transform: ", data_transform)
         model = utils.get_model(args.arch, args.pretrained).to(device)
-        score_dataset, num_classes = utils.get_dataset(args.data, args.root, data_transform, args.sample_rate, args.num_samples_per_classes)
-        score_loader = DataLoader(score_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
+        score_dataset, num_classes = utils.get_dataset(args.data, args.root, data_transform, args.sample_rate,
+                                                       args.num_samples_per_classes)
+        score_loader = DataLoader(score_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers,
+                                  pin_memory=True)
         print(f'Using {len(score_dataset)} samples for ranking')
-        features, predictions, targets = utils.forwarding_dataset(score_loader, model, layer=eval(f'model.{args.layer}'), device=device)
+        features, predictions, targets = utils.forwarding_dataset(score_loader, model,
+                                                                  layer=eval(f'model.{args.layer}'), device=device)
         if args.save_features:
             np.save(os.path.join(logger.get_save_dir(), 'features.npy'), features)
             np.save(os.path.join(logger.get_save_dir(), 'preds.npy'), predictions)
             np.save(os.path.join(logger.get_save_dir(), 'targets.npy'), targets)
 
     print('Conducting transferability calculation')
     result = logme(features, targets)
-
-    logger.write(f'# {result:.4f} # data_{args.data}_sr{args.sample_rate}_sc{args.num_samples_per_classes}_model_{args.arch}_layer_{args.layer}\n')
+
+    logger.write(
+        f'# {result:.4f} # data_{args.data}_sr{args.sample_rate}_sc{args.num_samples_per_classes}_model_{args.arch}_layer_{args.layer}\n')
     logger.close()
-
+
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Ranking pre-trained models with LogME (Log Maximum Evidence)')
@@ -68,22 +70,22 @@ def main(args):
     parser.add_argument('-sc', '--num-samples-per-classes', default=None, type=int,
                         help='number of samples per classes.')
     parser.add_argument('-b', '--batch-size', default=48, type=int,
-                    metavar='N', help='mini-batch size (default: 48)')
+                        metavar='N', help='mini-batch size (default: 48)')
     parser.add_argument('--resizing', default='res.', type=str)
 
     # model
     parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
                         choices=utils.get_model_names(),
                         help='model to be ranked: ' +
-                        ' | '.join(utils.get_model_names()) +
-                        ' (default: resnet50)')
+                             ' | '.join(utils.get_model_names()) +
+                             ' (default: resnet50)')
     parser.add_argument('-l', '--layer', default='fc',
                         help='before which layer features are extracted')
     parser.add_argument('--pretrained', default=None,
-                    help="pretrained checkpoint of the backbone. "
-                         "(default: None, use the ImageNet supervised pretrained backbone)")
+                        help="pretrained checkpoint of the backbone. "
+                             "(default: None, use the ImageNet supervised pretrained backbone)")
     parser.add_argument("--save_features", action='store_true',
-                    help="whether to save extracted features")
+                        help="whether to save extracted features")
 
     args = parser.parse_args()
-    main(args)
+    main(args)
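For context on what the reformatted code above feeds into: the value written to the log comes from result = logme(features, targets). The sketch below shows the kind of evidence maximization a LogME-style score performs, following You et al., "LogME: Practical Assessment of Pre-trained Models for Transfer Learning" (ICML 2021). It is illustrative only: log_evidence and logme_score are hypothetical names for this sketch, not this repository's logme implementation, which may differ in its fixed-point schedule and numerical safeguards.

# A minimal sketch of the evidence maximization behind a LogME-style score.
# Illustrative only -- not the repository's logme() implementation.
# Assumes more samples than feature dimensions (n > d), as in the script above.
import numpy as np


def log_evidence(f, y, iters=11):
    """Per-sample log marginal evidence of a Bayesian linear model
    y ~ N(f @ w, 1/beta), w ~ N(0, (1/alpha) I), with alpha and beta tuned
    by the standard evidence fixed-point iteration."""
    n, d = f.shape
    # Eigendecomposition of f^T f is reused by every fixed-point step.
    sigma, v = np.linalg.eigh(f.T @ f)
    fy = f.T @ y
    alpha, beta = 1.0, 1.0
    for _ in range(iters):
        gamma = float(np.sum(beta * sigma / (alpha + beta * sigma)))
        # Posterior mean of the weights for the current (alpha, beta).
        m = v @ ((v.T @ fy) * beta / (alpha + beta * sigma))
        m2 = float(m @ m)
        res2 = float(np.sum((y - f @ m) ** 2))
        alpha = gamma / (m2 + 1e-10)
        beta = (n - gamma) / (res2 + 1e-10)
    evidence = (d / 2.0 * np.log(alpha)
                + n / 2.0 * np.log(beta)
                - 0.5 * np.sum(np.log(alpha + beta * sigma))
                - beta / 2.0 * res2
                - alpha / 2.0 * m2
                - n / 2.0 * np.log(2.0 * np.pi))
    return evidence / n


def logme_score(features, targets):
    """Average the per-class evidence over one-hot targets (classification)."""
    classes = np.unique(targets)
    return float(np.mean([log_evidence(features, (targets == c).astype(np.float64))
                          for c in classes]))

In this reading, a larger score means the extracted features explain the labels with higher marginal evidence under a simple Bayesian linear head, which is what the script uses to rank architectures (-a) and feature layers (-l) against each other.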