11import os
22import shutil
3-
3+ import logging
4+ import warnings
45import numpy as np
56
67from pytorch_lightning .pt_overrides .override_data_parallel import LightningDistributedDataParallel
@@ -91,7 +92,7 @@ def __init__(self, monitor='val_loss',
self.stopped_epoch = 0

if mode not in ['auto', 'min', 'max']:
    # Consistency fix: ModelCheckpoint reports the same misconfiguration via
    # warnings.warn(..., RuntimeWarning); an info-level log line is easy to
    # miss and is filtered out entirely under the default logging config.
    warnings.warn(
        f'EarlyStopping mode {mode} is unknown, fallback to auto mode.',
        RuntimeWarning)
    mode = 'auto'
9798 if mode == 'min' :
@@ -121,9 +122,10 @@ def on_epoch_end(self, epoch, logs=None):
121122 current = logs .get (self .monitor )
122123 stop_training = False
123124 if current is None :
124- print ('Early stopping conditioned on metric `%s` '
125- 'which is not available. Available metrics are: %s' %
126- (self .monitor , ',' .join (list (logs .keys ()))), RuntimeWarning )
125+ warnings .warn (
126+ f'Early stopping conditioned on metric `{ self .monitor } `'
127+ f' which is not available. Available metrics are: { "," .join (list (logs .keys ()))} ' ,
128+ RuntimeWarning )
127129 stop_training = True
128130 return stop_training
129131
@@ -141,7 +143,7 @@ def on_epoch_end(self, epoch, logs=None):
141143
def on_train_end(self, logs=None):
    """Report, once training finishes, the epoch at which early stopping fired.

    Args:
        logs: unused; kept for callback-interface compatibility.
    """
    # stopped_epoch > 0 means on_epoch_end actually triggered a stop.
    if self.stopped_epoch > 0 and self.verbose > 0:
        # Lazy %-formatting: the string is only built if INFO is enabled.
        logging.info('Epoch %05d: early stopping', self.stopped_epoch + 1)
145147
146148
147149class ModelCheckpoint (Callback ):
@@ -187,8 +189,9 @@ def __init__(self, filepath, monitor='val_loss', verbose=0,
self.prefix = prefix

# Unknown mode: tell the user and silently degrade to 'auto'.
if mode not in ['auto', 'min', 'max']:
    warnings.warn(
        f'ModelCheckpoint mode {mode} is unknown, '
        'fallback to auto mode.', RuntimeWarning)
    mode = 'auto'
193196
194197 if mode == 'min' :
@@ -232,25 +235,26 @@ def on_epoch_end(self, epoch, logs=None):
if self.save_best_only:
    current = logs.get(self.monitor)
    if current is None:
        warnings.warn(
            f'Can save best model only with {self.monitor} available,'
            ' skipping.', RuntimeWarning)
    else:
        if self.monitor_op(current, self.best):
            if self.verbose > 0:
                # BUG FIX: the previous call passed the ' saving model to ...'
                # text as a SECOND positional argument to logging.info(); with
                # no %-placeholder in the message, logging treats it as an
                # unused lazy-formatting arg ("not all arguments converted"),
                # so the checkpoint path never appeared in the log. Emit one
                # merged message instead.
                logging.info(
                    f'\nEpoch {epoch + 1:05d}: {self.monitor} improved'
                    f' from {self.best:0.5f} to {current:0.5f},'
                    f' saving model to {filepath}')
            self.best = current
            self.save_model(filepath, overwrite=True)
        else:
            if self.verbose > 0:
                logging.info(
                    f'\nEpoch {epoch + 1:05d}: {self.monitor} did not improve')
else:
    if self.verbose > 0:
        # Normalized format spec: ':05d' (zero-pad to width 5); the rendered
        # ': 05d' adds a space-sign flag and changes the output.
        logging.info(f'\nEpoch {epoch + 1:05d}: saving model to {filepath}')
    self.save_model(filepath, overwrite=False)
255259
256260
@@ -291,6 +295,6 @@ def on_epoch_begin(self, epoch, trainer):
# Smoke-test the early-stopping callback on a canned loss curve.
losses = [10, 9, 8, 8, 6, 4.3, 5, 4.4, 2.8, 2.5]
for i, loss in enumerate(losses):
    should_stop = c.on_epoch_end(i, logs={'val_loss': loss})
    # Pass a format string, not a bare float, as the logging message
    # (same output, conventional lazy-format call).
    logging.info('%s', loss)
    if should_stop:
        break
0 commit comments