@@ -82,7 +82,7 @@ def evaluate_prediction(y_true, y_pred):
 def log_evaluation(metric_outputs, logger, description='Comparing y_true and y_pred:'):
     logger.info(description)
     for metric, value in metric_outputs.items():
-        logger.info('  {}: {:.4f}'.format(metric, value))
+        logger.info('  {}: {:.8f}'.format(metric, value))
 
 
 class LoggingCallback(Callback):
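The change above widens the logged metric precision from 4 to 8 decimal places, which keeps small differences between runs visible in the logs. A quick standalone illustration (sketch only, not part of the patch):

    value = 0.123456789
    print('  {}: {:.4f}'.format('mse', value))   # prints '  mse: 0.1235'
    print('  {}: {:.8f}'.format('mse', value))   # prints '  mse: 0.12345679'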
@@ -375,13 +375,19 @@ def warmup_scheduler(epoch):
 
         ckpt.set_model(template_model)
         J = ckpt.restart(params)
+        # J = candle.restart(params, model)
+
         if J is not None:
             initial_epoch = J['epoch']
             best_metric_last = J['best_metric_last']
             params['ckpt_best_metric_last'] = best_metric_last
             print('initial_epoch: %i' % initial_epoch)
 
     elif args.initial_weights is not None:
+        # ckpt = candle.CandleCheckpointCallback(params,
+        #                                        verbose=True)
+
+        # if args.initial_weights:
         logger.info("Loading initial weights from '{}'"
                     .format(args.initial_weights))
         start = time.time()
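The restart block above resumes an interrupted run: ckpt.restart(params) returns None on a fresh start, or a dict J describing the saved state. A minimal sketch of how that metadata is typically consumed downstream; model, x_train, y_train, and the params keys are assumptions based on the surrounding script, not shown in this hunk:

    initial_epoch = 0
    J = ckpt.restart(params)              # None when no checkpoint exists
    if J is not None:
        initial_epoch = J['epoch']        # resume from the saved epoch
        params['ckpt_best_metric_last'] = J['best_metric_last']

    # initial_epoch makes Keras skip the already-completed epochs.
    model.fit(x_train, y_train,
              epochs=params['epochs'],
              initial_epoch=initial_epoch,
              callbacks=[ckpt])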
@@ -419,7 +425,7 @@ def warmup_scheduler(epoch):
     es_monitor = keras.callbacks.EarlyStopping(patience=patience,
                                                verbose=1)
 
-    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=0.00001)
+    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=100, min_lr=0.00001)
     warmup_lr = LearningRateScheduler(warmup_scheduler)
     # prefix + cv_ext + '.
     checkpointer = MultiGPUCheckpoint('model.h5', save_best_only=True)
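For context on the patience change: ReduceLROnPlateau multiplies the learning rate by factor only after val_loss has failed to improve for patience consecutive epochs, so raising patience from 5 to 100 effectively disables the reduction on shorter runs. A hedged sketch of the callback's behavior (import path assumed from the keras.callbacks usage above):

    from keras.callbacks import ReduceLROnPlateau

    # The LR is halved only after 100 epochs with no val_loss improvement,
    # and is never reduced below min_lr.
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                                  patience=100, min_lr=0.00001)
    # model.fit(..., callbacks=[es_monitor, reduce_lr, warmup_lr, checkpointer])

Note that EarlyStopping also monitors val_loss by default, so if its patience is below 100, training will stop before this reduction ever triggers.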