@@ -22,11 +22,11 @@
 from sklearn.metrics import (
     r2_score,
     roc_auc_score,
-    # pearsonr,
     accuracy_score,
 )
 from scipy.stats import pearsonr

+import sys
 import adrp
 import candle

@@ -221,7 +221,7 @@ def run_inference(params):
         sys.exit("Model format should be one of json, yaml or h5")

     # compile separately to get custom functions as needed
-    loaded_model.compile(optimizer=params['optimizer'], loss=params['loss'], metrics=['mae', r2])
+    loaded_model.compile(optimizer=params['optimizer'], loss=params['loss'], metrics=['mae', r2])

     # use same data as training
     seed = params['rng_seed']

@@ -516,7 +516,7 @@ def post_process(params, X_train, X_test, Y_test, score, history, model):
     print("Loaded json model from disk")

     # evaluate json loaded model on test data
-    loaded_model.compile(optimizer=params['optimizer'], loss=params['loss'], metrics=['mae', r2])
+    loaded_model_json.compile(optimizer=params['optimizer'], loss=params['loss'], metrics=['mae', r2])
     score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)

     print("json Validation loss:", score_json[0])

@@ -529,7 +529,7 @@ def post_process(params, X_train, X_test, Y_test, score, history, model):
     print("Loaded yaml model from disk")

     # evaluate loaded model on test data
-    loaded_model.compile(optimizer=params['optimizer'], loss=params['loss'], metrics=['mae', r2])
+    loaded_model_yaml.compile(optimizer=params['optimizer'], loss=params['loss'], metrics=['mae', r2])
     score_yaml = loaded_model_yaml.evaluate(X_test, Y_test, verbose=0)

     print("yaml Validation loss:", score_yaml[0])
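For reference, a minimal, self-contained sketch of the pattern the last two hunks correct. The model and the r2 metric below are placeholders (assumed names and an assumed implementation, not the repository's code); the point is that compile() must be called on the freshly deserialized object, here loaded_model_json, before evaluate(), otherwise the reloaded model is never configured with the loss and custom metrics.

import numpy as np
from tensorflow import keras
from tensorflow.keras import backend as K


def r2(y_true, y_pred):
    # Coefficient of determination as a Keras metric (assumed implementation;
    # the benchmark's own r2 helper may differ).
    ss_res = K.sum(K.square(y_true - y_pred))
    ss_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1.0 - ss_res / (ss_tot + K.epsilon())


# Tiny stand-in regression model in place of the ADRP network.
model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(1)])
model.compile(optimizer="sgd", loss="mean_squared_error", metrics=["mae", r2])

# Round-trip through json, as post_process() does with the saved architecture.
loaded_model_json = keras.models.model_from_json(model.to_json())
loaded_model_json.set_weights(model.get_weights())

# compile separately to get custom functions as needed -- on the object that
# will actually be evaluated, not on some other model variable.
loaded_model_json.compile(
    optimizer="sgd", loss="mean_squared_error", metrics=["mae", r2]
)

X_test = np.random.rand(8, 4).astype("float32")
Y_test = np.random.rand(8, 1).astype("float32")
score_json = loaded_model_json.evaluate(X_test, Y_test, verbose=0)
print("json Validation loss:", score_json[0])

The same reasoning applies to the yaml branch, where the recompile now targets loaded_model_yaml.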