forked from i3-research/fibe
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathfibe_functions.py
More file actions
1036 lines (871 loc) · 60.2 KB
/
fibe_functions.py
File metadata and controls
1036 lines (871 loc) · 60.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Forward Inclusion Backward Elimination
# Copyright (c) Mohammad Arafat Hussain, Boston Children's Hospital/Harvard Medical School, 2023
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
#
# Last Updated: 09/26/2024 at 2215H EST, By Mohammmad Arafat Hussain.
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import mean_absolute_error, accuracy_score, confusion_matrix
import copy
from collections import Counter
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.svm import SVR, SVC
from sklearn.ensemble import AdaBoostRegressor, AdaBoostClassifier
from sklearn.utils import resample
import os
from data_curation import data_curation, log_files_generator
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
import math
def fibe(feature_df, score_df, data_cleaning=False, fixed_features=None, columns_names=None, task_type=None, probability=False, balance=False, model_name=None, metric=None, voting_strictness=None, nFold=None, maxIter=None, tolerance=None, maxFeatures=None, save_intermediate=False, output_dir=None, inference_data_df=None, inference_score_df=None, verbose=True):
    '''
    Forward Inclusion Backward Elimination (FIBE) feature selection with cross-validated inference.

    feature_df: is the 2D feature matrix (supports DataFrame, Numpy Array, and List) with columns representing different features.
    score_df: is the 1D score vector as a column (supports DataFrame, Numpy Array, and List).
    data_cleaning: if True, cleans the data including dropping invalid and imbalanced features, mapping categories to numeric values, imputing data with median/mean values.
    fixed_features: Predefined features that must stay in the feature set and the FIBE algorithm does not add or remove those.
        Must be either a List of names to select from 'feature_df', a numpy array, or a DataFrame of features added separately to 'feature_df.'
    columns_names: contain the names of the features. The algorithm returns the names of the selected features from this list.
        If not available, then the algorithm returns the column indexes of selected features.
    task_type: either 'regression' or 'classification.' The default is 'regression.'
    probability: if True, probability values (for the class 1) that lead to binary classifications are returned.
        This option only works when 'task_type=classification' (a ValueError is raised for regression).
    balance: In a binary classification task, if the data is imbalanced in terms of classes, 'balance=True' uses resampling to balance the data.
    model_name: For the 'regression' task, choose from 'linearSVR', 'gaussianSVR', 'RegressionForest', 'AdaBoostDT', 'AdaBoostSVR',
        and 'consensus' (consensus using 'linearSVR', 'gaussianSVR', and 'RegressionForest'). The default is 'linearSVR'. For the 'classification' task,
        choose from 'linearSVC', 'gaussianSVC', 'RandomForest', 'AdaBoostDT', 'AdaBoostSVC', and 'consensus' (consensus using 'linearSVC',
        'gaussianSVC', and 'RandomForest'). The default is 'linearSVC'.
    metric: For the 'regression' task, choose from 'MAE' and 'MAPE'. The default is 'MAE.' For the 'classification' task, choose from 'Accuracy',
        'F1-score,' and 'binaryROC'. The default is 'Accuracy'.
    voting_strictness: one of
        - 'strict'      : keep features selected in at least round(0.6 * nFold) folds,
        - 'loose'       : keep features selected in at least round(0.4 * nFold) folds,
        - 'conditional' : first check whether the strict feature count is >= 2/3 of the average number of features
                          selected over the folds; if not, and the loose feature count is <= 2 while that average is
                          greater than 4, take the union of features; otherwise keep the loose set,
        - 'weighted'    : rank features within each fold, sum the ranks over folds and threshold them, always keeping
                          features selected in at least 3 folds,
        - 'union'       : keep every feature selected in any fold.
        The default is 'weighted'.
    nFold: Number of folds in cross-validation. Preferred and default is '5'.
    maxIter: is the maximum number of iterations that the algorithm goes back and forth in forward inclusion and backward elimination in each fold. The default is '3'.
    tolerance: is the percentage of deviation in the error/accuracy threshold allowed. The default is '0.05', i.e., 5%.
    maxFeatures: is the fractional number that indicates the number of best features to be selected of the total features. Default is 0.25, i.e., 25% of the total number of features.
    save_intermediate: if True, saves intermediate results to the specified directory. Default is False.
    output_dir: directory where intermediate results are saved if save_intermediate is True.
    inference_data_df: data for optional second inference cohort for prediction using the selected subset of features.
    inference_score_df: scores for optional second inference cohort for prediction using the selected subset of features.
    verbose: generates text for intermediate loss and selected feature list during iteration. The default is 'True'.

    The outputs are:
    final_features: the list of selected features (names if 'columns_names' was given, otherwise column indexes),
        with any fixed/specialist feature names prepended.
    subjectList: the list of subjects used in inference; other outputs are organized according to this list.
    actual_score: list of actual target scores. If the additional inference cohort was provided, the last element is
        an additional list of actual score values of the inference data.
    predicted_score: list of predicted scores. For 'consensus' models there are 3 predictions per observation.
    validationPerformance: list of validation performance values (in the chosen 'metric') for the 'nFold' folds.
    '''
    start_time = datetime.now()
    print("Code started running at:", start_time.strftime("%Y-%m-%d %H:%M:%S"))

    # Optional data curation (added by Ankush Kesri): drop/map/impute, then write log files.
    if data_cleaning == True:
        feature_df, drop_log, mapping_list, imputation_log = data_curation(feature_df)
        # Saving log files
        log_files_generator(drop_log, mapping_list, imputation_log, output_dir)

    # Checking and converting to DataFrame if the features are in numpy array format
    if isinstance(feature_df, np.ndarray):
        if columns_names is not None:
            # checking if the length of feature names is equal to the feature column numbers
            if feature_df.shape[1] != len(columns_names):
                raise ValueError("Number of columns in the Feature is not equal to the number of column names provided")
            else:
                feature_df = pd.DataFrame(data=feature_df, columns=columns_names)
        else:
            feature_df = pd.DataFrame(data=feature_df)
    # Checking and converting to DataFrame if the features are in list format
    elif isinstance(feature_df, list):
        if columns_names is not None:
            # checking if the length of feature names are equal to the feature column numbers
            if len(feature_df[0]) != len(columns_names):
                raise ValueError("Number of columns in the Feature is not equal to the number of column names provided")
            else:
                feature_df = pd.DataFrame(data=feature_df, columns=columns_names)
        else:
            feature_df = pd.DataFrame(data=feature_df)

    # checking and converting to DataFrame if the target is in numpy array format
    if isinstance(score_df, np.ndarray):
        score_df = pd.DataFrame(data=score_df)
    # Checking and converting to DataFrame if the target is in list format
    elif isinstance(score_df, list):
        score_df = pd.DataFrame(data=score_df)

    # checking if the observation/subject numbers are equal in features and in targets
    if feature_df.shape[0] != score_df.shape[0]:
        raise ValueError("Number of rows in the Feature is not equal to the number of rows in Score")

    if inference_data_df is not None and inference_score_df is None:
        raise ValueError("Scores for inference cohort is not provided.")

    if inference_data_df is not None and inference_score_df is not None:
        # Checking and converting to DataFrame if the inference features are in numpy array format
        if isinstance(inference_data_df, np.ndarray):
            if columns_names is not None:
                # checking if the length of feature names is equal to the feature column numbers
                if inference_data_df.shape[1] != len(columns_names):
                    raise ValueError("Number of columns in the inference feature is not equal to the number of column names provided")
                else:
                    inference_data_df = pd.DataFrame(data=inference_data_df, columns=columns_names)
            else:
                inference_data_df = pd.DataFrame(data=inference_data_df)
        # Checking and converting to DataFrame if the features are in list format
        elif isinstance(inference_data_df, list):
            if columns_names is not None:
                # checking if the length of feature names are equal to the feature column numbers
                if len(inference_data_df[0]) != len(columns_names):
                    raise ValueError("Number of columns in the inference feature is not equal to the number of column names provided")
                else:
                    inference_data_df = pd.DataFrame(data=inference_data_df, columns=columns_names)
            else:
                inference_data_df = pd.DataFrame(data=inference_data_df)

        # checking and converting to DataFrame if the target is in numpy array format
        if isinstance(inference_score_df, np.ndarray):
            inference_score_df = pd.DataFrame(data=inference_score_df)
        # Checking and converting to DataFrame if the target is in list format
        elif isinstance(inference_score_df, list):
            inference_score_df = pd.DataFrame(data=inference_score_df)

        # checking if the observation/subject numbers are equal in features and in targets
        if inference_data_df.shape[0] != inference_score_df.shape[0]:
            raise ValueError("Number of rows in the inference feature is not equal to the number of rows in inference score")

    # Fixed ("specialist") features are split off so FIBE never adds/removes them.
    if fixed_features is not None:
        if isinstance(fixed_features, list):
            for feature in fixed_features:
                if feature not in feature_df.columns:
                    raise ValueError(f'You provided a fixed feature "{feature}," which does not exist in the Feature matrix.')
            specialist_features = feature_df[fixed_features]
            feature_df = feature_df.drop(fixed_features, axis=1)
        elif isinstance(fixed_features, pd.DataFrame):
            if feature_df.shape[0] != fixed_features.shape[0]:
                raise ValueError("Number of rows in the Feature is not equal to the number of rows in the fixed features.")
            specialist_features = copy.deepcopy(fixed_features)
        elif isinstance(fixed_features, np.ndarray):
            specialist_features = pd.DataFrame(data=fixed_features)
            if feature_df.shape[0] != specialist_features.shape[0]:
                raise ValueError("Number of rows in the Feature is not equal to the number of rows in the fixed features.")
        else:
            raise ValueError("Fixed features must be provided either as a List of feature names, or numpy array, or as a DataFrame containing features.")
    else:
        specialist_features = []

    # Assigning a task type if None
    if task_type is None:
        task_type = 'regression'

    if task_type == 'regression' and probability == True:
        raise ValueError("Probability values cannot be generated for regression tasks.")

    # Checking preconditions for balancing imbalanced data
    if balance == True and task_type == 'regression':
        raise ValueError("Data cannot be made balanced for regression task.")
    elif balance == True and score_df.nunique()[0] > 2:
        raise ValueError("Balancing data only works for binary classification problem. You have more than 2 classes.")

    # Assigning a model if None, or, assigning models based on task type.
    # For classification with probability=True, a parallel 'model_infer' with
    # probability-enabled SVCs is prepared and swapped in before inference.
    if task_type == 'regression':
        if model_name is None or model_name == 'linearSVR':
            model = SVR(kernel='linear', C=1.0, epsilon=0.2)  # Default
        elif model_name == 'gaussianSVR':
            model = SVR(kernel='rbf', C=1.0, gamma='scale')
        elif model_name == 'RegressionForest':
            model = RandomForestRegressor(n_estimators=100, random_state=42, max_depth=5)
        elif model_name == 'AdaBoostDT':
            model = AdaBoostRegressor(estimator=None, n_estimators=50, learning_rate=1.0, random_state=42)
        elif model_name == 'AdaBoostSVR':
            model = AdaBoostRegressor(estimator=SVR(), n_estimators=50, learning_rate=1.0, random_state=42)
        elif model_name == 'consensus':
            model = {
                "Regression Forest": RandomForestRegressor(n_estimators=100, random_state=42, max_depth=5),
                "Gaussian SVR": SVR(kernel='rbf', C=1.0, gamma='scale'),
                "Linear SVR": SVR(kernel='linear', C=1.0, epsilon=0.2)
            }
        else:
            raise ValueError("Unknown model name. You might have misspelled the name or chose a model that does classification by mistake.")
    elif task_type == 'classification':
        if model_name is None or model_name == 'linearSVC':
            model = SVC(kernel='linear', C=1.0)  # Default
            if probability == True:
                model_infer = SVC(kernel='linear', C=1.0, probability=True)
        elif model_name == 'gaussianSVC':
            model = SVC(kernel='rbf', C=1.0, gamma='scale')
            if probability == True:
                model_infer = SVC(kernel='rbf', C=1.0, gamma='scale', probability=True)
        elif model_name == 'RandomForest':
            model = RandomForestClassifier(n_estimators=100, random_state=42, max_depth=5)
        elif model_name == 'AdaBoostDT':
            model = AdaBoostClassifier(estimator=None, n_estimators=50, learning_rate=1.0, random_state=42)
        elif model_name == 'AdaBoostSVC':
            model = AdaBoostClassifier(estimator=SVC(), algorithm='SAMME', n_estimators=50, learning_rate=1.0, random_state=42)
        elif model_name == 'consensus':
            model = {
                "Random Forest": RandomForestClassifier(n_estimators=100, random_state=42, max_depth=5),
                "Gaussian SVC": SVC(kernel='rbf', C=1.0, gamma='scale'),
                "Linear SVC": SVC(kernel='linear', C=1.0)
            }
            if probability == True:
                model_infer = {
                    "Random Forest": RandomForestClassifier(n_estimators=100, random_state=42, max_depth=5),
                    "Gaussian SVC": SVC(kernel='rbf', C=1.0, gamma='scale', probability=True),
                    "Linear SVC": SVC(kernel='linear', C=1.0, probability=True)
                }
        else:
            raise ValueError("Unknown model name. You might have misspelled the name or chose a model that does regression by mistake.")
    else:
        raise ValueError("Unknown task. Please select either 'regression' or 'classification.'")

    # Default metric per task, then reject metrics that belong to the other task.
    if task_type == 'regression' and metric is None:
        metric = 'MAE'  # Default
    elif task_type == 'classification' and metric is None:
        metric = 'Accuracy'  # Default

    if task_type == 'regression':
        if metric == 'Accuracy' or metric == 'binaryROC' or metric == 'F1-score':
            # BUGFIX: the message previously said "MAP" instead of "MAE".
            raise ValueError(f'{metric} is not supported for regression task. Choose from MAE or MAPE')
    elif task_type == 'classification':
        if metric == 'MAE' or metric == 'MAPE':
            raise ValueError(f'{metric} is not supported for classification task. Choose from Accuracy, binaryROC, or F1-score.')

    if voting_strictness is None:
        voting_strictness = 'weighted'  # Default

    if nFold is None:
        nFold = 5  # Default

    if maxIter is None:
        maxIter = 3  # Default
    elif maxIter < 1:
        raise ValueError("maxIter must be a positive integer and greater than zero.")

    # Map the voting mode onto an internal code: real vote thresholds for
    # strict/loose, and sentinel codes 101/102/103 for the other modes.
    if voting_strictness == 'strict':
        vote = round(0.6 * nFold)
    elif voting_strictness == 'loose':
        vote = round(0.4 * nFold)
    elif voting_strictness == 'conditional':
        vote = 101
    elif voting_strictness == 'weighted':
        vote = 102
    elif voting_strictness == 'union':
        vote = 103
    else:
        # BUGFIX: the message previously listed only 'strict' and 'loose'.
        raise ValueError("Unknown voting strictness. Must be one of 'strict', 'loose', 'conditional', 'weighted', or 'union.'")

    if tolerance is None:
        tolerance = 0.05  # Default
    elif tolerance == 1:
        raise ValueError("tolerance cannot be 1.")

    # maxFeatures is given as a fraction of the total feature count and is
    # converted here to an absolute count.
    if maxFeatures is None:
        maxFeatures = round(0.25 * feature_df.shape[1])  # Default
    elif maxFeatures > 1:
        raise ValueError("maxFeatures cannot be greater than 1, i.e., the number of features available.")
    else:
        maxFeatures = round(maxFeatures * feature_df.shape[1])

    if save_intermediate and output_dir is None:
        raise ValueError("Directory for saving intermediate results is not provided.")

    shuffle_flag = True
    random_seed = 99 if shuffle_flag else None  # default seed when shuffling

    # training a model: returns one list of selected features per fold
    selectedFeatures = train(maxIter, nFold, feature_df, score_df, shuffle_flag, random_seed, specialist_features, task_type, balance, model_name, model, metric, tolerance, maxFeatures, save_intermediate, output_dir, verbose)
    # Preserve the raw per-fold lists: the voting logic below replaces
    # 'selectedFeatures' with a Counter / sorted tuple list, which the
    # additional-inference section must not re-flatten.
    fold_feature_lists = copy.deepcopy(selectedFeatures)

    print(f"\n============================== Inference =====================================\n")

    # inference
    if probability == True:
        model = model_infer

    if vote == round(0.6 * nFold) or vote == round(0.4 * nFold):  # 'strict' or 'loose'
        X = [item for sublist in selectedFeatures for item in sublist]
        selectedFeatures = Counter(X)
        if verbose:
            print(f"\nVoting strictness is selected: {voting_strictness}\n")
        final_features = [element for element, count in selectedFeatures.items() if count >= vote]
        subjectList, actual_score, predicted_score, validationPerformance = inference(final_features, nFold, feature_df, score_df, shuffle_flag, random_seed, specialist_features, balance, model_name, model, metric, task_type, probability)  # Added task_type
        if len(specialist_features) != 0:
            final_features = list(specialist_features.columns) + final_features

    elif vote == 102:  # weighted (default)
        # making sure that features from strict voting are always included
        X = [item for sublist in selectedFeatures for item in sublist]
        Feature_counts = Counter(X)
        Feature_counts_sorted = sorted(Feature_counts.items(), key=lambda x: x[1], reverse=True)  # Keeping the order of features (descending) based on their frequency
        # NOTE(review): the '3' threshold is hard-coded for 5-fold CV (== round(0.6*5)); confirm for other nFold values.
        strict_voting_features = [item[0] for item in Feature_counts_sorted if item[1] >= 3]
        print(f"Features selected atleast 3 times that will be included : {strict_voting_features}")

        # Rank features within each fold: the first-selected feature of the
        # longest fold list gets the highest weight (max_length == Km).
        max_length = max(len(sublist) for sublist in selectedFeatures)  # max_length = Km
        dict_list = []
        for sublist in selectedFeatures:
            length = len(sublist)
            dict_list.append({sublist[i]: max_length - i for i in range(length)})
        if verbose:
            print(f"Features with assigned ranks in each {nFold}-folds: {dict_list}\n")

        # Sum the per-fold ranks into a single weighted score per feature.
        final_dict = {}
        for d in dict_list:
            for key, value in d.items():
                if key in final_dict:
                    final_dict[key] += value
                else:
                    final_dict[key] = value
        if verbose:
            print(f"Final feature set over {nFold}-folds with weighted ranks: {final_dict}\n")

        threshold = max_length  # Km
        final_dict_sorted = sorted(final_dict.items(), key=lambda x: x[1], reverse=True)
        weighted_features_with_rank = [item for item in final_dict_sorted if item[1] >= threshold]
        print(f"Features selected after thresholding the weighted features, threshold = {threshold} : {weighted_features_with_rank}")
        weighted_features = [item[0] for item in final_dict_sorted if item[1] >= threshold]
        for _ in strict_voting_features:
            if _ not in weighted_features:
                weighted_features.append(_)
        final_features = copy.deepcopy(weighted_features)
        if verbose:
            print(f"Features selected that satisfied the threshold of {threshold} including features selected at least 3 times: {final_features}\n")
        subjectList, actual_score, predicted_score, validationPerformance = inference(final_features, nFold, feature_df, score_df, shuffle_flag, random_seed, specialist_features, balance, model_name, model, metric, task_type, probability)  # Added task_type
        if len(specialist_features) != 0:
            final_features = list(specialist_features.columns) + final_features

    elif vote == 103:  # union
        X = [item for sublist in selectedFeatures for item in sublist]
        Features_counts = Counter(X)
        selectedFeatures = sorted(Features_counts.items(), key=lambda x: x[1], reverse=True)  # Keeping the order of features based on their frequency
        final_features = [item[0] for item in selectedFeatures]
        if verbose:
            print(f"Following features appeared in the union: {final_features}\n")
        subjectList, actual_score, predicted_score, validationPerformance = inference(final_features, nFold, feature_df, score_df, shuffle_flag, random_seed, specialist_features, balance, model_name, model, metric, task_type, probability)  # Added task_type
        if len(specialist_features) != 0:
            final_features = list(specialist_features.columns) + final_features

    elif vote == 101:  # conditional
        X = [item for sublist in selectedFeatures for item in sublist]
        selectedFeatures = Counter(X)
        if verbose:
            print(f"\nVoting strictness is selected: {voting_strictness}")
        total_sum = sum(selectedFeatures.values())
        mean_value = total_sum / nFold
        floored_mean = math.floor(mean_value)
        if verbose:
            print(f"Average number of features over 5-folds: {floored_mean}")

        # for strict choice
        final_features = [element for element, count in selectedFeatures.items() if count >= round(0.6 * nFold)]
        if len(final_features) >= (2 / 3) * floored_mean:
            if verbose:
                print(f"Number of features (w/o specialist features) after strict voting: {len(final_features)}, which is greater or equal 2/3 of the average number of features (i.e., {floored_mean}) over {nFold}-folds. So keeping 'strict' features.\n")
            subjectList, actual_score, predicted_score, validationPerformance = inference(final_features, nFold, feature_df, score_df, shuffle_flag, random_seed, specialist_features, balance, model_name, model, metric, task_type, probability)  # Added task_type
            if len(specialist_features) != 0:
                final_features = list(specialist_features.columns) + final_features
        else:
            final_features = [element for element, count in selectedFeatures.items() if count >= round(0.4 * nFold)]
            # union
            if len(final_features) <= 2 and floored_mean > 4:
                if verbose:
                    print(f"Number of features (w/o specialist features) after loose voting: {len(final_features)}, which is less or equal 2, while the average number of features (i.e., {floored_mean}) over {nFold}-folds is greater than 4. So keeping 'union' of features.\n")
                union_list = list(selectedFeatures.keys())
                subjectList, actual_score, predicted_score, validationPerformance = inference(union_list, nFold, feature_df, score_df, shuffle_flag, random_seed, specialist_features, balance, model_name, model, metric, task_type, probability)  # Added task_type
                if len(specialist_features) != 0:
                    final_features = list(specialist_features.columns) + union_list
                else:
                    final_features = union_list
            else:
                # for loose choice
                if verbose:
                    print(f"Number of features (w/o specialist features) after loose voting: {len(final_features)}, which is greater than 2. So keeping 'loose' features.\n")
                subjectList, actual_score, predicted_score, validationPerformance = inference(final_features, nFold, feature_df, score_df, shuffle_flag, random_seed, specialist_features, balance, model_name, model, metric, task_type, probability)  # Added task_type
                if len(specialist_features) != 0:
                    final_features = list(specialist_features.columns) + final_features

    # inference on additional data
    if inference_data_df is not None and inference_score_df is not None:
        # BUGFIX: rebuild the per-feature counts from the preserved raw per-fold
        # lists. Previously 'selectedFeatures' (already a Counter or tuple list
        # at this point) was flattened again, splitting feature names into
        # single characters.
        flat = [item for sublist in fold_feature_lists for item in sublist]
        feature_counts = Counter(flat)
        if vote == round(0.6 * nFold) or vote == round(0.4 * nFold):  # 'strict' or 'loose'
            final_features = [element for element, count in feature_counts.items() if count >= vote]
            subjectList_add, actual_score_add, predicted_score_add, validationPerformance_add = inference_additional(final_features, feature_df, score_df, specialist_features, inference_data_df, inference_score_df, model_name, model, metric, task_type, probability)  # Added task_type
            if len(specialist_features) != 0:
                final_features = list(specialist_features.columns) + final_features
        elif vote == 101:  # conditional
            # for strict choice
            final_features = [element for element, count in feature_counts.items() if count >= round(0.6 * nFold)]
            if len(final_features) >= (2 / 3) * floored_mean:
                subjectList_add, actual_score_add, predicted_score_add, validationPerformance_add = inference_additional(final_features, feature_df, score_df, specialist_features, inference_data_df, inference_score_df, model_name, model, metric, task_type, probability)  # Added task_type
            else:
                final_features = [element for element, count in feature_counts.items() if count >= round(0.4 * nFold)]
                # union
                if len(final_features) <= 2 and floored_mean > 4:
                    union_list = list(feature_counts.keys())
                    # BUGFIX: run the additional inference on the union list;
                    # previously the loose 'final_features' was passed here.
                    subjectList_add, actual_score_add, predicted_score_add, validationPerformance_add = inference_additional(union_list, feature_df, score_df, specialist_features, inference_data_df, inference_score_df, model_name, model, metric, task_type, probability)  # Added task_type
                else:
                    # for loose choice
                    subjectList_add, actual_score_add, predicted_score_add, validationPerformance_add = inference_additional(final_features, feature_df, score_df, specialist_features, inference_data_df, inference_score_df, model_name, model, metric, task_type, probability)  # Added task_type
        else:  # 'weighted' (102) and 'union' (103)
            # BUGFIX: these modes previously fell through without calling
            # inference_additional, causing a NameError below. Reuse the final
            # feature set chosen above, minus the specialist columns that were
            # prepended (inference_additional receives those separately).
            if len(specialist_features) != 0:
                selected_only = [f for f in final_features if f not in specialist_features.columns]
            else:
                selected_only = list(final_features)
            subjectList_add, actual_score_add, predicted_score_add, validationPerformance_add = inference_additional(selected_only, feature_df, score_df, specialist_features, inference_data_df, inference_score_df, model_name, model, metric, task_type, probability)  # Added task_type
        actual_score = actual_score + [actual_score_add]
        predicted_score = predicted_score + [predicted_score_add]
        validationPerformance = validationPerformance + [validationPerformance_add]

    return final_features, subjectList, actual_score, predicted_score, validationPerformance
def train(maxIter, nFold, feature_df, score_df, shuffle_flag, random_seed, specialist_features, task_type, balance, model_name, model, metric, tolerance, maxFeatures, save_intermediate, output_dir, verbose=False):
    """Nested-CV forward-inclusion / backward-elimination (FI/BE) feature selection.

    For each of the ``nFold`` outer folds, runs up to ``maxIter`` rounds of:

    * Forward Inclusion (FI): greedily add the candidate feature whose addition
      gives the best mean inner-CV score. Features whose score is merely within
      the relative ``tolerance`` band of the current best may also be added,
      but after ``maxFeatures`` such non-improving additions FI stops.
    * Backward Elimination (BE): greedily remove features whose removal
      improves the mean inner-CV score.

    The round loop exits early when neither FI nor BE changes the score, when
    only one feature remains after FI, or when the selected set is unchanged
    between consecutive rounds.

    Parameters
    ----------
    maxIter : int -- maximum FI/BE rounds per outer fold.
    nFold : int -- fold count; the same KFold spec is reused for outer folds
        and for the inner CV inside each traversal.
    feature_df : DataFrame of candidate features.
    score_df : targets, row-aligned with feature_df.
    shuffle_flag, random_seed : forwarded to sklearn KFold.
    specialist_features : DataFrame of features always included in every fit
        (empty container disables this).
    task_type : 'regression' minimizes the metric; any other value maximizes it.
    balance : bool -- when True, inner train/validation splits are rebalanced
        via balanceData().
    model_name : 'consensus' fits every estimator in the ``model`` dict;
        otherwise ``model`` is treated as a single estimator.
    model : dict of estimators (consensus) or a single estimator.
    metric : str -- metric keyword understood by loss_estimation().
    tolerance : float -- relative band around the best score within which a
        non-improving feature may still be added during FI.
    maxFeatures : int -- number of tolerated (non-improving) FI additions
        allowed since the last genuine improvement.
    save_intermediate : bool -- write each fold's selection to output_dir.
    output_dir : str or None -- directory for intermediate results
        (created on demand).
    verbose : bool -- print per-traversal progress.

    Returns
    -------
    list of lists -- the selected feature names for each outer fold.
    """
    max_iter = maxIter
    kf5 = KFold(n_splits = nFold, shuffle=shuffle_flag, random_state=random_seed)
    frequency_of_features_selected_all_fold = []
    # oF: 1-based outer-fold counter, used in log lines and intermediate filenames.
    oF = 0
    if verbose:
        print(f"Total Number of Features to Traverse in each fold: {len(feature_df.columns)}")
        print(f"Tolerance is selected: {tolerance}")
        print(f"Maximum number of Features allowed under tolerance, if not best: {maxFeatures}\n")
    for outer_fold in kf5.split(feature_df):
        print("\n=================================================================================\n")
        # flag_FI / flag_BE record that FI (resp. BE) found nothing left to change;
        # both together end the round loop for this fold.
        flag_FI = 0
        flag_BE = 0
        train_val_df = feature_df.iloc[outer_fold[0]]
        train_val_score_df = score_df.iloc[outer_fold[0]]
        if len(specialist_features) != 0:
            train_specialist = specialist_features.iloc[outer_fold[0]]
        oF += 1
        selected_features = []
        best_features = []
        if task_type == 'regression':
            lowest_error = float('inf')
            # error_to_fill pads temp_error for already-selected features so that
            # np.argmin/np.argmax indices stay aligned with feature_df.columns.
            error_to_fill = lowest_error #newadd
        else:
            # NOTE: for classification the "error" variables actually hold
            # scores/accuracies that are maximized rather than minimized.
            highest_accuracy = float('-inf')
            error_to_fill = highest_accuracy #newadd
        for i in range(max_iter):
            # Count of features accepted under tolerance (not better than the
            # current best) since the last genuine improvement.
            noFeat_tolerance = 0
            exit_flag = 0
            # ---------------- Forward Inclusion ----------------
            for q in range(len(feature_df.columns)):
                if verbose:
                    print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}]", flush=True)
                temp_error = []
                for feature in feature_df.columns:
                    if feature in selected_features:
                        temp_error.append(error_to_fill)
                        continue
                    # Add the new feature to the selected features
                    temp_features = selected_features + [feature]
                    inner_error = []
                    for inner_fold in kf5.split(train_val_df):
                        training = train_val_df.iloc[inner_fold[0]]
                        validation = train_val_df.iloc[inner_fold[1]]
                        if len(specialist_features) != 0:
                            train_spec = train_specialist.iloc[inner_fold[0]]
                            valid_spec = train_specialist.iloc[inner_fold[1]]
                        # training
                        df_score = train_val_score_df.iloc[inner_fold[0]]
                        df_feature = training[temp_features]
                        # validation
                        df_val_score = train_val_score_df.iloc[inner_fold[1]]
                        df_val_feature = validation[temp_features]
                        if balance == True:
                            if len(specialist_features) != 0:
                                df_feature, df_score = balanceData(pd.concat([train_spec, df_feature], axis=1), df_score)
                                df_val_feature, df_val_score = balanceData(pd.concat([valid_spec, df_val_feature], axis=1), df_val_score)
                            else:
                                df_feature, df_score = balanceData(df_feature, df_score)
                                df_val_feature, df_val_score = balanceData(df_val_feature, df_val_score)
                        else:
                            if len(specialist_features) != 0:
                                df_feature = pd.concat([train_spec, df_feature], axis=1)
                                df_val_feature = pd.concat([valid_spec, df_val_feature], axis=1)
                        if model_name == 'consensus':
                            # Every member model contributes one inner-CV score;
                            # they are averaged together with the folds below.
                            for one_model in model:
                                model_ = model[one_model]
                                # Train a Regressor with the selected features and predict
                                model_.fit(df_feature, df_score.values.ravel())
                                y_pred = model_.predict(df_val_feature)
                                # Calculate the mean absolute error for the validation set
                                inner_error.append(loss_estimation(metric, df_val_score, y_pred))
                        else:
                            # Train a Regressor with the selected features and predict
                            model.fit(df_feature, df_score.values.ravel())
                            y_pred = model.predict(df_val_feature)
                            # Calculate the mean absolute error for the validation set
                            inner_error.append(loss_estimation(metric, df_val_score, y_pred))
                    temp_error.append(np.mean(inner_error)) #newadd
                    if verbose:
                        if task_type == 'regression':
                            print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] -> Feature Added: {feature} | Error Found: {np.mean(inner_error)}", flush=True)
                        else:
                            print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] -> Feature Added: {feature} | Accuracy Found: {np.mean(inner_error)}", flush=True)
                if task_type == 'regression':
                    # Accept the best candidate if it is within the tolerance band
                    # of the best error seen so far (not necessarily an improvement).
                    if np.min(temp_error) <= lowest_error*(1+tolerance):
                        selected_features.append(feature_df.columns[np.argmin(temp_error)])
                        if np.min(temp_error) > lowest_error:
                            noFeat_tolerance += 1
                            print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] -- {noFeat_tolerance} feature(s) allowed under tolerance, which is not better than the last best.")
                            if noFeat_tolerance == maxFeatures:
                                print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] -- Number of allowed features under tolerance is met. Exiting from further FI | Best Features remain: {best_features}")
                                exit_flag = 1
                                break
                        if np.min(temp_error) < lowest_error:
                            # Genuine improvement: snapshot the selection and reset the
                            # tolerance counter.
                            lowest_error = np.min(temp_error)
                            best_features = copy.deepcopy(selected_features)
                            noFeat_tolerance = 0
                        if verbose:
                            if len(specialist_features) != 0:
                                all_feat = list(specialist_features.columns) + selected_features
                                print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] - Traversal over all features finished | Best Error ({metric}): {lowest_error:.4f} | Current Error ({metric}): {np.min(temp_error):.4f} | Selected Features: {all_feat}", flush=True)
                            else:
                                print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] - Traversal over all features finished | Best Error ({metric}): {lowest_error:.4f} | Current Error ({metric}): {np.min(temp_error):.4f} | Selected Features: {selected_features}", flush=True)
                    # NOTE(review): when exit_flag is set above, the `break` already
                    # leaves this loop, so the `exit_flag == 1` test below appears
                    # unreachable here -- confirm intent.
                    elif np.min(temp_error) > lowest_error*(1+tolerance) or exit_flag == 1:
                        if verbose:
                            if len(specialist_features) != 0:
                                all_feat = list(specialist_features.columns) + best_features
                                print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] No additional feature improves performance beyond tolerance | Best Features: {all_feat} | Starting BE..", flush=True)
                            else:
                                print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] No additional feature improves performance beyond tolerance | Best Features: {best_features} | Starting BE..", flush=True)
                        flag_FI = 1
                        break
                else:
                    # Classification: mirror of the regression branch with the
                    # comparisons flipped (maximize instead of minimize).
                    if np.max(temp_error) >= highest_accuracy*(1-tolerance) and exit_flag == 0:
                        selected_features.append(feature_df.columns[np.argmax(temp_error)])
                        if np.max(temp_error) < highest_accuracy:
                            noFeat_tolerance += 1
                            print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] -- {noFeat_tolerance} feature(s) allowed under tolerance, which is not better than the last best.")
                            if noFeat_tolerance == maxFeatures:
                                print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] -- Number of allowed features under tolerance is met. Exiting from further FI | Best Features remain: {best_features}")
                                exit_flag = 1
                                break
                        if np.max(temp_error) > highest_accuracy:
                            highest_accuracy = np.max(temp_error)
                            best_features = copy.deepcopy(selected_features)
                            noFeat_tolerance = 0
                        if verbose:
                            if len(specialist_features) != 0:
                                all_feat = list(specialist_features.columns) + selected_features
                                print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] - Traversal over all features finished | Best Prediction ({metric}): {highest_accuracy:.4f} | Currenct Prediction ({metric}): {np.max(temp_error):.4f} | Selected Features: {all_feat}", flush=True)
                            else:
                                print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] - Traversal over all features finished | Best Prediction ({metric}): {highest_accuracy:.4f} | Currenct Prediction ({metric}): {np.max(temp_error):.4f} | Selected Features: {selected_features}", flush=True)
                    elif np.max(temp_error) < highest_accuracy*(1-tolerance) or exit_flag == 1:
                        if verbose:
                            if len(specialist_features) != 0:
                                all_feat = list(specialist_features.columns) + best_features
                                print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] No additional feature improves performance beyond tolerance | Best Features: {all_feat} | Starting BE..", flush=True)
                            else:
                                print(f"[Fold: {oF} | Iter: {i+1} | FI | Traversal: {q+1}] No additional feature improves performance beyond tolerance | Best Features: {best_features} | Starting BE..", flush=True)
                        flag_FI = 1
                        break
            # Discard tolerated-but-never-improving additions: only the best
            # snapshot survives into BE.
            selected_features = copy.deepcopy(best_features)
            if len(selected_features) == 1:
                if verbose:
                    print(f"[Fold: {oF} | Iter: {i+1} | -- | Traversal: -] Since there is only one feature selected in FI, skipping BE and next iterations.", flush=True)
                break
            # Backward Elimination
            #selected_features_ = copy.deepcopy(selected_features)
            for q in range(len(selected_features)):
                if verbose:
                    print(f"[Fold: {oF} | Iter: {i+1} | BE | Traversal: {q+1}] NOTE: You may see less number of features traversed over in BE, missing ones were under tolerance but not the best.", flush=True)
                temp_error = []
                for feature in selected_features: #changed
                    # remove a feature from the selected features
                    temp_features = copy.deepcopy(selected_features)
                    if len(temp_features) == 1:
                        continue
                    temp_features.remove(feature)
                    inner_error = []
                    for inner_fold in kf5.split(train_val_df):
                        training = train_val_df.iloc[inner_fold[0]]
                        validation = train_val_df.iloc[inner_fold[1]]
                        if len(specialist_features) != 0:
                            train_spec = train_specialist.iloc[inner_fold[0]]
                            valid_spec = train_specialist.iloc[inner_fold[1]]
                        # training
                        df_score = train_val_score_df.iloc[inner_fold[0]]
                        df_feature = training[temp_features]
                        # validation
                        df_val_score = train_val_score_df.iloc[inner_fold[1]]
                        df_val_feature = validation[temp_features]
                        if balance == True:
                            if len(specialist_features) != 0:
                                df_feature, df_score = balanceData(pd.concat([train_spec, df_feature], axis=1), df_score)
                                df_val_feature, df_val_score = balanceData(pd.concat([valid_spec, df_val_feature], axis=1), df_val_score)
                            else:
                                df_feature, df_score = balanceData(df_feature, df_score)
                                df_val_feature, df_val_score = balanceData(df_val_feature, df_val_score)
                        else:
                            if len(specialist_features) != 0:
                                df_feature = pd.concat([train_spec, df_feature], axis=1)
                                df_val_feature = pd.concat([valid_spec, df_val_feature], axis=1)
                        if model_name == 'consensus':
                            for one_model in model:
                                model_ = model[one_model]
                                # Train a Regressor with the selected features and predict
                                model_.fit(df_feature, df_score.values.ravel())
                                y_pred = model_.predict(df_val_feature)
                                # Calculate the mean absolute error for the validation set
                                inner_error.append(loss_estimation(metric, df_val_score, y_pred))
                        else:
                            # Train a Regressor with the selected features and predict
                            model.fit(df_feature, df_score.values.ravel())
                            y_pred = model.predict(df_val_feature)
                            # Calculate the mean absolute error for the validation set
                            inner_error.append(loss_estimation(metric, df_val_score, y_pred))
                    temp_error.append(np.mean(inner_error)) #newadd
                    if verbose:
                        if task_type == 'regression':
                            print(f"[Fold: {oF} | Iter: {i+1} | BE | Traversal: {q+1}] <- Feature Removed: {feature} | Error Found: {np.mean(inner_error)}", flush=True)
                        else:
                            print(f"[Fold: {oF} | Iter: {i+1} | BE | Traversal: {q+1}] <- Feature Removed: {feature} | Accuracy Found: {np.mean(inner_error)}", flush=True)
                if task_type == 'regression':
                    # temp_error[k] is the score with selected_features[k] removed,
                    # so argmin identifies the most beneficial removal.
                    if np.min(temp_error) < lowest_error:
                        lowest_error = np.min(temp_error)
                        selected_features.remove(selected_features[np.argmin(temp_error)]) #changed
                        if verbose:
                            if len(specialist_features) != 0:
                                all_feat = list(specialist_features.columns) + selected_features
                                print(f"[Fold: {oF} | Iter: {i+1} | BE | Traversal: {q+1}] - Traversal over all features finished | {metric}: {lowest_error:.4f} | Selected Features: {all_feat}", flush=True)
                            else:
                                print(f"[Fold: {oF} | Iter: {i+1} | BE | Traversal: {q+1}] - Traversal over all features finished | {metric}: {lowest_error:.4f} | Selected Features: {selected_features}", flush=True)
                    else:
                        flag_BE = 1
                        if verbose:
                            print(f"[Fold: {oF} | Iter: {i+1} | BE | Traversal: {q+1}] No removal of additional feature improves performance | Selected Features: {selected_features}", flush=True)
                        break
                else:
                    if np.max(temp_error) > highest_accuracy:
                        highest_accuracy = np.max(temp_error)
                        selected_features.remove(selected_features[np.argmax(temp_error)]) #changed
                        if verbose:
                            if len(specialist_features) != 0:
                                all_feat = list(specialist_features.columns) + selected_features
                                print(f"[Fold: {oF} | Iter: {i+1} | BE | Traversal: {q+1}] - Traversal over all features finished | {metric}: {highest_accuracy:.4f} | Selected Features: {all_feat}", flush=True)
                            else:
                                print(f"[Fold: {oF} | Iter: {i+1} | BE | Traversal: {q+1}] - Traversal over all features finished | {metric}: {highest_accuracy:.4f} | Selected Features: {selected_features}", flush=True)
                    else:
                        flag_BE = 1
                        if verbose:
                            print(f"[Fold: {oF} | Iter: {i+1} | BE | Traversal: {q+1}] No removal of additional feature improves performance | Selected Features: {selected_features}", flush=True)
                        break
            if flag_FI and flag_BE:
                if verbose:
                    print(f"[Fold: {oF} | Iter: {i+1} | -- | Traversal: -] Since no addition or removal of any features improves performance, skipping next iterations.", flush=True)
                break
            # Stop iterating when the selected set stabilizes between rounds.
            if i == 0:
                f_set_1 = copy.deepcopy(selected_features)
            else:
                f_set = copy.deepcopy(selected_features)
                if sorted(f_set_1) == sorted(f_set):
                    if verbose:
                        print(f"[Fold: {oF} | Iter: {i+1} | BE | Traversal: {q+1}] Selected features in this iteration did not change from the previous iteration. So quiting further iterations.", flush=True)
                    break
                else:
                    f_set_1 = f_set
        # saving intermediate results: features selected in each outer fold
        if save_intermediate:
            if output_dir is not None:
                # Check if the directory exists or can be created
                if not os.path.exists(output_dir):
                    try:
                        os.makedirs(output_dir)
                    except OSError as e:
                        raise RuntimeError(f"Error creating directory '{output_dir}': {e}")
                # Check if the user has write permission to the directory
                if os.access(output_dir, os.W_OK):
                    # Directory exists and user has write permission
                    with open(os.path.join(output_dir, f"Selected_features_at_fold_{oF}.txt"), "w") as f:
                        f.write(f"{selected_features}")
                else:
                    raise PermissionError(f"You do not have write permission to directory '{output_dir}'")
        # saving features selected across all outer folds
        if oF == 1:
            frequency_of_features_selected_all_fold = [selected_features]
        else:
            frequency_of_features_selected_all_fold = frequency_of_features_selected_all_fold + [selected_features]
        #print(frequency_of_features_selected_all_fold)
        #feature_counts = Counter(frequency_of_features_selected_all_fold)
        #return feature_counts
    return frequency_of_features_selected_all_fold
def inference(final_features, nFold, feature_df, score_df, shuffle_flag, random_seed, specialist_features, balance, model_name, model, metric, task_type, probability):
    """Cross-validated evaluation of the final selected feature set.

    For each of the nFold folds, fits the model(s) on the training split
    (specialist features, if any, prepended) and predicts the held-out split.

    Parameters
    ----------
    final_features : list of selected feature names.
    nFold : number of folds for the KFold split.
    feature_df, score_df : feature matrix and targets, row-aligned.
    shuffle_flag, random_seed : forwarded to sklearn KFold.
    specialist_features : DataFrame of mandatory features (empty to disable).
    balance : unused here; kept for signature compatibility with train().
    model_name : 'consensus' averages/votes over a dict of models, otherwise
        ``model`` is a single estimator.
    model : dict of estimators (consensus) or a single estimator.
    metric : metric keyword understood by loss_estimation().
    task_type : 'regression' or 'classification'.
    probability : when True (classification), also collect class-1 probabilities.

    Returns
    -------
    subjects, actual, predicted, valPerformanceByFold
        ``predicted`` becomes [predictions, probabilities] when probability=True.
    """
    kf5 = KFold(n_splits = nFold, shuffle=shuffle_flag, random_state=random_seed)
    valPerformanceByFold = []
    actual_score = []
    predicted_score = []
    subjects = []
    predicted_probs = []
    # Generating subject IDs (sub1, sub2, etc.)
    subject_ids = ['sub' + str(i+1) for i in range(len(feature_df))]
    for infer_fold in kf5.split(feature_df):
        train_val_df = feature_df.iloc[infer_fold[0]]
        test_df = feature_df.iloc[infer_fold[1]]
        # Subject IDs belonging to the held-out fold
        test_subjects = [subject_ids[i] for i in infer_fold[1]]
        if len(specialist_features) != 0:
            train_specialist = specialist_features.iloc[infer_fold[0]]
            test_specialist = specialist_features.iloc[infer_fold[1]]
        # Training targets and features
        df_s = score_df.iloc[infer_fold[0]]
        df_f = train_val_df[final_features]
        # Held-out targets and features
        df_val_s = score_df.iloc[infer_fold[1]]
        df_val_f = test_df[final_features]
        if len(specialist_features) != 0:
            df_f = pd.concat([train_specialist, df_f], axis=1)
            df_val_f = pd.concat([test_specialist, df_val_f], axis=1)
        if model_name == 'consensus':
            predictions = []
            probabilities = []
            for one_model in model:
                model_ = model[one_model]
                # Fit each member model with the selected features and predict
                model_.fit(df_f, df_s.values.ravel())
                y_pred = model_.predict(df_val_f)
                predictions.append(y_pred)
                if probability == True:
                    y_pred_proba = model_.predict_proba(df_val_f)
                    probabilities.append(y_pred_proba[:, 1])  # probability of class 1
            if probability == True:
                # Average class-1 probabilities over all member models
                # (generalized from the previous hard-coded 3-model average).
                consensus_prob = list(np.mean(probabilities, axis=0))
                predicted_probs.append(consensus_prob)
            if task_type == 'regression':
                # Consensus regression prediction: mean over member models
                consensus_pred = list(np.mean(predictions, axis=0))
            elif task_type == 'classification':
                # Strict majority vote over member models; a tie (possible only
                # with an even model count) resolves to class 0, matching the
                # original 3-model "sum > 1" rule.
                n_models = len(predictions)
                consensus_pred = [1 if sum(votes) > n_models / 2 else 0 for votes in zip(*predictions)]
            # Calculating the performance for the validation set
            performance = loss_estimation(metric, df_val_s, consensus_pred)
            # Saving the actual, predicted scores, and subject IDs
            actual_score.append(df_val_s.values.ravel().tolist())
            predicted_score.append(consensus_pred)
            subjects.append(test_subjects)
            valPerformanceByFold.append(performance)
        else:
            # Fitting the single model with the selected features and predicting
            model.fit(df_f, df_s.values.ravel())
            y_pred = model.predict(df_val_f)
            if probability == True and task_type == 'classification':
                y_pred_proba = model.predict_proba(df_val_f)
                predicted_probs.append(y_pred_proba[:, 1])  # probability of class 1
            # Calculating the performance for the validation set
            valPerformanceByFold.append(loss_estimation(metric, df_val_s, y_pred))
            actual_score.append(df_val_s.values.ravel().tolist())
            predicted_score.append(y_pred.tolist())
            subjects.append(test_subjects)
    # Flattening the per-fold lists of actual scores, predictions, and subject IDs
    actual = [item for sublist in actual_score for item in sublist]
    predicted = [round(item, 2) for sublist in predicted_score for item in sublist]
    subjects = [item for sublist in subjects for item in sublist]
    if probability == True:
        predicted_probs = [round(item, 2) for sublist in predicted_probs for item in sublist]
        return subjects, actual, [predicted]+[predicted_probs], valPerformanceByFold
    else:
        return subjects, actual, predicted, valPerformanceByFold
def inference_additional(final_features, feature_df, score_df, specialist_features, inference_data_df, inference_score_df, model_name, model, metric, task_type, probability):
    """Fit on the full training data and evaluate on a separate inference set.

    Unlike inference(), no cross-validation is performed: the model(s) are
    trained once on (specialist_features +) feature_df[final_features] against
    score_df, then scored on inference_data_df against inference_score_df.

    Parameters mirror inference(); ``model`` is a dict of estimators when
    model_name == 'consensus', otherwise a single estimator.

    Returns
    -------
    subjects, actual, predicted, valPerformanceByFold
        ``predicted`` becomes [predictions, probabilities] when probability=True.
    """
    valPerformanceByFold = []
    actual_score = []
    predicted_score = []
    subjects = []
    predicted_probs = []
    # Training features: specialist features (if any) are always prepended.
    if len(specialist_features) != 0:
        df_val_f = pd.concat([specialist_features, feature_df[final_features]], axis=1)
    else:
        df_val_f = feature_df[final_features]
    specialist_features = pd.DataFrame(specialist_features)
    specialist_feature_names = list(specialist_features.columns)
    all_column_names = specialist_feature_names + final_features
    # Keep the inference set's columns in the same order as the training columns.
    inference_data_df2 = inference_data_df[all_column_names].copy()
    # Extracting subject IDs (e.g., sub1, sub2, sub3,...)
    inference_subjects = ["sub" + str(i + 1) for i in range(len(inference_data_df))]
    if model_name == 'consensus':
        predictions = []
        # BUGFIX: `probabilities` was never initialized, raising NameError
        # whenever probability=True on this path.
        probabilities = []
        for one_model in model:
            model_ = model[one_model]
            # Fit each member model on the full training data and predict
            model_.fit(df_val_f, score_df.values.ravel())
            y_pred = model_.predict(inference_data_df2)
            predictions.append(y_pred)
            if probability == True:
                y_pred_proba = model_.predict_proba(inference_data_df2)
                probabilities.append(y_pred_proba[:, 1])  # probability of class 1
        if probability == True:
            # Average class-1 probabilities over all member models
            # (generalized from the previous hard-coded 3-model average).
            consensus_prob = list(np.mean(probabilities, axis=0))
            predicted_probs.append(consensus_prob)
        if task_type == 'regression':
            # Consensus regression prediction: mean over member models
            consensus_pred = list(np.mean(predictions, axis=0))
        elif task_type == 'classification':
            # Strict majority vote; ties (even model counts) fall to class 0,
            # matching the original 3-model "sum > 1" rule.
            n_models = len(predictions)
            consensus_pred = [1 if sum(votes) > n_models / 2 else 0 for votes in zip(*predictions)]
        # BUGFIX: this branch previously referenced the undefined `df_val_s`;
        # the ground truth for this path is inference_score_df (as in the
        # single-model branch below).
        performance = loss_estimation(metric, inference_score_df, consensus_pred)
        # Save the actual and prediction scores
        actual_score.append(inference_score_df.values.ravel().tolist())
        predicted_score.append(consensus_pred)
        subjects.append(inference_subjects)
        valPerformanceByFold.append(performance)
    else:
        # Fit the single model with the selected features and predict
        model.fit(df_val_f, score_df.values.ravel())
        y_pred = model.predict(inference_data_df2)
        if probability == True and task_type == 'classification':
            y_pred_proba = model.predict_proba(inference_data_df2)
            predicted_probs.append(y_pred_proba[:, 1])  # probability of class 1
        # Performance on the inference set
        valPerformanceByFold.append(loss_estimation(metric, inference_score_df, y_pred))
        actual_score.append(inference_score_df.values.ravel().tolist())
        predicted_score.append(y_pred.tolist())
        subjects.append(inference_subjects)
    # Flatten the lists of actual scores, predicted scores, and subject IDs
    actual = [item for sublist in actual_score for item in sublist]
    predicted = [round(item, 2) for sublist in predicted_score for item in sublist]
    subjects = [item for sublist in subjects for item in sublist]
    if probability == True:
        predicted_probs = [round(item, 2) for sublist in predicted_probs for item in sublist]
        return subjects, actual, [predicted]+[predicted_probs], valPerformanceByFold
    else:
        return subjects, actual, predicted, valPerformanceByFold
def loss_estimation(metric, true_values, predicted_values):
true_values = np.array(true_values)
predicted_values = np.array(predicted_values)
if metric == 'MAE':
mae = mean_absolute_error(true_values, predicted_values)
return mae
elif metric == 'MAPE':
true_values_no_zero = np.where(true_values == 0, 1e-10, true_values)
mape = np.mean(np.abs((true_values - predicted_values) / true_values_no_zero)) * 100
return mape
elif metric == 'Accuracy':
accuracy = accuracy_score(true_values, predicted_values)
return accuracy
elif metric == 'binaryROC':
cm = confusion_matrix(true_values, predicted_values)
tn, fp, fn, tp = cm.ravel()
epsilon = 1e-7
sensitivity = round(tp / (tp + fn + epsilon), 2)
specificity = round(tn / (tn + fp + epsilon), 2)
binaryROC = ((1-sensitivity) ** 2) + ((1-specificity) ** 2)
return binaryROC
elif metric == 'F1-score':