-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path1_AG_Training.py
More file actions
73 lines (59 loc) · 2.68 KB
/
1_AG_Training.py
File metadata and controls
73 lines (59 loc) · 2.68 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
# -*- coding: utf-8 -*-
# +
"""
Autogluon training script.
Usage: python3 1_AG_Training.py
"""
from autogluon.tabular import TabularPredictor
import uproot
import pandas as pd
import argparse
import utilities as util
from termcolor import colored
def argparser():
    """Build the command-line argument parser for the training script.

    Returns:
        argparse.ArgumentParser: parser exposing ``--presets`` (AutoGluon
        quality preset, default ``medium_quality``) and ``--time_limit``
        (training time budget in seconds, default 3600).
    """
    quality_presets = ['best_quality', 'high_quality',
                       'good_quality', 'medium_quality']
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # AutoGluon training-quality preset (trades accuracy vs. speed).
    parser.add_argument('-p', "--presets",
                        type=str,
                        default='medium_quality',
                        choices=quality_presets,
                        help="Training presets")
    # Wall-clock budget, in seconds, handed to TabularPredictor.fit.
    parser.add_argument('-t', "--time_limit",
                        type=int,
                        default=3600,
                        help="Time limit for training")
    return parser
if __name__ == "__main__":
    args = argparser().parse_args()

    # Training variables are the CS-frame branch names defined in the
    # shared utilities module; only these branches are read from the trees.
    training_variables = util.CS_variables

    # Load MC and data samples from the B0 trees into numpy-backed arrays,
    # keeping only the training branches.
    print(colored('Loading data and initializing configurations', 'blue'))
    MC_4Soffres = uproot.concatenate(
        ['Samples/MC16rd/e_channel/4Soffres_deimos_1/*.root:B0'],
        library="np",
        filter_branch=lambda branch: branch.name in training_variables)
    data_4Soffres = uproot.concatenate(
        ['Samples/Data/e_channel/proc16_4Soffres_deimos_1.root:B0'],
        library="np",
        filter_branch=lambda branch: branch.name in training_variables)
    df_mc_4Soffres = pd.DataFrame(MC_4Soffres)
    df_data_4Soffres = pd.DataFrame(data_4Soffres)

    # Binary classification label: 1 = real data, 0 = Monte Carlo.
    df_data_4Soffres['data'] = 1
    df_mc_4Soffres['data'] = 0
    df_all = pd.concat([df_data_4Soffres, df_mc_4Soffres], ignore_index=True)

    # # train test split
    # print(colored('Splitting training test samples', 'green'))
    # train, test = train_test_split(df_all, test_size=0.2, random_state=0, shuffle=True, stratify=df_all['data'])

    # Define and fit the AutoGluon classifier. f1_macro + balanced sample
    # weights compensate for any data/MC class imbalance.
    ag = TabularPredictor(label='data', eval_metric='f1_macro',
                          sample_weight='balance_weight')
    # BUGFIX: `excluded_model_types` was previously outside the closed
    # fit(...) call, which made the script a SyntaxError; it now belongs
    # to the call as intended.
    predictor = ag.fit(df_all,
                       presets=args.presets,
                       time_limit=args.time_limit,
                       save_bag_folds=True,
                       infer_limit=0.05,
                       infer_limit_batch_size=10000,
                       # hyperparameters={"GBM": ['GBMLarge']},
                       excluded_model_types=['CAT'])  # 'FASTAI','RF','XT','KNN','XGB'