-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest_models.py
More file actions
139 lines (132 loc) · 6.14 KB
/
test_models.py
File metadata and controls
139 lines (132 loc) · 6.14 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
import argparse
import numpy as np
import os
import time
import pickle
def parse_args():
    """Parse command-line arguments for training a score model.

    Returns:
        argparse.Namespace: all training hyperparameters (embedding type,
        plastic type, model architecture, and optimizer/loop settings).
    """
    parser = argparse.ArgumentParser(description='Train Score Model')
    parser.add_argument('--embedding_type', type=str, choices=['onehot'], required=True,
                        help='Type of embedding to use: onehot')
    # NOTE: 'PMMA' is included so the PMMA data-loading branch in main()
    # is actually reachable (it was handled there but rejected here).
    parser.add_argument('--plastic_type', type=str, default='PET',
                        choices=['PET', 'PE', 'PP', 'PVC', 'PMMA', 'Nylon'],
                        help='Type of plastic to use: PET, PE, PP, PVC, PMMA, Nylon')
    parser.add_argument('--batch_size', type=int, default=128,
                        help='Batch size for training and embedding computation')
    parser.add_argument('--epochs', type=int, default=500,
                        help='Number of epochs for training')
    parser.add_argument('--model_type', type=str,
                        choices=['bilstm', 'lstm', 'transformer', 'gru', 'rnn'], default='lstm',
                        help='Type of model to train: bilstm, lstm, transformer, gru, rnn')
    parser.add_argument('--session_name', type=str, default='experiment',
                        help='Session name for training')
    parser.add_argument('--device', type=str, default='cuda:0',
                        help='Device to use for training and embedding computation')
    parser.add_argument('--hidden_dim', type=int, default=128,
                        help='Hidden dimension for the model')
    parser.add_argument('--num_layers', type=int, default=2,
                        help='Number of layers for the model')
    parser.add_argument('--num_heads', type=int, default=8,
                        help='Number of heads for the transformer model')
    parser.add_argument('--dropout', type=float, default=0.1,
                        help='Dropout rate for the transformer model')
    return parser.parse_args()
def main():
    """Load the one-hot train/val split for the chosen plastic and train
    the requested score model.

    Expects data under ``onehot_train_val_test/<plastic_type>/`` as
    ``X_train.npy``, ``y_train.npy``, ``X_val.npy``, ``y_val.npy``.
    Raises FileNotFoundError (from np.load) if the split files are missing.
    """
    args = parse_args()

    # Only one embedding scheme is implemented; bail out early otherwise.
    if args.embedding_type != 'onehot':
        print(f"Unsupported embedding type: {args.embedding_type}")
        return

    # All plastics share the same directory layout, so a single path
    # construction replaces the previous per-plastic branches.
    # argparse `choices` already guarantees plastic_type is valid.
    print(f"using {args.plastic_type}")
    data_dir = os.path.join('onehot_train_val_test', args.plastic_type)
    Z_train = np.load(os.path.join(data_dir, 'X_train.npy'))
    y_train = np.load(os.path.join(data_dir, 'y_train.npy'))
    Z_val = np.load(os.path.join(data_dir, 'X_val.npy'))
    y_val = np.load(os.path.join(data_dir, 'y_val.npy'))

    # Shape report (previously only printed for PET; now uniform).
    print("Train data loaded:")
    print("Z_train shape:", Z_train.shape)
    print("y_train shape:", y_train.shape)
    print("Validation data loaded:")
    print("Z_val shape:", Z_val.shape)
    print("y_val shape:", y_val.shape)

    # Deferred import: project-local module, only needed for training.
    from PepBD_surrogate import ScoreModel  # Ensure this module is available

    print(f"\n{'=' * 50}")
    print(f"Training model type: {args.model_type.upper()}")
    print(f"{'=' * 50}")

    start_time = time.time()
    model = ScoreModel(
        session_name=args.session_name,
        model_type=args.model_type,
        epochs=args.epochs,
        batch_size=args.batch_size,
        device=args.device,  # Use the specified device
        n_samples=Z_train.shape[0],
        hidden_dim=args.hidden_dim,
        num_layers=args.num_layers,
        num_heads=args.num_heads,
        dropout=args.dropout,
    )

    print("Starting training...")
    history = model.train(Z_train=Z_train, Y_train=y_train, Z_val=Z_val, Y_val=y_val)
    training_time = time.time() - start_time
    print(f"Training time for {args.model_type.upper()}: {training_time:.2f} seconds")
if __name__ == "__main__":
main()