train.py

import csv
import dgl
import os
import sys
import torch
import argparse
import logging
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from torch.utils.data import DataLoader
from dataset import DesignDataset
from model import MetricPredictor
from netlist import read_netlist
from log import get_logger

# setup device
device = torch.device("cuda:{}".format(0) if torch.cuda.is_available() else "cpu")


def collate(samples):
    # merge a list of dataset samples into a single batch
    IDs, Gs, Ms = [], [], []
    for s in samples:
        IDs.append(s['id'])
        Gs.append(s['design'])
        Ms.append(s['metrics'].view(-1, 1))
    return IDs, dgl.batch(Gs).to(device), torch.cat(Ms, dim=0)
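
# NOTE (assumption): judging from the accesses above, each sample is a dict of the
# form {'id': str, 'design': dgl.DGLGraph, 'metrics': torch.Tensor}; dgl.batch
# merges the per-design graphs so one forward pass covers the whole minibatch.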


if __name__ == "__main__":
    parser = argparse.ArgumentParser(add_help=True)
    parser.add_argument("output_dir", type=str,
                        help="Output directory of the model")
    parser.add_argument("-model", type=str, required=False, default=None,
                        help="Path of a saved model. If passed, training is skipped.")
    parser.add_argument("-epochs", type=int, required=False, default=100,
                        help="Number of epochs")
    parser.add_argument("-lr", type=float, required=False, default=1e-4,
                        help="Learning rate")
    parser.add_argument("-batch_size", type=int, required=False, default=32,
                        help="Batch size")
    parser.add_argument("-gcn_hidden_dim", type=int, required=False, default=256,
                        help="Number of hidden units of the GCN")
    parser.add_argument("-netlist_embedding_size", type=int, required=False, default=128,
                        help="Embedding size of the netlist")
    parser.add_argument("-predictor_hidden_dim", type=int, required=False, default=192,
                        help="Number of hidden units of the final predictor layer")
    args = parser.parse_args()
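
    # Example invocation (hypothetical output directory and settings):
    #   python train.py runs/exp1 -epochs 200 -lr 5e-5 -batch_size 16
    # Passing -model <path to a saved .pth file> skips training and only runs the test loop.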
    logging.getLogger('matplotlib.font_manager').disabled = True
    logger = get_logger()
    learning_rate = args.lr
    epochs = args.epochs
    batch_size = args.batch_size

    # set up the output directory and a filename prefix encoding the hyperparameters
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    prefix = [args.epochs, args.lr, args.gcn_hidden_dim, args.netlist_embedding_size,
              args.predictor_hidden_dim]
    prefix = '-'.join(map(str, prefix))

    # load designs: each .dgl file under data/dgl holds one serialized netlist graph
    dgl_graphs = 'data/dgl'
    dgl_files = [f for f in os.listdir(dgl_graphs)
                 if os.path.isfile(os.path.join(dgl_graphs, f)) and f.endswith(".dgl")]
    Gs = {}
    for dgl_file in dgl_files:
        # strip the trailing ".dgl" extension to get the on-disk stem
        stem = '.'.join(dgl_file.split('.')[:-1])
        g = read_netlist('', os.path.join(dgl_graphs, stem))
        # the design key drops one more extension (e.g. a file "adder.v.dgl" keys as "adder")
        design = '.'.join(dgl_file.split('.')[:-2])
        Gs[design] = g

    # load dataset
    train_dataset = DesignDataset(Gs, 'data/train.csv')
    test_dataset = DesignDataset(Gs, 'data/test.csv')

    # define model
    model = MetricPredictor(2, args.gcn_hidden_dim, args.netlist_embedding_size,
                            args.predictor_hidden_dim).to(device)
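    # NOTE (assumption): the leading 2 is read here as the input node-feature
    # dimension expected by MetricPredictor; its exact meaning is defined in model.py.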
    loss_func = nn.MSELoss()

    # bounds of the raw metric, used below to rescale the normalized RMSE
    _min = 1.0
    _max = 12012.0
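    # The "scaled RMSE" reported below inverts the min-max scaling:
    #   scaled_rmse = rmse * (_max - _min) + _min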

    if not args.model:
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        # train
        model.train()
        epoch_losses = []
        for epoch in range(epochs):
            epoch_loss = 0
            dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                    num_workers=batch_size, collate_fn=collate)
            for i_batch, (_, G, M) in enumerate(dataloader):
                _, prediction = model(G)
                loss = loss_func(prediction.to(device), M.to(device))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                epoch_loss += loss.detach().item()
                if i_batch % 10 == 0:
                    logger.info('Epoch: {}, iteration: {}, MSE: {:.4f}'.format(epoch, i_batch, epoch_loss / (i_batch + 1)))
            # i_batch holds the last batch index, so add one to get the batch count
            i_batch += 1
            epoch_loss /= i_batch
            mse = epoch_loss
            rmse = mse ** 0.5
            scaled_rmse = (rmse * (_max - _min)) + _min
            logger.info('Epoch {}, MSE {:.4f}'.format(epoch, mse))
            logger.info('Epoch {}, RMSE {:.4f}'.format(epoch, rmse))
            logger.info('Epoch {}, Scaled RMSE {:.4f}'.format(epoch, scaled_rmse))
            epoch_losses.append(epoch_loss)
        with open(os.path.join(args.output_dir, prefix + '-training-losses.log'), 'w') as f:
            f.write('\n'.join(map(str, epoch_losses)))
        torch.save(model.state_dict(), os.path.join(args.output_dir, prefix + '-model.pth'))
    else:
        logger.info("Skipping training. Loading an existing model.")
        model.load_state_dict(torch.load(args.model))

    # test
    losses = []
    embeddings = []
    model.eval()
    dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True,
                            num_workers=batch_size, collate_fn=collate)
    with torch.no_grad():  # no gradients are needed during evaluation
        for i_batch, (ID, G, M) in enumerate(dataloader):
            logger.info('Test batch: {}'.format(i_batch))
            graph_embedding, prediction = model(G)
            loss = loss_func(prediction.to(device), M.to(device))
            losses.append(loss.item())
            embeddings.append((ID, graph_embedding.tolist(), prediction.tolist(), M.tolist()))

    # write hyperparameters and aggregate test metrics to the run log
    with open(os.path.join(args.output_dir, prefix + '.log'), 'w') as f:
        for arg in vars(args):
            f.write(str(arg) + ': ' + str(getattr(args, arg)) + '\n')
        f.write('\n\n')
        mse = torch.mean(torch.tensor(losses)).item()
        rmse = mse ** 0.5
        scaled_rmse = (rmse * (_max - _min)) + _min
        f.write('MSE: {}\n'.format(mse))
        f.write('RMSE: {:.4f}\n'.format(rmse))
        f.write('Scaled RMSE: {:.4f}'.format(scaled_rmse))
    logger.info('MSE: {}'.format(mse))
    logger.info('RMSE: {:.4f}'.format(rmse))
    logger.info('Scaled RMSE: {:.4f}'.format(scaled_rmse))
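
    # Each CSV row is: <design id>,<embedding values joined by ';'>,<predicted metric>,<actual metric>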
    with open(os.path.join(args.output_dir, prefix + '-embeddings.csv'), 'w') as f:
        for ID, graph_embeddings, prediction, actual in embeddings:
            for i in range(len(ID)):
                line = str(ID[i]) + ','
                line += ';'.join(map(str, graph_embeddings[i])) + ','
                line += str(prediction[i][0]) + ','
                line += str(actual[i][0]) + '\n'
                f.write(line)