# train_autoencoder.py
import os
import socket
from typing import Optional

import hydra
import torch
from omegaconf import DictConfig, OmegaConf
from ignite.contrib.handlers import ProgressBar
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger
from ignite.engine import Events, Engine
from ignite.handlers import Checkpoint, DiskSaver
from ignite.metrics import RunningAverage

from src.core.trainer import AutoEncoderTrainer, setup_scheduler_step
from src.data import create_skeleton
from src.train_utils import setup_validation_autoencoder, create_train_dataloaders, resume_training, create_model
from src.utils.config import flat_hydra_config, config_exp_folder
from src.utils.reproducibility import set_seed
from src.utils.tensorboard import setup_tb_logging_valid, setup_tb_logging_train

# Allow `${eval:...}` expressions inside OmegaConf/Hydra config files.
OmegaConf.register_new_resolver("eval", eval)

# Required for deterministic cuBLAS kernels (see PyTorch's reproducibility notes).
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
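# Usage (a sketch; the actual override keys live in configs/config_train_autoencoder/config.yaml,
# so the names below are illustrative, not guaranteed):
#   python train_autoencoder.py device=cuda num_epochs=1000 if_run_validation=true
# Thanks to the "eval" resolver registered above, config values may also be computed
# expressions, e.g. `warmup_iters: ${eval:'${num_iter_perepoch} // 10'}` (hypothetical key).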

def train(output_log_path: str,
          num_epochs: int,
          save_frequency: int = 100,
          eval_frequency: Optional[int] = None,
          num_iteration_eval: int = 0,
          device: str = 'cpu',
          seed: int = 52345,
          num_iter_perepoch: int = 580,
          detect_anomaly: bool = False,
          if_resume_training: bool = False,
          if_run_validation: bool = False,
          **kwargs) -> None:
    torch.autograd.set_detect_anomaly(detect_anomaly)

    # Initialize seeds and the default floating-point precision
    set_seed(seed)
    torch.set_default_dtype(torch.float64 if kwargs["dtype"] == "float64" else torch.float32)
    gpu_name = torch.cuda.get_device_name(0).replace(' ', '_') if torch.cuda.is_available() else ''
    print(f"Default datatype: {torch.get_default_dtype()}. Using device: {device} - {gpu_name}")
    skeleton = create_skeleton(**kwargs)

    # Create model
    model = create_model(device, skeleton=skeleton, num_nodes=skeleton.num_nodes,
                         node_types=skeleton.nodes_type_id, **kwargs['autoenc_arch'], **kwargs)
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Created Autoencoder with {num_params / 1e6:.3f}M trainable parameters.")

    # Load dataset
    data_loader_train, data_loader_eval, data_loader_train_eval, random_state_manager = create_train_dataloaders(
        skeleton=skeleton, if_run_validation=if_run_validation, if_resume_training=if_resume_training, **kwargs)

    model_trainer = AutoEncoderTrainer(model, iter_per_epoch=num_iter_perepoch, **kwargs)
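    # `random_state_manager` appears to own the RNG state used for data loading; it is
    # re-seeded every epoch (below) and checkpointed so that data order survives resumes.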
    def set_epoch_seed(engine: Engine):
        # Re-seed per epoch so training is reproducible and can be resumed mid-run.
        set_seed(seed + engine.state.epoch)
        random_state_manager.reseed_generator(seed + engine.state.epoch)

    def preprocess(engine: Engine):
        # Move the first two tensors of the batch (inputs, targets) to the target device.
        engine.state.batch = [t.to(device) for t in engine.state.batch[:2]]
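    # A note on ordering: Ignite fires EPOCH_STARTED before an epoch's first batch is drawn,
    # so `set_epoch_seed` also governs data shuffling, and fires ITERATION_STARTED after the
    # batch is fetched but before the process function, so `preprocess` can move
    # `engine.state.batch` to the device before `train_step` sees it.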
    # Define the training engine
    trainer = Engine(model_trainer.train_step)
    RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'Loss')
    trainer.add_event_handler(Events.ITERATION_STARTED, preprocess)
    trainer.add_event_handler(Events.EPOCH_STARTED, set_epoch_seed)

    # Set up schedulers
    setup_scheduler_step(model_trainer, train_engine=trainer)
    if model_trainer.curriculum_scheduler is not None:
        trainer.add_event_handler(Events.ITERATION_STARTED, model_trainer.curriculum_scheduler)

    # Progress bar showing the running-average loss
    pbar = ProgressBar()
    pbar.attach(trainer, ['Loss'])
    # Set up tensorboard logging
    tb_logger = TensorboardLogger(log_dir=os.path.join(output_log_path, 'tb'))
    setup_tb_logging_train(tb_logger, trainer=trainer, optimizer=model_trainer.optimizer,
                           model=model, train_out_transform=lambda out: {"loss": out[0], "pred horiz": out[-1]})

    if if_run_validation:
        evaluator, evaluator_train = setup_validation_autoencoder(model, preprocess, model_trainer, skeleton)
        pbar.attach(evaluator)
        pbar.attach(evaluator_train)
        setup_tb_logging_valid(tb_logger, trainer=trainer, evaluator=evaluator, evaluator_train=evaluator_train,
                               loss_names=["Loss"], metric_names=['ADE', 'FDE', "MPJPE_AVG", "MPJPE"])
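    # `evaluator` scores the held-out validation set; `evaluator_train` re-scores a slice of
    # the training set (capped at `num_iteration_eval` batches below) with the same metrics.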
    # Define handlers for saving checkpoints
    objects_to_checkpoint = {'trainer': trainer, 'model': model, "random_states": random_state_manager,
                             **model_trainer.objects_to_checkpoint()}
    # Best-model checkpoints, ranked by validation MPJPE (negated so that lower error scores higher).
    checkpoint_handler = Checkpoint(objects_to_checkpoint,
                                    DiskSaver(os.path.join(output_log_path, "checkpoints"), create_dir=True, require_empty=False),
                                    n_saved=20,
                                    global_step_transform=lambda engine, event_name: trainer.state.epoch,
                                    score_function=lambda engine: -1 * round(engine.state.metrics['MPJPE_AVG'].item(), 7),
                                    score_name="val_mpjpe")
    # Periodic checkpoints that are never pruned (n_saved=None).
    static_checkpoint_handler = Checkpoint(objects_to_checkpoint, checkpoint_handler.save_handler, n_saved=None,
                                           global_step_transform=lambda engine, event_name: trainer.state.epoch)
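    # With these settings, Ignite's default filename pattern should yield files roughly like
    # "checkpoints/checkpoint_<epoch>_val_mpjpe=<score>.pt" for the scored handler and
    # "checkpoints/checkpoint_<epoch>.pt" for the periodic one (details may vary by version).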
    if if_resume_training:
        # map_location keeps resuming robust when the checkpoint was saved on a different device.
        checkpoint = torch.load(kwargs['load_path'], map_location=device)
        Checkpoint.load_objects(to_load=objects_to_checkpoint, checkpoint=checkpoint)
        print(f"Resuming training from epoch = {trainer.state.epoch}, iteration = {trainer.state.iteration}.")

    # Regularly save checkpoints during training
    @trainer.on(Events.EPOCH_COMPLETED(every=save_frequency))
    def save_latest_model(engine: Engine):
        static_checkpoint_handler(trainer)
    # Run validation between epochs
    if if_run_validation:
        @trainer.on(Events.EPOCH_COMPLETED(every=eval_frequency))
        def train_epoch_completed(engine: Engine):
            # Evaluate with a fixed seed so validation metrics are comparable across epochs,
            # then restore the epoch-dependent training seed afterwards.
            set_seed(0)
            random_state_manager.reseed_generator(0)
            evaluator.run(data_loader_eval)
            evaluator_train.run(data_loader_train_eval, epoch_length=num_iteration_eval)
            checkpoint_handler(evaluator)
            set_seed(seed + trainer.state.epoch)
            random_state_manager.reseed_generator(seed + trainer.state.epoch)

    trainer.run(data_loader_train, epoch_length=num_iter_perepoch, max_epochs=num_epochs)
    static_checkpoint_handler(trainer)  # Save the final model after training
    tb_logger.close()
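
# Loading a saved checkpoint outside this script (a minimal sketch; assumes a model built with
# the same `create_model` arguments as during training, and a hypothetical checkpoint filename):
#   ckpt = torch.load("outputs/.../checkpoints/checkpoint_100_val_mpjpe=-0.05.pt", map_location="cpu")
#   Checkpoint.load_objects(to_load={"model": model}, checkpoint=ckpt)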

@hydra.main(config_path="./configs/config_train_autoencoder", config_name="config", version_base="1.3")
def main(cfg: DictConfig):
    cfg = OmegaConf.to_container(cfg, resolve=True)
    # Flatten the nested Hydra config for backwards compatibility with the training code
    cfg = flat_hydra_config(cfg)
    assert cfg['num_workers'] >= 1, "num_workers >= 1 is required for reproducible data loading"

    if cfg['if_resume_training']:
        print("The current configuration will be ignored; the configuration of the resumed run is used instead.")
        cfg = resume_training(cfg)
    else:
        # Start a new experiment from scratch
        cfg = config_exp_folder(cfg)

    print("Training data saved at", cfg['output_log_path'])
    print("Training on host:", socket.gethostname().split('.')[0])
    train(**cfg)

if __name__ == '__main__':
    main()