forked from ssi-research/DGH
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutils.py
More file actions
99 lines (84 loc) · 2.72 KB
/
utils.py
File metadata and controls
99 lines (84 loc) · 2.72 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
import os
import random
import numpy as np
import torch
from contextlib import contextmanager, ContextDecorator
import time
def display_run_time(start_time: float, end_time: float) -> None:
    """
    Displays the total runtime in HH:MM:SS format.

    Args:
        start_time: Start timestamp in seconds (e.g. from time.time()).
        end_time: End timestamp in seconds.
    """
    total_run_time_in_sec = int(end_time - start_time)
    # divmod splits the total into whole hours / minutes / seconds,
    # replacing the original's manual arithmetic and the three redundant
    # per-field zero-padding if/elif chains (plus a duplicate re-pad of hours).
    hours, remainder = divmod(total_run_time_in_sec, 3600)
    minutes, seconds = divmod(remainder, 60)
    # :02d zero-pads each field to two digits, matching the original output.
    print(f'Total run time in HH:MM:SS is: {hours:02d}:{minutes:02d}:{seconds:02d}')
def set_seed(seed: int = 0) -> None:
    """
    Seeds every relevant random number generator so runs are reproducible.

    Covers Python's ``random`` module, the hash seed, NumPy, and PyTorch
    (CPU plus all CUDA devices), and switches cuDNN to deterministic mode.

    Args:
        seed: The seed value to apply everywhere (default 0).
    """
    print("Setting initial seed... ")
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Apply the same seed to every generator the project may draw from.
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed,
                   torch.cuda.manual_seed_all):  # manual_seed_all covers multi-GPU
        seeder(seed)
    # Trade cuDNN autotuning for bit-exact repeatability.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def to_numpy(tensor: torch.Tensor) -> np.ndarray:
    """
    Converts a PyTorch tensor to a NumPy array.

    Passes NumPy arrays and None through unchanged, dequantizes quantized
    tensors, and copies CUDA tensors to host memory before conversion.
    """
    # None and ndarrays are returned as-is.
    if tensor is None or isinstance(tensor, np.ndarray):
        return tensor
    # Quantized tensors cannot be converted directly.
    if tensor.is_quantized:
        tensor = tensor.dequantize()
    # A GPU-resident tensor must first be moved to the CPU, and detached
    # from the autograd graph, before it can become a NumPy array.
    if getattr(tensor, 'is_cuda', False):
        return tensor.cpu().detach().numpy()
    # CPU path: detach if possible, else fall back to the plainest conversion.
    if hasattr(tensor, 'detach'):
        return tensor.detach().numpy()
    if hasattr(tensor, 'numpy'):
        return tensor.numpy()
    return np.array(tensor)
def set_model(model: torch.nn.Module, train_mode: bool = False):
    """
    Set model to work in train/eval mode and GPU mode if GPU is available
    Args:
        model: Pytorch model
        train_mode: Whether train mode or eval mode
    Returns:
    """
    # Module.train(True) == train() and Module.train(False) == eval(),
    # so a single call covers both branches of the original if/else.
    model.train(mode=train_mode)
    # Place the model on the GPU when CUDA is available, else on the CPU.
    model.cuda() if torch.cuda.is_available() else model.cpu()