-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutil.py
More file actions
60 lines (51 loc) · 1.67 KB
/
util.py
File metadata and controls
60 lines (51 loc) · 1.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
from functools import wraps
import time
import torch
import random
import string
import numpy as np
import itertools
import os
# Absolute path of the directory that contains this file.
ROOTDIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
# Absolute path of the current user's home directory.
HOMEDIR = os.path.abspath(os.path.expanduser("~"))
# Scratch path; presumably a temp/cache dir for this project.  NOTE(review): POSIX-only ('/tmp') — confirm Windows is out of scope.
TMP = '/tmp/sopt'
def chunkify(l, n):
    """Yield consecutive slices of *l*, each at most *n* elements long."""
    for offset in range(0, len(l), n):
        piece = l[offset:offset + n]
        yield piece
def flatten(*args):
    """Recursively flatten any nested lists among *args* into one flat list.

    Non-list items (including tuples) are kept as-is.
    """
    out = []
    for item in args:
        if isinstance(item, list):
            out += flatten(*item)
        else:
            out.append(item)
    return out
def timeit(func):
    """Decorator that prints the wall-clock duration of every call to *func*.

    The wrapped function's return value is passed through unchanged.
    """
    @wraps(func)
    def timeit_wrapper(*args, **kwargs):
        started = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - started
        print(f'Function {func.__name__}{args} {kwargs} Took {elapsed:.4f} seconds')
        return result
    return timeit_wrapper
# Tracks whether pynvml has been initialised, so nvmlInit runs at most once.
nvmlInit_called = False
def report_cuda_size():
    """Print total/free/used memory of CUDA device 0; no-op without CUDA."""
    global nvmlInit_called
    if not torch.cuda.is_available():
        return
    from pynvml import nvmlInit, nvmlDeviceGetHandleByIndex, nvmlDeviceGetMemoryInfo
    if not nvmlInit_called:
        nvmlInit()
        nvmlInit_called = True
    handle = nvmlDeviceGetHandleByIndex(0)
    mem = nvmlDeviceGetMemoryInfo(handle)
    for label, amount in (('total', mem.total), ('free', mem.free), ('used', mem.used)):
        print(f'cuda {label} : {amount // 1024 // 1024}MB')
def report_model_size(model):
    """Print the trainable-parameter count of *model* in Mi / Ki units.

    Only parameters with ``requires_grad=True`` are counted.  Integer
    division by 1024 means small models legitimately report ``0M 0K``.
    """
    # p.numel() is exact and always a Python int.  The previous
    # np.prod(p.size()) returned float 1.0 for zero-dim parameters
    # (np.prod([]) == 1.0), which could silently make the total a float.
    params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"num params {params // 1024 // 1024}M {params // 1024}K ")
def randstring(n):
    """Return a random identifier of length *n* drawn from A-Z and 0-9."""
    pool = string.ascii_uppercase + string.digits
    chars = [random.choice(pool) for _ in range(n)]
    return ''.join(chars)