Skip to content

Commit 8d209b4

Browse files
authored
Merge pull request #4 from DeepLabCut/jls_benchmarking
a bit more info in sys_info
2 parents 0496889 + 70f2c10 commit 8d209b4

File tree

1 file changed

+74
-34
lines changed

1 file changed

+74
-34
lines changed

dlclive/bench.py

Lines changed: 74 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -12,43 +12,66 @@
1212
import sys
1313
import argparse
1414
import pickle
15-
15+
import subprocess
16+
import typing
17+
import warnings
18+
19+
try:
20+
from pip._internal.operations import freeze
21+
except ImportError:
22+
from pip.operations import freeze
1623
import cpuinfo
1724
from tqdm import tqdm
1825
import numpy as np
1926
import tensorflow as tf
2027
import cv2
2128

2229
from dlclive import DLCLive
30+
from dlclive import VERSION
31+
from dlclive import __file__ as dlcfile
2332

2433

25-
def get_system_info():
34+
def get_system_info() -> dict:
2635
""" Return summary info for system running benchmark
2736
2837
Returns
2938
-------
30-
str
31-
name of machine
32-
str
33-
operating system
34-
str
35-
path to python (which conda/virtual environment)
36-
str
37-
device name
39+
dict
40+
Dictionary containing the following system information:
41+
42+
* ``host_name`` (str): name of machine
43+
* ``op_sys`` (str): operating system
44+
* ``python`` (str): path to python (which conda/virtual environment)
45+
* ``device_type`` (str): device type (``'GPU'`` or ``'CPU'``)
* ``device``: device information
46+
* ``freeze`` (list): list of installed packages and versions
47+
* ``python_version`` (str): python version
48+
* ``git_hash`` (str, None): If installed from git repository, hash of HEAD commit
49+
* ``dlclive_version`` (str): dlclive version from :data:`dlclive.VERSION`
3850
"""
3951

4052

4153
### get os
4254

4355
op_sys = platform.platform()
44-
host_name = platform.node()
56+
host_name = platform.node().replace(' ', '')
4557

4658
# A string giving the absolute path of the executable binary for the Python interpreter, on systems where this makes sense.
4759
if platform.system() == 'Windows':
4860
host_python = sys.executable.split(os.path.sep)[-2]
4961
else:
5062
host_python = sys.executable.split(os.path.sep)[-3]
5163

64+
# try to get git hash if possible
65+
dlc_basedir = os.path.dirname(os.path.dirname(dlcfile))
66+
git_hash = None
67+
try:
68+
git_hash = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=dlc_basedir)
69+
git_hash = git_hash.decode('utf-8').rstrip('\n')
70+
except subprocess.CalledProcessError:
71+
# not installed from git repo, eg. pypi
72+
# fine, pass quietly
73+
pass
74+
5275
### get device info (GPU or CPU)
5376

5477
dev = None
@@ -63,10 +86,21 @@ def get_system_info():
6386
dev = get_cpu_info() #[get_cpu_info()['brand']]
6487
dev_type = "CPU"
6588

66-
return host_name, op_sys, host_python, (dev_type, dev)
67-
68-
69-
def run_benchmark(model_path, video_path, resize=None, pixels=None, n_frames=10000, print_rate=False):
89+
# return a dictionary rather than a tuple for inspectability's sake
90+
return {
91+
'host_name': host_name,
92+
'op_sys' : op_sys,
93+
'python': host_python,
94+
'device_type': dev_type,
95+
'device': dev,
96+
'freeze': list(freeze.freeze()), # pip freeze to get versions of all packages
97+
'python_version': sys.version,
98+
'git_hash': git_hash,
99+
'dlclive_version': VERSION
100+
}
101+
102+
103+
def run_benchmark(model_path, video_path, resize=None, pixels=None, n_frames=10000, print_rate=False) -> typing.Tuple[np.ndarray, int, bool]:
70104
""" Benchmark on inference times for a given DLC model and video
71105
72106
Parameters
@@ -141,18 +175,24 @@ def run_benchmark(model_path, video_path, resize=None, pixels=None, n_frames=100
141175
return inf_times, resize*im_size[0] * resize*im_size[1], TFGPUinference
142176

143177

144-
def save_benchmark(sys_info, inf_times, pixels, iter , TFGPUinference, model=None, out_dir=None):
178+
def save_benchmark(sys_info: dict,
179+
inf_times: np.ndarray,
180+
pixels: typing.Union[np.ndarray, float],
181+
iter: int,
182+
TFGPUinference: bool,
183+
model: str = None,
184+
out_dir: str = None):
145185
""" Save benchmarking data with system information to a pickle file
146186
147187
Parameters
148188
----------
149-
sys_info : tuple
189+
sys_info : dict
150190
system information generated by :func:`get_system_info`
151191
inf_times : :class:`numpy.ndarray`
152192
array of inference times generated by :func:`run_benchmark`
153193
pixels : float or :class:`numpy.ndarray`
154194
number of pixels for each benchmark run. If an array, each index corresponds to a row in inf_times
155-
iter: integer
195+
iter: int
156196
number of the specific instance of experiment (so every part is saved individually)
157197
TFGPUinference: bool
158198
flag if using tensorflow inference or numpy inference DLC model
@@ -169,8 +209,8 @@ def save_benchmark(sys_info, inf_times, pixels, iter , TFGPUinference, model=Non
169209
"""
170210

171211
out_dir = out_dir if out_dir is not None else os.getcwd()
172-
host_name, op_sys, host_python, dev = sys_info
173-
host_name = host_name.replace(" ", "")
212+
# host_name, op_sys, host_python, dev = sys_info
213+
174214

175215
model_type = None
176216
if model is not None:
@@ -182,23 +222,19 @@ def save_benchmark(sys_info, inf_times, pixels, iter , TFGPUinference, model=Non
182222
model_type = None
183223

184224
fn_ind = 0
185-
base_name = "benchmark_{}_{}_{}_{}.pickle".format(host_name, dev[0], fn_ind,iter)
225+
base_name = "benchmark_{}_{}_{}_{}.pickle".format(sys_info['host_name'], sys_info['device_type'], fn_ind, iter)
186226
while os.path.isfile(os.path.normpath(out_dir + '/' + base_name)):
187227
fn_ind += 1
188-
base_name = "benchmark_{}_{}_{}_{}.pickle".format(host_name, dev[0], fn_ind,iter)
228+
base_name = "benchmark_{}_{}_{}_{}.pickle".format(sys_info['host_name'], sys_info['device_type'], fn_ind,iter)
189229

190230
fn = os.path.normpath(out_dir)
191231

192-
data = {'host_name' : host_name,
193-
'op_sys' : op_sys,
194-
'python' : host_python,
195-
'device_type' : dev[0],
196-
'device' : dev[1],
197-
'model' : model,
232+
data = {'model' : model,
198233
'model_type' : model_type,
199234
'TFGPUinference' : TFGPUinference,
200235
'pixels' : pixels,
201236
'inference_times' : inf_times}
237+
data.update(sys_info)
202238

203239
pickle.dump(data, open(os.path.normpath(out_dir + '/' + base_name), 'wb'))
204240

@@ -250,21 +286,25 @@ def benchmark_model_by_size(model_path, video_path, output=None, n_frames=10000,
250286
#inf_times = np.zeros((len(resize), n_frames))
251287
#pixels_out = np.zeros(len(resize))
252288
print(resize)
289+
290+
# get system info once, shouldn't change between runs
291+
sys_info = get_system_info()
292+
253293
for i in range(len(resize)):
254294

255295
print("\nRun {:d} / {:d}\n".format(i+1, len(resize)))
256296

257297
inf_times, pixels_out, TFGPUinference = run_benchmark(model_path,
258-
video_path,
259-
resize=resize[i],
260-
pixels=pixels[i],
261-
n_frames=n_frames,
262-
print_rate=print_rate)
298+
video_path,
299+
resize=resize[i],
300+
pixels=pixels[i],
301+
n_frames=n_frames,
302+
print_rate=print_rate)
263303

264304
#TODO: check if a part has already been completed?
265305

266306
### saving results intermediately
267-
sys_info = get_system_info()
307+
268308
#print("Your system info:", sys_info)
269309
save_benchmark(sys_info, inf_times, pixels_out, i, TFGPUinference, model=os.path.basename(model_path), out_dir=output)
270310

0 commit comments

Comments
 (0)