Skip to content

Commit cd381e3

Browse files
MMathisLab, gkane26, sneakers-the-rat
authored
Gk dev (#11)
* dlclive: add tf_config as parameter * graph: add tfconfig, new full load graph function * bench + display: add display to benchmarking, fix bugs in display * fix bugs in display * add kwargs to processors * clean up load graph, color conversions * dlclive: add kwargs to get_pose and init_inference * add kalman filter processor * kalmanfilter processor: update delta time * dlclive: destroy display on close * new analyze tools: speed bench, display, analyze pose and labeled video * dlclive and graph: clean up/benign bug fix * fix colormaps for display * update readme: add benchmark info * setup: add pandas and tables, needed for analyze_videos * Update kalmanfilter.py * change display_lik to pcutoff * resolve setup and readme conflicts * Update README.md * fixed resize in python, check cmd * Update README.md * analyze: allow pixels/resize to be scalar or list * fix TF warnings * changed name to benchmark_videos * Update README.md * dlclive: tfgpu flip x and y * name update * additional metadata about benchmarking tests - video path - video codec - video pixel format - video fps - video total frames - resize - original frame size - resized frame size - pixels - dlclive_params * added DLCLive.parameterization property to give a dict of the object's parameterization * decode_fourcc to convert float versions of fourcc/codec codes to text (also works for other float opencv properties like pixel format) also fixed placement of setting default resize in run_benchmark when pixels is not set * bugfix: out_dir instead of output in if __name__ == "__main__" part of bench.py * graceful failures when CAP_PROPS cant be gotten * missing colon :( * complete convert analyze to benchmarking * don't require frame to init_inference unless tflite * don't require frame to init_inference unless tflite * don't require frame to init_inference unless tflite * incorrect signature * getting from sys_info * kalmanfilter: add likelihood threshold * teensylaser: fix bug * dlclive: formatting, 
pass frame to init inference optional * utils: add decode_fourcc * benchmark: more info, remove need for pandas * Update run.py * Update version.py let's bump to first release on pypi as 0.0.1 * Update reinstall.sh * Update setup.py * added example processors, fixed headers * Update install_jetson.md * Update README.md Co-authored-by: gkane <[email protected]> Co-authored-by: sneakers-the-rat <[email protected]>
1 parent 5407e63 commit cd381e3

File tree

24 files changed

+1287
-389
lines changed

24 files changed

+1287
-389
lines changed

benchmarking/run.py

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,10 @@
1+
"""
2+
DeepLabCut Toolbox (deeplabcut.org)
3+
© A. & M. Mathis Labs
4+
5+
Licensed under GNU Lesser General Public License v3.0
6+
"""
7+
18
# Example script for running benchmark tests in Kane et al, 2020.
29

310
import os
@@ -6,7 +13,8 @@
613
from dlclive import benchmark_model_by_size
714

815
# Update the datafolder to where the data is:
9-
datafolder=PUTFOLDER_TO_data!
16+
datafolder='/your/path/to/data/here'
17+
1018
n_frames = 1000 #change to 10000 for testing on a GPU!
1119
pixels = [2500, 10000, 40000, 160000, 320000, 640000]
1220

dlclive/bench.py

Lines changed: 102 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@
3131
from dlclive import VERSION
3232
from dlclive import __file__ as dlcfile
3333

34+
from dlclive.utils import decode_fourcc
3435

3536
def get_system_info() -> dict:
3637
""" Return summary info for system running benchmark
@@ -100,7 +101,10 @@ def get_system_info() -> dict:
100101
'dlclive_version': VERSION
101102
}
102103

103-
def run_benchmark(model_path, video_path, tf_config=None, resize=None, pixels=None, n_frames=10000, print_rate=False, display=False, pcutoff=0.0, display_radius=3) -> typing.Tuple[np.ndarray, int, bool]:
104+
def run_benchmark(model_path, video_path, tf_config=None,
105+
resize=None, pixels=None, n_frames=10000,
106+
print_rate=False, display=False, pcutoff=0.0,
107+
display_radius=3) -> typing.Tuple[np.ndarray, int, bool, dict]:
104108
""" Benchmark on inference times for a given DLC model and video
105109
106110
Parameters
@@ -136,6 +140,8 @@ def run_benchmark(model_path, video_path, tf_config=None, resize=None, pixels=No
136140

137141
if pixels is not None:
138142
resize = np.sqrt(pixels / (im_size[0] * im_size[1]))
143+
else:
144+
resize = resize if resize is not None else 1
139145

140146
### initialize live object
141147

@@ -168,11 +174,49 @@ def run_benchmark(model_path, video_path, tf_config=None, resize=None, pixels=No
168174

169175
### close video and tensorflow session
170176

177+
# gather video and test parameterization
178+
179+
# dont want to fail here so gracefully failing on exception --
180+
# eg. some packages of cv2 don't have CAP_PROP_CODEC_PIXEL_FORMAT
181+
try:
182+
fourcc = decode_fourcc(cap.get(cv2.CAP_PROP_FOURCC))
183+
except:
184+
fourcc = ""
185+
186+
try:
187+
fps = round(cap.get(cv2.CAP_PROP_FPS))
188+
except:
189+
fps = None
190+
191+
try:
192+
pix_fmt = decode_fourcc(cap.get(cv2.CAP_PROP_CODEC_PIXEL_FORMAT))
193+
except:
194+
pix_fmt = ""
195+
196+
try:
197+
frame_count = round(cap.get(cv2.CAP_PROP_FRAME_COUNT))
198+
except:
199+
frame_count = None
200+
201+
202+
203+
meta = {
204+
'video_path': video_path,
205+
'video_codec': fourcc,
206+
'video_pixel_format': pix_fmt,
207+
'video_fps': fps,
208+
'video_total_frames': frame_count,
209+
'resize': resize,
210+
'original_frame_size': im_size,
211+
'resized_frame_size': (im_size[0]*resize, im_size[1]*resize),
212+
'pixels': pixels,
213+
'dlclive_params': live.parameterization
214+
}
215+
171216
cap.release()
172217
live.close()
173218

174-
resize = resize if resize is not None else 1
175-
return inf_times, resize*im_size[0] * resize*im_size[1], TFGPUinference
219+
return inf_times, resize*im_size[0] * resize*im_size[1], TFGPUinference, meta
176220

177221
def get_savebenchmarkfn(sys_info ,i, fn_ind, out_dir=None):
178222
''' get filename to save data (definitions see save_benchmark)'''
@@ -181,7 +225,14 @@ def get_savebenchmarkfn(sys_info ,i, fn_ind, out_dir=None):
181225
datafilename = out_dir + '/' + base_name
182226
return datafilename
183227

184-
def save_benchmark(sys_info, inf_times, pixels, i, fn_ind, TFGPUinference, model=None, out_dir=None,datafilename=None):
228+
def save_benchmark(sys_info: dict,
229+
inf_times: np.ndarray,
230+
pixels: typing.Union[np.ndarray, float],
231+
iter: int,
232+
TFGPUinference: bool = None,
233+
model: str = None,
234+
out_dir: str = None,
235+
meta: dict=None):
185236
""" Save benchmarking data with system information to a pickle file
186237
187238
Parameters
@@ -200,17 +251,16 @@ def save_benchmark(sys_info, inf_times, pixels, i, fn_ind, TFGPUinference, model
200251
name of model
201252
out_dir : str, optional
202253
path to directory to save data. If None, uses pwd, by default None
203-
254+
meta: dict, optional
255+
metadata returned from run_benchmark
204256
205257
Returns
206258
-------
207259
bool
208260
flag indicating successful save
209261
"""
210262

211-
if datafilename is None:
212-
#out_dir = out_dir if out_dir is not None else os.getcwd()
213-
datafilename=get_savebenchmarkfn(sys_info ,iter, fn_ind, out_dir=out_dir)
263+
out_dir = out_dir if out_dir is not None else os.getcwd()
214264

215265
model_type = None
216266
if model is not None:
@@ -221,14 +271,32 @@ def save_benchmark(sys_info, inf_times, pixels, i, fn_ind, TFGPUinference, model
221271
else:
222272
model_type = None
223273

224-
data = {'model' : model,
225-
'model_type' : model_type,
226-
'TFGPUinference' : TFGPUinference,
227-
'pixels' : pixels,
228-
'inference_times' : inf_times}
274+
fn_ind = 0
275+
base_name = "benchmark_{}_{}_{}_{}.pickle".format(sys_info['host_name'],
276+
sys_info['device'][0],
277+
fn_ind,
278+
iter)
279+
while os.path.isfile(os.path.normpath(out_dir + '/' + base_name)):
280+
fn_ind += 1
281+
base_name = "benchmark_{}_{}_{}_{}.pickle".format(sys_info['host_name'],
282+
sys_info['device'][0],
283+
fn_ind,
284+
iter)
285+
286+
data = {'model': model,
287+
'model_type': model_type,
288+
'TFGPUinference': TFGPUinference,
289+
'pixels': pixels,
290+
'inference_times': inf_times}
291+
292+
data.update(sys_info)
293+
294+
if meta:
295+
data.update(meta)
229296

230297
data.update(sys_info)
231298

299+
datafilename = os.path.normpath(f"{out_dir}/{base_name}")
232300
pickle.dump(data, open(os.path.normpath(datafilename), 'wb'))
233301

234302
return True
@@ -283,24 +351,27 @@ def benchmark_model_by_size(model_path, video_path, output=None, n_frames=10000,
283351

284352
for i in range(len(resize)):
285353

286-
sys_info = get_system_info()
287-
datafilename=get_savebenchmarkfn(sys_info ,i, fn_ind, out_dir=out_dir)
288-
289-
#Check if a subset was already been completed?
290-
if os.path.isfile(os.path.normpath(datafilename)):
291-
print("\nAlready ran {:d} / {:d}\n".format(i+1, len(resize)))
292-
else:
293-
print("\nRun {:d} / {:d}\n".format(i+1, len(resize)))
294-
inf_times, pixels_out, TFGPUinference = run_benchmark(model_path,
295-
video_path,
296-
resize=resize[i],
297-
pixels=pixels[i],
298-
n_frames=n_frames,
299-
print_rate=print_rate)
300-
301-
### saving results intermediately
302-
save_benchmark(sys_info, inf_times, pixels_out, i, fn_ind, TFGPUinference, model=os.path.basename(model_path), datafilename=datafilename)
303-
354+
print("\nRun {:d} / {:d}\n".format(i+1, len(resize)))
355+
356+
inf_times, pixels_out, TFGPUinference, benchmark_meta = run_benchmark(
357+
model_path,
358+
video_path,
359+
tf_config=tf_config,
360+
resize=resize[i],
361+
pixels=pixels[i],
362+
n_frames=n_frames,
363+
print_rate=print_rate,
364+
display=display,
365+
pcutoff=pcutoff,
366+
display_radius=display_radius)
367+
368+
#TODO: check if a part has already been completed?
369+
370+
### saving results intermediately
371+
save_benchmark(sys_info, inf_times, pixels_out, i, TFGPUinference,
372+
model=os.path.basename(model_path),
373+
out_dir = output,
374+
meta=benchmark_meta)
304375

305376
def main():
306377

0 commit comments

Comments
 (0)