
def get_system_info():
    """ Return summary info for system running benchmark
-
+
    Returns
    -------
    str
@@ -42,6 +42,8 @@ def get_system_info():

    op_sys = platform.platform()
    host_name = platform.node()
+
+    # A string giving the absolute path of the executable binary for the Python interpreter, on systems where this makes sense.
    if platform.system() == 'Windows':
        host_python = sys.executable.split(os.path.sep)[-2]
    else:
@@ -58,15 +60,15 @@ def get_system_info():
        dev_type = "GPU"
    else:
        from cpuinfo import get_cpu_info
-        dev = [get_cpu_info()['brand']]
+        dev = get_cpu_info()  # [get_cpu_info()['brand']]
        dev_type = "CPU"

    return host_name, op_sys, host_python, (dev_type, dev)


def run_benchmark(model_path, video_path, resize=None, pixels=None, n_frames=10000, print_rate=False):
    """ Benchmark on inference times for a given DLC model and video
-
+
    Parameters
    ----------
    model_path : str
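The change above keeps the full py-cpuinfo dict in `dev` rather than only the brand string. A minimal sketch of how the brand could still be recovered downstream, assuming the usual py-cpuinfo keys ('brand' in older releases, 'brand_raw' in newer ones):

from cpuinfo import get_cpu_info

info = get_cpu_info()
# fall back gracefully across py-cpuinfo versions
brand = info.get("brand_raw", info.get("brand", "unknown CPU"))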
@@ -81,7 +83,7 @@ def run_benchmark(model_path, video_path, resize=None, pixels=None, n_frames=100
        number of frames to run inference on, by default 10000
    print_rate : bool, optional
        flag to print inference rate frame by frame, by default False
-
+
    Returns
    -------
    :class:`numpy.ndarray`
@@ -123,7 +125,7 @@ def run_benchmark(model_path, video_path, resize=None, pixels=None, n_frames=100
        start_pose = time.time()
        live.get_pose(frame)
        inf_times[i] = time.time() - start_pose
-
+
        if print_rate:
            print("pose rate = {:d}".format(int(1 / inf_times[i])))

@@ -139,9 +141,9 @@ def run_benchmark(model_path, video_path, resize=None, pixels=None, n_frames=100
    return inf_times, resize * im_size[0] * resize * im_size[1], TFGPUinference


-def save_benchmark(sys_info, inf_times, pixels, TFGPUinference, model=None, out_dir=None):
+def save_benchmark(sys_info, inf_times, pixels, iter, TFGPUinference, model=None, out_dir=None):
    """ Save benchmarking data with system information to a pickle file
-
+
    Parameters
    ----------
    sys_info : tuple
@@ -150,13 +152,16 @@ def save_benchmark(sys_info, inf_times, pixels, TFGPUinference, model=None, out_
        array of inference times generated by :func:`run_benchmark`
    pixels : float or :class:`numpy.ndarray`
        number of pixels for each benchmark run. If an array, each index corresponds to a row in inf_times
+    iter : int
+        index of the current benchmark run, so that each part is saved to its own file
    TFGPUinference: bool
        flag if using tensorflow inference or numpy inference DLC model
    model: str, optional
        name of model
    out_dir : str, optional
        path to directory to save data. If None, uses pwd, by default None
-
+
+
    Returns
    -------
    bool
@@ -166,6 +171,7 @@ def save_benchmark(sys_info, inf_times, pixels, TFGPUinference, model=None, out_
    out_dir = out_dir if out_dir is not None else os.getcwd()
    host_name, op_sys, host_python, dev = sys_info
    host_name = host_name.replace(" ", "")
+
    model_type = None
    if model is not None:
        if 'resnet' in model:
@@ -176,10 +182,10 @@ def save_benchmark(sys_info, inf_times, pixels, TFGPUinference, model=None, out_
            model_type = None

    fn_ind = 0
-    base_name = "benchmark_{}_{}_{}.pickle".format(host_name, dev[0], fn_ind)
+    base_name = "benchmark_{}_{}_{}_{}.pickle".format(host_name, dev[0], fn_ind, iter)
    while os.path.isfile(os.path.normpath(out_dir + '/' + base_name)):
        fn_ind += 1
-        base_name = "benchmark_{}_{}_{}.pickle".format(host_name, dev[0], fn_ind)
+        base_name = "benchmark_{}_{}_{}_{}.pickle".format(host_name, dev[0], fn_ind, iter)

    fn = os.path.normpath(out_dir)

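With the added `iter` field, each resize run gets its own pickle instead of overwriting a single file. A rough sketch of the resulting names, assuming a hypothetical host called "myhost" benchmarking on CPU:

# illustrative only: host name and device type come from get_system_info()
#   first run (iter == 0), no existing file  -> benchmark_myhost_CPU_0_0.pickle
#   first run, if that name is already taken -> benchmark_myhost_CPU_1_0.pickle
#   third run (iter == 2)                    -> benchmark_myhost_CPU_0_2.pickle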
@@ -198,10 +204,14 @@ def save_benchmark(sys_info, inf_times, pixels, TFGPUinference, model=None, out_

    return True

+def read_pickle(filename):
+    """ Read the pickle file """
+    with open(filename, "rb") as handle:
+        return pickle.load(handle)

def benchmark_model_by_size(model_path, video_path, output=None, n_frames=10000, resize=None, pixels=None, print_rate=False):
    """Benchmark DLC model by image size
-
+
    Parameters
    ----------
    model_path : str
@@ -232,24 +242,26 @@ def benchmark_model_by_size(model_path, video_path, output=None, n_frames=10000,

    ### initialize full inference times

-    inf_times = np.zeros((len(resize), n_frames))
-    pixels_out = np.zeros(len(resize))
-
+    # inf_times = np.zeros((len(resize), n_frames))
+    # pixels_out = np.zeros(len(resize))
+    print(resize)
    for i in range(len(resize)):

        print("\nRun {:d} / {:d}\n".format(i + 1, len(resize)))

-        inf_times[i], pixels_out[i], TFGPUinference = run_benchmark(model_path,
+        inf_times, pixels_out, TFGPUinference = run_benchmark(model_path,
                                                                    video_path,
                                                                    resize=resize[i],
                                                                    pixels=pixels[i],
                                                                    n_frames=n_frames,
                                                                    print_rate=print_rate)

-    ### save results
+        # TODO: check if a part has already been completed?

-    sys_info = get_system_info()
-    save_benchmark(sys_info, inf_times, pixels_out, TFGPUinference, model=os.path.basename(model_path), out_dir=output)
+        ### save intermediate results after each run
+        sys_info = get_system_info()
+        # print("Your system info:", sys_info)
+        save_benchmark(sys_info, inf_times, pixels_out, i, TFGPUinference, model=os.path.basename(model_path), out_dir=output)


def main():
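Because each resize run is now saved on its own, the individual pickles have to be gathered afterwards. A minimal sketch of that step, using the new read_pickle helper and assuming the files follow the naming scheme shown earlier (the host name and device type below are placeholders):

import glob
import os

def load_benchmark_parts(out_dir, host_name="myhost", dev_type="CPU"):
    # hypothetical helper: collect every per-run pickle written by save_benchmark
    pattern = os.path.join(out_dir, "benchmark_{}_{}_*.pickle".format(host_name, dev_type))
    return [read_pickle(f) for f in sorted(glob.glob(pattern))]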
@@ -274,4 +286,4 @@ def main():


if __name__ == "__main__":
-    main()
+    main()