@@ -28,31 +28,41 @@ class DLCLive:
     -----------

     model_path: Path
-        Full path to exported model file
+        Full path to the exported model (created when `deeplabcut.export_model(...)`
+        was called). For PyTorch models, this is a single model file. For TensorFlow
+        models, this is a directory containing the model snapshots.

     model_type: string, optional
-        which model to use: 'pytorch' or 'onnx' for exported snapshot
+        Which model to use. For the PyTorch engine, options are [`pytorch`]. For the
+        TensorFlow engine, options are [`base`, `tensorrt`, `lite`].
+
+    precision: string, optional
+        Precision of the model weights, for model_type "pytorch" and "tensorrt".
+        Options are, for the different model_types:
+            "pytorch": {"FP32", "FP16"}
+            "tensorrt": {"FP32", "FP16", "INT8"}

     tf_config:
+        TensorFlow only. Optional ConfigProto for the TensorFlow session.

+    single_animal: bool, default=True
+        PyTorch only.

-    precision: string, optional
-        precision of model weights, for model_type='onnx' or 'pytorch'. Can be 'FP32'
-        (default) or 'FP16'
+    device: str, optional, default=None
+        PyTorch only.
+
+    top_down_config: dict, optional, default=None
+
+    top_down_dynamic: dict, optional, default=None

     cropping: list of int
-        cropping parameters in pixel number: [x1, x2, y1, y2] #A: Maybe this is the
-        dynamic cropping of each frame to speed up processing, so instead of analyzing
-        the whole frame, it analyzes only the part of the frame where the animal is
-
-    dynamic: triple containing (state, detection threshold, margin) #A: margin adds some
-        space so the 'bbox' isn't too narrow around the animal. First keypoints are
-        predicted, then dynamic cropping is performed to 'single out' the animal, and
-        then the pose is estimated, we think.
+        Cropping parameters in pixel number: [x1, x2, y1, y2]
+
+    dynamic: triple containing (state, detection threshold, margin)
         If the state is true, then dynamic cropping will be performed. That means that
         if an object is detected (i.e. any body part > detection threshold), then object
         boundaries are computed according to the smallest/largest x position and
-        smallest/largest y position of all body parts. This window is expanded by the
+        smallest/largest y position of all body parts. This window is expanded by the
         margin and from then on only the posture within this crop is analyzed (until the
         object is lost, i.e. < detection threshold). The current position is utilized for
         updating the crop window for the next frame (this is why the margin is important
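The crop-window rule described in the docstring above amounts to a few lines of array math. The following is an illustrative sketch only (the helper name and signature are not part of DLCLive); it shows how a crop would be derived from a pose under the stated threshold-and-margin rule:

```python
import numpy as np

def dynamic_crop(pose: np.ndarray, threshold: float, margin: int,
                 frame_shape: tuple[int, int]) -> tuple[int, int, int, int] | None:
    """Derive an [x1, x2, y1, y2] crop from an (n_bodyparts, 3) pose of (x, y, score)."""
    detected = pose[pose[:, 2] > threshold]
    if len(detected) == 0:
        return None  # object lost (all scores < threshold): analyze the full frame again
    height, width = frame_shape
    x1 = max(int(detected[:, 0].min()) - margin, 0)
    x2 = min(int(detected[:, 0].max()) + margin, width)
    y1 = max(int(detected[:, 1].min()) - margin, 0)
    y2 = min(int(detected[:, 1].max()) + margin, height)
    # window is expanded by the margin so the next frame's pose stays inside the crop
    return x1, x2, y1, y2
```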
@@ -63,8 +73,7 @@ class DLCLive:
         For example, resize=0.5 will downsize both the height and width of the image by
         a factor of 2.

-    processor: dlc pose processor object, optional #A: this is possibly the 'predictor'
-        - or is it what enables use on jetson boards?
+    processor: dlc pose processor object, optional
         User-defined processor object. Must contain two methods: process and save.
         The 'process' method takes in a pose, performs some processing, and returns the
         processed pose.
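For reference, a minimal processor satisfying the two-method interface described above might look like the sketch below. The class name and the pickle-based save are illustrative; only the process/save contract comes from the docstring:

```python
import pickle

class PoseRecorder:
    """Toy processor: records every pose it sees and returns it unchanged."""

    def __init__(self):
        self.poses = []

    def process(self, pose, **kwargs):
        # perform some processing on the pose, then return the processed pose
        self.poses.append(pose)
        return pose

    def save(self, filename):
        # persist whatever the processor accumulated during the session
        with open(filename, "wb") as f:
            pickle.dump(self.poses, f)
```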
@@ -80,12 +89,19 @@ class DLCLive:
         boolean flag to convert frames from BGR to RGB color scheme

     display: bool, optional
-        Display frames with DeepLabCut labels?
+        Open a display showing the predicted pose on frames with DeepLabCut labels.
         This is useful for testing model accuracy and cropping parameters, but it is
         very slow.

+    pcutoff: float, default=0.5
+        Only used when display=True. The score threshold for displaying a bodypart in
+        the display.
+
+    display_radius: int, default=3
+        Only used when display=True. Radius of displayed keypoints, in pixels.
+
     display_cmap: str, optional
-        String indicating the Matplotlib colormap to use.
+        Only used when display=True. String indicating the Matplotlib colormap to use.
     """

     PARAMETERS = (
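Putting the display-related options together, a usage sketch (the model path is a placeholder, and `from dlclive import DLCLive` assumes the package's usual import path):

```python
from dlclive import DLCLive

dlc_live = DLCLive(
    "path/to/exported_model",  # placeholder path
    model_type="pytorch",
    display=True,        # open a window showing frames with predicted keypoints
    pcutoff=0.6,         # hide bodyparts scored below 0.6
    display_radius=4,    # draw keypoints as 4 px dots
    display_cmap="bmy",  # colormap used to color the keypoints
)
```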
@@ -103,33 +119,36 @@ def __init__(
         self,
         model_path: str | Path,
         model_type: str = "base",
-        # tf_config: Any = None,
         precision: str = "FP32",
-        # single_animal: bool = True,
-        # device: str | None = None,
+        tf_config: Any = None,
+        single_animal: bool = True,
+        device: str | None = None,
+        top_down_config: dict | None = None,
+        top_down_dynamic: dict | None = None,
         cropping: list[int] | None = None,
         dynamic: tuple[bool, float, float] = (False, 0.5, 10),
         resize: float | None = None,
         convert2rgb: bool = True,
         processor: Processor | None = None,
         display: bool | Display = False,
         pcutoff: float = 0.5,
-        # bbox_cutoff: float = 0.6,
-        # max_detections: int = 1,
         display_radius: int = 3,
         display_cmap: str = "bmy",
-        **kwargs,
     ):
         self.path = Path(model_path)
         self.runner: BaseRunner = factory.build_runner(
             model_type,
             model_path,
-            **kwargs,
+            precision=precision,
+            tf_config=tf_config,
+            single_animal=single_animal,
+            device=device,
+            dynamic=top_down_dynamic,
+            top_down_config=top_down_config,
         )
         self.is_initialized = False

         self.model_type = model_type
-        self.precision = precision
         self.cropping = cropping
         self.dynamic = dynamic
         self.dynamic_cropping = None
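With the engine-specific options now passed explicitly to `factory.build_runner` instead of through `**kwargs`, a typical inference loop is unchanged. A sketch, with placeholder paths, using the existing `init_inference`/`get_pose` entry points:

```python
import cv2
from dlclive import DLCLive

cap = cv2.VideoCapture("path/to/video.avi")  # placeholder video source
dlc_live = DLCLive("path/to/exported_model", model_type="pytorch", precision="FP16")

ret, frame = cap.read()
dlc_live.init_inference(frame)  # build and warm up the runner on the first frame
while ret:
    pose = dlc_live.get_pose(frame)  # (n_bodyparts, 3) array of (x, y, score)
    ret, frame = cap.read()
cap.release()
```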