Skip to content

Commit 44a7937

Browse files
authored
Merge pull request #6 from SchwarzNeuroconLab/main_sleap
Release of SLEAP integration
2 parents 75b4555 + f5426f3 commit 44a7937

File tree

10 files changed

+480
-32
lines changed

10 files changed

+480
-32
lines changed

DeepLabStream.py

Lines changed: 28 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -19,11 +19,10 @@
1919

2020
from utils.generic import VideoManager, WebCamManager, GenericManager
2121
from utils.configloader import RESOLUTION, FRAMERATE, OUT_DIR, MODEL_NAME, MULTI_CAM, STACK_FRAMES, \
22-
ANIMALS_NUMBER, STREAMS, STREAMING_SOURCE, MODEL_ORIGIN, CROP, CROP_X, CROP_Y
23-
from utils.plotter import plot_bodyparts, plot_metadata_frame
24-
from utils.poser import load_deeplabcut, load_dpk, load_dlc_live, get_pose, calculate_skeletons,\
25-
find_local_peaks_new, get_ma_pose
26-
22+
ANIMALS_NUMBER, FLATTEN_MA, STREAMS, STREAMING_SOURCE, MODEL_ORIGIN, CROP, CROP_X, CROP_Y
23+
from utils.plotter import plot_bodyparts,plot_metadata_frame
24+
from utils.poser import load_deeplabcut,load_dpk,load_dlc_live,load_sleap, get_pose,calculate_skeletons, \
25+
find_local_peaks_new,get_ma_pose
2726

2827
def create_video_files(directory, devices, resolution, framerate, codec):
2928
"""
@@ -317,6 +316,25 @@ def get_pose_mp(input_q, output_q):
317316
peaks = prediction[0, :, :2]
318317
analysis_time = time.time() - start_time
319318
output_q.put((index,peaks,analysis_time))
319+
320+
elif MODEL_ORIGIN == 'SLEAP':
321+
sleap_model = load_sleap()
322+
while True:
323+
if input_q.full():
324+
index, frame = input_q.get()
325+
start_time = time.time()
326+
input_frame = frame[:, :, ::-1]
327+
#this is weird, but without it, it does not seem to work...
328+
frames = np.array([input_frame])
329+
prediction = sleap_model.predict(frames[[0]], batch_size=1)
330+
#check if this is multiple animal instances or single animal model
331+
if sleap_model.name == 'single_instance_inference_model':
332+
#get predictions (wrap it again, so the behavior is the same for both model types)
333+
peaks = np.array([prediction['peaks'][0, :]])
334+
else:
335+
peaks = prediction['instance_peaks'][0, :]
336+
analysis_time = time.time() - start_time
337+
output_q.put((index,peaks,analysis_time))
320338
else:
321339
raise ValueError(f'Model origin {MODEL_ORIGIN} not available.')
322340

@@ -426,8 +444,11 @@ def get_analysed_frames(self) -> tuple:
426444
self._experiment_running = False
427445

428446
if self._experiment_running and not self._experiment.experiment_finished:
429-
for skeleton in skeletons:
430-
self._experiment.check_skeleton(analysed_image, skeleton)
447+
if ANIMALS_NUMBER > 1 and not FLATTEN_MA:
448+
self._experiment.check_skeleton(analysed_image,skeletons)
449+
else:
450+
for skeleton in skeletons:
451+
self._experiment.check_skeleton(analysed_image, skeleton)
431452

432453
# Gathering data as pd.Series for output
433454
if self._data_output:

Readme.md

Lines changed: 15 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,24 +11,33 @@
1111

1212
DeepLabStream is a python based multi-purpose tool that enables the realtime tracking and manipulation of animals during ongoing experiments.
1313
Our toolbox was originally adapted from the previously published [DeepLabCut](https://github.com/AlexEMG/DeepLabCut) ([Mathis et al., 2018](https://www.nature.com/articles/s41593-018-0209-y)) and expanded on its core capabilities, but is now able to utilize a variety of different network architectures for online pose estimation
14-
([DLC + maDLC](https://github.com/AlexEMG/DeepLabCut), [DLC-Live](https://github.com/DeepLabCut/DeepLabCut-live), [DeepPosekit's](https://github.com/jgraving/DeepPoseKit) StackedDenseNet, StackedHourGlass and [LEAP](https://github.com/murthylab/sleap)).
14+
([SLEAP](https://github.com/murthylab/sleap), [DLC-Live](https://github.com/DeepLabCut/DeepLabCut-live), [DeepPosekit's](https://github.com/jgraving/DeepPoseKit) StackedDenseNet, StackedHourGlass and [LEAP](https://github.com/murthylab/sleap)).
15+
1516
DeepLabStream's core feature is the utilization of real-time tracking to orchestrate closed-loop experiments. This can be achieved using any type of camera-based video stream (incl. multiple streams). It enables running experimental protocols that are dependent on a constant stream of bodypart positions and feedback activation of several input/output devices. Its capabilities range from simple region of interest (ROI) based triggers to headdirection or behavior dependent stimulation.
1617

1718
![DLS_Stim](docs/DLSSTim_example.gif)
1819

20+
## New features:
21+
22+
#### 02/2021: Multiple Animal Experiments: Full [SLEAP](https://github.com/murthylab/sleap) integration (Tutorials coming soon!)
23+
24+
#### 01/2021: DLStream was published in [Communications Biology](https://www.nature.com/articles/s42003-021-01654-9)
25+
26+
#### 12/2020: New pose estimation model integration ([DLC-Live](https://github.com/DeepLabCut/DeepLabCut-live)) and pre-release of further integration ([DeepPosekit's](https://github.com/jgraving/DeepPoseKit) StackedDenseNet, StackedHourGlass and [LEAP](https://github.com/murthylab/sleap))
27+
1928
## Quick Reference:
2029

21-
### Check out or wiki: [DLStream Wiki](https://github.com/SchwarzNeuroconLab/DeepLabStream/wiki)
30+
#### Check out our wiki: [DLStream Wiki](https://github.com/SchwarzNeuroconLab/DeepLabStream/wiki)
2231

23-
### Read the paper: [Schweihoff, et al. 2021](https://www.nature.com/articles/s42003-021-01654-9)
32+
#### Read the paper: [Schweihoff, et al. 2021](https://www.nature.com/articles/s42003-021-01654-9)
2433

25-
### Contributing
34+
#### Contributing
2635

2736
If you have feature requests or questions regarding the design of experiments join our [slack group](https://join.slack.com/t/dlstream/shared_invite/zt-jpy2olk1-CuJu0ZylGg_SLbO7zBkcrg)!
2837

2938
We are constantly working to update and increase the capabilities of DLStream.
3039
We welcome all feedback and input from your side.
31-
Also, do not hesitate to contact us for collaborations.
40+
3241

3342
### 1. [Updated Installation & Testing](https://github.com/SchwarzNeuroconLab/DeepLabStream/wiki/Installation-&-Testing)
3443

@@ -49,7 +58,7 @@ Also, do not hesitate to contact us for collaborations.
4958
### 7. [Adapting an existing experiment to your own needs](https://github.com/SchwarzNeuroconLab/DeepLabStream/wiki/Adapting-an-existing-experiment-to-your-own-needs)
5059

5160

52-
61+
5362
### How to use DeepLabStream
5463

5564
Just run

experiments/custom/experiments.py

Lines changed: 235 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,13 +10,246 @@
1010
import time
1111
from functools import partial
1212
from collections import Counter
13-
from experiments.custom.stimulus_process import ClassicProtocolProcess, SimpleProtocolProcess,Timer, ExampleProtocolProcess
14-
from experiments.custom.triggers import ScreenTrigger, RegionTrigger, OutsideTrigger, DirectionTrigger, SpeedTrigger
13+
from experiments.custom.stimulus_process import ClassicProtocolProcess, SimpleProtocolProcess,Timer\
14+
, ExampleProtocolProcess
15+
from experiments.custom.triggers import ScreenTrigger, RegionTrigger, OutsideTrigger, DirectionTrigger\
16+
, SpeedTrigger, SocialInteractionTrigger
1517
from utils.plotter import plot_triggers_response
1618
from utils.analysis import angle_between_vectors
1719
from experiments.custom.stimulation import show_visual_stim_img,laser_switch
1820

1921

22+
"""Social or multiple animal experiments in combination with SLEAP or non-flattened maDLC pose estimation"""
23+
24+
class ExampleSocialInteractionExperiment:
    """
    In this experiment the skeleton/instance of each animal will be considered for the trigger;
    any animal can trigger the stimulation (the first one to result in TRUE).

    Simple class to contain all of the experiment properties.
    Uses multiprocess to ensure the best possible performance and
    to showcase that it is possible to work with any type of equipment, even timer-dependent.
    """

    def __init__(self):
        self.experiment_finished = False
        self._process = ExampleProtocolProcess()
        # distance threshold passed to SocialInteractionTrigger; presumably pixels —
        # TODO(review): confirm the unit against the trigger implementation
        self._proximity_threshold = 30
        # minimum number of detected animal instances required to evaluate the trigger
        self._min_animals = 2
        self._event = None
        self._current_trial = None
        self._max_reps = 999
        # NOTE: _trials is a property, so these comprehensions build the trigger once
        # here just to enumerate the trial names
        self._trial_count = {trial: 0 for trial in self._trials}
        self._trial_timers = {trial: Timer(10) for trial in self._trials}
        self._exp_timer = Timer(600)

    def check_skeleton(self, frame, skeletons):
        """
        Check the passed animal skeletons for a pre-defined set of conditions,
        output the visual representation (if any exists) and advance trials
        according to the inherent logic of the experiment.

        :param frame: frame, on which the animal skeletons were found
        :param skeletons: list of skeletons, each consisting of multiple joints of an animal
        :return: tuple (result, response) from the last evaluated trigger;
                 (False, None) if no trigger was evaluated
        """
        self.check_exp_timer()  # checking if experiment is still on
        for trial in self._trial_count:
            # checking if any trial hit the predefined cap
            if self._trial_count[trial] >= self._max_reps:
                self.stop_experiment()

        # initialized up front so the method never returns unbound names,
        # even when the experiment is already finished or too few animals were found
        result, response = False, None
        if not self.experiment_finished:
            # the trigger needs at least _min_animals detected instances
            if len(skeletons) >= self._min_animals:
                for trial in self._trials:
                    # check if the social interaction trigger is true
                    result, response = self._trials[trial]['trigger'](skeletons=skeletons)
                    plot_triggers_response(frame, response)
                    if result:
                        if self._current_trial is None:
                            # only start a trial when its inter-trial timer has run out
                            if not self._trial_timers[trial].check_timer():
                                self._current_trial = trial
                                self._trial_timers[trial].reset()
                                self._trial_count[trial] += 1
                                print(trial, self._trial_count[trial])
                    else:
                        if self._current_trial == trial:
                            self._current_trial = None
                        self._trial_timers[trial].start()

                self._process.set_trial(self._current_trial)
        return result, response

    @property
    def _trials(self):
        """
        Defining the trials.

        Builds a SocialInteractionTrigger where animal 1's 'bp0' (active) is tested
        for proximity against animal 0's 'bp2' (passive).
        """
        identification_dict = dict(
            active={'animal': 1, 'bp': ['bp0']},
            passive={'animal': 0, 'bp': ['bp2']},
        )

        interaction_trigger = SocialInteractionTrigger(
            threshold=self._proximity_threshold,
            identification_dict=identification_dict,
            interaction_type='proximity',
            debug=True,
        )

        trials = {'DLStream_test': dict(trigger=interaction_trigger.check_skeleton,
                                        count=0)}
        return trials

    def check_exp_timer(self):
        """
        Checking the experiment timer; stops the experiment once time runs out.
        """
        if not self._exp_timer.check_timer():
            print("Experiment is finished")
            print("Time ran out.")
            self.stop_experiment()

    def start_experiment(self):
        """
        Start the experiment.
        """
        self._process.start()
        if not self.experiment_finished:
            self._exp_timer.start()

    def stop_experiment(self):
        """
        Stop the experiment and reset the timer.
        """
        self.experiment_finished = True
        print('Experiment completed!')
        self._exp_timer.reset()
        # don't forget to end the process!
        self._process.end()

    def get_trial(self):
        """
        Check which trial is going on right now.
        """
        return self._current_trial
140+
141+
class ExampleMultipleAnimalExperiment:
    """
    In this experiment the skeleton/instance of each animal will be considered for the trigger;
    any animal can trigger the stimulation (the first one to result in TRUE).

    Simple class to contain all of the experiment properties.
    Uses multiprocess to ensure the best possible performance and
    to showcase that it is possible to work with any type of equipment, even timer-dependent.
    """

    def __init__(self):
        self.experiment_finished = False
        self._process = ExampleProtocolProcess()
        # center of the circular ROI, in frame coordinates (x, y)
        self._green_point = (550, 163)
        self._radius = 40
        self._dist_threshold = 80
        self._event = None
        self._current_trial = None
        self._max_reps = 10
        # NOTE: _trials is a property, so these comprehensions only enumerate trial names
        self._trial_count = {trial: 0 for trial in self._trials}
        self._trial_timers = {trial: Timer(10) for trial in self._trials}
        self._exp_timer = Timer(600)

    def check_skeleton(self, frame, skeletons):
        """
        Check each passed animal skeleton for a pre-defined set of conditions,
        output the visual representation (if any exists) and advance trials
        according to the inherent logic of the experiment.

        :param frame: frame, on which the animal skeletons were found
        :param skeletons: list of skeletons, each consisting of multiple joints of an animal
        :return: tuple (result, response) from the last evaluated trigger;
                 (False, None) if no trigger was evaluated
        """
        self.check_exp_timer()  # checking if experiment is still on
        for trial in self._trial_count:
            # checking if any trial hit the predefined cap
            if self._trial_count[trial] >= self._max_reps:
                self.stop_experiment()

        # initialized up front so the method never returns unbound names,
        # even when the experiment is already finished
        result, response = False, None
        if not self.experiment_finished:
            for trial in self._trials:
                # check each skeleton for trigger success; the first True wins
                for skeleton in skeletons:
                    result, response = self._trials[trial]['trigger'](skeleton=skeleton)
                    if result:
                        break
                plot_triggers_response(frame, response)
                if result:
                    if self._current_trial is None:
                        # only start a trial when its inter-trial timer has run out
                        if not self._trial_timers[trial].check_timer():
                            self._current_trial = trial
                            self._trial_timers[trial].reset()
                            self._trial_count[trial] += 1
                            print(trial, self._trial_count[trial])
                else:
                    if self._current_trial == trial:
                        self._current_trial = None
                    self._trial_timers[trial].start()

            self._process.set_trial(self._current_trial)
        return result, response

    @property
    def _trials(self):
        """
        Defining the trials: a single circular ROI trigger on bodypart 'bp1'.
        """
        green_roi = RegionTrigger('circle', self._green_point, self._radius * 2 + 7.5, 'bp1')
        trials = {'Greenbar_whiteback': dict(trigger=green_roi.check_skeleton,
                                             count=0)}
        return trials

    def check_exp_timer(self):
        """
        Checking the experiment timer; stops the experiment once time runs out.
        """
        if not self._exp_timer.check_timer():
            print("Experiment is finished")
            print("Time ran out.")
            self.stop_experiment()

    def start_experiment(self):
        """
        Start the experiment.
        """
        self._process.start()
        if not self.experiment_finished:
            self._exp_timer.start()

    def stop_experiment(self):
        """
        Stop the experiment and reset the timer.
        """
        self.experiment_finished = True
        print('Experiment completed!')
        self._exp_timer.reset()
        # don't forget to end the process!
        self._process.end()

    def get_trial(self):
        """
        Check which trial is going on right now.
        """
        return self._current_trial
247+
248+
249+
"""Single animal or flattened multi animal pose estimation experiments (e.g. different fur color)
250+
or by use of the FLATTEN_MA parameter in advanced settings"""
251+
252+
20253
class ExampleExperiment:
21254
"""
22255
Simple class to contain all of the experiment properties

experiments/custom/stimulation.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,8 @@ def show_visual_stim_img(type='background', name='vistim'):
5151
# Show image when called
5252
visual = {'background': dict(path=r"./experiments/src/whiteback_1920_1080.png"),
5353
'Greenbar_whiteback': dict(path=r"./experiments/src/greenbar_whiteback_1920_1080.png"),
54-
'Bluebar_whiteback': dict(path=r"./experiments/src/bluebar_whiteback_1920_1080.png")}
54+
'Bluebar_whiteback': dict(path=r"./experiments/src/bluebar_whiteback_1920_1080.png"),
55+
'DLStream_test': dict(path=r"./experiments/src/stuckinaloop.jpg")}
5556
# load image unchanged (-1), greyscale (0) or color (1)
5657
img = cv2.imread(visual[type]['path'], -1)
5758
converted_image = np.uint8(img)

experiments/custom/stimulus_process.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -68,16 +68,16 @@ def get_start_time(self):
6868

6969
def example_protocol_run(condition_q: mp.Queue):
7070
current_trial = None
71-
dmod_device = DigitalModDevice('Dev1/PFI0')
71+
#dmod_device = DigitalModDevice('Dev1/PFI0')
7272
while True:
7373
if condition_q.full():
7474
current_trial = condition_q.get()
7575
if current_trial is not None:
76-
show_visual_stim_img(img_type=current_trial, name='inside')
77-
dmod_device.toggle()
76+
show_visual_stim_img(type=current_trial, name='DlStream')
77+
#dmod_device.toggle()
7878
else:
79-
show_visual_stim_img(name='inside')
80-
dmod_device.turn_off()
79+
show_visual_stim_img(name='DlStream')
80+
#dmod_device.turn_off()
8181

8282
if cv2.waitKey(1) & 0xFF == ord('q'):
8383
break

0 commit comments

Comments
 (0)