Commit 95cb8e0

Merge branch 'release/2.12.0' into mtnn
2 parents 7ce79a8 + 748154a

90 files changed, +2840 −5434 lines changed

.github/workflows/ibllib_ci.yml

Lines changed: 2 additions & 2 deletions

@@ -17,8 +17,8 @@ jobs:
       fail-fast: false # Whether to stop execution of other instances
       max-parallel: 4
       matrix:
-        os: ["ubuntu-latest"] # "windows-latest"] # , "macos-latest"
-        python-version: ["3.8"] # "3.7",
+        os: ["ubuntu-latest"]
+        python-version: ["3.8"]
     steps:
       - uses: actions/checkout@v2
       - name: Set up Python ${{ matrix.python-version }}

MANIFEST.in

Lines changed: 1 addition & 0 deletions

@@ -1,6 +1,7 @@
 include ibllib/atlas/allen_structure_tree.csv
 include ibllib/atlas/beryl.npy
 include ibllib/atlas/cosmos.npy
+include ibllib/atlas/swanson.npy
 include ibllib/atlas/mappings.pqt
 include ibllib/io/extractors/extractor_types.json
 include brainbox/tests/wheel_test.p

README.md

Lines changed: 3 additions & 3 deletions

@@ -12,9 +12,9 @@ The library is currently 2 main modules:
 [Release Notes here](release_notes.md)
 
 ## Requirements
-**OS**: Deployed on Linux and Windows. Minimally tested for Mac.
+**OS**: Only tested on Linux. Windows and Mac may work, but are not supported.
 
-**Python Module**: Python 3.7 or higher, we develop on 3.8.
+**Python Module**: Python 3.8 or higher
 
 ## Installation, documentation and examples
 https://docs.internationalbrainlab.org
@@ -25,7 +25,7 @@ See https://int-brain-lab.github.io/iblenv/07_contribution.html
 
 We use gitflow and Semantic Versioning.
 
-Before commiting to your branch:
+Before committing to your branch:
 - run tests
 - flake8
 This is also enforced by continuous integration.

brainbox/__init__.py

Lines changed: 2 additions & 1 deletion

@@ -1,4 +1,5 @@
+import logging
 try:
     import one
 except ModuleNotFoundError:
-    logging.getLogger('ibllib').error('Missing dependency, please run `pip install ONE-api`')
+    logging.getLogger(__name__).error('Missing dependency, please run `pip install ONE-api`')
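
Note: before this change the module referenced `logging` without importing it, so the fallback branch itself raised a NameError whenever `one` was missing; the fix adds the import and logs through the module's own logger. A minimal, runnable sketch of the resulting guarded-import pattern (the message text comes from the diff, the rest is illustrative):

    import logging

    try:
        import one  # optional dependency checked at import time
    except ModuleNotFoundError:
        # getLogger(__name__) attributes the message to the importing module
        # rather than to a hard-coded 'ibllib' logger.
        logging.getLogger(__name__).error('Missing dependency, please run `pip install ONE-api`')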

brainbox/behavior/dlc.py

Lines changed: 12 additions & 11 deletions

@@ -11,7 +11,7 @@
 import scipy.interpolate as interpolate
 from scipy.stats import zscore
 
-from ibllib.dsp.smooth import smooth_interpolate_savgol
+from neurodsp.smooth import smooth_interpolate_savgol
 from brainbox.processing import bincount2D
 import brainbox.behavior.wheel as bbox_wheel
 
@@ -458,16 +458,17 @@ def plot_motion_energy_hist(camera_dict, trials_df):
         motion_energy = zscore(camera_dict[cam]['motion_energy'], nan_policy='omit')
         try:
             start_idx = insert_idx(camera_dict[cam]['times'], start_window)
+            end_idx = np.array(start_idx + int(WINDOW_LEN * SAMPLING[cam]), dtype='int64')
+            me_all = [motion_energy[start_idx[i]:end_idx[i]] for i in range(len(start_idx))]
+            me_all = [m for m in me_all if len(m) > 0]
+            times = np.arange(len(me_all[0])) / SAMPLING[cam] + WINDOW_LAG
+            me_mean = np.mean(me_all, axis=0)
+            me_std = np.std(me_all, axis=0) / np.sqrt(len(me_all))
+            plt.plot(times, me_mean, label=f'{cam} cam', color=colors[cam], linewidth=2)
+            plt.fill_between(times, me_mean + me_std, me_mean - me_std, color=colors[cam], alpha=0.2)
         except ValueError:
-            logger.error("Camera.times are outside of the trial windows")
-            raise
-        end_idx = np.array(start_idx + int(WINDOW_LEN * SAMPLING[cam]), dtype='int64')
-        me_all = [motion_energy[start_idx[i]:end_idx[i]] for i in range(len(start_idx))]
-        times = np.arange(len(me_all[0])) / SAMPLING[cam] + WINDOW_LAG
-        me_mean = np.mean(me_all, axis=0)
-        me_std = np.std(me_all, axis=0) / np.sqrt(len(me_all))
-        plt.plot(times, me_mean, label=f'{cam} cam', color=colors[cam], linewidth=2)
-        plt.fill_between(times, me_mean + me_std, me_mean - me_std, color=colors[cam], alpha=0.2)
+            logger.error(f"{cam}Camera camera.times are outside of the trial windows")
+            missing_data.append(cam)
         except AttributeError:
             logger.warning(f"Cannot load motion energy and/or times data for {cam} camera")
             missing_data.append(cam)
@@ -484,7 +485,7 @@ def plot_motion_energy_hist(camera_dict, trials_df):
     if len(missing_data) > 0:
         ax = plt.gca()
         ax.text(.95, .35, f"Data incomplete for\n{' and '.join(missing_data)} camera", color='r', fontsize=10,
-                horizontalalignment='right', verticalalignment='center', transform=ax.transAxes)
+                fontweight='bold', horizontalalignment='right', verticalalignment='center', transform=ax.transAxes)
     return plt.gca()
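Note: the restructure above moves all of the per-camera plotting inside the try block, so a camera whose times fall outside the trial windows is skipped and reported rather than aborting the whole figure. A minimal, self-contained sketch of that degrade-gracefully pattern on toy data (not the ibllib function itself):

    import numpy as np
    import matplotlib.pyplot as plt

    cameras = {'left': np.random.rand(100), 'right': None}  # 'right' simulates missing data
    missing_data = []
    for cam, trace in cameras.items():
        try:
            plt.plot(trace - trace.mean(), label=f'{cam} cam')  # None.mean() raises AttributeError
        except AttributeError:
            missing_data.append(cam)  # record the failure and keep going
    if missing_data:
        ax = plt.gca()
        ax.text(.95, .35, f"Data incomplete for\n{' and '.join(missing_data)} camera",
                color='r', fontsize=10, fontweight='bold', horizontalalignment='right',
                verticalalignment='center', transform=ax.transAxes)
    plt.legend()
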
brainbox/behavior/wheel.py

Lines changed: 2 additions & 4 deletions

@@ -213,10 +213,8 @@ def movements(t, pos, freq=1000, pos_thresh=8, t_thresh=.2, min_gap=.1, pos_thre
         if i2proc[-1] == t.size - 1:
             break
 
-    moving = max_disp > pos_thresh  # for each window is the change in position greater than
-    # our threshold?
-    moving = np.insert(moving, 0, False)  # First sample should always be not moving to ensure
-    # we have an onset
+    moving = max_disp > pos_thresh  # for each window is the change in position greater than our threshold?
+    moving = np.insert(moving, 0, False)  # First sample should always be not moving to ensure we have an onset
     moving[-1] = False  # Likewise, ensure we always end on an offset
 
     onset_samps = np.where(~moving[:-1] & moving[1:])[0]
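
Note: the trailing context line is the heart of the movement detector: with the mask forced to start and end on False, every False-to-True transition is an onset and every True-to-False transition an offset. A small worked example of that idiom (toy mask, illustrative values):

    import numpy as np

    moving = np.array([False, False, True, True, True, False, True, False])
    onset_samps = np.where(~moving[:-1] & moving[1:])[0]   # False -> True transitions
    offset_samps = np.where(moving[:-1] & ~moving[1:])[0]  # True -> False transitions
    print(onset_samps)   # [1 5]: movement starts at samples 2 and 6
    print(offset_samps)  # [4 6]: movement stops after samples 4 and 6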

brainbox/io/one.py

Lines changed: 29 additions & 26 deletions

@@ -10,12 +10,13 @@
 
 from one.api import ONE, One
 import one.alf.io as alfio
+from one.alf.files import get_alf_path
 from one.alf import cache
+from neuropixel import SITES_COORDINATES, TIP_SIZE_UM, trace_header
+import spikeglx
 
 from iblutil.util import Bunch
-from ibllib.io import spikeglx
 from ibllib.io.extractors.training_wheel import extract_wheel_moves, extract_first_movement_times
-from ibllib.ephys.neuropixel import SITES_COORDINATES, TIP_SIZE_UM, trace_header
 from ibllib.atlas import atlas, AllenAtlas
 from ibllib.pipes import histology
 from ibllib.pipes.ephys_alignment import EphysAlignment
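
Note: the import shuffle above appears to reflect modules split out of ibllib into standalone packages in this release: ibllib.io.spikeglx becomes spikeglx, ibllib.ephys.neuropixel becomes neuropixel, and (in brainbox/behavior/dlc.py above) ibllib.dsp becomes neurodsp. A hedged migration sketch using only names that appear in these diffs:

    # from ibllib.io import spikeglx           ->  import spikeglx
    # from ibllib.ephys.neuropixel import ...  ->  from neuropixel import ...
    # from ibllib.dsp.smooth import ...        ->  from neurodsp.smooth import ...
    import spikeglx
    from neuropixel import SITES_COORDINATES, TIP_SIZE_UM, trace_header
    from neurodsp.smooth import smooth_interpolate_savgol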
@@ -123,7 +124,7 @@ def _channels_alf2bunch(channels, brain_regions=None):
 
 
 def _load_spike_sorting(eid, one=None, collection=None, revision=None, return_channels=True, dataset_types=None,
-                        brain_regions=None, return_collection=False):
+                        brain_regions=None):
     """
     Generic function to load spike sorting according data using ONE.
@@ -168,7 +169,7 @@ def _load_spike_sorting(eid, one=None, collection=None, revision=None, return_ch
     collections = one.list_collections(eid, filename='spikes*', collection=collection, revision=revision)
     if len(collections) == 0:
         _logger.warning(f"eid {eid}: no collection found with collection filter: {collection}, revision: {revision}")
-    pnames = list(set([c.split('/')[1] for c in collections]))
+    pnames = list(set(c.split('/')[1] for c in collections))
     spikes, clusters, channels = ({} for _ in range(3))
 
     spike_attributes, cluster_attributes = _get_attributes(dataset_types)
@@ -246,7 +247,7 @@ def channel_locations_interpolation(channels_aligned, channels=None, brain_regio
     if channels is None:
         channels = {'localCoordinates': np.c_[h['x'], h['y']]}
     nch = channels['localCoordinates'].shape[0]
-    if set(['x', 'y', 'z']).issubset(set(channels_aligned.keys())):
+    if {'x', 'y', 'z'}.issubset(set(channels_aligned.keys())):
         channels_aligned = _channels_bunch2alf(channels_aligned)
     if 'localCoordinates' in channels_aligned.keys():
         aligned_depths = channels_aligned['localCoordinates'][:, 1]
@@ -350,7 +351,7 @@
     return channels
 
 
-def load_channel_locations(eid, probe=None, one=None, aligned=False, brain_atlas=None, return_source=False):
+def load_channel_locations(eid, probe=None, one=None, aligned=False, brain_atlas=None):
    """
    Load the brain locations of each channel for a given session/probe
@@ -367,8 +368,6 @@ def load_channel_locations(eid, probe=None, one=None, aligned=False, brain_atlas
         Whether to get the latest user aligned channel when not resolved or use histology track
     brain_atlas : ibllib.atlas.BrainAtlas
         Brain atlas object (default: Allen atlas)
-    return_source: bool
-        if True returns the source of the channel lcoations (default False)
     Returns
     -------
     dict of one.alf.io.AlfBunch
@@ -412,7 +411,6 @@ def load_spike_sorting_fast(eid, one=None, probe=None, dataset_types=None, spike
     :param dataset_types: additional spikes/clusters objects to add to the standard default list
     :param spike_sorter: name of the spike sorting you want to load (None for default)
     :param collection: name of the spike sorting collection to load - exclusive with spike sorter name ex: "alf/probe00"
-    :param return_channels: (bool) defaults to False otherwise tries and load channels from disk
     :param brain_regions: ibllib.atlas.regions.BrainRegions object - will label acronyms if provided
     :param nested: if a single probe is required, do not output a dictionary with the probe name as key
     :param return_collection: (False) if True, will return the collection used to load
@@ -454,7 +452,6 @@ def load_spike_sorting(eid, one=None, probe=None, dataset_types=None, spike_sort
     :param probe: name of probe to load in, if not given all probes for session will be loaded
     :param dataset_types: additional spikes/clusters objects to add to the standard default list
     :param spike_sorter: name of the spike sorting you want to load (None for default)
-    :param return_channels: (bool) defaults to False otherwise tries and load channels from disk
     :param brain_regions: ibllib.atlas.regions.BrainRegions object - will label acronyms if provided
     :param return_collection:(bool - False) if True, returns the collection for loading the data
     :return: spikes, clusters (dict of bunch, 1 bunch per probe)
@@ -677,7 +674,7 @@ def load_wheel_reaction_times(eid, one=None):
     eid : [str, UUID, Path, dict]
         Experiment session identifier; may be a UUID, URL, experiment reference string
         details dict or Path
-    one : oneibl.one.OneAlyx, optional
+    one : one.api.OneAlyx, optional
         one object to use for loading. Will generate internal one if not used, by default None
 
     Returns
@@ -721,7 +718,7 @@ def load_trials_df(eid, one=None, maxlen=None, t_before=0., t_after=0., ret_whee
     eid : [str, UUID, Path, dict]
         Experiment session identifier; may be a UUID, URL, experiment reference string
         details dict or Path
-    one : oneibl.one.OneAlyx, optional
+    one : one.api.OneAlyx, optional
         one object to use for loading. Will generate internal one if not used, by default None
     maxlen : float, optional
         Maximum trial length for inclusion in df. Trials where feedback - response is longer
@@ -881,16 +878,17 @@ class SpikeSortingLoader:
         SpikeSortingLoader(eid=eid, pname='probe00', one=one)
     - From a local session and probe name:
         SpikeSortingLoader(session_path=session_path, pname='probe00')
+    NB: When no ONE instance is passed, any datasets that are loaded will not be recorded.
     """
-    one: ONE = None
+    one: One = None
     atlas: None = None
     pid: str = None
     eid: str = ''
     pname: str = ''
-    # the following properties are the outcome of the post init funciton
     session_path: Path = ''
+    # the following properties are the outcome of the post init function
     collections: list = None
-    datasets: list = None  # list of all datasets belonging to the sesion
+    datasets: list = None  # list of all datasets belonging to the session
     # the following properties are the outcome of a reading function
     files: dict = None
     collection: str = ''
@@ -907,11 +905,14 @@ def __post_init__(self):
             self.session_path = self.one.eid2path(self.eid)
         # fully local providing a session path
         else:
-            self.one = One(cache_dir=self.session_path.parents[2], mode='local')
-            df_sessions = cache._make_sessions_df(self.session_path)
-            self.one._cache['sessions'] = df_sessions.set_index('id')
-            self.one._cache['datasets'] = cache._make_datasets_df(self.session_path, hash_files=False)
-            self.eid = str(self.session_path.relative_to(self.session_path.parents[2]))
+            if self.one:
+                self.eid = self.one.to_eid(self.session_path)
+            else:
+                self.one = One(cache_dir=self.session_path.parents[2], mode='local')
+                df_sessions = cache._make_sessions_df(self.session_path)
+                self.one._cache['sessions'] = df_sessions.set_index('id')
+                self.one._cache['datasets'] = cache._make_datasets_df(self.session_path, hash_files=False)
+                self.eid = str(self.session_path.relative_to(self.session_path.parents[2]))
         # populates default properties
         self.collections = self.one.list_collections(
             self.eid, filename='spikes*', collection=f"alf/{self.pname}*")
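
Note: the new branch above means a caller can now pair a local session_path with an existing ONE instance (the eid is resolved via one.to_eid) instead of always getting a throwaway local cache. A hedged usage sketch of the two construction modes shown in the class docstring (the eid and path are placeholders, not real data):

    from pathlib import Path
    from one.api import ONE
    from brainbox.io.one import SpikeSortingLoader

    # Registered session: eid resolved through an existing ONE instance.
    ssl = SpikeSortingLoader(eid='<session-eid>', pname='probe00', one=ONE())

    # Fully local session: no ONE passed; __post_init__ builds a local cache from
    # the folder and derives the eid from the path (datasets will not be recorded).
    ssl_local = SpikeSortingLoader(session_path=Path('/data/lab/Subjects/subject/2022-01-01/001'),
                                   pname='probe00')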
@@ -932,7 +933,7 @@ def _get_attributes(dataset_types):
         cluster_attributes = list(set(CLUSTERS_ATTRIBUTES + cluster_attributes))
         return spike_attributes, cluster_attributes
 
-    def _get_spike_sorting_collection(self, spike_sorter='pykilosort', revision=None):
+    def _get_spike_sorting_collection(self, spike_sorter='pykilosort'):
         """
         Filters a list or array of collections to get the relevant spike sorting dataset
         if there is a pykilosort, load it
@@ -943,17 +944,18 @@ def _get_spike_sorting_collection(self, spike_sorter='pykilosort', revision=None
         _logger.debug(f"selecting: {collection} to load amongst candidates: {self.collections}")
         return collection
 
-    def download_spike_sorting_object(self, obj, spike_sorter='pykilosort', dataset_types=None):
+    def download_spike_sorting_object(self, obj, spike_sorter='pykilosort', dataset_types=None, collection=None):
         """
         Downloads an ALF object
         :param obj: object name, str between 'spikes', 'clusters' or 'channels'
         :param spike_sorter: (defaults to 'pykilosort')
-        :param dataset_types: list of extra dataset types
+        :param dataset_types: list of extra dataset types, for example ['spikes.samples']
+        :param collection: string specifiying the collection, for example 'alf/probe01/pykilosort'
         :return:
         """
         if len(self.collections) == 0:
             return {}, {}, {}
-        self.collection = self._get_spike_sorting_collection(spike_sorter=spike_sorter)
+        self.collection = collection or self._get_spike_sorting_collection(spike_sorter=spike_sorter)
         _logger.debug(f"loading spike sorting from {self.collection}")
         spike_attributes, cluster_attributes = self._get_attributes(dataset_types)
         attributes = {'spikes': spike_attributes, 'clusters': cluster_attributes, 'channels': None,
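
Note: the new collection argument lets callers pin an exact spike sorting collection instead of relying on the pykilosort-first heuristic in _get_spike_sorting_collection. A hedged sketch, reusing the loader from the sketch above; the dataset type and collection strings are the docstring's own examples:

    ssl.download_spike_sorting_object(
        'spikes',
        dataset_types=['spikes.samples'],     # extra attribute on top of the defaults
        collection='alf/probe01/pykilosort',  # bypasses _get_spike_sorting_collection
    )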
@@ -983,7 +985,7 @@ def load_spike_sorting(self, **kwargs):
         - alf: the final version of channel locations, same as resolved with the difference that data is on file
         - resolved: channel locations alignments have been agreed upon
         - aligned: channel locations have been aligned, but review or other alignments are pending, potentially not accurate
-        - traced: the histology track has been recovered from microscopy, however the depths may not match, inacurate data
+        - traced: the histology track has been recovered from microscopy, however the depths may not match, inaccurate data
 
         :param spike_sorter: (defaults to 'pykilosort')
         :param dataset_types: list of extra dataset types
@@ -1035,4 +1037,5 @@ def merge_clusters(spikes, clusters, channels, cache_dir=None):
     @property
     def url(self):
         """Gets flatiron URL for the session"""
-        return str(self.session_path).replace(str(self.one.alyx.cache_dir), 'https://ibl.flatironinstitute.org')
+        webclient = getattr(self.one, '_web_client', None)
+        return webclient.rel_path2url(get_alf_path(self.session_path)) if webclient else None
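
Note: with a purely local One instance there is no web client, so url now returns None instead of string-replacing the cache directory into a flatiron address; callers should guard for that. A hedged sketch, continuing the local-loader example above (ssl_local.one has no _web_client):

    if ssl_local.url is not None:
        print(ssl_local.url)  # remote URL built from the session's relative ALF path
    else:
        print('local session: no remote URL available')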
