I created a SortingAnalyzer object from the pipeline output.
from spikeinterface import load_sorting_analyzer

sorting_folder = session_folder / "processed_data/spike_sorting_output/"
postproc_folder = sorting_folder / "postprocessed/"
# Group 0 data
group0_path = postproc_folder / "experiment1_Record Node 101#Neuropix-PXI-100.ProbeA_recording1_group0.zarr"
# Load the postprocessed sorting data
sorting_analyzer = load_sorting_analyzer(group0_path)
SortingAnalyzer: 96 channels - 122 units - 1 segments - zarr - sparse
Loaded 13 extensions: correlograms, isi_histograms, noise_levels, principal_components, quality_metrics, random_spikes, spike_amplitudes, spike_locations, template_metrics, template_similarity, templates, unit_locations, waveforms
However, I'm having trouble exporting it. Loading the preprocessed recording fails:
import spikeinterface as si

preproc_rec_group0_path = sorting_folder / "preprocessed/experiment1_Record Node 101#Neuropix-PXI-100.ProbeA_recording1_group0.json"
raw_ephys_data_path = session_folder / "raw_ephys_data"
recording_preprocessed = si.load(
    preproc_rec_group0_path,
    base_folder=raw_ephys_data_path,
)
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
Cell In[7], line 3
1 preproc_rec_group0_path = sorting_folder / "preprocessed/experiment1_Record Node 101#Neuropix-PXI-100.ProbeA_recording1_group0.json"
2 raw_ephys_data_path = session_folder / "raw_ephys_data"
----> 3 recording_preprocessed = si.load(
4 preproc_rec_group0_path,
5 base_folder=raw_ephys_data_path
6 )
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/core/loading.py:100, in load(file_or_folder_or_dict, **kwargs)
98 if object_type is None:
99 raise ValueError(_error_msg.format(file_path=file_path))
--> 100 return _load_object_from_dict(d, object_type, base_folder=base_folder)
102 elif is_local and file_path.is_dir():
104 folder = file_path
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/core/loading.py:175, in _load_object_from_dict(d, object_type, base_folder)
173 def _load_object_from_dict(d, object_type, base_folder=None):
174 if object_type in ("Recording", "Sorting", "Recording|Sorting"):
--> 175 return BaseExtractor.from_dict(d, base_folder=base_folder)
177 elif object_type == "Templates":
178 from spikeinterface.core import Templates
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/core/base.py:571, in BaseExtractor.from_dict(dictionary, base_folder)
569 assert base_folder is not None, "When relative_paths=True, need to provide base_folder"
570 dictionary = make_paths_absolute(dictionary, base_folder)
--> 571 extractor = _load_extractor_from_dict(dictionary)
572 folder_metadata = dictionary.get("folder_metadata", None)
573 if folder_metadata is not None:
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/core/base.py:1108, in _load_extractor_from_dict(dic)
1106 for name, value in dic["kwargs"].items():
1107 if is_dict_extractor(value):
-> 1108 new_kwargs[name] = _load_extractor_from_dict(value)
1109 elif isinstance(value, dict):
1110 new_kwargs[name] = {k: transform_dict_to_extractor(v) for k, v in value.items()}
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/core/base.py:1108, in _load_extractor_from_dict(dic)
1106 for name, value in dic["kwargs"].items():
1107 if is_dict_extractor(value):
-> 1108 new_kwargs[name] = _load_extractor_from_dict(value)
1109 elif isinstance(value, dict):
1110 new_kwargs[name] = {k: transform_dict_to_extractor(v) for k, v in value.items()}
[... skipping similar frames: _load_extractor_from_dict at line 1108 (4 times)]
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/core/base.py:1108, in _load_extractor_from_dict(dic)
1106 for name, value in dic["kwargs"].items():
1107 if is_dict_extractor(value):
-> 1108 new_kwargs[name] = _load_extractor_from_dict(value)
1109 elif isinstance(value, dict):
1110 new_kwargs[name] = {k: transform_dict_to_extractor(v) for k, v in value.items()}
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/core/base.py:1127, in _load_extractor_from_dict(dic)
1121 warnings.warn(
1122 f"Versions are not the same. This might lead to compatibility errors. "
1123 f"Using {class_name.split('.')[0]}=={dic['version']} is recommended"
1124 )
1126 # Initialize the extractor
-> 1127 extractor = extractor_class(**new_kwargs)
1129 extractor._annotations.update(dic["annotations"])
1130 for k, v in dic["properties"].items():
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/extractors/neoextractors/openephys.py:158, in OpenEphysBinaryRecordingExtractor.__init__(self, folder_path, load_sync_channel, load_sync_timestamps, experiment_names, stream_id, stream_name, block_index, all_annotations)
146 def __init__(
147 self,
148 folder_path,
(...)
155 all_annotations=False,
156 ):
157 neo_kwargs = self.map_to_neo_kwargs(folder_path, load_sync_channel, experiment_names)
--> 158 NeoBaseRecordingExtractor.__init__(
159 self,
160 stream_id=stream_id,
161 stream_name=stream_name,
162 block_index=block_index,
163 all_annotations=all_annotations,
164 **neo_kwargs,
165 )
166 # get streams to find correct probe
167 stream_names, stream_ids = self.get_streams(folder_path, load_sync_channel, experiment_names)
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/extractors/neoextractors/neobaseextractor.py:188, in NeoBaseRecordingExtractor.__init__(self, stream_id, stream_name, block_index, all_annotations, use_names_as_ids, **neo_kwargs)
158 def __init__(
159 self,
160 stream_id: Optional[str] = None,
(...)
165 **neo_kwargs: Dict[str, Any],
166 ) -> None:
167 """
168 Initialize a NeoBaseRecordingExtractor instance.
169
(...)
185
186 """
--> 188 _NeoBaseExtractor.__init__(self, block_index, **neo_kwargs)
190 kwargs = dict(all_annotations=all_annotations)
191 if block_index is not None:
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/extractors/neoextractors/neobaseextractor.py:27, in _NeoBaseExtractor.__init__(self, block_index, **neo_kwargs)
23 def __init__(self, block_index, **neo_kwargs):
24
25 # Avoids double initiation of the neo reader if it was already done in the __init__ of the child class
26 if not hasattr(self, "neo_reader"):
---> 27 self.neo_reader = self.get_neo_io_reader(self.NeoRawIOClass, **neo_kwargs)
29 if self.neo_reader.block_count() > 1 and block_index is None:
30 raise Exception(
31 "This dataset is multi-block. Spikeinterface can load one block at a time. "
32 "Use 'block_index' to select the block to be loaded."
33 )
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/extractors/neoextractors/neobaseextractor.py:66, in _NeoBaseExtractor.get_neo_io_reader(cls, raw_class, **neo_kwargs)
64 neoIOclass = getattr(rawio_module, raw_class)
65 neo_reader = neoIOclass(**neo_kwargs)
---> 66 neo_reader.parse_header()
68 return neo_reader
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/neo/rawio/baserawio.py:211, in BaseRawIO.parse_header(self)
197 """
198 Parses the header of the file(s) to allow for faster computations
199 for all other functions
200
201 """
202 # this must create
203 # self.header['nb_block']
204 # self.header['nb_segment']
(...)
208 # self.header['spike_channels']
209 # self.header['event_channels']
--> 211 self._parse_header()
212 self._check_stream_signal_channel_characteristics()
213 self.is_header_parsed = True
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/neo/rawio/openephysbinaryrawio.py:85, in OpenEphysBinaryRawIO._parse_header(self)
81 def _parse_header(self):
82 folder_structure, all_streams, nb_block, nb_segment_per_block, possible_experiments = explore_folder(
83 self.dirname, self.experiment_names
84 )
---> 85 check_folder_consistency(folder_structure, possible_experiments)
86 self.folder_structure = folder_structure
88 # all streams are consistent across blocks and segments.
89 # also checks that 'continuous' and 'events' folder are present
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/neo/rawio/openephysbinaryrawio.py:732, in check_folder_consistency(folder_structure, possible_experiment_names)
730 if segment_stream_names is None:
731 segment_stream_names = stream_names
--> 732 assert segment_stream_names == stream_names, (
733 "Inconsistent continuous streams across segments! Streams for different "
734 "segments in the same experiment must be the same. Check your open ephys "
735 "folder."
736 )
738 # check that "continuous" streams across blocks (experiments)
739 block_stream_names = None
AssertionError: Inconsistent continuous streams across segments! Streams for different segments in the same experiment must be the same. Check your open ephys folder.
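To narrow this down, I thought about checking whether the raw Open Ephys folder itself loads through neo (a minimal diagnostic sketch; the stream_name is inferred from the file names above and may need adjusting):

import spikeinterface.extractors as se

# Minimal check: if this raises the same assertion, the inconsistency is in
# the raw Open Ephys folder layout itself, not in the preprocessed JSON.
recording_raw = se.read_openephys(
    raw_ephys_data_path,
    stream_name="Record Node 101#Neuropix-PXI-100.ProbeA",  # assumed stream name
)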
And the export function requires the recording:
from spikeinterface.exporters import export_to_phy
output_folder = session_folder / "phy_output"
export_to_phy(sorting_analyzer=sorting_analyzer, output_folder=output_folder)
/home/prevosto/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/exporters/to_phy.py:150: UserWarning: Recording will not be copied since sorting_analyzer is recordingless.
warnings.warn("Recording will not be copied since sorting_analyzer is recordingless.")
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
Cell In[6], line 10
8 output_folder = session_folder / "phy_output"
9 # the export process is fast because everything is pre-computed
---> 10 export_to_phy(sorting_analyzer=sorting_analyzer, output_folder=output_folder)
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/exporters/to_phy.py:230, in export_to_phy(sorting_analyzer, output_folder, compute_pc_features, compute_amplitudes, sparsity, copy_binary, remove_if_exists, template_mode, add_quality_metrics, add_template_metrics, additional_properties, dtype, verbose, use_relative_path, **job_kwargs)
226 sorting_analyzer.compute("principal_components", n_components=5, mode="by_channel_local", **job_kwargs)
228 pca_extension = sorting_analyzer.get_extension("principal_components")
--> 230 pca_extension.run_for_all_spikes(output_folder / "pc_features.npy", **job_kwargs)
232 max_num_channels_pc = max(len(chan_inds) for chan_inds in used_sparsity.unit_id_to_channel_indices.values())
233 pc_feature_ind = -np.ones((len(unit_ids), max_num_channels_pc), dtype="int64")
File ~/.conda/envs/aind-ephys-pipeline/lib/python3.10/site-packages/spikeinterface/postprocessing/principal_component.py:372, in ComputePrincipalComponents.run_for_all_spikes(self, file_path, verbose, **job_kwargs)
370 sorting_analyzer = self.sorting_analyzer
371 sorting = sorting_analyzer.sorting
--> 372 assert (
373 sorting_analyzer.has_recording() or sorting_analyzer.has_temporary_recording()
374 ), "To compute PCA projections for all spikes, the sorting analyzer needs the recording"
375 recording = sorting_analyzer.recording
377 # assert sorting.get_num_segments() == 1
AssertionError: To compute PCA projections for all spikes, the sorting analyzer needs the recording
Is there a way to create a SortingAnalyzer object from the separate groups that can still be associated with the recording data?
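For what it's worth, the assertion above mentions has_temporary_recording(), so I wondered whether attaching a recording in memory would be enough, along these lines (untested sketch; it assumes recording_raw loads successfully, that its channel ids and dtype match the analyzer's after slicing, and it does not reproduce the pipeline's preprocessing):

# Untested sketch: attach an in-memory recording to the recordingless
# analyzer, then retry the export.
recording_group0 = recording_raw.channel_slice(sorting_analyzer.channel_ids)
sorting_analyzer.set_temporary_recording(recording_group0)
export_to_phy(sorting_analyzer=sorting_analyzer, output_folder=output_folder)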