@@ -597,10 +597,17 @@ def _parse_header(self):
             for c in range(spike_channels.size):
                 st_ann = seg_ann["spikes"][c]
                 channel_id, unit_id = self.internal_unit_ids[c]
-                unit_tag = {0: "unclassified", 255: "noise"}.get(unit_id, str(unit_id))
                 st_ann["channel_id"] = channel_id
                 st_ann["unit_id"] = unit_id
+                if unit_id == 0:
+                    st_ann["unit_classification"] = "unclassified"
+                elif 1 <= unit_id <= 16:
+                    st_ann["unit_classification"] = "sorted"
+                elif unit_id == 255:
+                    st_ann["unit_classification"] = "noise"
+                else:  # 17-254 are reserved
+                    st_ann["unit_classification"] = "reserved"
-                st_ann["unit_tag"] = unit_tag
+                st_ann["unit_tag"] = st_ann["unit_classification"]
                 st_ann["description"] = f"SpikeTrain channel_id: {channel_id}, unit_id: {unit_id}"
                 st_ann["file_origin"] = self._filenames["nev"] + ".nev"
 
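For context, the branch added above encodes the NEV unit ID convention stated in the patch (0 = unclassified, 1-16 = sorted, 255 = noise, 17-254 reserved). A minimal standalone sketch of that mapping, using a hypothetical `classify_unit_id` helper that is not part of this patch:

```python
# Hypothetical helper (not part of this patch) mirroring the unit_id branches above:
# 0 = unclassified, 1-16 = sorted, 255 = noise, 17-254 = reserved.
def classify_unit_id(unit_id: int) -> str:
    if unit_id == 0:
        return "unclassified"
    elif 1 <= unit_id <= 16:
        return "sorted"
    elif unit_id == 255:
        return "noise"
    else:  # 17-254 are reserved
        return "reserved"


assert classify_unit_id(0) == "unclassified"
assert classify_unit_id(5) == "sorted"
assert classify_unit_id(255) == "noise"
assert classify_unit_id(42) == "reserved"
```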
@@ -1058,7 +1065,10 @@ def __read_nsx_data_variant_a(self, nsx_nb):
         filename = ".".join([self._filenames["nsx"], f"ns{nsx_nb}"])
 
         # get shape of data
-        shape = (int(self.__nsx_params["2.1"](nsx_nb)["nb_data_points"]), int(self.__nsx_basic_header[nsx_nb]["channel_count"]))
+        shape = (
+            int(self.__nsx_params["2.1"](nsx_nb)["nb_data_points"]),
+            int(self.__nsx_basic_header[nsx_nb]["channel_count"]),
+        )
         offset = int(self.__nsx_params["2.1"](nsx_nb)["bytes_in_headers"])
 
         # read nsx data
@@ -1251,7 +1261,19 @@ def __read_nev_data(self, nev_data_masks, nev_data_types):
         # read all raw data packets and markers
         dt0 = [("timestamp", ts_format), ("packet_id", "uint16"), ("value", f"S{data_size - header_skip}")]
 
-        raw_data = np.memmap(filename, offset=header_size, dtype=dt0, mode="r")
+        # Expected number of data packets. We are not sure why, but files can end with a partial data packet;
+        # based on Blackrock's own code this is okay, so rounding down with int() is necessary to memory-map
+        # only the full packets and toss the trailing partial one.
+        # See reference: https://github.com/BlackrockNeurotech/Python-Utilities/blob/fa75aa671680306788e10d3d8dd625f9da4ea4f6/brpylib/brpylib.py#L580-L587
+        n_packets = int((self.__get_file_size(filename) - header_size) / data_size)
+
+        raw_data = np.memmap(
+            filename,
+            offset=header_size,
+            dtype=dt0,
+            shape=(n_packets,),
+            mode="r",
+        )
 
         masks = self.__nev_data_masks(raw_data["packet_id"])
         types = self.__nev_data_types(data_size)
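To illustrate why the explicit `shape=(n_packets,)` matters, here is a small self-contained sketch with a toy file and hypothetical packet sizes (not the real NEV layout): the trailing partial packet is rounded away, and the memory map then covers only the full packets.

```python
import os

import numpy as np

# Toy packet layout (hypothetical sizes, not a real NEV file).
header_size = 16
dt0 = np.dtype([("timestamp", "uint32"), ("packet_id", "uint16"), ("value", "S6")])
data_size = dt0.itemsize  # 12 bytes per packet here

# Write a fake header, seven full packets, and 5 stray bytes (a "partial packet").
with open("toy.nev", "wb") as f:
    f.write(b"\x00" * header_size)
    f.write(b"\x01" * (data_size * 7 + 5))

# Round down so the partial packet is dropped, as in the patch above.
n_packets = int((os.path.getsize("toy.nev") - header_size) / data_size)
assert n_packets == 7

# With an explicit shape the map covers only full packets; without it, np.memmap
# would raise because the mapped region is not a multiple of the itemsize.
raw_data = np.memmap("toy.nev", offset=header_size, dtype=dt0, shape=(n_packets,), mode="r")
assert raw_data.shape == (7,)
```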
@@ -1794,7 +1816,7 @@ def __nev_params(self, param_name):
                 hour=self.__nev_basic_header["hour"],
                 minute=self.__nev_basic_header["minute"],
                 second=self.__nev_basic_header["second"],
-                microsecond=self.__nev_basic_header["millisecond"],
+                microsecond=int(self.__nev_basic_header["millisecond"]) * 1000,
             ),
             "max_res": self.__nev_basic_header["timestamp_resolution"],
             "channel_ids": self.__nev_ext_header[b"NEUEVWAV"]["electrode_id"],
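The one-line change above fixes a unit mismatch: the NEV basic header stores a millisecond field, while `datetime` expects microseconds, so the value has to be scaled by 1000. A quick worked sketch with a hypothetical header value:

```python
from datetime import datetime

millisecond = 437  # hypothetical value read from the NEV basic header

# Passing the raw field through gives 437 microseconds instead of 437 milliseconds.
wrong = datetime(2024, 1, 2, 10, 30, 5, microsecond=millisecond)
fixed = datetime(2024, 1, 2, 10, 30, 5, microsecond=int(millisecond) * 1000)

assert wrong.microsecond == 437
assert fixed.microsecond == 437_000  # correct sub-second part of the recording datetime
```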