Skip to content

Commit 6ad5e15

Browse files
author
sprenger
committed
[elphy] reintroduce elphyio and add tests
1 parent f97bd06 commit 6ad5e15

File tree

2 files changed

+48
-27
lines changed

2 files changed

+48
-27
lines changed

neo/io/elphyio.py

Lines changed: 33 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -561,6 +561,8 @@ def get_protocol_and_version(self):
561561

562562
def get_title(self):
563563
title_length, title = struct.unpack('<B20s', self.file.read(21))
564+
if hasattr(title, 'decode'):
565+
title = title.decode()
564566
return str(title[0:title_length])
565567

566568
def get_user_file_info(self):
@@ -672,8 +674,10 @@ def get_protocol_and_version(self):
672674
def get_title(self):
673675
title_length = read_from_char(self.file, 'B')
674676
title, = struct.unpack('<%ss' % title_length, self.file.read(title_length))
677+
if hasattr(title, 'decode'):
678+
title = title.decode()
675679
self.file.seek(self.file.tell() + 255 - title_length)
676-
return str(title)
680+
return title
677681

678682
def get_user_file_info(self):
679683
header = dict()
@@ -900,10 +904,14 @@ def __init__(self, layout):
900904
assert not ((n_channels < 1) or (n_channels > 16)), "bad number of channels"
901905
nbpt = read_from_char(fileobj, 'h')
902906
l_xu, x_unit = struct.unpack('<B3s', fileobj.read(4))
907+
if hasattr(x_unit, 'decode'):
908+
x_unit = x_unit.decode()
903909
# extract units for each channel
904910
y_units = list()
905911
for i in range(1, 7):
906912
l_yu, y_unit = struct.unpack('<B3s', fileobj.read(4))
913+
if hasattr(y_unit, 'decode'):
914+
y_unit = y_unit.decode()
907915
y_units.append(y_unit[0:l_yu])
908916

909917
# extract i1, i2, x1, x2 and compute dX and X0
@@ -1223,6 +1231,8 @@ def __init__(self, layout, identifier, start, size, fixed_length=None, size_form
12231231
Y0_ar = list()
12241232
for _ in range(0, 16):
12251233
l_yu, yu, dY, Y0 = struct.unpack('<B10sdd', layout.file.read(27))
1234+
if hasattr(yu, 'decode'):
1235+
yu = yu.decode()
12261236
y_units.append(yu[0:l_yu])
12271237
dY_ar.append(dY)
12281238
Y0_ar.append(Y0)
@@ -1529,6 +1539,8 @@ def __init__(self, layout, identifier, start, size, fixed_length=None, size_form
15291539
self.Y0_ar = list()
15301540
for _ in range(0, n_channels):
15311541
l_yu, y_unit, dY, Y0 = struct.unpack('<B10sdd', fileobj.read(27))
1542+
if hasattr(y_unit, 'decode'):
1543+
y_unit = y_unit.decode()
15321544
self.y_units.append(y_unit[0:l_yu])
15331545
self.dY_ar.append(dY)
15341546
self.Y0_ar.append(Y0)
@@ -1682,7 +1694,7 @@ def least_common_multiple(a, b):
16821694
"""
16831695
Return the value of the least common multiple.
16841696
"""
1685-
return (a * b) / gcd(a, b)
1697+
return int((a * b) / gcd(a, b))
16861698

16871699

16881700
# --------------------------------------------------------
@@ -1954,7 +1966,7 @@ def load_channel_data(self, ep, ch):
19541966
# reshape bytes from the sample size
19551967
dt = np.dtype(numpy_map[sample_symbol])
19561968
dt.newbyteorder('<')
1957-
return np.frombuffer(raw.reshape([len(raw) / sample_size, sample_size]), dt)
1969+
return np.frombuffer(raw.reshape([int(len(raw) / sample_size), sample_size]), dt)
19581970

19591971
def apply_op(self, np_array, value, op_type):
19601972
"""
@@ -2459,7 +2471,7 @@ def get_blocks_stored_in_episode(self, ep):
24592471
if (blk_1 == blk_2) or (i_2 < i_1):
24602472
return [k for k in data_blocks if self.blocks.index(k) > i_1]
24612473
else:
2462-
return [k for k in data_blocks if self.blocks.index(k) in xrange(i_1, i_2)]
2474+
return [k for k in data_blocks if self.blocks.index(k) in range(i_1, i_2)]
24632475

24642476
def set_cyberk_blocks(self):
24652477
ck_blocks = list()
@@ -2529,10 +2541,9 @@ def sub_sampling(self, ep, ch):
25292541
return block.ks_block.k_sampling[ch - 1] if block.ks_block else 1
25302542

25312543
def aggregate_size(self, block, ep):
2532-
ag_count = self.aggregate_sample_count(block)
25332544
ag_size = 0
2534-
for ch in range(1, ag_count + 1):
2535-
if (block.ks_block.k_sampling[ch - 1] != 0):
2545+
for ch in range(1, len(block.ks_block.k_sampling)):
2546+
if block.ks_block.k_sampling[ch - 1] != 0:
25362547
ag_size += self.sample_size(ep, ch)
25372548
return ag_size
25382549

@@ -2653,7 +2664,7 @@ def aggregate_sample_count(self, block):
26532664
count = 0
26542665
for i in range(0, block.ep_block.n_channels):
26552666
if block.ks_block.k_sampling[i] > 0:
2656-
count += lcm0 / block.ks_block.k_sampling[i]
2667+
count += int(lcm0 / block.ks_block.k_sampling[i])
26572668

26582669
return count
26592670

@@ -3016,6 +3027,8 @@ def create_sub_block(self, block, sub_offset):
30163027
self.file.seek(sub_offset)
30173028
sub_ident_size = read_from_char(self.file, 'B')
30183029
sub_identifier, = struct.unpack('<%ss' % sub_ident_size, self.file.read(sub_ident_size))
3030+
if hasattr(sub_identifier, 'decode'):
3031+
sub_identifier = sub_identifier.decode()
30193032
sub_data_size = read_from_char(self.file, 'H')
30203033
sub_data_offset = sub_offset + sub_ident_size + 3
30213034
size_format = "H"
@@ -3100,6 +3113,8 @@ def create_header(self, layout):
31003113
def create_block(self, layout, offset):
31013114
self.file.seek(offset)
31023115
ident_size, identifier = struct.unpack('<B15s', self.file.read(16))
3116+
if hasattr(identifier, 'decode'):
3117+
identifier = identifier.decode()
31033118
identifier = identifier[0:ident_size]
31043119
size = read_from_char(self.file, 'h')
31053120
block_type = self.select_block_subclass(identifier)
@@ -3138,6 +3153,8 @@ def create_header(self, layout):
31383153
def create_block(self, layout, offset):
31393154
self.file.seek(offset)
31403155
ident_size, identifier = struct.unpack('<B15s', self.file.read(16))
3156+
if hasattr(identifier, 'decode'):
3157+
identifier = identifier.decode()
31413158
# block title size is 7 or 15 bytes
31423159
# 7 is for sequence blocs
31433160
if identifier.startswith('DAC2SEQ'):
@@ -3185,6 +3202,8 @@ def create_block(self, layout, offset):
31853202
size = read_from_char(self.file, 'l')
31863203
ident_size = read_from_char(self.file, 'B')
31873204
identifier, = struct.unpack('<%ss' % ident_size, self.file.read(ident_size))
3205+
if hasattr(identifier, 'decode'):
3206+
identifier = identifier.decode()
31883207
block_type = self.select_block_subclass(identifier)
31893208
block = block_type(layout, identifier, offset, size, size_format='l')
31903209
self.file.seek(0)
@@ -3390,10 +3409,10 @@ def get_nomenclature(self):
33903409
"""
33913410
self.file.seek(0)
33923411
length, title = struct.unpack('<B15s', self.file.read(16))
3393-
self.file.seek(0)
3394-
title = title[0:length]
33953412
if hasattr(title, 'decode'):
33963413
title = title.decode()
3414+
self.file.seek(0)
3415+
title = title[0:length]
33973416
if title not in factories:
33983417
title = "format is not implemented ('{}' not in {})".format(
33993418
title, str(factories.keys()))
@@ -3809,7 +3828,7 @@ def read_block(self, lazy=False, ):
38093828

38103829
# create a segment containing all analog,
38113830
# tag and event channels for the episode
3812-
if self.elphy_file.n_episodes is None:
3831+
if self.elphy_file.n_episodes in [None, 0]:
38133832
print("File '%s' appears to have no episodes" % (self.filename))
38143833
return block
38153834
for episode in range(1, self.elphy_file.n_episodes + 1):
@@ -4207,10 +4226,10 @@ def read_segment(self, episode):
42074226
analog_signal = AnalogSignal(
42084227
signal.data['y'],
42094228
units=signal.y_unit,
4210-
t_start=signal.t_start * getattr(pq, signal.x_unit.strip()),
4211-
t_stop=signal.t_stop * getattr(pq, signal.x_unit.strip()),
4229+
t_start=signal.t_start * getattr(pq, signal.x_unit.strip().decode()),
4230+
t_stop=signal.t_stop * getattr(pq, signal.x_unit.strip().decode()),
42124231
# sampling_rate = signal.sampling_frequency * pq.kHz,
4213-
sampling_period=signal.sampling_period * getattr(pq, signal.x_unit.strip()),
4232+
sampling_period=signal.sampling_period * getattr(pq, signal.x_unit.strip().decode()),
42144233
channel_name="episode {}, channel {}".format(int(episode + 1), int(channel + 1))
42154234
)
42164235
analog_signal.segment = segment

neo/test/iotest/test_elphyio.py

Lines changed: 15 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -8,23 +8,25 @@
88
from neo.test.iotest.common_io_test import BaseTestIO
99

1010

11-
12-
class TestElanIO(BaseTestIO, unittest.TestCase):
11+
class TestElphyIO(BaseTestIO, unittest.TestCase):
1312
ioclass = ElphyIO
14-
files_to_test = ['DATA1.DAT', 'ElphyExample.DAT',
15-
'ElphyExample_Mode1.dat', 'ElphyExample_Mode2.dat',
16-
'ElphyExample_Mode3.dat']
17-
files_to_download = ['DATA1.DAT', 'ElphyExample.DAT',
18-
'ElphyExample_Mode1.dat', 'ElphyExample_Mode2.dat',
19-
'ElphyExample_Mode3.dat']
13+
entities_to_download = [
14+
'elphy'
15+
]
16+
entities_to_test = ['elphy/DATA1.DAT',
17+
'elphy/ElphyExample.DAT',
18+
'elphy/ElphyExample_Mode1.dat',
19+
'elphy/ElphyExample_Mode2.dat',
20+
'elphy/ElphyExample_Mode3.dat']
2021

2122
def test_read_data(self):
22-
io = ElphyIO(self.get_filename_path('DATA1.DAT'))
23-
bl = io.read_block()
24-
25-
print(bl)
26-
23+
for filename in self.entities_to_test:
24+
io = ElphyIO(self.get_local_path(filename))
25+
bl = io.read_block()
2726

27+
self.assertTrue(len(bl.segments) > 0)
28+
# ensure that at least one data object is generated for each file
29+
self.assertTrue(any(list(bl.segments[0].size.values())))
2830

2931
if __name__ == "__main__":
3032
unittest.main()

0 commit comments

Comments (0)