Skip to content

Commit d62facc

Browse files
committed
Fixed typos
1 parent be485c6 commit d62facc

File tree

3 files changed

+93
-85
lines changed

3 files changed

+93
-85
lines changed

nibabel/benchmarks/bench_streamlines.py

Lines changed: 30 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -47,16 +47,20 @@ def bench_load_trk():
4747
tractogram = Tractogram(points, affine_to_rasmm=np.eye(4))
4848
TrkFile(tractogram).save(trk_file)
4949

50-
loaded_streamlines_old = [d[0]-0.5 for d in tv.read(trk_file, points_space="rasmm")[0]]
50+
streamlines_old = [d[0] - 0.5
51+
for d in tv.read(trk_file, points_space="rasmm")[0]]
5152
mtime_old = measure('tv.read(trk_file, points_space="rasmm")', repeat)
52-
print("Old: Loaded %d streamlines in %6.2f" % (NB_STREAMLINES, mtime_old))
53-
54-
loaded_streamlines_new = nib.streamlines.load(trk_file, lazy_load=False).streamlines
55-
mtime_new = measure('nib.streamlines.load(trk_file, lazy_load=False)', repeat)
56-
print("\nNew: Loaded %d streamlines in %6.2f" % (NB_STREAMLINES, mtime_new))
57-
print("Speedup of %2f" % (mtime_old/mtime_new))
58-
59-
for s1, s2 in zip(loaded_streamlines_new, loaded_streamlines_old):
53+
print("Old: Loaded {:,} streamlines in {:6.2f}".format(NB_STREAMLINES,
54+
mtime_old))
55+
56+
trk = nib.streamlines.load(trk_file, lazy_load=False)
57+
streamlines_new = trk.streamlines
58+
mtime_new = measure('nib.streamlines.load(trk_file, lazy_load=False)',
59+
repeat)
60+
print("\nNew: Loaded {:,} streamlines in {:6.2}".format(NB_STREAMLINES,
61+
mtime_new))
62+
print("Speedup of {:.2f}".format(mtime_old / mtime_new))
63+
for s1, s2 in zip(streamlines_new, streamlines_old):
6064
assert_array_equal(s1, s2)
6165

6266
# Points and scalars
@@ -68,13 +72,24 @@ def bench_load_trk():
6872
affine_to_rasmm=np.eye(4))
6973
TrkFile(tractogram).save(trk_file)
7074

71-
mtime_old = measure('tv.read(trk_file, points_space="rasmm")', repeat)
72-
print("Old: Loaded %d streamlines with scalars in %6.2f" % (NB_STREAMLINES, mtime_old))
73-
74-
mtime_new = measure('nib.streamlines.load(trk_file, lazy_load=False)', repeat)
75-
print("New: Loaded %d streamlines with scalars in %6.2f" % (NB_STREAMLINES, mtime_new))
76-
print("Speedup of %2f" % (mtime_old/mtime_new))
75+
streamlines_old = [d[0] - 0.5
76+
for d in tv.read(trk_file, points_space="rasmm")[0]]
7777

78+
scalars_old = [d[1]
79+
for d in tv.read(trk_file, points_space="rasmm")[0]]
80+
mtime_old = measure('tv.read(trk_file, points_space="rasmm")', repeat)
81+
msg = "Old: Loaded {:,} streamlines with scalars in {:6.2f}"
82+
print(msg.format(NB_STREAMLINES, mtime_old))
83+
84+
trk = nib.streamlines.load(trk_file, lazy_load=False)
85+
scalars_new = trk.tractogram.data_per_point['scalars']
86+
mtime_new = measure('nib.streamlines.load(trk_file, lazy_load=False)',
87+
repeat)
88+
msg = "New: Loaded {:,} streamlines with scalars in {:6.2f}"
89+
print(msg.format(NB_STREAMLINES, mtime_new))
90+
print("Speedup of {:2f}".format(mtime_old / mtime_new))
91+
for s1, s2 in zip(scalars_new, scalars_old):
92+
assert_array_equal(s1, s2)
7893

7994
if __name__ == '__main__':
8095
bench_load_trk()

nibabel/streamlines/array_sequence.py

Lines changed: 61 additions & 61 deletions
Original file line numberDiff line numberDiff line change
@@ -80,67 +80,6 @@ def __init__(self, iterable=None, buffer_size=4):
8080

8181
coroutine.close() # Terminate coroutine.
8282

83-
def _extend_using_coroutine(self, buffer_size=4):
84-
""" Creates a coroutine allowing to append elements.
85-
86-
Parameters
87-
----------
88-
buffer_size : float, optional
89-
Size (in Mb) for memory pre-allocation.
90-
91-
Returns
92-
-------
93-
coroutine
94-
Coroutine object which expects the values to be appended to this
95-
array sequence.
96-
97-
Notes
98-
-----
99-
This method is essential for
100-
:func:`create_arraysequences_from_generator` as it allows for an
101-
efficient way of creating multiple array sequences in a hyperthreaded
102-
fashion and still benefit from the memory buffering. Without this
103-
method the alternative would be to use :meth:`append` which does
104-
not have such buffering mechanism and thus is at least one order of
105-
magnitude slower.
106-
"""
107-
offsets = []
108-
lengths = []
109-
110-
offset = 0 if len(self) == 0 else self._offsets[-1] + self._lengths[-1]
111-
try:
112-
first_element = True
113-
while True:
114-
e = (yield)
115-
e = np.asarray(e)
116-
if first_element:
117-
first_element = False
118-
n_rows_buffer = int(buffer_size * 1024**2 // e.nbytes)
119-
new_shape = (n_rows_buffer,) + e.shape[1:]
120-
if len(self) == 0:
121-
self._data = np.empty(new_shape, dtype=e.dtype)
122-
123-
end = offset + len(e)
124-
if end > len(self._data):
125-
# Resize needed, adding `len(e)` items plus some buffer.
126-
nb_points = len(self._data)
127-
nb_points += len(e) + n_rows_buffer
128-
self._data.resize((nb_points,) + self.common_shape)
129-
130-
offsets.append(offset)
131-
lengths.append(len(e))
132-
self._data[offset:offset + len(e)] = e
133-
offset += len(e)
134-
135-
except GeneratorExit:
136-
pass
137-
138-
self._offsets = np.concatenate([self._offsets, offsets], axis=0)
139-
self._lengths = np.concatenate([self._lengths, lengths], axis=0)
140-
141-
# Clear unused memory.
142-
self._data.resize((offset,) + self.common_shape)
143-
14483
@property
14584
def is_array_sequence(self):
14685
return True
@@ -238,6 +177,67 @@ def extend(self, elements):
238177
self._lengths = np.r_[self._lengths, elements._lengths]
239178
self._offsets = np.r_[self._offsets, offsets]
240179

180+
def _extend_using_coroutine(self, buffer_size=4):
181+
""" Creates a coroutine allowing to append elements.
182+
183+
Parameters
184+
----------
185+
buffer_size : float, optional
186+
Size (in Mb) for memory pre-allocation.
187+
188+
Returns
189+
-------
190+
coroutine
191+
Coroutine object which expects the values to be appended to this
192+
array sequence.
193+
194+
Notes
195+
-----
196+
This method is essential for
197+
:func:`create_arraysequences_from_generator` as it allows for an
198+
efficient way of creating multiple array sequences in a hyperthreaded
199+
fashion and still benefit from the memory buffering. Without this
200+
method the alternative would be to use :meth:`append` which does
201+
not have such buffering mechanism and thus is at least one order of
202+
magnitude slower.
203+
"""
204+
offsets = []
205+
lengths = []
206+
207+
offset = 0 if len(self) == 0 else self._offsets[-1] + self._lengths[-1]
208+
try:
209+
first_element = True
210+
while True:
211+
e = (yield)
212+
e = np.asarray(e)
213+
if first_element:
214+
first_element = False
215+
n_rows_buffer = int(buffer_size * 1024**2 // e.nbytes)
216+
new_shape = (n_rows_buffer,) + e.shape[1:]
217+
if len(self) == 0:
218+
self._data = np.empty(new_shape, dtype=e.dtype)
219+
220+
end = offset + len(e)
221+
if end > len(self._data):
222+
# Resize needed, adding `len(e)` items plus some buffer.
223+
nb_points = len(self._data)
224+
nb_points += len(e) + n_rows_buffer
225+
self._data.resize((nb_points,) + self.common_shape)
226+
227+
offsets.append(offset)
228+
lengths.append(len(e))
229+
self._data[offset:offset + len(e)] = e
230+
offset += len(e)
231+
232+
except GeneratorExit:
233+
pass
234+
235+
self._offsets = np.r_[self._offsets, offsets].astype(np.intp)
236+
self._lengths = np.r_[self._lengths, lengths].astype(np.intp)
237+
238+
# Clear unused memory.
239+
self._data.resize((offset,) + self.common_shape)
240+
241241
def copy(self):
242242
""" Creates a copy of this :class:`ArraySequence` object.
243243

nibabel/streamlines/trk.py

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -601,8 +601,8 @@ def _read(fileobj, header):
601601
data : tuple of ndarrays
602602
Streamline data: points, scalars, properties.
603603
points: ndarray of shape (n_pts, 3)
604-
scalars: ndarray of shape (n_pts, nb_scalars_per_per_point)
605-
properties: ndarray of shape (nb_properties_per_per_point,)
604+
scalars: ndarray of shape (n_pts, nb_scalars_per_point)
605+
properties: ndarray of shape (nb_properties_per_point,)
606606
"""
607607
i4_dtype = np.dtype(header[Field.ENDIANNESS] + "i4")
608608
f4_dtype = np.dtype(header[Field.ENDIANNESS] + "f4")
@@ -664,13 +664,6 @@ def _read(fileobj, header):
664664
def __str__(self):
665665
""" Gets a formatted string of the header of a TRK file.
666666
667-
Parameters
668-
----------
669-
fileobj : string or file-like object
670-
If string, a filename; otherwise an open file-like object
671-
pointing to TRK file (and ready to read from the beginning
672-
of the header).
673-
674667
Returns
675668
-------
676669
info : string

0 commit comments

Comments
 (0)