
Commit 400e1cb

DOC: reformat, add to some docstrings / comments
Reformat for 78 character length; one typo. Add some extra comments.
1 parent 4554ab0 commit 400e1cb

3 files changed (+51 -42 lines)

nibabel/arraywriters.py

Lines changed: 25 additions & 24 deletions
@@ -414,10 +414,10 @@ def _iu2iu(self):
         if self._out_dtype.kind == 'u':
             # We're checking for a sign flip. This can only work for uint
             # output, because, for int output, the abs min of the type is
-            # greater than the abs max, so the data either fit into the range
-            # (tested for in _do_scaling), or this test can't pass
-            # Need abs that deals with max neg ints. abs problem only arises
-            # when all the data is set to max neg integer value
+            # greater than the abs max, so the data either fits into the range
+            # (tested for in _do_scaling), or this test can't pass. Need abs
+            # that deals with max neg ints. abs problem only arises when all
+            # the data is set to max neg integer value
             imax = np.iinfo(self._out_dtype).max
             if mx <= 0 and int_abs(mn) <= imax: # sign flip enough?
                 # -1.0 * arr will be in scaler_dtype precision
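
A note on the comment in this hunk: for signed integer types a plain abs
overflows on the most negative value, which is why the writer reaches for the
int_abs helper. A minimal sketch of the failure and one possible workaround
(not nibabel's exact implementation):

    import numpy as np

    mn = np.array([-128], dtype=np.int8)
    print(np.abs(mn))                      # [-128] -- abs wraps back to negative

    def int_abs_sketch(v):
        # Negate with wraparound, then reinterpret the bits as the matching
        # unsigned type: int8 -128 -> uint8 128, int8 -5 -> uint8 5.
        v = np.asarray(v)
        if v.dtype.kind != 'i':
            return np.abs(v)
        udt = np.dtype(v.dtype.str.replace('i', 'u'))
        return np.where(v < 0, -v, v).view(udt)

    print(int_abs_sketch(mn))              # [128]
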
@@ -589,7 +589,8 @@ def _iu2iu(self):
         super(SlopeInterArrayWriter, self)._iu2iu()

     def _range_scale(self, in_min, in_max):
-        """ Calculate scaling, intercept based on data range and output type """
+        """ Calculate scaling, intercept based on data range and output type
+        """
         if in_max == in_min: # Only one number in array
             self.slope = 1.
             self.inter = in_min
@@ -604,10 +605,10 @@ def _range_scale(self, in_min, in_max):
             in_min, in_max = np.array([in_min, in_max], dtype=big_float)
             in_range = np.diff([in_min, in_max])
         else: # max possible (u)int range is 2**64-1 (int64, uint64)
-            # int_to_float covers this range. On windows longdouble is the same
-            # as double so in_range will be 2**64 - thus overestimating slope
-            # slightly. Casting to int needed to allow in_max-in_min to be larger than
-            # the largest (u)int value
+            # int_to_float covers this range. On windows longdouble is the
+            # same as double so in_range will be 2**64 - thus overestimating
+            # slope slightly. Casting to int needed to allow in_max-in_min to
+            # be larger than the largest (u)int value
             in_min, in_max = as_int(in_min), as_int(in_max)
             in_range = int_to_float(in_max - in_min, big_float)
         # Cast to float for later processing.
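
The "casting to int" point in this hunk is easy to demonstrate on its own: the
spread int64 max - int64 min is 2**64 - 1, which no fixed-width integer type
can hold, so the subtraction has to happen in arbitrary-precision Python ints
before conversion to float. A rough illustration (plain Python ints stand in
for the as_int / int_to_float helpers used in the code):

    import numpy as np

    in_min = np.iinfo(np.int64).min        # -2**63
    in_max = np.iinfo(np.int64).max        # 2**63 - 1

    # Fixed-width subtraction wraps around ...
    with np.errstate(over='ignore'):
        print(np.int64(in_max) - np.int64(in_min))     # -1 after wraparound

    # ... but Python ints have arbitrary precision
    in_range = int(in_max) - int(in_min)               # 18446744073709551615
    print(np.longdouble(in_range))                     # ~2**64 as a float
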
@@ -624,13 +625,13 @@ def _range_scale(self, in_min, in_max):
         # raise an error when writing
         out_min, out_max = shared_range(working_dtype, out_dtype)
         out_min, out_max = np.array((out_min, out_max), dtype = big_float)
-        # We want maximum precision for the calculations. Casting will
-        # not lose precision because min/max are of fp type.
+        # We want maximum precision for the calculations. Casting will not lose
+        # precision because min/max are of fp type.
         assert [v.dtype.kind for v in (out_min, out_max)] == ['f', 'f']
         out_range = out_max - out_min
         """
-        Think of the input values as a line starting (left) at in_min and ending
-        (right) at in_max.
+        Think of the input values as a line starting (left) at in_min and
+        ending (right) at in_max.

         The output values will be a line starting at out_min and ending at
         out_max.
@@ -666,20 +667,20 @@ def _range_scale(self, in_min, in_max):
         We can't change the range of the saved data (the whole range of the
         integer type) or the range of the output data (the values we input). We
         can change the intermediate values ``saved_data * slope`` by choosing
-        the sign of the slope to match the in_min or in_max to the left or right
-        end of the saved data range.
+        the sign of the slope to match the in_min or in_max to the left or
+        right end of the saved data range.

-        If the out_dtype is signed int, then abs(out_min) = abs(out_max) + 1 and
-        the absolute value and therefore precision for values at the left and
-        right of the saved data range are very similar (e.g. -128 * slope, 127 *
-        slope respectively).
+        If the out_dtype is signed int, then abs(out_min) = abs(out_max) + 1
+        and the absolute value and therefore precision for values at the left
+        and right of the saved data range are very similar (e.g. -128 * slope,
+        127 * slope respectively).

-        If the out_dtype is unsigned int, then the absolute value at the left is
-        0 and the precision is much higher than for the right end of the range
-        (e.g. 0 * slope, 255 * slope).
+        If the out_dtype is unsigned int, then the absolute value at the left
+        is 0 and the precision is much higher than for the right end of the
+        range (e.g. 0 * slope, 255 * slope).

-        If the out_dtype is unsigned int then we choose the sign of the slope to
-        match the smaller of the in_min, in_max to the zero end of the saved
+        If the out_dtype is unsigned int then we choose the sign of the slope
+        to match the smaller of the in_min, in_max to the zero end of the saved
         range.
         """
         if out_min == 0 and np.abs(in_max) < np.abs(in_min):
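
As a concrete instance of the "line" picture in this docstring, the core
arithmetic for scaling a float range onto uint8 looks roughly as below; the
real _range_scale additionally handles thresholding, rounding and the
sign-flip choice discussed above, so treat this only as a sketch:

    import numpy as np

    in_min, in_max = -1.0, 2.0           # data range (the "input line")
    out_min, out_max = 0, 255            # uint8 range (the "output line")

    slope = (in_max - in_min) / (out_max - out_min)    # 3 / 255
    inter = in_min - out_min * slope                   # so in_min maps to out_min

    arr = np.array([-1.0, 0.0, 2.0])
    saved = np.round((arr - inter) / slope).astype(np.uint8)   # [0, 85, 255]
    restored = saved * slope + inter                   # back to roughly [-1, 0, 2]
    print(saved, restored)
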

nibabel/casting.py

Lines changed: 2 additions & 2 deletions
@@ -107,8 +107,8 @@ def shared_range(flt_type, int_type):
     """ Min and max in float type that are >=min, <=max in integer type

     This is not as easy as it sounds, because the float type may not be able to
-    exactly represent the max or min integer values, so we have to find the next
-    exactly representable floating point value to do the thresholding.
+    exactly represent the max or min integer values, so we have to find the
+    next exactly representable floating point value to do the thresholding.

     Parameters
     ----------
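
The problem this docstring describes is easy to reproduce: casting the int32
maximum to float32 rounds it up to 2**31, which lies outside the integer
range, so thresholding against the cast value would let overflowing values
through. A rough sketch of finding a safe upper threshold (shared_range itself
is more general and also handles the lower bound):

    import numpy as np

    imax = np.iinfo(np.int32).max          # 2147483647
    fmax = np.float32(imax)                # 2147483648.0 -- rounded up past imax

    # Step down to the nearest float32 that is still <= imax
    safe_max = fmax
    while int(safe_max) > imax:
        safe_max = np.nextafter(safe_max, np.float32(0))
    print(safe_max)                        # 2147483520.0
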

nibabel/tests/test_spm99analyze.py

Lines changed: 24 additions & 16 deletions
@@ -81,7 +81,8 @@ def test_data_scaling(self):
         assert_true(np.all(data == data_back))


-class TestSpm99AnalyzeHeader(test_analyze.TestAnalyzeHeader, HeaderScalingMixin):
+class TestSpm99AnalyzeHeader(test_analyze.TestAnalyzeHeader,
+                             HeaderScalingMixin):
     header_class = Spm99AnalyzeHeader

     def test_empty(self):
@@ -210,9 +211,10 @@ def test_header_scaling(self):
         # For images that implement scaling, test effect of scaling
         #
         # This tests the affect of creating an image with a header containing
-        # the scaling, then writing the image and reading again. So the scaling
-        # can be affected by the processing of the header when creating the
-        # image, or by interpretation of the scaling when creating the array.
+        # the scaling, then writing the image and reading again. So the
+        # scaling can be affected by the processing of the header when creating
+        # the image, or by interpretation of the scaling when creating the
+        # array.
         #
         # Analyze does not implement any scaling, but this test class is the
         # base class for all Analyze-derived classes, such as NIfTI
@@ -227,9 +229,12 @@ def test_header_scaling(self):
         if not hdr_class.has_data_intercept:
             return
         invalid_inters = (np.nan, np.inf, -np.inf)
-        invalid_pairs = tuple(itertools.product(invalid_slopes, invalid_inters))
-        bad_slopes_good_inter = tuple(itertools.product(invalid_slopes, (0, 1)))
-        good_slope_bad_inters = tuple(itertools.product((1, 2), invalid_inters))
+        invalid_pairs = tuple(
+            itertools.product(invalid_slopes, invalid_inters))
+        bad_slopes_good_inter = tuple(
+            itertools.product(invalid_slopes, (0, 1)))
+        good_slope_bad_inters = tuple(
+            itertools.product((1, 2), invalid_inters))
         for slope, inter in (invalid_pairs + bad_slopes_good_inter +
                              good_slope_bad_inters):
             self.assert_null_scaling(arr, slope, inter)
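
For readers less used to itertools.product, the three tuples built above are
just all (slope, inter) combinations drawn from the given sets. A small
illustration, with a made-up invalid_slopes value since its definition sits
outside this hunk:

    import itertools
    import numpy as np

    invalid_slopes = (np.nan, np.inf, -np.inf)   # made-up; defined earlier in the test
    invalid_inters = (np.nan, np.inf, -np.inf)

    invalid_pairs = tuple(itertools.product(invalid_slopes, invalid_inters))
    print(len(invalid_pairs))      # 3 * 3 = 9 (slope, inter) pairs
    print(invalid_pairs[0])        # (nan, nan)
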
@@ -240,8 +245,8 @@ def _check_write_scaling(self,
                               effective_slope,
                               effective_inter):
         # Test that explicit set of slope / inter forces write of data using
-        # this slope, inter
-        # We use this helper function for children of the Analyze header
+        # this slope, inter. We use this helper function for children of the
+        # Analyze header
         img_class = self.image_class
         arr = np.arange(24, dtype=np.float32).reshape((2, 3, 4))
         # We're going to test rounding later
@@ -316,11 +321,12 @@ def test_int_int_scaling(self):

     @scipy_skip
     def test_no_scaling(self):
-        # Test writing image converting types when no scaling
+        # Test writing image converting types when not calculating scaling
         img_class = self.image_class
         hdr_class = img_class.header_class
         hdr = hdr_class()
         supported_types = supported_np_types(hdr)
+        # Any old non-default slope and intercept
         slope = 2
         inter = 10 if hdr.has_data_intercept else 0
         for in_dtype, out_dtype in itertools.product(
@@ -331,6 +337,7 @@ def test_no_scaling(self):
             arr = np.array([mn_in, -1, 0, 1, 10, mx_in], dtype=in_dtype)
             img = img_class(arr, np.eye(4), hdr)
             img.set_data_dtype(out_dtype)
+            # Setting the scaling means we don't calculate it later
             img.header.set_slope_inter(slope, inter)
             with np.errstate(invalid='ignore'):
                 rt_img = bytesio_round_trip(img)
@@ -437,16 +444,17 @@ def test_mat_read(self):
         to_111 = np.eye(4)
         to_111[:3,3] = 1
         assert_array_equal(mats['mat'], np.dot(aff, from_111))
-        # The M matrix does not include flips, so if we only
-        # have the M matrix in the mat file, and we have default flipping, the
-        # mat resulting should have a flip. The 'mat' matrix does include flips
-        # and so should be unaffected by the flipping. If both are present we
-        # prefer the the 'mat' matrix.
+        # The M matrix does not include flips, so if we only have the M matrix
+        # in the mat file, and we have default flipping, the mat resulting
+        # should have a flip. The 'mat' matrix does include flips and so
+        # should be unaffected by the flipping. If both are present we prefer
+        # the the 'mat' matrix.
         assert_true(img.header.default_x_flip) # check the default
         flipper = np.diag([-1,1,1,1])
         assert_array_equal(mats['M'], np.dot(aff, np.dot(flipper, from_111)))
         mat_fileobj.seek(0)
-        savemat(mat_fileobj, dict(M=np.diag([3,4,5,1]), mat=np.diag([6,7,8,1])))
+        savemat(mat_fileobj,
+                dict(M=np.diag([3,4,5,1]), mat=np.diag([6,7,8,1])))
         # Check we are preferring the 'mat' matrix
         r_img = img_klass.from_file_map(fm)
         assert_array_equal(r_img.get_data(), arr)
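
The flip relationship described in this comment can be written out directly:
with default x flipping, the affine recovered from an M-only mat file picks up
a diagonal x flip, while the 'mat' matrix already carries it. A small sketch
mirroring the test's assertions, using a made-up affine for illustration and
assuming from_111 is the inverse of the to_111 shown above:

    import numpy as np

    aff = np.diag([2, 3, 4, 1])                  # made-up voxel-to-world affine
    to_111 = np.eye(4)
    to_111[:3, 3] = 1                            # as in the test above
    from_111 = np.linalg.inv(to_111)             # assumed inverse of to_111
    flipper = np.diag([-1, 1, 1, 1])             # the default x flip

    mat = np.dot(aff, from_111)                  # 'mat' already carries any flips
    M = np.dot(aff, np.dot(flipper, from_111))   # 'M' gets the flip applied on top
    print(M[:3, :3])                             # first column negated relative to mat
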
