Commit bda026e

RF - use full range for scaling
I was previously scaling to the range calculated by shared_range(). That was to prevent values overflowing to inf for a float -> int save followed by an int -> float load (the overflow would happen when the scaling is applied on load). However, apply_read_scaling deals with this problem, so the full output type range is better: it doesn't truncate the range, and it is also easier to understand.
1 parent: 82bd4ba
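For context, here is a minimal NumPy-only sketch of the round trip the commit message describes; the dtypes, values, and variable names are illustrative, not the nibabel API. The point is that scaling to the full output integer range is safe as long as the load side applies the slope in a float type wide enough to hold the result, which is what apply_read_scaling is relied on to do:

import numpy as np

# Save side (illustrative): scale float data into the *full* int8 range,
# mirroring the integer branch of the new _range_scale (slope only).
data = np.array([-3e4, 1e4, 6e4], dtype=np.float32)
t_min, t_max = np.iinfo(np.int8).min, np.iinfo(np.int8).max
slope = np.max([data.max() / t_max, data.min() / t_min])
stored = np.round(data / slope).astype(np.int8)    # what would go to disk

# Load side (illustrative): cast the stored ints up to a float type that can
# hold slope * t_max before multiplying, so the recovered values cannot
# overflow to inf even though the full integer range was used for scaling.
recovered = stored.astype(np.float64) * slope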

nibabel/arraywriters.py (38 additions, 32 deletions)
@@ -23,7 +23,7 @@ def __init__(self, array, out_dtype=None)
 
 import numpy as np
 
-from .casting import shared_range, int_to_float, as_int, int_abs
+from .casting import int_to_float, as_int, int_abs
 from .volumeutils import finite_range, array_to_file
 
 
@@ -298,13 +298,12 @@ def _iu2iu(self):
         if self._out_dtype.kind == 'u':
             # We're checking for a sign flip. This can only work for uint
             # output, because, for int output, the abs min of the type is
-            # greater than the abs max, so the data either fit into the range in
-            # the test above, or this test won't pass
-            shared_min, shared_max = shared_range(self.scaler_dtype,
-                                                  self._out_dtype)
+            # greater than the abs max, so the data either fit into the range
+            # (tested for in _do_scaling), or this test can't pass
             # Need abs that deals with max neg ints. abs problem only arises
             # when all the data is set to max neg integer value
-            if mx <= 0 and int_abs(mn) <= shared_max: # sign flip enough?
+            imax = np.iinfo(self._out_dtype).max
+            if mx <= 0 and int_abs(mn) <= imax: # sign flip enough?
                 # -1.0 * arr will be in scaler_dtype precision
                 self.slope = -1.0
                 return
@@ -313,27 +312,29 @@ def _iu2iu(self):
     def _range_scale(self):
         """ Calculate scaling based on data range and output type """
         mn, mx = self.finite_range() # These can be floats or integers
-        # We need to allow for precision of the type to which we will scale
-        # These will be floats of type scaler_dtype
-        shared_min, shared_max = shared_range(self.scaler_dtype,
-                                              self._out_dtype)
-        # But we want maximum precision for the calculations
-        shared_min, shared_max = np.array([shared_min, shared_max],
-                                          dtype = np.longdouble)
+        out_dtype = self._out_dtype
+        if out_dtype.kind == 'f':
+            t_mn_mx = np.finfo(out_dtype).min, np.finfo(out_dtype).max
+            # But we want maximum precision for the calculations. Casting will
+            # not lose precision because min/max are of fp type.
+            t_min, t_max = np.array(t_mn_mx, dtype = np.longdouble)
+        else: # (u)int
+            t_mn_mx = np.iinfo(out_dtype).min, np.iinfo(out_dtype).max
+            t_min, t_max = [int_to_float(v, np.longdouble) for v in t_mn_mx]
         if self._out_dtype.kind == 'u':
             if mn < 0 and mx > 0:
                 raise WriterError('Cannot scale negative and positive '
                                   'numbers to uint without intercept')
             if mx <= 0: # All input numbers <= 0
-                self.slope = mn / shared_max
+                self.slope = mn / t_max
             else: # All input numbers > 0
-                self.slope = mx / shared_max
+                self.slope = mx / t_max
             return
-        # Scaling to int. We need the bigger slope of (mn/shared_min) and
-        # (mx/shared_max). If the mn or the max is the wrong side of 0, that
+        # Scaling to int. We need the bigger slope of (mn/t_min) and
+        # (mx/t_max). If the mn or the max is the wrong side of 0, that
         # will make these negative and so they won't worry us
-        mx_slope = mx / shared_max
-        mn_slope = mn / shared_min
+        mx_slope = mx / t_max
+        mn_slope = mn / t_min
         self.slope = np.max([mx_slope, mn_slope])
 
 
@@ -418,31 +419,26 @@ def to_fileobj(self, fileobj, order='F', nan2zero=True):
 
     def _iu2iu(self):
         # (u)int to (u)int
-        mn, mx = self.finite_range()
-        shared_min, shared_max = shared_range(self.scaler_dtype,
-                                              self._out_dtype)
+        mn, mx = [as_int(v) for v in self.finite_range()]
         # range may be greater than the largest integer for this type.
         # as_int needed to work round numpy 1.4.1 int casting bug
-        type_range = as_int(shared_max) - as_int(shared_min)
-        mn2mx = as_int(mx) - as_int(mn)
+        out_dtype = self._out_dtype
+        t_min, t_max = np.iinfo(out_dtype).min, np.iinfo(out_dtype).max
+        type_range = as_int(t_max) - as_int(t_min)
+        mn2mx = mx - mn
         if mn2mx <= type_range: # offset enough?
-            self.inter = mn - shared_min
+            self.inter = mn - t_min
             return
         # Try slope options (sign flip) and then range scaling
         super(SlopeInterArrayWriter, self)._iu2iu()
 
     def _range_scale(self):
         """ Calculate scaling, intercept based on data range and output type """
         mn, mx = self.finite_range() # Values of self.array.dtype type
+        out_dtype = self._out_dtype
         if mx == mn: # Only one number in array
             self.inter = mn
             return
-        # We need to allow for precision of the type to which we will scale
-        # These will be floats of type scaler_dtype
-        shared_min, shared_max = shared_range(self.scaler_dtype,
-                                              self._out_dtype)
-        scaled_mn2mx = np.diff(np.array([shared_min, shared_max],
-                                        dtype=np.longdouble))
         # Straight mx-mn can overflow.
         if mn.dtype.kind == 'f': # Already floats
             # float64 and below cast correctly to longdouble. Longdouble needs
@@ -454,8 +450,18 @@ def _range_scale(self):
             # slightly. Casting to int needed to allow mx-mn to be larger than
             # the largest (u)int value
             mn2mx = int_to_float(as_int(mx) - as_int(mn), np.longdouble)
+        if out_dtype.kind == 'f':
+            # Type range, these are also floats
+            t_mn_mx = np.finfo(out_dtype).min, np.finfo(out_dtype).max
+        else:
+            t_mn_mx = np.iinfo(out_dtype).min, np.iinfo(out_dtype).max
+            t_mn_mx= [int_to_float(v, np.longdouble) for v in t_mn_mx]
+        # We want maximum precision for the calculations. Casting will
+        # not lose precision because min/max are of fp type.
+        assert [v.dtype.kind for v in t_mn_mx] == ['f', 'f']
+        scaled_mn2mx = np.diff(np.array(t_mn_mx, dtype = np.longdouble))
         slope = mn2mx / scaled_mn2mx
-        self.inter = mn - shared_min * slope
+        self.inter = mn - t_mn_mx[0] * slope
         self.slope = slope
         if not np.all(np.isfinite([self.slope, self.inter])):
             raise ScalingError("Slope / inter not both finite")
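
As a rough worked example of what the rewritten SlopeInterArrayWriter._range_scale computes, stripped of the as_int / int_to_float precision handling (a NumPy-only sketch with made-up values; mn and mx would normally come from finite_range()):

import numpy as np

out_dtype = np.dtype(np.uint8)
mn, mx = np.longdouble(-1000), np.longdouble(3000)    # example data range

# Full output type range, cast to longdouble for precision, as in the new code
t_min, t_max = [np.longdouble(v) for v in
                (np.iinfo(out_dtype).min, np.iinfo(out_dtype).max)]

scaled_mn2mx = t_max - t_min         # 255 for uint8
slope = (mx - mn) / scaled_mn2mx     # ~15.69
inter = mn - t_min * slope           # -1000, because t_min == 0 for uint8

# Round trip for the data maximum: value -> stored int -> recovered value
stored = np.round((mx - inter) / slope)    # 255, the top of the uint8 range
recovered = stored * slope + inter         # ~3000 again

The substantive change from the previous code is where t_min and t_max come from: the full np.iinfo / np.finfo range of the output type rather than shared_range(), on the assumption that apply_read_scaling protects the load path from overflow.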
