Skip to content

Commit 0a1aaab

Browse files
committed
RF+TST - use apply scaling upcast for image load
Use the safer apply_read_scaling routine to — er — apply the scaling from the header to the read data. Add tests to check for upcasting. The tests revealed that the upcasting was mostly happening already, due to accidental float casting when getting the slope and intercept for nifti1 and spm2analyze. Expand tests of the underlying int_scinter_ftype routine.
1 parent fc170c4 commit 0a1aaab

File tree

6 files changed

+66
-24
lines changed

6 files changed

+66
-24
lines changed

nibabel/analyze.py

Lines changed: 6 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,8 @@
8585
import numpy as np
8686

8787
from .volumeutils import (native_code, swapped_code, make_dt_codes, allopen,
88-
shape_zoom_affine, array_from_file, seek_tell)
88+
shape_zoom_affine, array_from_file, seek_tell,
89+
apply_read_scaling)
8990
from .arraywriters import make_array_writer, get_slope_inter, WriterError
9091
from .wrapstruct import WrapStruct
9192
from .spatialimages import (HeaderDataError, HeaderTypeError,
@@ -485,24 +486,10 @@ def data_from_fileobj(self, fileobj):
485486
data = self.raw_data_from_fileobj(fileobj)
486487
# get scalings from header. Value of None means not present in header
487488
slope, inter = self.get_slope_inter()
488-
if slope is None or (slope==1.0 and (inter is None or inter == 0)):
489-
return data
490-
# in-place multiplication and addition on integer types leads to
491-
# integer output types, and disastrous integer rounding.
492-
# We'd like to do inplace if we can, to save memory
493-
is_flt = data.dtype.kind in 'fc'
494-
if slope != 1.0:
495-
if is_flt:
496-
data *= slope
497-
else:
498-
data = data * slope
499-
is_flt = True
500-
if inter:
501-
if is_flt:
502-
data += inter
503-
else:
504-
data = data + inter
505-
return data
489+
slope = 1.0 if slope is None else slope
490+
inter = 0.0 if inter is None else inter
491+
# Upcast as necessary for big slopes, intercepts
492+
return apply_read_scaling(data, slope, inter)
506493

507494
def data_to_fileobj(self, data, fileobj):
508495
''' Write `data` to `fileobj`, maybe modifying `self`

nibabel/nifti1.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -825,6 +825,8 @@ def get_slope_inter(self):
825825
>>> hdr.get_slope_inter()
826826
(1.0, None)
827827
'''
828+
# Note that we are returning float (float64) scalefactors and
829+
# intercepts, although they are stored as np.float32.
828830
scale = float(self['scl_slope'])
829831
dc_offset = float(self['scl_inter'])
830832
if scale == 0 or not np.isfinite(scale):

nibabel/tests/test_nifti1.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,21 @@ def test_from_eg_file(self):
6262
assert_equal(hdr['magic'], asbytes('ni1'))
6363
assert_equal(hdr['sizeof_hdr'], 348)
6464

65+
def test_big_scaling(self):
66+
# Test that upcasting works for huge scalefactors
67+
# See tests for apply_read_scaling in test_utils
68+
hdr = self.header_class()
69+
hdr.set_data_shape((2,1,1))
70+
hdr.set_data_dtype(np.int16)
71+
sio = BytesIO()
72+
dtt = np.float32
73+
# This will generate a huge scalefactor
74+
finf = np.finfo(dtt)
75+
data = np.array([finf.min, finf.max], dtype=dtt)[:,None, None]
76+
hdr.data_to_fileobj(data, sio)
77+
data_back = hdr.data_from_fileobj(sio)
78+
assert_true(np.allclose(data, data_back))
79+
6580
def test_nifti_log_checks(self):
6681
# in addition to analyze header checks
6782
HC = self.header_class

nibabel/tests/test_spm99analyze.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,20 @@ def test_scaling(self):
5353
data_back2 = hdr.data_from_fileobj(S3)
5454
assert_array_equal(data_back, data_back2, 4)
5555

56+
def test_big_scaling(self):
57+
# Test that upcasting works for huge scalefactors
58+
# See tests for apply_read_scaling in test_utils
59+
hdr = self.header_class()
60+
hdr.set_data_shape((1,1,1))
61+
hdr.set_data_dtype(np.int16)
62+
sio = BytesIO()
63+
dtt = np.float32
64+
# This will generate a huge scalefactor
65+
data = np.array([np.finfo(dtt).max], dtype=dtt)[:,None, None]
66+
hdr.data_to_fileobj(data, sio)
67+
data_back = hdr.data_from_fileobj(sio)
68+
assert_true(np.allclose(data, data_back))
69+
5670
def test_origin_checks(self):
5771
HC = self.header_class
5872
# origin

nibabel/tests/test_utils.py

Lines changed: 19 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,8 @@
2828
shape_zoom_affine,
2929
rec2dict)
3030

31+
from ..casting import flt2nmant, floor_log2
32+
3133
from numpy.testing import (assert_array_almost_equal,
3234
assert_array_equal)
3335

@@ -233,16 +235,28 @@ def test_apply_scaling():
233235
assert_true(apply_read_scaling(arr) is arr)
234236
assert_true(apply_read_scaling(arr, np.float64(1.0)) is arr)
235237
assert_true(apply_read_scaling(arr, inter=np.float64(0)) is arr)
236-
# Check upcast
238+
f32, f64 = np.float32, np.float64
239+
f32_arr = np.zeros((1,), dtype=f32)
240+
i16_arr = np.zeros((1,), dtype=np.int16)
241+
# Check float upcast (not the normal numpy scalar rule)
242+
# This is the normal rule - no upcast from scalar
243+
assert_equal((f32_arr * f64(1)).dtype, np.float32)
244+
assert_equal((f32_arr + f64(1)).dtype, np.float32)
245+
# The function does upcast though
237246
ret = apply_read_scaling(np.float32(0), np.float64(2))
238247
assert_equal(ret.dtype, np.float64)
239248
ret = apply_read_scaling(np.float32(0), inter=np.float64(2))
240249
assert_equal(ret.dtype, np.float64)
241250
# Check integer inf upcast
242-
arr = np.zeros((1,), dtype=np.int32)
243-
big = np.array([np.finfo(np.float32).max], dtype=np.float32)
244-
assert_equal(apply_read_scaling(arr, big).dtype, np.float64)
245-
assert_equal(apply_read_scaling(arr, 1.0, big).dtype, np.float64)
251+
big = f32(np.finfo(f32).max)
252+
# Normally this would not upcast
253+
assert_equal((i16_arr * big).dtype, np.float32)
254+
# An equivalent case is a little hard to find for the intercept
255+
big_delta = np.float32(2**(floor_log2(big)-flt2nmant(np.float32)))
256+
assert_equal((i16_arr * big_delta + big).dtype, np.float32)
257+
# Upcasting does occur with this routine
258+
assert_equal(apply_read_scaling(i16_arr, big).dtype, np.float64)
259+
assert_equal(apply_read_scaling(i16_arr, big_delta, big).dtype, np.float64)
246260
assert_equal(apply_read_scaling(np.int8(0), -1.0, 0.0).dtype, np.float32)
247261
assert_equal(apply_read_scaling(np.int8(0), 1e38, 0.0).dtype, np.float64)
248262
assert_equal(apply_read_scaling(np.int8(0), -1e38, 0.0).dtype, np.float64)

nibabel/volumeutils.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -915,6 +915,16 @@ def int_scinter_ftype(ifmt, slope=1.0, inter=0.0, default=np.float32):
915915
True
916916
>>> int_scinter_ftype(np.int8, 1e38, 0.0) == np.float64
917917
True
918+
919+
Notes
920+
-----
921+
It is difficult to make floats overflow with just addition because the
922+
deltas are so large at the extremes of floating point. For example::
923+
924+
>>> arr = np.array([np.finfo(np.float32).max], dtype=np.float32)
925+
>>> res = arr + np.iinfo(np.int16).max
926+
>>> arr == res
927+
array([ True], dtype=bool)
918928
"""
919929
floats = np.sctypes['float']
920930
def_ind = floats.index(default)

0 commit comments

Comments (0)