Commit dbf2a8d

Author: Chris Gorgolewski
Commit message: fixed spacing
1 parent: 132b228 · commit: dbf2a8d

1 file changed: +14 / -51 lines changed

nibabel/tests/test_volumeutils.py

Lines changed: 14 additions & 51 deletions
@@ -99,9 +99,7 @@ def make_array(n, bytes):
     # Check whether file, gzip file, bz2 file reread memory from cache
     fname = "test.bin"
     with InTemporaryDirectory():
-        for n, opener in itertools.product(
-            (256, 1024, 2560, 25600), (open, gzip.open, BZ2File)
-        ):
+        for n, opener in itertools.product((256, 1024, 2560, 25600), (open, gzip.open, BZ2File)):
             in_arr = np.arange(n, dtype=dtype)
             # Write array to file
             fobj_w = opener(fname, "wb")
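
Note (not part of the commit): collapsing the itertools.product call onto one line is purely cosmetic; the (n, opener) pairs it yields are unchanged. A minimal standalone sketch with made-up arguments:

import itertools

# Same pairs regardless of how the call is wrapped across lines
pairs = list(itertools.product((256, 1024), ("rb", "wb")))
assert pairs == [(256, "rb"), (256, "wb"), (1024, "rb"), (1024, "wb")]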
@@ -297,9 +295,7 @@ def test_array_to_file():
             ndt = dt.newbyteorder(code)
             for allow_intercept in (True, False):
                 with suppress_warnings():  # deprecated
-                    scale, intercept, mn, mx = calculate_scale(
-                        arr, ndt, allow_intercept
-                    )
+                    scale, intercept, mn, mx = calculate_scale(arr, ndt, allow_intercept)
                 data_back = write_return(arr, str_io, ndt, 0, intercept, scale)
                 assert_array_almost_equal(arr, data_back)
     # Test array-like
@@ -332,13 +328,7 @@ def test_a2f_upscale():
     str_io = BytesIO()
     # We need to provide mn, mx for function to be able to calculate upcasting
     array_to_file(
-        arr,
-        str_io,
-        np.uint8,
-        intercept=inter,
-        divslope=slope,
-        mn=info["min"],
-        mx=info["max"],
+        arr, str_io, np.uint8, intercept=inter, divslope=slope, mn=info["min"], mx=info["max"],
     )
     raw = array_from_file(arr.shape, np.uint8, str_io)
     back = apply_read_scaling(raw, slope, inter)
@@ -495,9 +485,7 @@ def test_a2f_big_scalers():
     # We need nan2zero=False because we can't represent 0 in the input, given
     # the scaling and the output range.
     with suppress_warnings():  # overflow
-        array_to_file(
-            arr, str_io, np.int8, intercept=np.float32(2 ** 120), nan2zero=False
-        )
+        array_to_file(arr, str_io, np.int8, intercept=np.float32(2 ** 120), nan2zero=False)
     data_back = array_from_file(arr.shape, np.int8, str_io)
     assert_array_equal(data_back, [-128, -128, 127])
     # Scales also if mx, mn specified? Same notes and complaints as for the test
@@ -522,9 +510,7 @@ def test_a2f_big_scalers():
     assert_array_equal(data_back, [-128, 0, 127])
     # with mn, mx specified?
     str_io.seek(0)
-    array_to_file(
-        arr, str_io, np.int8, mn=info["min"], mx=info["max"], divslope=np.float32(0.5)
-    )
+    array_to_file(arr, str_io, np.int8, mn=info["min"], mx=info["max"], divslope=np.float32(0.5))
     data_back = array_from_file(arr.shape, np.int8, str_io)
     assert_array_equal(data_back, [-128, 0, 127])
 
@@ -562,11 +548,7 @@ def test_a2f_scaled_unscaled():
         if in_dtype in CFLOAT_TYPES and not mn_out <= nan_fill <= mx_out:
             with pytest.raises(ValueError):
                 array_to_file(
-                    arr,
-                    fobj,
-                    out_dtype=out_dtype,
-                    divslope=divslope,
-                    intercept=intercept,
+                    arr, fobj, out_dtype=out_dtype, divslope=divslope, intercept=intercept,
                 )
             continue
         with suppress_warnings():
@@ -614,9 +596,7 @@ def test_a2f_nanpos():
 
 def test_a2f_bad_scaling():
     # Test that pathological scalers raise an error
-    NUMERICAL_TYPES = sum(
-        [np.sctypes[key] for key in ["int", "uint", "float", "complex"]], []
-    )
+    NUMERICAL_TYPES = sum([np.sctypes[key] for key in ["int", "uint", "float", "complex"]], [])
     for in_type, out_type, slope, inter in itertools.product(
         NUMERICAL_TYPES,
         NUMERICAL_TYPES,
@@ -687,9 +667,7 @@ def test_a2f_nan2zero_range():
         with pytest.raises(ValueError):
             write_return(arr_no_nan, fobj, np.int8, intercept=257.1, divslope=2)
         # OK with nan2zero false
-        back_arr = write_return(
-            arr, fobj, np.int8, intercept=257.1, divslope=2, nan2zero=False
-        )
+        back_arr = write_return(arr, fobj, np.int8, intercept=257.1, divslope=2, nan2zero=False)
         assert_array_equal([-128, -128, -128, nan_cast], back_arr)
 
 
@@ -763,12 +741,8 @@ def test_apply_scaling():
     assert apply_read_scaling(np.int8(0), f32(-1e38), f32(0.0)).dtype == np.float64
     # Non-zero intercept still generates floats
     assert_dt_equal(apply_read_scaling(i16_arr, 1.0, 1.0).dtype, float)
-    assert_dt_equal(
-        apply_read_scaling(np.zeros((1,), dtype=np.int32), 1.0, 1.0).dtype, float
-    )
-    assert_dt_equal(
-        apply_read_scaling(np.zeros((1,), dtype=np.int64), 1.0, 1.0).dtype, float
-    )
+    assert_dt_equal(apply_read_scaling(np.zeros((1,), dtype=np.int32), 1.0, 1.0).dtype, float)
+    assert_dt_equal(apply_read_scaling(np.zeros((1,), dtype=np.int64), 1.0, 1.0).dtype, float)
 
 
 def test_apply_read_scaling_ints():
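
Note (illustration only, not from the commit): the assert_dt_equal checks above rest on ordinary NumPy promotion, where combining an integer array with Python floats yields float64. A plain-NumPy analogue, assuming apply_read_scaling amounts to raw * slope + inter for these inputs:

import numpy as np

# Integer arrays scaled by Python floats promote to float64
for int_dt in (np.int32, np.int64):
    raw = np.zeros((1,), dtype=int_dt)
    scaled = raw * 1.0 + 1.0  # stand-in for apply_read_scaling(raw, 1.0, 1.0)
    assert scaled.dtype == np.float64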
@@ -856,15 +830,9 @@ def test_best_write_scale_ftype():
     for dtt in IUINT_TYPES + FLOAT_TYPES:
         arr = np.arange(10, dtype=dtt)
         assert best_write_scale_ftype(arr, 1, 0) == better_float_of(dtt, np.float32)
-        assert best_write_scale_ftype(arr, 1, 0, np.float64) == better_float_of(
-            dtt, np.float64
-        )
-        assert best_write_scale_ftype(arr, np.float32(2), 0) == better_float_of(
-            dtt, np.float32
-        )
-        assert best_write_scale_ftype(arr, 1, np.float32(1)) == better_float_of(
-            dtt, np.float32
-        )
+        assert best_write_scale_ftype(arr, 1, 0, np.float64) == better_float_of(dtt, np.float64)
+        assert best_write_scale_ftype(arr, np.float32(2), 0) == better_float_of(dtt, np.float32)
+        assert best_write_scale_ftype(arr, 1, np.float32(1)) == better_float_of(dtt, np.float32)
     # Overflowing ints with scaling results in upcast
     best_vals = ((np.float32, np.float64),)
     if np.longdouble in OK_FLOATS:
@@ -1248,12 +1216,7 @@ def assert_rt(
 
     # check defense against modifying data in-place
     for in_cast, pre_clips, inter, slope, post_clips, nan_fill in itp(
-        (None, np.float32),
-        (None, (-1, 25)),
-        (0.0, 1.0),
-        (1.0, 0.5),
-        (None, (-2, 49)),
-        (None, 1),
+        (None, np.float32), (None, (-1, 25)), (0.0, 1.0), (1.0, 0.5), (None, (-2, 49)), (None, 1),
     ):
         data = np.arange(24).astype(np.float32)
         assert_rt(
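
Note (sketch only, not part of the commit): most of the calls reformatted above belong to one write/read round trip. Assuming, as the hunks suggest, that array_to_file stores (data - intercept) / divslope in the on-disk dtype, that apply_read_scaling recovers raw * slope + intercept, and that these helpers live in nibabel.volumeutils (the module this test file covers), a minimal end-to-end example would be:

from io import BytesIO

import numpy as np

from nibabel.volumeutils import apply_read_scaling, array_from_file, array_to_file

arr = np.array([0.0, 10.0, 20.0])
slope, inter = 0.5, 0.0  # example scaling, chosen so the scaled values fit in uint8 exactly
fobj = BytesIO()
# Write the floats as uint8, dividing by `divslope` (and subtracting `intercept`) first
array_to_file(arr, fobj, np.uint8, intercept=inter, divslope=slope)
raw = array_from_file(arr.shape, np.uint8, fobj)  # stored integers: [0, 20, 40]
back = apply_read_scaling(raw, slope, inter)  # undo the scaling on read
assert np.allclose(back, arr)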
