
Commit 31f3583

adding parameterised tests for all stripe algorithms using zenodo data
1 parent 7a5ca45 commit 31f3583

9 files changed: +233 additions, -57 deletions


.scripts/download_zenodo.py

Lines changed: 3 additions & 3 deletions
@@ -18,15 +18,15 @@ def calculate_md5(filename):
 
 def download_zenodo_files(output_dir: Path):
     """
-    Download all files from Zenodo record 14833590 and verify their checksums.
+    Download all files from Zenodo record 14938787 and verify their checksums.
 
     Args:
        output_dir: Directory where files should be downloaded
     """
     try:
-        print("Fetching files from Zenodo record 14833590...")
+        print("Fetching files from Zenodo record 14938787...")
         with urllib.request.urlopen(
-            "https://zenodo.org/api/records/14833590"
+            "https://zenodo.org/api/records/14938787"
         ) as response:
             data = json.loads(response.read())
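For orientation, the script's pattern is: query the Zenodo records API for the dataset record, then compare local MD5 hashes against the checksums listed in the record metadata. Below is a minimal self-contained sketch of that pattern; field names such as "key" and "checksum" are assumptions about the Zenodo REST response, not text taken from this commit.

import hashlib
import json
import urllib.request
from pathlib import Path


def calculate_md5_local(filename: Path) -> str:
    # Hash in 1 MiB chunks so large projection files never need to fit in memory.
    md5 = hashlib.md5()
    with open(filename, "rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            md5.update(chunk)
    return md5.hexdigest()


def verify_zenodo_files(record_id: str, output_dir: Path) -> None:
    # Fetch the record metadata; each "files" entry is assumed to carry a
    # file name ("key") and an "md5:<hex>" checksum string.
    url = f"https://zenodo.org/api/records/{record_id}"
    with urllib.request.urlopen(url) as response:
        record = json.loads(response.read())
    for entry in record["files"]:
        local_file = output_dir / entry["key"]
        expected = entry["checksum"].split(":", 1)[-1]
        if local_file.exists() and calculate_md5_local(local_file) == expected:
            print(f"{local_file.name}: checksum OK")
        else:
            print(f"{local_file.name}: missing or checksum mismatch")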

httomolibgpu/misc/morph.py

Lines changed: 1 addition & 1 deletion
@@ -112,7 +112,7 @@ def data_resampler(
 
     Parameters
     ----------
-    data : cp.ndarray
+    data : cp.ndarray
         3d cupy array.
     newshape : list
         2d list that defines the 2D slice shape of new shape data.

httomolibgpu/prep/stripe.py

Lines changed: 3 additions & 4 deletions
@@ -317,9 +317,8 @@ def _rs_dead(sinogram, snr, size, matindex, norm=True):
     ids = cp.searchsorted(listx, listxmiss)
     weights = (listxmiss - listx[ids - 1]) / (listx[ids] - listx[ids - 1])
     # direct interpolation without making an extra copy
-    sinogram[:, listxmiss] = (
-        sinogram[:, listx[ids - 1]] +
-        weights * (sinogram[:, listx[ids]] - sinogram[:, listx[ids - 1]])
+    sinogram[:, listxmiss] = sinogram[:, listx[ids - 1]] + weights * (
+        sinogram[:, listx[ids]] - sinogram[:, listx[ids - 1]]
     )
 
     # Remove residual stripes
@@ -416,7 +415,7 @@ def raven_filter(
     # Removing padding
     data = data[pad_y : height - pad_y, :, pad_x : width - pad_x].real
 
-    return data
+    return cp.require(data, requirements="C")
 
 
 def _create_matindex(nrow, ncol):
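The raven_filter change wraps the cropped result in cp.require so downstream code (and the tests asserting flags.c_contiguous) always receives a C-contiguous array: slicing the padded spectrum and taking .real leaves a strided view. A small sketch of the behaviour, with an arbitrarily chosen shape:

import cupy as cp

padded = cp.zeros((296, 5, 552), dtype=cp.complex64)
cropped = padded[20:276, :, 20:532].real   # slicing + .real gives a strided view
print(cropped.flags.c_contiguous)          # False

out = cp.require(cropped, requirements="C")
print(out.flags.c_contiguous)              # True; data is copied only when needed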

httomolibgpu/recon/algorithm.py

Lines changed: 12 additions & 10 deletions
@@ -83,8 +83,8 @@ def FBP(
         out some undesirable artifacts. The values outside the given diameter will be set to zero.
         It is recommended to keep the value in the range [0.7-1.0].
     neglog: bool
-        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
-        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
+        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
+        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
     gpu_id : int
         A GPU device index to perform operation on.
@@ -137,8 +137,8 @@ def LPRec(
         out some undesirable artifacts. The values outside the given diameter will be set to zero.
         It is recommended to keep the value in the range [0.7-1.0].
     neglog: bool
-        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
-        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
+        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
+        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
 
     Returns
     -------
@@ -189,8 +189,8 @@ def SIRT(
     nonnegativity : bool, optional
         Impose nonnegativity constraint on reconstructed image.
     neglog: bool
-        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
-        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
+        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
+        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
     gpu_id : int, optional
         A GPU device index to perform operation on.
@@ -254,8 +254,8 @@ def CGLS(
     nonnegativity : bool, optional
        Impose nonnegativity constraint on reconstructed image.
     neglog: bool
-        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
-        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
+        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
+        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
     gpu_id : int, optional
         A GPU device index to perform operation on.
@@ -277,6 +277,7 @@ def CGLS(
     cp._default_memory_pool.free_all_blocks()
     return cp.require(cp.swapaxes(reconstruction, 0, 1), requirements="C")
 
+
 ## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
 def _instantiate_direct_recon_class(
     data: cp.ndarray,
@@ -352,10 +353,11 @@ def _instantiate_iterative_recon_class(
     )
     return RecToolsCP
 
+
 def _take_neg_log(data: cp.ndarray) -> cp.ndarray:
     """Taking negative log"""
-    data[data<=0] = 1
+    data[data <= 0] = 1
     data = -cp.log(data)
     data[cp.isnan(data)] = 6.0
     data[cp.isinf(data)] = 0
-    return data
+    return data
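The reformatted _take_neg_log helper clamps nonpositive intensities before taking the log and replaces NaN/Inf guard values afterwards. A NumPy stand-in (for illustration only, so it runs without a GPU) shows the effect on a few sample values:

import numpy as np


def take_neg_log(data):
    # Mirrors the CuPy helper above, using NumPy instead of CuPy.
    data = data.copy()
    data[data <= 0] = 1          # nonpositive intensities would make log() undefined
    data = -np.log(data)
    data[np.isnan(data)] = 6.0   # guard values matching the GPU helper
    data[np.isinf(data)] = 0
    return data


print(take_neg_log(np.array([0.5, 1.0, 0.0, -3.0], dtype=np.float32)))
# [0.6931472 -0.        -0.        -0.       ]  ; zeros/negatives are clamped to 1 before the log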

tests/test_misc/test_morph.py

Lines changed: 1 addition & 0 deletions
@@ -6,6 +6,7 @@
 from numpy.testing import assert_allclose
 from httomolibgpu.misc.morph import sino_360_to_180, data_resampler
 
+
 @pytest.mark.parametrize(
     "overlap, rotation",
     [

tests/test_prep/stripe_cpu_reference.py

Lines changed: 24 additions & 21 deletions
@@ -2,26 +2,29 @@
 import pyfftw
 import pyfftw.interfaces.numpy_fft as fft
 
+
 def raven_filter_numpy(
-        sinogram,
-        uvalue: int = 20,
-        nvalue: int = 4,
-        vvalue: int = 2,
-        pad_y: int = 20,
-        pad_x: int = 20,
-        pad_method: str = "edge"):
-
+    sinogram,
+    uvalue: int = 20,
+    nvalue: int = 4,
+    vvalue: int = 2,
+    pad_y: int = 20,
+    pad_x: int = 20,
+    pad_method: str = "edge",
+):
     # Parameters
     v0 = vvalue
     n = nvalue
     u0 = uvalue
 
     # Make a padded copy
-    sinogram_padded = np.pad(sinogram, ((pad_y,pad_y), (0, 0), (pad_x,pad_x)), pad_method)
-
+    sinogram_padded = np.pad(
+        sinogram, ((pad_y, pad_y), (0, 0), (pad_x, pad_x)), pad_method
+    )
+
     # Size
     height, images, width = sinogram_padded.shape
-
+
     # Generate filter function
     centerx = np.ceil(width / 2.0) - 1.0
     centery = np.int16(np.ceil(height / 2.0) - 1)
@@ -32,19 +35,19 @@ def raven_filter_numpy(
     filtershapepad2d = np.zeros((row2 - row1, filtershape.size))
     filtershapepad2d[:] = np.float64(filtershape)
     filtercomplex = filtershapepad2d + filtershapepad2d * 1j
-
+
     # Generate filter objects
-    a = pyfftw.empty_aligned((height, images, width), dtype='complex128', n=16)
-    b = pyfftw.empty_aligned((height, images, width), dtype='complex128', n=16)
-    c = pyfftw.empty_aligned((height, images, width), dtype='complex128', n=16)
-    d = pyfftw.empty_aligned((height, images, width), dtype='complex128', n=16)
-    fft_object = pyfftw.FFTW(a, b, axes=(0, 2))
-    ifft_object = pyfftw.FFTW(c, d, axes=(0, 2), direction='FFTW_BACKWARD')
-
+    a = pyfftw.empty_aligned((height, images, width), dtype="complex128", n=16)
+    b = pyfftw.empty_aligned((height, images, width), dtype="complex128", n=16)
+    c = pyfftw.empty_aligned((height, images, width), dtype="complex128", n=16)
+    d = pyfftw.empty_aligned((height, images, width), dtype="complex128", n=16)
+    fft_object = pyfftw.FFTW(a, b, axes=(0, 2))
+    ifft_object = pyfftw.FFTW(c, d, axes=(0, 2), direction="FFTW_BACKWARD")
+
     sino = fft.fftshift(fft_object(sinogram_padded), axes=(0, 2))
     for m in range(sino.shape[1]):
         sino[row1:row2, m] = sino[row1:row2, m] * filtercomplex
     sino = ifft_object(fft.ifftshift(sino, axes=(0, 2)))
-    sinogram = sino[pad_y:height-pad_y, :, pad_x:width-pad_x]
+    sinogram = sino[pad_y : height - pad_y, :, pad_x : width - pad_x]
 
-    return sinogram.real
+    return sinogram.real
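As a side note on the pyfftw objects in this CPU reference: pyfftw.FFTW builds a reusable plan over aligned buffers, and calling the plan executes the transform. A self-contained sketch of that plan-and-execute pattern (array shape chosen arbitrarily, not taken from the reference filter):

import numpy as np
import pyfftw

# Plan a forward and a backward transform over 16-byte-aligned buffers.
a = pyfftw.empty_aligned((64, 64), dtype="complex128", n=16)
b = pyfftw.empty_aligned((64, 64), dtype="complex128", n=16)
fft_object = pyfftw.FFTW(a, b, axes=(0, 1))
ifft_object = pyfftw.FFTW(b, a, axes=(0, 1), direction="FFTW_BACKWARD")

signal = (np.random.random((64, 64)) + 0j).astype("complex128")
spectrum = fft_object(signal)        # copies `signal` into `a`, executes the plan, returns `b`
recovered = ifft_object(spectrum)    # the inverse transform is normalised by default
assert np.allclose(recovered, signal)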

tests/test_prep/test_stripe.py

Lines changed: 17 additions & 4 deletions
@@ -14,6 +14,7 @@
 from numpy.testing import assert_allclose
 from .stripe_cpu_reference import raven_filter_numpy
 
+
 def test_remove_stripe_ti_on_data(data, flats, darks):
     # --- testing the CuPy implementation from TomoCupy ---#
     data = normalize(data, flats, darks, cutoff=10, minus_log=True)
@@ -53,6 +54,7 @@ def test_remove_stripe_ti_on_data(data, flats, darks):
     #     np.median(corrected_data), np.median(corrected_host_data), rtol=1e-6
     # )
 
+
 def test_stripe_removal_sorting_cupy(data, flats, darks):
     # --- testing the CuPy port of TomoPy's implementation ---#
     data = normalize(data, flats, darks, cutoff=10, minus_log=True)
@@ -67,6 +69,7 @@ def test_stripe_removal_sorting_cupy(data, flats, darks):
     assert corrected_data.dtype == np.float32
     assert corrected_data.flags.c_contiguous
 
+
 def test_stripe_raven_cupy(data, flats, darks):
     # --- testing the CuPy port of TomoPy's implementation ---#
 
@@ -82,13 +85,16 @@ def test_stripe_raven_cupy(data, flats, darks):
     assert data_after_raven_gpu.dtype == np.float32
     assert data_after_raven_gpu.shape == data_after_raven_cpu.shape
 
+
 @pytest.mark.parametrize("uvalue", [20, 50, 100])
 @pytest.mark.parametrize("nvalue", [2, 4, 6])
 @pytest.mark.parametrize("vvalue", [2, 4])
 @pytest.mark.parametrize("pad_x", [0, 10, 20])
 @pytest.mark.parametrize("pad_y", [0, 10, 20])
 @cp.testing.numpy_cupy_allclose(rtol=0, atol=3e-01)
-def test_stripe_raven_parameters_cupy(ensure_clean_memory, xp, uvalue, nvalue, vvalue, pad_x, pad_y):
+def test_stripe_raven_parameters_cupy(
+    ensure_clean_memory, xp, uvalue, nvalue, vvalue, pad_x, pad_y
+):
     # because it's random, we explicitly seed and use numpy only, to match the data
     np.random.seed(12345)
     data = np.random.random_sample(size=(256, 5, 512)).astype(np.float32) * 2.0 + 0.001
@@ -97,14 +103,15 @@ def test_stripe_raven_parameters_cupy(ensure_clean_memory, xp, uvalue, nvalue, v
     if xp.__name__ == "numpy":
         results = raven_filter_numpy(
             data, uvalue=uvalue, nvalue=nvalue, vvalue=vvalue, pad_x=pad_x, pad_y=pad_y
-            ).astype(np.float32)
+        ).astype(np.float32)
     else:
         results = raven_filter(
             data, uvalue=uvalue, nvalue=nvalue, vvalue=vvalue, pad_x=pad_x, pad_y=pad_y
         ).get()
 
     return xp.asarray(results)
 
+
 @pytest.mark.perf
 def test_stripe_removal_sorting_cupy_performance(ensure_clean_memory):
     data_host = (
@@ -154,6 +161,7 @@ def test_remove_stripe_ti_performance(ensure_clean_memory):
 
     assert "performance in ms" == duration_ms
 
+
 @pytest.mark.perf
 def test_raven_filter_performance(ensure_clean_memory):
     data_host = (
@@ -178,6 +186,7 @@ def test_raven_filter_performance(ensure_clean_memory):
 
     assert "performance in ms" == duration_ms
 
+
 def test_remove_all_stripe_on_data(data, flats, darks):
     # --- testing the CuPy implementation from TomoCupy ---#
     data = normalize(data, flats, darks, cutoff=10, minus_log=True)
@@ -190,8 +199,12 @@ def test_remove_all_stripe_on_data(data, flats, darks):
     )
     assert_allclose(np.median(data_after_stripe_removal), 0.015338, rtol=1e-04)
     assert_allclose(np.max(data_after_stripe_removal), 2.298123, rtol=1e-05)
-    assert_allclose(np.median(data_after_stripe_removal, axis=(1, 2)).sum(), 2.788046, rtol=1e-6)
-    assert_allclose(np.median(data_after_stripe_removal, axis=(0, 1)).sum(), 28.661312, rtol=1e-6)
+    assert_allclose(
+        np.median(data_after_stripe_removal, axis=(1, 2)).sum(), 2.788046, rtol=1e-6
+    )
+    assert_allclose(
+        np.median(data_after_stripe_removal, axis=(0, 1)).sum(), 28.661312, rtol=1e-6
+    )
 
     data = None  #: free up GPU memory
     # make sure the output is float32
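For readers unfamiliar with the decorator used in test_stripe_raven_parameters_cupy: cp.testing.numpy_cupy_allclose runs the test body once with xp bound to NumPy and once with CuPy, then asserts the two returned arrays agree within the given tolerances. A minimal sketch (test name and values chosen purely for illustration):

import cupy as cp


@cp.testing.numpy_cupy_allclose(rtol=0, atol=1e-6)
def test_scaling_matches_between_backends(xp):
    # Called twice: once with xp = numpy, once with xp = cupy.
    # The decorator collects both return values and compares them element-wise.
    data = xp.arange(100, dtype=xp.float32).reshape(10, 10)
    return data * 0.5 + 1.0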

zenodo-tests/conftest.py

Lines changed: 16 additions & 0 deletions
@@ -89,6 +89,22 @@ def i12_dataset3(i12_dataset3_file):
     )
 
 
+@pytest.fixture(scope="session")
+def i12_dataset4_file(test_data_path):
+    in_file = os.path.join(test_data_path, "i12_dataset4.npz")
+    return np.load(in_file)
+
+
+@pytest.fixture
+def i12_dataset4(i12_dataset4_file):
+    return (
+        cp.asarray(i12_dataset4_file["projdata"]),
+        i12_dataset4_file["angles"],
+        cp.asarray(i12_dataset4_file["flats"]),
+        cp.asarray(i12_dataset4_file["darks"]),
+    )
+
+
 @pytest.fixture(scope="session")
 def i13_dataset1_file(test_data_path):
     in_file = os.path.join(test_data_path, "i13_dataset1.npz")
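The zenodo-side test module that consumes this fixture is part of the commit but is not shown in this excerpt. Purely as an illustration, a parameterised stripe test built on the i12_dataset4 fixture might look roughly like the sketch below; the imports, stripe parameters, and assertions are assumptions for illustration, not the commit's actual code.

import numpy as np
import pytest

# Assumed module paths; the real zenodo test module is not shown in this diff.
from httomolibgpu.prep.normalize import normalize
from httomolibgpu.prep.stripe import remove_all_stripe


@pytest.mark.parametrize("snr", [1.0, 3.0])
@pytest.mark.parametrize("la_size", [31, 61])
def test_remove_all_stripe_i12_dataset4(i12_dataset4, ensure_clean_memory, snr, la_size):
    # Unpack the session-scoped Zenodo dataset and normalise on the GPU.
    projdata, angles, flats, darks = i12_dataset4
    data = normalize(projdata, flats, darks, minus_log=True)
    corrected = remove_all_stripe(data, snr=snr, la_size=la_size).get()
    # Sanity checks only; reference medians would come from the real test data.
    assert corrected.dtype == np.float32
    assert not np.isnan(corrected).any()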
