diff --git a/pyproject.toml b/pyproject.toml
index 9f970f47..b0065624 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -223,6 +223,7 @@ env = "PYTHONHASHSEED=0"
 markers = [
     "random_gtab_data: Custom marker for random gtab data tests",
     "random_dwi_data: Custom marker for random dwi data tests",
+    "random_pet_data: Custom marker for random pet data tests",
     "random_uniform_ndim_data: Custom marker for random multi-dimensional data tests",
     "random_uniform_spatial_data: Custom marker for random spatial data tests",
 ]
diff --git a/test/conftest.py b/test/conftest.py
index b645e4fd..65811ede 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -297,3 +297,31 @@ def setup_random_dwi_data(request, setup_random_gtab_data):
         gradients,
         b0_thres,
     )
+
+
+@pytest.fixture(autouse=True)
+def setup_random_pet_data(request):
+    """Automatically generate random PET data for tests."""
+    marker = request.node.get_closest_marker("random_pet_data")
+
+    n_frames = 5
+    vol_size = (4, 4, 4)
+    midframe = np.arange(n_frames, dtype=np.float32) + 1
+    total_duration = float(n_frames + 1)
+    if marker:
+        n_frames, vol_size, midframe, total_duration = marker.args
+
+    rng = request.node.rng
+
+    pet_dataobj, affine = _generate_random_uniform_spatial_data(
+        request, (*vol_size, n_frames), 0.0, 1.0
+    )
+    brainmask_dataobj = rng.choice([True, False], size=vol_size).astype(np.uint8)
+
+    return (
+        pet_dataobj,
+        affine,
+        brainmask_dataobj,
+        midframe,
+        total_duration,
+    )
diff --git a/test/test_analysis.py b/test/test_analysis.py
index 65796e53..58acca70 100644
--- a/test/test_analysis.py
+++ b/test/test_analysis.py
@@ -152,9 +152,7 @@ def test_identify_spikes(request):
     fd = rng.normal(0, 5, n_samples)
     threshold = 2.0
 
-    expected_indices = np.asarray(
-        [82, 83, 160, 179, 208, 219, 229, 233, 383, 389, 402, 421, 423, 439, 444]
-    )
+    expected_indices = np.asarray([42, 48, 61, 80, 98, 103, 113, 143, 324, 387, 422, 436, 449])
     expected_mask = np.zeros(n_samples, dtype=bool)
     expected_mask[expected_indices] = True
 
diff --git a/test/test_data_pet.py b/test/test_data_pet.py
index 76e99ca9..adaa032b 100644
--- a/test/test_data_pet.py
+++ b/test/test_data_pet.py
@@ -31,52 +31,59 @@
 from nifreeze.data.pet import PET, from_nii
 
 
-def test_from_nii_requires_frame_time(tmp_path):
-    data = np.zeros((2, 2, 2, 2), dtype=np.float32)
-    img = nb.Nifti1Image(data, np.eye(4))
-    fname = tmp_path / "pet.nii.gz"
-    img.to_filename(fname)
-
-    with pytest.raises(RuntimeError, match="frame_time must be provided"):
-        from_nii(fname)
-
+@pytest.fixture
+def random_dataset(setup_random_pet_data) -> PET:
+    """Create a PET dataset with random data for testing."""
+
+    (
+        pet_dataobj,
+        affine,
+        brainmask_dataobj,
+        midframe,
+        total_duration,
+    ) = setup_random_pet_data
 
-def _create_dataset():
-    rng = np.random.default_rng(12345)
-    data = rng.random((4, 4, 4, 5), dtype=np.float32)
-    affine = np.eye(4, dtype=np.float32)
-    mask = np.ones((4, 4, 4), dtype=bool)
-    midframe = np.array([10, 20, 30, 40, 50], dtype=np.float32)
     return PET(
-        dataobj=data,
+        dataobj=pet_dataobj,
         affine=affine,
-        brainmask=mask,
+        brainmask=brainmask_dataobj,
         midframe=midframe,
-        total_duration=60.0,
+        total_duration=total_duration,
     )
 
 
-def test_pet_set_transform_updates_motion_affines():
-    dataset = _create_dataset()
+@pytest.mark.random_uniform_spatial_data((2, 2, 2, 2), 0.0, 1.0)
+def test_from_nii_requires_frame_time(setup_random_uniform_spatial_data, tmp_path):
+    data, affine = setup_random_uniform_spatial_data
+    img = nb.Nifti1Image(data, affine)
+    fname = tmp_path / "pet.nii.gz"
+    img.to_filename(fname)
+
+    with pytest.raises(RuntimeError, match="frame_time must be provided"):
+        from_nii(fname)
+
+
+@pytest.mark.random_pet_data(5, (4, 4, 4), np.asarray([10.0, 20.0, 30.0, 40.0, 50.0]), 60.0)
+def test_pet_set_transform_updates_motion_affines(random_dataset):
     idx = 2
-    data_before = np.copy(dataset.dataobj[..., idx])
+    data_before = np.copy(random_dataset.dataobj[..., idx])
 
     affine = np.eye(4)
-    dataset.set_transform(idx, affine)
+    random_dataset.set_transform(idx, affine)
 
-    np.testing.assert_allclose(dataset.dataobj[..., idx], data_before)
-    assert dataset.motion_affines is not None
-    assert len(dataset.motion_affines) == len(dataset)
-    assert isinstance(dataset.motion_affines[idx], Affine)
-    np.testing.assert_array_equal(dataset.motion_affines[idx].matrix, affine)
+    np.testing.assert_allclose(random_dataset.dataobj[..., idx], data_before)
+    assert random_dataset.motion_affines is not None
+    assert len(random_dataset.motion_affines) == len(random_dataset)
+    assert isinstance(random_dataset.motion_affines[idx], Affine)
+    np.testing.assert_array_equal(random_dataset.motion_affines[idx].matrix, affine)
 
-    vol, aff, time = dataset[idx]
-    assert aff is dataset.motion_affines[idx]
+    vol, aff, time = random_dataset[idx]
+    assert aff is random_dataset.motion_affines[idx]
 
 
-def test_pet_load(tmp_path):
-    data = np.zeros((2, 2, 2, 2), dtype=np.float32)
-    affine = np.eye(4)
+@pytest.mark.random_uniform_spatial_data((2, 2, 2, 2), 0.0, 1.0)
+def test_pet_load(setup_random_uniform_spatial_data, tmp_path):
+    data, affine = setup_random_uniform_spatial_data
     img = nb.Nifti1Image(data, affine)
     fname = tmp_path / "pet.nii.gz"
     img.to_filename(fname)
diff --git a/test/test_integration_pet.py b/test/test_integration_pet.py
index 91c8fdd0..68b2a318 100644
--- a/test/test_integration_pet.py
+++ b/test/test_integration_pet.py
@@ -24,51 +24,57 @@
 import types
 
 import numpy as np
+import pytest
 
 from nifreeze.data.pet import PET
 from nifreeze.estimator import PETMotionEstimator
 
 
-def _pet_dataset(n_frames=3):
-    rng = np.random.default_rng(42)
-    data = rng.random((2, 2, 2, n_frames), dtype=np.float32)
-    affine = np.eye(4, dtype=np.float32)
-    mask = np.ones((2, 2, 2), dtype=bool)
-    midframe = np.arange(n_frames, dtype=np.float32) + 1
+@pytest.fixture
+def random_dataset(setup_random_pet_data) -> PET:
+    """Create a PET dataset with random data for testing."""
+
+    (
+        pet_dataobj,
+        affine,
+        brainmask_dataobj,
+        midframe,
+        total_duration,
+    ) = setup_random_pet_data
+
     return PET(
-        dataobj=data,
+        dataobj=pet_dataobj,
         affine=affine,
-        brainmask=mask,
+        brainmask=brainmask_dataobj,
         midframe=midframe,
-        total_duration=float(n_frames + 1),
+        total_duration=total_duration,
     )
 
 
-def test_lofo_split_shapes(tmp_path):
-    ds = _pet_dataset(4)
+@pytest.mark.random_pet_data(4, (2, 2, 2), np.asarray([1.0, 2.0, 3.0, 4.0]), 5.0)
+def test_lofo_split_shapes(random_dataset, tmp_path):
     idx = 2
-    (train_data, train_times), (test_data, test_time) = ds.lofo_split(idx)
-    assert train_data.shape[-1] == ds.dataobj.shape[-1] - 1
-    np.testing.assert_array_equal(test_data, ds.dataobj[..., idx])
-    np.testing.assert_array_equal(train_times, np.delete(ds.midframe, idx))
-    assert test_time == ds.midframe[idx]
+    (train_data, train_times), (test_data, test_time) = random_dataset.lofo_split(idx)
+    assert train_data.shape[-1] == random_dataset.dataobj.shape[-1] - 1
+    np.testing.assert_array_equal(test_data, random_dataset.dataobj[..., idx])
+    np.testing.assert_array_equal(train_times, np.delete(random_dataset.midframe, idx))
+    assert test_time == random_dataset.midframe[idx]
 
 
-def test_to_from_filename_roundtrip(tmp_path):
-    ds = _pet_dataset(3)
+@pytest.mark.random_pet_data(3, (2, 2, 2), np.asarray([1.0, 2.0, 3.0]), 4.0)
+def test_to_from_filename_roundtrip(random_dataset, tmp_path):
     out_file = tmp_path / "petdata"
-    ds.to_filename(out_file)
+    random_dataset.to_filename(out_file)
     assert (tmp_path / "petdata.h5").exists()
 
     loaded = PET.from_filename(tmp_path / "petdata.h5")
-    np.testing.assert_allclose(loaded.dataobj, ds.dataobj)
-    np.testing.assert_allclose(loaded.affine, ds.affine)
-    np.testing.assert_allclose(loaded.midframe, ds.midframe)
-    assert loaded.total_duration == ds.total_duration
-
+    np.testing.assert_allclose(loaded.dataobj, random_dataset.dataobj)
+    np.testing.assert_allclose(loaded.affine, random_dataset.affine)
+    np.testing.assert_allclose(loaded.midframe, random_dataset.midframe)
+    assert loaded.total_duration == random_dataset.total_duration
 
-def test_pet_motion_estimator_run(monkeypatch):
-    ds = _pet_dataset(3)
+@pytest.mark.random_pet_data(5, (4, 4, 4), np.asarray([10.0, 20.0, 30.0, 40.0, 50.0]), 60.0)
+def test_pet_motion_estimator_run(random_dataset, monkeypatch):
     class DummyModel:
         def __init__(self, dataset, timepoints, xlim):
             self.dataset = dataset
@@ -76,7 +82,7 @@ def __init__(self, dataset, timepoints, xlim):
         def fit_predict(self, index):
             if index is None:
                 return None
-            return np.zeros(ds.shape3d, dtype=np.float32)
+            return np.zeros(self.dataset.shape3d, dtype=np.float32)
 
     monkeypatch.setattr("nifreeze.estimator.PETModel", DummyModel)
 
@@ -90,7 +96,7 @@ def run(self, cwd=None):
     monkeypatch.setattr("nifreeze.estimator.Registration", DummyRegistration)
 
     estimator = PETMotionEstimator(None)
-    affines = estimator.run(ds)
-    assert len(affines) == len(ds)
+    affines = estimator.run(random_dataset)
+    assert len(affines) == len(random_dataset)
     for mat in affines:
         np.testing.assert_array_equal(mat, np.eye(4))
diff --git a/test/test_model_pet.py b/test/test_model_pet.py
index a4c07dd9..c4bd2c0f 100644
--- a/test/test_model_pet.py
+++ b/test/test_model_pet.py
@@ -28,27 +28,33 @@
 from nifreeze.model.pet import PETModel
 
 
-def _create_dataset():
-    rng = np.random.default_rng(12345)
-    data = rng.random((4, 4, 4, 5), dtype=np.float32)
-    affine = np.eye(4, dtype=np.float32)
-    mask = np.ones((4, 4, 4), dtype=bool)
-    midframe = np.array([10, 20, 30, 40, 50], dtype=np.float32)
+@pytest.fixture
+def random_dataset(setup_random_pet_data) -> PET:
+    """Create a PET dataset with random data for testing."""
+
+    (
+        pet_dataobj,
+        affine,
+        brainmask_dataobj,
+        midframe,
+        total_duration,
+    ) = setup_random_pet_data
+
     return PET(
-        dataobj=data,
+        dataobj=pet_dataobj,
         affine=affine,
-        brainmask=mask,
+        brainmask=brainmask_dataobj,
         midframe=midframe,
-        total_duration=60.0,
+        total_duration=total_duration,
     )
 
 
-def test_petmodel_fit_predict():
-    dataset = _create_dataset()
+@pytest.mark.random_pet_data(5, (4, 4, 4), np.asarray([10.0, 20.0, 30.0, 40.0, 50.0]), 60.0)
+def test_petmodel_fit_predict(random_dataset):
     model = PETModel(
-        dataset=dataset,
-        timepoints=dataset.midframe,
-        xlim=dataset.total_duration,
+        dataset=random_dataset,
+        timepoints=random_dataset.midframe,
+        xlim=random_dataset.total_duration,
         smooth_fwhm=0,
         thresh_pct=0,
     )
@@ -58,19 +64,19 @@ def test_petmodel_fit_predict():
     assert model.is_fitted
 
     # Predict at a specific timepoint
-    vol = model.fit_predict(dataset.midframe[2])
-    assert vol.shape == dataset.shape3d
-    assert vol.dtype == dataset.dataobj.dtype
+    vol = model.fit_predict(random_dataset.midframe[2])
+    assert vol.shape == random_dataset.shape3d
+    assert vol.dtype == random_dataset.dataobj.dtype
 
 
-def test_petmodel_invalid_init():
-    dataset = _create_dataset()
+@pytest.mark.random_pet_data(5, (4, 4, 4), np.asarray([10.0, 20.0, 30.0, 40.0, 50.0]), 60.0)
+def test_petmodel_invalid_init(random_dataset):
     with pytest.raises(TypeError):
-        PETModel(dataset=dataset)
+        PETModel(dataset=random_dataset)
 
 
-def test_petmodel_time_check():
-    dataset = _create_dataset()
+@pytest.mark.random_pet_data(5, (4, 4, 4), np.asarray([10.0, 20.0, 30.0, 40.0, 50.0]), 60.0)
+def test_petmodel_time_check(random_dataset):
     bad_times = np.array([0, 10, 20, 30, 50], dtype=np.float32)
     with pytest.raises(ValueError):
-        PETModel(dataset=dataset, timepoints=bad_times, xlim=60.0)
+        PETModel(dataset=random_dataset, timepoints=bad_times, xlim=60.0)
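
The sketch below is not part of the patch; it only illustrates how a new test could opt in to custom random PET data through the random_pet_data marker and the setup_random_pet_data fixture added in test/conftest.py. The test name and the argument values are hypothetical; the argument order matches what the fixture unpacks from marker.args: (n_frames, vol_size, midframe, total_duration).

# Hypothetical usage sketch, assuming the marker/fixture pair introduced above.
import numpy as np
import pytest


@pytest.mark.random_pet_data(3, (2, 2, 2), np.asarray([1.0, 2.0, 3.0]), 4.0)
def test_random_pet_data_shapes(setup_random_pet_data):
    pet_dataobj, affine, brainmask_dataobj, midframe, total_duration = setup_random_pet_data

    # Volumes are stacked along the last axis, one per requested frame.
    assert pet_dataobj.shape == (2, 2, 2, 3)
    assert brainmask_dataobj.shape == (2, 2, 2)
    assert affine.shape == (4, 4)
    assert midframe.size == 3
    assert total_duration == 4.0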