Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -223,6 +223,7 @@ env = "PYTHONHASHSEED=0"
markers = [
"random_gtab_data: Custom marker for random gtab data tests",
"random_dwi_data: Custom marker for random dwi data tests",
"random_pet_data: Custom marker for random pet data tests",
"random_uniform_ndim_data: Custom marker for random multi-dimensional data tests",
"random_uniform_spatial_data: Custom marker for random spatial data tests",
]
Expand Down
28 changes: 28 additions & 0 deletions test/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -297,3 +297,31 @@ def setup_random_dwi_data(request, setup_random_gtab_data):
gradients,
b0_thres,
)


@pytest.fixture(autouse=True)
def setup_random_pet_data(request):
    """Automatically generate random PET data for tests.

    Defaults are used unless the requesting test carries a ``random_pet_data``
    marker, whose positional args override
    ``(n_frames, vol_size, midframe, total_duration)``.
    """
    marker = request.node.get_closest_marker("random_pet_data")

    n_frames = 5
    vol_size = (4, 4, 4)
    midframe = np.arange(n_frames, dtype=np.float32) + 1
    total_duration = float(n_frames + 1)
    if marker:
        n_frames, vol_size, midframe, total_duration = marker.args

    rng = request.node.rng

    pet_dataobj, affine = _generate_random_uniform_spatial_data(
        request, (*vol_size, n_frames), 0.0, 1.0
    )
    # Keep the mask boolean: casting it to np.uint8 makes scipy.sparse fail
    # downstream with "ValueError: could not interpret dimensions".
    brainmask_dataobj = rng.choice([True, False], size=vol_size)

    return (
        pet_dataobj,
        affine,
        brainmask_dataobj,
        midframe,
        total_duration,
    )
4 changes: 1 addition & 3 deletions test/test_analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,9 +152,7 @@ def test_identify_spikes(request):
fd = rng.normal(0, 5, n_samples)
threshold = 2.0

expected_indices = np.asarray(
[82, 83, 160, 179, 208, 219, 229, 233, 383, 389, 402, 421, 423, 439, 444]
)
expected_indices = np.asarray([42, 48, 61, 80, 98, 103, 113, 143, 324, 387, 422, 436, 449])
expected_mask = np.zeros(n_samples, dtype=bool)
expected_mask[expected_indices] = True

Expand Down
71 changes: 39 additions & 32 deletions test/test_data_pet.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,52 +31,59 @@
from nifreeze.data.pet import PET, from_nii


def test_from_nii_requires_frame_time(tmp_path):
data = np.zeros((2, 2, 2, 2), dtype=np.float32)
img = nb.Nifti1Image(data, np.eye(4))
fname = tmp_path / "pet.nii.gz"
img.to_filename(fname)

with pytest.raises(RuntimeError, match="frame_time must be provided"):
from_nii(fname)

@pytest.fixture
def random_dataset(setup_random_pet_data) -> PET:
    """Create a PET dataset with random data for testing.

    Unpacks the tuple produced by the autouse ``setup_random_pet_data``
    conftest fixture and wraps it in a :class:`PET` object.
    """
    (
        pet_dataobj,
        affine,
        brainmask_dataobj,
        midframe,
        total_duration,
    ) = setup_random_pet_data

    return PET(
        dataobj=pet_dataobj,
        affine=affine,
        brainmask=brainmask_dataobj,
        midframe=midframe,
        total_duration=total_duration,
    )


def test_pet_set_transform_updates_motion_affines():
dataset = _create_dataset()
@pytest.mark.random_uniform_spatial_data((2, 2, 2, 2), 0.0, 1.0)
def test_from_nii_requires_frame_time(setup_random_uniform_spatial_data, tmp_path):
    """``from_nii`` must raise when no frame timing information is given."""
    dataobj, affine = setup_random_uniform_spatial_data
    nii_path = tmp_path / "pet.nii.gz"
    nb.Nifti1Image(dataobj, affine).to_filename(nii_path)

    with pytest.raises(RuntimeError, match="frame_time must be provided"):
        from_nii(nii_path)


@pytest.mark.random_pet_data(5, (4, 4, 4), np.asarray([10.0, 20.0, 30.0, 40.0, 50.0]), 60.0)
def test_pet_set_transform_updates_motion_affines(random_dataset):
    """``set_transform`` must record the affine without altering the data."""
    idx = 2
    data_before = np.copy(random_dataset.dataobj[..., idx])

    affine = np.eye(4)
    random_dataset.set_transform(idx, affine)

    # The data volume itself is untouched; only the motion affine is recorded.
    np.testing.assert_allclose(random_dataset.dataobj[..., idx], data_before)
    assert random_dataset.motion_affines is not None
    assert len(random_dataset.motion_affines) == len(random_dataset)
    assert isinstance(random_dataset.motion_affines[idx], Affine)
    np.testing.assert_array_equal(random_dataset.motion_affines[idx].matrix, affine)

    # Indexing the dataset hands back the very same affine object.
    vol, aff, time = random_dataset[idx]
    assert aff is random_dataset.motion_affines[idx]


def test_pet_load(tmp_path):
data = np.zeros((2, 2, 2, 2), dtype=np.float32)
affine = np.eye(4)
@pytest.mark.random_uniform_spatial_data((2, 2, 2, 2), 0.0, 1.0)
def test_pet_load(setup_random_uniform_spatial_data, tmp_path):
data, affine = setup_random_uniform_spatial_data
img = nb.Nifti1Image(data, affine)
fname = tmp_path / "pet.nii.gz"
img.to_filename(fname)
Expand Down
64 changes: 35 additions & 29 deletions test/test_integration_pet.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,59 +24,65 @@
import types

import numpy as np
import pytest

from nifreeze.data.pet import PET
from nifreeze.estimator import PETMotionEstimator


@pytest.fixture
def random_dataset(setup_random_pet_data) -> PET:
    """Create a PET dataset with random data for testing.

    Unpacks the tuple produced by the autouse ``setup_random_pet_data``
    conftest fixture and wraps it in a :class:`PET` object.
    """
    (
        pet_dataobj,
        affine,
        brainmask_dataobj,
        midframe,
        total_duration,
    ) = setup_random_pet_data

    return PET(
        dataobj=pet_dataobj,
        affine=affine,
        brainmask=brainmask_dataobj,
        midframe=midframe,
        total_duration=total_duration,
    )


@pytest.mark.random_pet_data(4, (2, 2, 2), np.asarray([1.0, 2.0, 3.0, 4.0]), 5.0)
def test_lofo_split_shapes(random_dataset, tmp_path):
    """``lofo_split`` must leave exactly one frame out, with matching times."""
    idx = 2
    (train_data, train_times), (test_data, test_time) = random_dataset.lofo_split(idx)
    # Train set drops one frame; test set is exactly the left-out frame.
    assert train_data.shape[-1] == random_dataset.dataobj.shape[-1] - 1
    np.testing.assert_array_equal(test_data, random_dataset.dataobj[..., idx])
    np.testing.assert_array_equal(train_times, np.delete(random_dataset.midframe, idx))
    assert test_time == random_dataset.midframe[idx]


@pytest.mark.random_pet_data(3, (2, 2, 2), np.asarray([1.0, 2.0, 3.0]), 4.0)
def test_to_from_filename_roundtrip(random_dataset, tmp_path):
    """Saving to HDF5 and reloading must preserve data, affine and timing."""
    out_file = tmp_path / "petdata"
    random_dataset.to_filename(out_file)
    # to_filename appends the ".h5" extension.
    assert (tmp_path / "petdata.h5").exists()
    loaded = PET.from_filename(tmp_path / "petdata.h5")
    np.testing.assert_allclose(loaded.dataobj, random_dataset.dataobj)
    np.testing.assert_allclose(loaded.affine, random_dataset.affine)
    np.testing.assert_allclose(loaded.midframe, random_dataset.midframe)
    assert loaded.total_duration == random_dataset.total_duration

def test_pet_motion_estimator_run(monkeypatch):
ds = _pet_dataset(3)

@pytest.mark.random_pet_data(5, (4, 4, 4), np.asarray([10.0, 20.0, 30.0, 40.0, 50.0]), 60.0)
def test_pet_motion_estimator_run(random_dataset, monkeypatch):
class DummyModel:
def __init__(self, dataset, timepoints, xlim):
self.dataset = dataset

def fit_predict(self, index):
if index is None:
return None
return np.zeros(ds.shape3d, dtype=np.float32)
return np.zeros(self.dataset.shape3d, dtype=np.float32)

monkeypatch.setattr("nifreeze.estimator.PETModel", DummyModel)

Expand All @@ -90,7 +96,7 @@ def run(self, cwd=None):
monkeypatch.setattr("nifreeze.estimator.Registration", DummyRegistration)

estimator = PETMotionEstimator(None)
affines = estimator.run(ds)
assert len(affines) == len(ds)
affines = estimator.run(random_dataset)
assert len(affines) == len(random_dataset)
for mat in affines:
np.testing.assert_array_equal(mat, np.eye(4))
52 changes: 29 additions & 23 deletions test/test_model_pet.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,27 +28,33 @@
from nifreeze.model.pet import PETModel


@pytest.fixture
def random_dataset(setup_random_pet_data) -> PET:
    """Create a PET dataset with random data for testing.

    Unpacks the tuple produced by the autouse ``setup_random_pet_data``
    conftest fixture and wraps it in a :class:`PET` object.
    """
    (
        pet_dataobj,
        affine,
        brainmask_dataobj,
        midframe,
        total_duration,
    ) = setup_random_pet_data

    return PET(
        dataobj=pet_dataobj,
        affine=affine,
        brainmask=brainmask_dataobj,
        midframe=midframe,
        total_duration=total_duration,
    )


def test_petmodel_fit_predict():
dataset = _create_dataset()
@pytest.mark.random_pet_data(5, (4, 4, 4), np.asarray([10.0, 20.0, 30.0, 40.0, 50.0]), 60.0)
def test_petmodel_fit_predict(random_dataset):
model = PETModel(
dataset=dataset,
timepoints=dataset.midframe,
xlim=dataset.total_duration,
dataset=random_dataset,
timepoints=random_dataset.midframe,
xlim=random_dataset.total_duration,
smooth_fwhm=0,
thresh_pct=0,
)
Expand All @@ -58,19 +64,19 @@ def test_petmodel_fit_predict():
assert model.is_fitted

# Predict at a specific timepoint
vol = model.fit_predict(dataset.midframe[2])
assert vol.shape == dataset.shape3d
assert vol.dtype == dataset.dataobj.dtype
vol = model.fit_predict(random_dataset.midframe[2])
assert vol.shape == random_dataset.shape3d
assert vol.dtype == random_dataset.dataobj.dtype


@pytest.mark.random_pet_data(5, (4, 4, 4), np.asarray([10.0, 20.0, 30.0, 40.0, 50.0]), 60.0)
def test_petmodel_invalid_init(random_dataset):
    """Constructing a PETModel without timepoints/xlim must raise TypeError."""
    with pytest.raises(TypeError):
        PETModel(dataset=random_dataset)


@pytest.mark.random_pet_data(5, (4, 4, 4), np.asarray([10.0, 20.0, 30.0, 40.0, 50.0]), 60.0)
def test_petmodel_time_check(random_dataset):
    """Timepoints starting at 0 (inconsistent with the data) must be rejected."""
    bad_times = np.array([0, 10, 20, 30, 50], dtype=np.float32)
    with pytest.raises(ValueError):
        PETModel(dataset=random_dataset, timepoints=bad_times, xlim=60.0)
Loading