Skip to content

Commit 79936ea

Browse files
authored
Merge pull request #99 from rmarkello/map_civet
[ENH] Adds function to map CIVET to FreeSurfer
2 parents e8e3aaa + e48e5e9 commit 79936ea

File tree

7 files changed

+294
-9
lines changed

7 files changed

+294
-9
lines changed

docs/api.rst

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -149,6 +149,7 @@ Functions to download atlases and templates
149149
:toctree: generated/
150150

151151
fetch_cammoun2012
152+
fetch_civet
152153
fetch_conte69
153154
fetch_fsaverage
154155
fetch_pauli2018
@@ -196,6 +197,24 @@ Functions to generate (pseudo-random) datasets
196197
spin_data
197198
spin_parcels
198199

200+
.. _ref_civet:
201+
202+
:mod:`netneurotools.civet` - CIVET compatibility functions
203+
----------------------------------------------------------
204+
205+
.. automodule:: netneurotools.civet
206+
:no-members:
207+
:no-inherited-members:
208+
209+
.. currentmodule:: netneurotools.civet
210+
211+
.. autosummary::
212+
:template: function.rst
213+
:toctree: generated/
214+
215+
read_civet
216+
civet_to_freesurfer
217+
199218
.. _ref_utils:
200219

201220
:mod:`netneurotools.utils` - Miscellaneous, grab bag utilities

netneurotools/civet.py

Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,108 @@
1+
# -*- coding: utf-8 -*-
2+
"""
3+
Functions for working with CIVET data (ugh)
4+
"""
5+
6+
import nibabel as nib
7+
import numpy as np
8+
from scipy.interpolate import griddata
9+
10+
from .datasets import fetch_civet, fetch_fsaverage
11+
12+
_MNI305to152 = np.array([[0.9975, -0.0073, 0.0176, -0.0429],
13+
[0.0146, 1.0009, -0.0024, 1.5496],
14+
[-0.0130, -0.0093, 0.9971, 1.1840],
15+
[0.0000, 0.0000, 0.0000, 1.0000]])
16+
17+
18+
def read_civet(fname):
    """
    Reads a CIVET-style .obj geometry file

    Parameters
    ----------
    fname : str or os.PathLike
        Filepath to .obj file

    Returns
    -------
    vertices : (N, 3) numpy.ndarray
        Coordinates of surface vertices
    triangles : (T, 3) numpy.ndarray
        Triangles comprising surface; values are indices into `vertices`
    """

    k, polygons = 0, []
    with open(fname, 'r') as src:
        # seventh field of the header line is the number of vertices
        n_vert = int(src.readline().split()[6])
        vertices = np.zeros((n_vert, 3))
        for i, line in enumerate(src):
            if i < n_vert:
                # one "x y z" coordinate triplet per line
                # (use `coord`, not `i`, to avoid shadowing the loop index)
                vertices[i] = [float(coord) for coord in line.split()]
            elif i >= (2 * n_vert) + 5:
                # lines between the vertices and here hold normals + polygon
                # metadata; a blank line signals the start of the flat list
                # of triangle vertex indices
                if not line.strip():
                    k = 1
                elif k == 1:
                    polygons.extend([int(idx) for idx in line.split()])

    triangles = np.reshape(np.asarray(polygons), (-1, 3))

    return vertices, triangles
49+
50+
51+
def civet_to_freesurfer(brainmap, surface='mid', version='v1',
                        freesurfer='fsaverage6', method='nearest',
                        data_dir=None):
    """
    Projects `brainmap` in CIVET space to `freesurfer` fsaverage space

    Uses a nearest-neighbor projection based on the geometry of the vertices

    Parameters
    ----------
    brainmap : array_like
        CIVET brainmap to be converted to freesurfer space
    surface : {'white', 'mid'}, optional
        Which CIVET surface to use for geometry of `brainmap`. Default: 'mid'
    version : {'v1', 'v2'}, optional
        Which CIVET version to use for geometry of `brainmap`. Default: 'v1'
    freesurfer : str, optional
        Which version of FreeSurfer space to project data to. Must be one of
        {'fsaverage', 'fsaverage3', 'fsaverage4', 'fsaverage5', 'fsaverage6'}.
        Default: 'fsaverage6'
    method : {'nearest', 'linear'}, optional
        What method of interpolation to use when projecting the data between
        surfaces. Default: 'nearest'
    data_dir : str, optional
        Path to use as data directory. If not specified, will check for
        environmental variable 'NNT_DATA'; if that is not set, will use
        `~/nnt-data` instead. Default: None

    Returns
    -------
    data : np.ndarray
        Provided `brainmap` mapped to FreeSurfer
    """

    brainmap = np.asarray(brainmap)
    # total vertex counts of the 41k and 164k CIVET surfaces (both hemis)
    densities = (81924, 327684)
    n_vert = brainmap.shape[0]
    if n_vert not in densities:
        raise ValueError('Unable to interpret `brainmap` space; provided '
                         'array must have length in {}. Received: {}'
                         .format(densities, n_vert))

    # per-hemisphere vertex count
    n_vert = n_vert // 2
    density = '41k' if n_vert == 40962 else '164k'
    icbm = fetch_civet(density=density, version=version,
                       data_dir=data_dir, verbose=0)[surface]
    fsavg = fetch_fsaverage(version=freesurfer, data_dir=data_dir, verbose=0)
    # fsaverage has no 'mid' surface, so fall back to 'pial' in that case
    fsavg = fsavg['pial' if surface == 'mid' else surface]

    projected = []
    for n, hemi in enumerate(('lh', 'rh')):
        hemi_map = brainmap[n * n_vert:(n + 1) * n_vert]
        # CIVET vertex coordinates (ICBM152 space)
        vert_cv = read_civet(getattr(icbm, hemi))[0]
        # FreeSurfer vertex coordinates, moved from MNI305 into MNI152 space
        fs_geom = nib.freesurfer.read_geometry(getattr(fsavg, hemi))[0]
        vert_fs = nib.affines.apply_affine(_MNI305to152, fs_geom)
        # interpolate the CIVET data onto the FreeSurfer vertex locations
        projected.append(griddata(vert_cv, hemi_map, vert_fs, method=method))

    return np.hstack(projected)

netneurotools/data/osf.json

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,33 @@
113113
"md5": "8f75b95c0e47ae935d10745baefa2c49"
114114
}
115115
},
116+
"tpl-civet": {
117+
"v1": {
118+
"civet41k": {
119+
"url": [
120+
"mb37e",
121+
"601daffd84ecf800fe031868"
122+
],
123+
"md5": "b27219c876464992e1b61da1c60d8d6e"
124+
}
125+
},
126+
"v2": {
127+
"civet41k": {
128+
"url": [
129+
"mb37e",
130+
"601dafe77ad0a80119d9483c"
131+
],
132+
"md5": "a47b015e471c6a800d236f107fda5b4a"
133+
},
134+
"civet164k": {
135+
"url": [
136+
"mb37e",
137+
"601dafe87ad0a8011ad94938"
138+
],
139+
"md5": "02537ea65d5366acd8de729022a34bab"
140+
}
141+
}
142+
},
116143
"ds-connectomes": {
117144
"celegans": {
118145
"url": [

netneurotools/datasets/__init__.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,12 +6,13 @@
66
'fetch_cammoun2012', 'fetch_pauli2018', 'fetch_fsaverage', 'fetch_conte69',
77
'fetch_connectome', 'available_connectomes', 'fetch_vazquez_rodriguez2019',
88
'fetch_mirchi2018', 'make_correlated_xy', 'fetch_schaefer2018',
9-
'fetch_hcp_standards', 'fetch_voneconomo', 'fetch_mmpall'
9+
'fetch_hcp_standards', 'fetch_voneconomo', 'fetch_mmpall', 'fetch_civet'
1010
]
1111

1212
from .fetchers import (fetch_cammoun2012, fetch_pauli2018, fetch_fsaverage,
1313
fetch_conte69, fetch_connectome, available_connectomes,
1414
fetch_vazquez_rodriguez2019, fetch_schaefer2018,
15-
fetch_hcp_standards, fetch_voneconomo, fetch_mmpall)
15+
fetch_hcp_standards, fetch_voneconomo, fetch_mmpall,
16+
fetch_civet)
1617
from .generators import (make_correlated_xy)
1718
from .mirchi import (fetch_mirchi2018)

netneurotools/datasets/fetchers.py

Lines changed: 93 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
from .utils import _get_data_dir, _get_dataset_info
1717
from ..utils import check_fs_subjid
1818

19-
ANNOT = namedtuple('Surface', ('lh', 'rh'))
19+
SURFACE = namedtuple('Surface', ('lh', 'rh'))
2020

2121

2222
def fetch_cammoun2012(version='MNI152NLin2009aSym', data_dir=None, url=None,
@@ -137,7 +137,7 @@ def fetch_cammoun2012(version='MNI152NLin2009aSym', data_dir=None, url=None,
137137
if version == 'MNI152NLin2009aSym':
138138
keys += ['info']
139139
elif version in ('fslr32k', 'fsaverage', 'fsaverage5', 'fsaverage6'):
140-
data = [ANNOT(*data[i:i + 2]) for i in range(0, len(data), 2)]
140+
data = [SURFACE(*data[i:i + 2]) for i in range(0, len(data), 2)]
141141
else:
142142
data = [data[::2][i:i + 2] for i in range(0, len(data) // 2, 2)]
143143
# deal with the fact that last scale is split into three files :sigh:
@@ -212,7 +212,7 @@ def fetch_conte69(data_dir=None, url=None, resume=True, verbose=1):
212212
data[-1] = json.load(src)
213213

214214
# bundle hemispheres together
215-
data = [ANNOT(*data[:-1][i:i + 2]) for i in range(0, 6, 2)] + [data[-1]]
215+
data = [SURFACE(*data[:-1][i:i + 2]) for i in range(0, 6, 2)] + [data[-1]]
216216

217217
return Bunch(**dict(zip(keys + ['info'], data)))
218218

@@ -339,7 +339,7 @@ def fetch_fsaverage(version='fsaverage', data_dir=None, url=None, resume=True,
339339
files=[(op.join(dataset_name, f), url, opts)
340340
for f in filenames])
341341

342-
data = [ANNOT(*data[i:i + 2]) for i in range(0, len(keys) * 2, 2)]
342+
data = [SURFACE(*data[i:i + 2]) for i in range(0, len(keys) * 2, 2)]
343343

344344
return Bunch(**dict(zip(keys, data)))
345345

@@ -564,7 +564,7 @@ def fetch_schaefer2018(version='fsaverage', data_dir=None, url=None,
564564
data = _fetch_files(data_dir, files=files, resume=resume, verbose=verbose)
565565

566566
if suffix == 'annot':
567-
data = [ANNOT(*data[i:i + 2]) for i in range(0, len(keys) * 2, 2)]
567+
data = [SURFACE(*data[i:i + 2]) for i in range(0, len(keys) * 2, 2)]
568568

569569
return Bunch(**dict(zip(keys, data)))
570570

@@ -678,7 +678,7 @@ def fetch_mmpall(version='fslr32k', data_dir=None, url=None, resume=True,
678678
files = [(op.join(dataset_name, version, f), url, opts) for f in filenames]
679679
data = _fetch_files(data_dir, files=files, resume=resume, verbose=verbose)
680680

681-
return ANNOT(*data)
681+
return SURFACE(*data)
682682

683683

684684
def fetch_voneconomo(data_dir=None, url=None, resume=True, verbose=1):
@@ -734,6 +734,92 @@ def fetch_voneconomo(data_dir=None, url=None, resume=True, verbose=1):
734734
] + ['atl-vonEconomoKoskinas_info.csv']
735735
files = [(op.join(dataset_name, f), url, opts) for f in filenames]
736736
data = _fetch_files(data_dir, files=files, resume=resume, verbose=verbose)
737-
data = [ANNOT(*data[:-1:2])] + [ANNOT(*data[1:-1:2])] + [data[-1]]
737+
data = [SURFACE(*data[:-1:2])] + [SURFACE(*data[1:-1:2])] + [data[-1]]
738+
739+
return Bunch(**dict(zip(keys, data)))
740+
741+
742+
def fetch_civet(density='41k', version='v1', data_dir=None, url=None,
                resume=True, verbose=1):
    """
    Fetches CIVET surface files

    Parameters
    ----------
    density : {'41k', '164k'}, optional
        Which density of the CIVET-space geometry files to fetch. The
        high-resolution '164k' surface only exists for version 'v2'.
        Default: '41k'
    version : {'v1', 'v2'}, optional
        Which version of the CIVET surfaces to use. Default: 'v1'
    data_dir : str, optional
        Path to use as data directory. If not specified, will check for
        environmental variable 'NNT_DATA'; if that is not set, will use
        `~/nnt-data` instead. Default: None
    url : str, optional
        URL from which to download data. Default: None
    resume : bool, optional
        Whether to attempt to resume partial download, if possible. Default:
        True
    verbose : int, optional
        Modifies verbosity of download, where higher numbers mean more updates.
        Default: 1

    Returns
    -------
    filenames : :class:`sklearn.utils.Bunch`
        Dictionary-like object with keys ['mid', 'white'] containing geometry
        files for CIVET surface. Note for version 'v1' the 'mid' and 'white'
        files are identical.

    Raises
    ------
    ValueError
        If `density` or `version` is invalid, or if the unavailable
        combination of 'v1' + '164k' is requested

    References
    ----------
    Y. Ad-Dab’bagh, O. Lyttelton, J.-S. Muehlboeck, C. Lepage, D. Einarson, K.
    Mok, O. Ivanov, R. Vincent, J. Lerch, E. Fombonne, A. C. Evans, The CIVET
    image-processing environment: A fully automated comprehensive pipeline for
    anatomical neuroimaging research. Proceedings of the 12th Annual Meeting of
    the Organization for Human Brain Mapping (2006).

    Notes
    -----
    License: https://github.com/aces/CIVET_Full_Project/blob/master/LICENSE
    """

    densities = ['41k', '164k']
    if density not in densities:
        raise ValueError('The density of CIVET requested "{}" does not exist. '
                         'Must be one of {}'.format(density, densities))
    versions = ['v1', 'v2']
    if version not in versions:
        raise ValueError('The version of CIVET requested "{}" does not exist. '
                         'Must be one of {}'.format(version, versions))

    # the high-resolution surface was only generated for CIVET v2
    if version == 'v1' and density == '164k':
        raise ValueError('The "164k" density CIVET surface only exists for '
                         'version "v2"')

    dataset_name = 'tpl-civet'
    keys = ['mid', 'white']

    data_dir = _get_data_dir(data_dir=data_dir)
    info = _get_dataset_info(dataset_name)[version]['civet{}'.format(density)]
    if url is None:
        url = info['url']

    opts = {
        'uncompress': True,
        'md5sum': info['md5'],
        'move': '{}.tar.gz'.format(dataset_name)
    }
    # two hemispheres (L, R) for each of the 'mid' and 'white' surfaces
    filenames = [
        op.join(dataset_name, version, 'civet{}'.format(density),
                'tpl-civet_space-ICBM152_hemi-{}_den-{}_{}.obj'
                .format(hemi, density, surf))
        for surf in keys for hemi in ['L', 'R']
    ]

    data = _fetch_files(data_dir, resume=resume, verbose=verbose,
                        files=[(f, url, opts) for f in filenames])

    # bundle consecutive (L, R) file pairs into (lh, rh) namedtuples
    data = [SURFACE(*data[i:i + 2]) for i in range(0, len(keys) * 2, 2)]

    return Bunch(**dict(zip(keys, data)))

netneurotools/tests/test_civet.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
# -*- coding: utf-8 -*-
2+
"""
3+
For testing netneurotools.civet functionality
4+
"""
5+
6+
import numpy as np
7+
import pytest
8+
9+
from netneurotools import civet, datasets
10+
11+
12+
@pytest.fixture(scope='module')
def civet_surf(tmp_path_factory):
    """ Fetches the CIVET mid-surface files into a module-scoped temp dir """
    data_dir = str(tmp_path_factory.getbasetemp())
    surfaces = datasets.fetch_civet(data_dir=data_dir, verbose=0)
    return surfaces['mid']
16+
17+
18+
def test_read_civet(civet_surf):
19+
vertices, triangles = civet.read_civet(civet_surf.lh)
20+
assert len(vertices) == 40962
21+
assert len(triangles) == 81920
22+
assert np.all(triangles.max(axis=0) < vertices.shape[0])
23+
24+
25+
def test_civet_to_freesurfer():
26+
brainmap = np.random.rand(81924)
27+
out = civet.civet_to_freesurfer(brainmap)
28+
out2 = civet.civet_to_freesurfer(brainmap, method='linear')
29+
assert out.shape[0] == out2.shape[0] == 81924
30+
31+
with pytest.raises(ValueError):
32+
civet.civet_to_freesurfer(np.random.rand(10))

netneurotools/tests/test_datasets.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -183,6 +183,18 @@ def test_get_dataset_info(dset, expected):
183183
utils._get_dataset_info('notvalid')
184184

185185

186+
@pytest.mark.parametrize('version', [
    'v1', 'v2'
])
def test_fetch_civet(tmpdir, version):
    # fetch should return a Bunch with an (lh, rh) pair of existing files
    # for each surface type
    surfaces = datasets.fetch_civet(version=version, data_dir=tmpdir,
                                    verbose=0)
    for surf in ('mid', 'white'):
        assert surf in surfaces
        pair = surfaces[surf]
        for hemi in ('lh', 'rh'):
            assert hasattr(pair, hemi)
            assert os.path.isfile(getattr(pair, hemi))
196+
197+
186198
def test_get_data_dir(tmpdir):
187199
data_dir = utils._get_data_dir(tmpdir)
188200
assert os.path.isdir(data_dir)

0 commit comments

Comments
 (0)