@@ -16,7 +16,7 @@
 from .utils import _get_data_dir, _get_dataset_info
 from ..utils import check_fs_subjid
 
-ANNOT = namedtuple('Surface', ('lh', 'rh'))
+SURFACE = namedtuple('Surface', ('lh', 'rh'))
 
 
 def fetch_cammoun2012(version='MNI152NLin2009aSym', data_dir=None, url=None,
@@ -137,7 +137,7 @@ def fetch_cammoun2012(version='MNI152NLin2009aSym', data_dir=None, url=None,
     if version == 'MNI152NLin2009aSym':
         keys += ['info']
     elif version in ('fslr32k', 'fsaverage', 'fsaverage5', 'fsaverage6'):
-        data = [ANNOT(*data[i:i + 2]) for i in range(0, len(data), 2)]
+        data = [SURFACE(*data[i:i + 2]) for i in range(0, len(data), 2)]
     else:
         data = [data[::2][i:i + 2] for i in range(0, len(data) // 2, 2)]
         # deal with the fact that last scale is split into three files :sigh:
@@ -212,7 +212,7 @@ def fetch_conte69(data_dir=None, url=None, resume=True, verbose=1):
         data[-1] = json.load(src)
 
     # bundle hemispheres together
-    data = [ANNOT(*data[:-1][i:i + 2]) for i in range(0, 6, 2)] + [data[-1]]
+    data = [SURFACE(*data[:-1][i:i + 2]) for i in range(0, 6, 2)] + [data[-1]]
 
     return Bunch(**dict(zip(keys + ['info'], data)))
 
@@ -339,7 +339,7 @@ def fetch_fsaverage(version='fsaverage', data_dir=None, url=None, resume=True,
                         files=[(op.join(dataset_name, f), url, opts)
                                for f in filenames])
 
-    data = [ANNOT(*data[i:i + 2]) for i in range(0, len(keys) * 2, 2)]
+    data = [SURFACE(*data[i:i + 2]) for i in range(0, len(keys) * 2, 2)]
 
     return Bunch(**dict(zip(keys, data)))
 
@@ -564,7 +564,7 @@ def fetch_schaefer2018(version='fsaverage', data_dir=None, url=None,
     data = _fetch_files(data_dir, files=files, resume=resume, verbose=verbose)
 
     if suffix == 'annot':
-        data = [ANNOT(*data[i:i + 2]) for i in range(0, len(keys) * 2, 2)]
+        data = [SURFACE(*data[i:i + 2]) for i in range(0, len(keys) * 2, 2)]
 
     return Bunch(**dict(zip(keys, data)))
 
@@ -678,7 +678,7 @@ def fetch_mmpall(version='fslr32k', data_dir=None, url=None, resume=True,
     files = [(op.join(dataset_name, version, f), url, opts) for f in filenames]
     data = _fetch_files(data_dir, files=files, resume=resume, verbose=verbose)
 
-    return ANNOT(*data)
+    return SURFACE(*data)
 
 
 def fetch_voneconomo(data_dir=None, url=None, resume=True, verbose=1):
@@ -734,6 +734,98 @@ def fetch_voneconomo(data_dir=None, url=None, resume=True, verbose=1):
     ] + ['atl-vonEconomoKoskinas_info.csv']
     files = [(op.join(dataset_name, f), url, opts) for f in filenames]
     data = _fetch_files(data_dir, files=files, resume=resume, verbose=verbose)
-    data = [ANNOT(*data[:-1:2])] + [ANNOT(*data[1:-1:2])] + [data[-1]]
+    data = [SURFACE(*data[:-1:2])] + [SURFACE(*data[1:-1:2])] + [data[-1]]
+
+    return Bunch(**dict(zip(keys, data)))
+
+
+def fetch_civet(density='41k', version='v1', data_dir=None, url=None,
+                resume=True, verbose=1):
+    """
+    Fetches CIVET surface files
+
+    Parameters
+    ----------
+    density : {'41k', '164k'}, optional
+        Which density of the CIVET-space geometry files to fetch. The
+        high-resolution '164k' surface only exists for version 'v2'.
+        Default: '41k'
+    version : {'v1', 'v2'}, optional
+        Which version of the CIVET surfaces to use. Default: 'v1'
+    data_dir : str, optional
+        Path to use as data directory. If not specified, will check for
+        environmental variable 'NNT_DATA'; if that is not set, will use
+        `~/nnt-data` instead. Default: None
+    url : str, optional
+        URL from which to download data. Default: None
+    resume : bool, optional
+        Whether to attempt to resume partial download, if possible. Default:
+        True
+    verbose : int, optional
+        Modifies verbosity of download, where higher numbers mean more updates.
+        Default: 1
+
+    Returns
+    -------
+    filenames : :class:`sklearn.utils.Bunch`
+        Dictionary-like object with keys ['mid', 'white'] containing geometry
+        files for the CIVET surface. Note that for version 'v1' the 'mid'
+        and 'white' files are identical.
+
+    References
+    ----------
+    Y. Ad-Dab’bagh, O. Lyttelton, J.-S. Muehlboeck, C. Lepage, D. Einarson,
+    K. Mok, O. Ivanov, R. Vincent, J. Lerch, E. Fombonne, A. C. Evans, The
+    CIVET image-processing environment: A fully automated comprehensive
+    pipeline for anatomical neuroimaging research. Proceedings of the 12th
+    Annual Meeting of the Organization for Human Brain Mapping (2006).
+
+    Notes
+    -----
+    License: https://github.com/aces/CIVET_Full_Project/blob/master/LICENSE
+    """
+
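+    # check that the requested density and version combination is valid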
+    densities = ['41k', '164k']
+    if density not in densities:
+        raise ValueError('The requested CIVET density "{}" does not exist. '
+                         'Must be one of {}'.format(density, densities))
+    versions = ['v1', 'v2']
+    if version not in versions:
+        raise ValueError('The requested CIVET version "{}" does not exist. '
+                         'Must be one of {}'.format(version, versions))
+
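+    # the high-resolution 164k surface was only generated for CIVET v2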
+    if version == 'v1' and density == '164k':
+        raise ValueError('The "164k" density CIVET surface only exists for '
+                         'version "v2"')
+
+    dataset_name = 'tpl-civet'
+    keys = ['mid', 'white']
+
+    data_dir = _get_data_dir(data_dir=data_dir)
+    info = _get_dataset_info(dataset_name)[version]['civet{}'.format(density)]
+    if url is None:
+        url = info['url']
+
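+    # unpack the downloaded tarball after verifying its MD5 checksum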
+    opts = {
+        'uncompress': True,
+        'md5sum': info['md5'],
+        'move': '{}.tar.gz'.format(dataset_name)
+    }
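+    # one left- and one right-hemisphere file for each requested surface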
+    filenames = [
+        op.join(dataset_name, version, 'civet{}'.format(density),
+                'tpl-civet_space-ICBM152_hemi-{}_den-{}_{}.obj'
+                .format(hemi, density, surf))
+        for surf in keys for hemi in ['L', 'R']
+    ]
+
+    data = _fetch_files(data_dir, resume=resume, verbose=verbose,
+                        files=[(f, url, opts) for f in filenames])
+
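+    # bundle the left/right hemisphere files for each surface together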
+    data = [SURFACE(*data[i:i + 2]) for i in range(0, len(keys) * 2, 2)]
 
     return Bunch(**dict(zip(keys, data)))
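
For reference, a minimal sketch of how the new fetcher might be used (assuming
fetch_civet is exported from the package's datasets module, as the other
fetchers here are; the cache directory and download are handled internally by
_fetch_files):

    from netneurotools.datasets import fetch_civet

    # fetch the default CIVET v1, 41k-vertex surfaces (downloads on first use)
    surfaces = fetch_civet(density='41k', version='v1')

    # each key maps to a Surface namedtuple of (lh, rh) file paths
    lh_mid, rh_mid = surfaces['mid']
    print(surfaces['white'].lh)  # path to the left-hemisphere white surface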