55
66import itertools
77import json
8- import os
98import os .path as op
109import warnings
1110
1716from ..utils import check_fs_subjid
1817
1918
20- def fetch_cammoun2012 (version = 'volume ' , data_dir = None , url = None , resume = True ,
21- verbose = 1 ):
19+ def fetch_cammoun2012 (version = 'MNI125NLin2009aSym ' , data_dir = None , url = None ,
20+ resume = True , verbose = 1 ):
2221 """
2322 Downloads files for Cammoun et al., 2012 multiscale parcellation
2423
2524 Parameters
2625 ----------
27- version : {'volume', 'fsaverage', 'fsaverage5', 'fsaverage6', 'gcs'}
28- Specifies which version of the dataset to download, where 'volume' will
29- return .nii.gz atlas files defined in MNI152 space, 'fsaverageX' will
30- return .annot files defined in fsaverageX space (FreeSurfer 6.0.1), and
31- 'gcs' will return FreeSurfer-style .gcs probabilistic atlas files for
32- generating new, subject-specific parcellations
26+ version : str, optional
27+ Specifies which version of the dataset to download, where
28+ 'MNI125NLin2009aSym' will return .nii.gz atlas files defined in MNI152
29+ space, 'fsaverageX' will return .annot files defined in fsaverageX
30+ space (FreeSurfer 6.0.1), 'fslr32k' will return .label.gii files in
31+ fs_LR_32k HCP space, and 'gcs' will return FreeSurfer-style .gcs
32+ probabilistic atlas files for generating new, subject-specific
33+ parcellations. Default: 'MNI125NLin2009aSym'
3334 data_dir : str, optional
3435 Path to use as data directory. If not specified, will check for
3536 environmental variable 'NNT_DATA'; if that is not set, will use
@@ -62,8 +63,23 @@ def fetch_cammoun2012(version='volume', data_dir=None, url=None, resume=True,
6263 License: https://raw.githubusercontent.com/LTS5/cmp/master/COPYRIGHT
6364 """
6465
66+ if version == 'surface' :
67+ warnings .warn ('Providing `version="surface"` is deprecated and will '
68+ 'be removed in a future release. For consistent '
69+ 'behavior please use `version="fsaverage"` instead.' ,
70+ DeprecationWarning , stacklevel = 2 )
71+ version = 'fsaverage'
72+ elif version == 'volume' :
73+ warnings .warn ('Providing `version="volume"` is deprecated and will '
74+ 'be removed in a future release. For consistent '
75+ 'behavior please use `version="MNI152NLin2009aSym"` '
76+ 'instead.' ,
77+ DeprecationWarning , stacklevel = 2 )
78+ version = 'MNI152NLin2009aSym'
79+
6580 versions = [
66- 'volume' , 'surface' , 'gcs' , 'fsaverage' , 'fsaverage5' , 'fsaverage6'
81+ 'gcs' , 'fsaverage' , 'fsaverage5' , 'fsaverage6' , 'fslr32k' ,
82+ 'MNI152NLin2009aSym'
6783 ]
6884 if version not in versions :
6985 raise ValueError ('The version of Cammoun et al., 2012 parcellation '
@@ -73,13 +89,6 @@ def fetch_cammoun2012(version='volume', data_dir=None, url=None, resume=True,
7389 dataset_name = 'atl-cammoun2012'
7490 keys = ['scale033' , 'scale060' , 'scale125' , 'scale250' , 'scale500' ]
7591
76- if version == 'surface' :
77- warnings .warn ('Providing `version="surface"` is deprecated and will '
78- 'be removed in a future release. For consistent '
79- 'behavior please use `version="fsaverage"` instead. ' ,
80- DeprecationWarning , stacklevel = 2 )
81- version = 'fsaverage'
82-
8392 data_dir = _get_data_dir (data_dir = data_dir )
8493 info = _get_dataset_info (dataset_name )[version ]
8594 if url is None :
@@ -92,11 +101,17 @@ def fetch_cammoun2012(version='volume', data_dir=None, url=None, resume=True,
92101 }
93102
94103 # filenames differ based on selected version of dataset
95- if version == 'volume ' :
104+ if version == 'MNI152NLin2009aSym ' :
96105 filenames = [
97106 'atl-Cammoun2012_space-MNI152NLin2009aSym_res-{}_deterministic{}'
98107 .format (res [- 3 :], suff ) for res in keys for suff in ['.nii.gz' ]
99108 ] + ['atl-Cammoun2012_space-MNI152NLin2009aSym_info.csv' ]
109+ elif version == 'fslr32k' :
110+ filenames = [
111+ 'atl-Cammoun2012_space-fslr32k_res-{}_hemi-{}_deterministic{}'
112+ .format (res [- 3 :], hemi , suff ) for res in keys
113+ for hemi in ['L' , 'R' ] for suff in ['.label.gii' ]
114+ ]
100115 elif version in ('fsaverage' , 'fsaverage5' , 'fsaverage6' ):
101116 filenames = [
102117 'atl-Cammoun2012_space-{}_res-{}_hemi-{}_deterministic{}'
@@ -111,12 +126,14 @@ def fetch_cammoun2012(version='volume', data_dir=None, url=None, resume=True,
111126 for hemi in ['L' , 'R' ] for suff in ['.gcs' , '.ctab' ]
112127 ]
113128
114- files = [(os .path .join (dataset_name , f ), url , opts ) for f in filenames ]
129+ files = [
130+ (op .join (dataset_name , version , f ), url , opts ) for f in filenames
131+ ]
115132 data = _fetch_files (data_dir , files = files , resume = resume , verbose = verbose )
116133
117- if version == 'volume ' :
134+ if version == 'MNI152NLin2009aSym ' :
118135 keys += ['info' ]
119- elif version in ('fsaverage' , 'fsaverage5' , 'fsaverage6' ):
136+ elif version in ('fslr32k' , ' fsaverage' , 'fsaverage5' , 'fsaverage6' ):
120137 data = [data [i :i + 2 ] for i in range (0 , len (data ), 2 )]
121138 else :
122139 data = [data [::2 ][i :i + 2 ] for i in range (0 , len (data ) // 2 , 2 )]
@@ -313,7 +330,7 @@ def fetch_fsaverage(version='fsaverage', data_dir=None, url=None, resume=True,
313330
314331 try :
315332 data_dir = check_fs_subjid (version )[1 ]
316- data = [os . path .join (data_dir , f ) for f in filenames ]
333+ data = [op .join (data_dir , f ) for f in filenames ]
317334 except FileNotFoundError :
318335 data = _fetch_files (data_dir , resume = resume , verbose = verbose ,
319336 files = [(op .join (dataset_name , f ), url , opts )
@@ -393,7 +410,7 @@ def fetch_connectome(dataset, data_dir=None, url=None, resume=True,
393410 }
394411
395412 filenames = [
396- os . path .join (dataset , '{}.csv' .format (fn )) for fn in info ['keys' ]
413+ op .join (dataset , '{}.csv' .format (fn )) for fn in info ['keys' ]
397414 ] + [op .join (dataset , 'ref.txt' )]
398415 data = _fetch_files (data_dir , files = [(f , url , opts ) for f in filenames ],
399416 resume = resume , verbose = verbose )
@@ -454,7 +471,7 @@ def fetch_vazquez_rodriguez2019(data_dir=None, url=None, resume=True,
454471 }
455472
456473 filenames = [
457- os . path .join (dataset_name , 'rsquared_gradient.csv' )
474+ op .join (dataset_name , 'rsquared_gradient.csv' )
458475 ]
459476 data = _fetch_files (data_dir , files = [(f , url , opts ) for f in filenames ],
460477 resume = resume , verbose = verbose )
@@ -534,10 +551,51 @@ def fetch_schaefer2018(version='fsaverage', data_dir=None, url=None,
534551 .format (version , hemi , desc ) for desc in keys for hemi in ['L' , 'R' ]
535552 ]
536553
537- files = [(os . path .join (dataset_name , version , f ), url , opts )
554+ files = [(op .join (dataset_name , version , f ), url , opts )
538555 for f in filenames ]
539556 data = _fetch_files (data_dir , files = files , resume = resume , verbose = verbose )
540557
541558 data = [data [i :i + 2 ] for i in range (0 , len (keys ) * 2 , 2 )]
542559
543560 return Bunch (** dict (zip (keys , data )))
561+
562+
def fetch_hcp_standards(data_dir=None, url=None, resume=True, verbose=1):
    """
    Fetches HCP standard mesh atlases for converting between FreeSurfer and HCP

    Parameters
    ----------
    data_dir : str, optional
        Path to use as data directory. If not specified, will check for
        environmental variable 'NNT_DATA'; if that is not set, will use
        `~/nnt-data` instead. Default: None
    url : str, optional
        URL from which to download data. Default: None
    resume : bool, optional
        Whether to attempt to resume partial download, if possible. Default:
        True
    verbose : int, optional
        Modifies verbosity of download, where higher numbers mean more updates.
        Default: 1

    Returns
    -------
    standards : str
        Filepath to standard_mesh_atlases directory
    """
    dset = 'standard_mesh_atlases'
    if url is None:
        # default to the Workbench-hosted archive of the mesh atlases
        url = 'http://brainvis.wustl.edu/workbench/standard_mesh_atlases.zip'

    data_dir = _get_data_dir(data_dir=data_dir)

    # downloaded zip is moved into place and unpacked by the fetcher
    fetch_opts = {
        'uncompress': True,
        'move': '{}.zip'.format(dset)
    }
    # the two 32k fs_LR spherical surfaces expected inside the archive
    spheres = ('L.sphere.32k_fs_LR.surf.gii', 'R.sphere.32k_fs_LR.surf.gii')
    _fetch_files(data_dir, resume=resume, verbose=verbose,
                 files=[(op.join(dset, fname), url, fetch_opts)
                        for fname in spheres])

    return op.join(data_dir, dset)
0 commit comments