@@ -269,6 +269,63 @@ def rename(in_files, suffix=None):
269
269
out_files .append (name + suffix + ext )
270
270
return list_to_filename (out_files )
271
271
272
+
273
def get_aparc_aseg(files):
    """Return the aparc+aseg.mgz file from a list of freesurfer outputs.

    Raises ValueError if no entry contains 'aparc+aseg.mgz'.
    """
    matching = [f for f in files if 'aparc+aseg.mgz' in f]
    if matching:
        return matching[0]
    raise ValueError('aparc+aseg.mgz not found')
279
+
280
+
281
def extract_subrois(timeseries_file, label_file, indices):
    """Extract voxel time courses for each subcortical roi index

    Parameters
    ----------

    timeseries_file: a 4D Nifti file
    label_file: a 3D file containing rois in the same space/size of the 4D file
    indices: a list of indices for ROIs to extract.

    Returns
    -------
    out_file: a text file containing time courses for each voxel of each roi
        The first four columns are: freesurfer index, i, j, k positions in the
        label file
    """
    # NOTE(review): assumes the module-level `nb`, `np`, `os` imports of the
    # enclosing file; label volume must be 3D so np.nonzero yields (i, j, k).
    ts_data = nb.load(timeseries_file).get_data()
    label_data = nb.load(label_file).get_data()
    out_ts_file = os.path.join(os.getcwd(), 'subcortical_timeseries.txt')
    with open(out_ts_file, 'wt') as fp:
        for roi_id in indices:
            # voxel coordinates carrying this freesurfer label
            ii, jj, kk = np.nonzero(label_data == roi_id)
            for i, j, k in zip(ii, jj, kk):
                course = ','.join('%.10f' % v for v in ts_data[i, j, k])
                fp.write('%d,%d,%d,%d,' % (roi_id, i, j, k) + course + '\n')
    return out_ts_file
311
+
312
+
313
def combine_hemi(left, right):
    """Combine left and right hemisphere time series into a single text file

    Each row is one vertex; the first column is an integer tag (1000000 +
    vertex index for lh, 2000000 + vertex index for rh), the remaining
    columns are the time series values.
    """
    lh_data = nb.load(left).get_data()
    rh_data = nb.load(right).get_data()

    # Vertex counts are taken from the raw (unsqueezed) first axis.
    n_lh = lh_data.shape[0]
    n_rh = rh_data.shape[0]
    labels = np.vstack((1000000 + np.arange(n_lh)[:, None],
                        2000000 + np.arange(n_rh)[:, None]))
    series = np.vstack((lh_data.squeeze(), rh_data.squeeze()))
    all_data = np.hstack((labels, series))

    out_name = 'combined_surf.txt'
    row_fmt = ','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1))
    np.savetxt(out_name, all_data, fmt=row_fmt)
    return os.path.abspath(out_name)
327
+
328
+
272
329
def create_reg_workflow (name = 'registration' ):
273
330
"""Create a FEAT preprocessing workflow together with freesurfer
274
331
@@ -308,11 +365,13 @@ def create_reg_workflow(name='registration'):
308
365
name = 'inputspec' )
309
366
310
367
outputnode = Node (interface = IdentityInterface (fields = ['func2anat_transform' ,
368
+ 'out_reg_file' ,
311
369
'anat2target_transform' ,
312
370
'transforms' ,
313
371
'transformed_mean' ,
314
372
'segmentation_files' ,
315
- 'anat2target'
373
+ 'anat2target' ,
374
+ 'aparc'
316
375
]),
317
376
name = 'outputspec' )
318
377
@@ -369,6 +428,18 @@ def create_reg_workflow(name='registration'):
369
428
register .connect (binarize , 'out_file' , applyxfm , 'target_file' )
370
429
register .connect (inputnode , 'mean_image' , applyxfm , 'source_file' )
371
430
431
+ """
432
+ Apply inverse transform to aparc file
433
+ """
434
+ aparcxfm = Node (freesurfer .ApplyVolTransform (inverse = True ,
435
+ interp = 'nearest' ),
436
+ name = 'aparc_inverse_transform' )
437
+ register .connect (inputnode , 'subjects_dir' , aparcxfm , 'subjects_dir' )
438
+ register .connect (bbregister , 'out_reg_file' , aparcxfm , 'reg_file' )
439
+ register .connect (fssource , ('aparc_aseg' , get_aparc_aseg ),
440
+ aparcxfm , 'target_file' )
441
+ register .connect (inputnode , 'mean_image' , aparcxfm , 'source_file' )
442
+
372
443
"""
373
444
Convert the BBRegister transformation to ANTS ITK format
374
445
"""
@@ -453,9 +524,14 @@ def create_reg_workflow(name='registration'):
453
524
454
525
register .connect (reg , 'warped_image' , outputnode , 'anat2target' )
455
526
register .connect (warpmean , 'output_image' , outputnode , 'transformed_mean' )
456
- register .connect (applyxfm , 'transformed_file' , outputnode , 'segmentation_files' )
527
+ register .connect (applyxfm , 'transformed_file' ,
528
+ outputnode , 'segmentation_files' )
529
+ register .connect (aparcxfm , 'transformed_file' ,
530
+ outputnode , 'aparc' )
457
531
register .connect (bbregister , 'out_fsl_file' ,
458
532
outputnode , 'func2anat_transform' )
533
+ register .connect (bbregister , 'out_reg_file' ,
534
+ outputnode , 'out_reg_file' )
459
535
register .connect (reg , 'composite_transform' ,
460
536
outputnode , 'anat2target_transform' )
461
537
register .connect (merge , 'out' , outputnode , 'transforms' )
@@ -477,10 +553,12 @@ def create_workflow(files,
477
553
norm_threshold = 1 ,
478
554
num_components = 5 ,
479
555
vol_fwhm = None ,
556
+ surf_fwhm = None ,
480
557
lowpass_freq = - 1 ,
481
558
highpass_freq = - 1 ,
482
559
subjects_dir = None ,
483
560
sink_directory = os .getcwd (),
561
+ target_subject = ['fsaverage3' , 'fsaverage4' ],
484
562
name = 'resting' ):
485
563
486
564
wf = Workflow (name = name )
@@ -647,7 +725,6 @@ def merge_files(in1, in2):
647
725
648
726
wf .connect (bandpass , 'out_files' , smooth , 'in_files' )
649
727
650
-
651
728
collector = Node (Merge (2 ), name = 'collect_streams' )
652
729
wf .connect (smooth , 'smoothed_files' , collector , 'in1' )
653
730
wf .connect (bandpass , 'out_files' , collector , 'in2' )
@@ -684,6 +761,93 @@ def merge_files(in1, in2):
684
761
# extract target space ROIs
685
762
# combine subcortical and cortical rois into a single cifti file
686
763
764
+ #######
765
+ # Convert aparc to subject functional space
766
+
767
+ # Sample the average time series in aparc ROIs
768
+ sampleaparc = MapNode (freesurfer .SegStats (default_color_table = True ),
769
+ iterfield = ['in_file' , 'summary_file' ,
770
+ 'avgwf_txt_file' ],
771
+ name = 'aparc_ts' )
772
+ sampleaparc .inputs .segment_id = ([8 ] + range (10 , 14 ) + [17 , 18 , 26 , 47 ] +
773
+ range (49 , 55 ) + [58 ] + range (1001 , 1036 ) +
774
+ range (2001 , 2036 ))
775
+
776
+ wf .connect (registration , 'outputspec.aparc' ,
777
+ sampleaparc , 'segmentation_file' )
778
+ wf .connect (collector , 'out' , sampleaparc , 'in_file' )
779
+
780
+ def get_names (files , suffix ):
781
+ """Generate appropriate names for output files
782
+ """
783
+ from nipype .utils .filemanip import (split_filename , filename_to_list ,
784
+ list_to_filename )
785
+ out_names = []
786
+ for filename in files :
787
+ _ , name , _ = split_filename (filename )
788
+ out_names .append (name + suffix )
789
+ return list_to_filename (out_names )
790
+
791
+ wf .connect (collector , ('out' , get_names , '_avgwf.txt' ),
792
+ sampleaparc , 'avgwf_txt_file' )
793
+ wf .connect (collector , ('out' , get_names , '_summary.stats' ),
794
+ sampleaparc , 'summary_file' )
795
+
796
+ # Sample the time series onto the surface of the target surface. Performs
797
+ # sampling into left and right hemisphere
798
+ target = Node (IdentityInterface (fields = ['target_subject' ]), name = 'target' )
799
+ target .iterables = ('target_subject' , filename_to_list (target_subject ))
800
+
801
+ samplerlh = MapNode (freesurfer .SampleToSurface (),
802
+ iterfield = ['source_file' ],
803
+ name = 'sampler_lh' )
804
+ samplerlh .inputs .sampling_method = "average"
805
+ samplerlh .inputs .sampling_range = (0.1 , 0.9 , 0.1 )
806
+ samplerlh .inputs .sampling_units = "frac"
807
+ samplerlh .inputs .interp_method = "trilinear"
808
+ samplerlh .inputs .smooth_surf = surf_fwhm
809
+ #samplerlh.inputs.cortex_mask = True
810
+ samplerlh .inputs .out_type = 'niigz'
811
+ samplerlh .inputs .subjects_dir = subjects_dir
812
+
813
+ samplerrh = samplerlh .clone ('sampler_rh' )
814
+
815
+ samplerlh .inputs .hemi = 'lh'
816
+ wf .connect (collector , 'out' , samplerlh , 'source_file' )
817
+ wf .connect (registration , 'outputspec.out_reg_file' , samplerlh , 'reg_file' )
818
+ wf .connect (target , 'target_subject' , samplerlh , 'target_subject' )
819
+
820
+ samplerrh .set_input ('hemi' , 'rh' )
821
+ wf .connect (collector , 'out' , samplerrh , 'source_file' )
822
+ wf .connect (registration , 'outputspec.out_reg_file' , samplerrh , 'reg_file' )
823
+ wf .connect (target , 'target_subject' , samplerrh , 'target_subject' )
824
+
825
+ # Combine left and right hemisphere to text file
826
+ combiner = MapNode (Function (input_names = ['left' , 'right' ],
827
+ output_names = ['out_file' ],
828
+ function = combine_hemi ,
829
+ imports = imports ),
830
+ iterfield = ['left' , 'right' ],
831
+ name = "combiner" )
832
+ wf .connect (samplerlh , 'out_file' , combiner , 'left' )
833
+ wf .connect (samplerrh , 'out_file' , combiner , 'right' )
834
+
835
+ # Sample the time series file for each subcortical roi
836
+ ts2txt = MapNode (Function (input_names = ['timeseries_file' , 'label_file' ,
837
+ 'indices' ],
838
+ output_names = ['out_file' ],
839
+ function = extract_subrois ,
840
+ imports = imports ),
841
+ iterfield = ['timeseries_file' ],
842
+ name = 'getsubcortts' )
843
+ ts2txt .inputs .indices = [8 ] + range (10 , 14 ) + [17 , 18 , 26 , 47 ] + \
844
+ range (49 , 55 ) + [58 ]
845
+ ts2txt .inputs .label_file = \
846
+ os .path .abspath (('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
847
+ '2mm_v2.nii.gz' ))
848
+ wf .connect (maskts , 'out_file' , ts2txt , 'timeseries_file' )
849
+
850
+ ######
687
851
688
852
# Save the relevant data into an output directory
689
853
datasink = Node (interface = DataSink (), name = "datasink" )
@@ -710,6 +874,20 @@ def merge_files(in1, in2):
710
874
wf .connect (createfilter2 , 'out_files' ,
711
875
datasink , 'resting.regress.@compcorr' )
712
876
wf .connect (maskts , 'out_file' , datasink , 'resting.timeseries.target' )
877
+ wf .connect (sampleaparc , 'summary_file' ,
878
+ datasink , 'resting.parcellations.aparc' )
879
+ wf .connect (sampleaparc , 'avgwf_txt_file' ,
880
+ datasink , 'resting.parcellations.aparc.@avgwf' )
881
+ wf .connect (ts2txt , 'out_file' ,
882
+ datasink , 'resting.parcellations.grayo.@subcortical' )
883
+
884
+ datasink2 = Node (interface = DataSink (), name = "datasink2" )
885
+ datasink2 .inputs .base_directory = sink_directory
886
+ datasink2 .inputs .container = subject_id
887
+ #datasink2.inputs.substitutions = [('_target_subject_', '')]
888
+ #datasink2.inputs.regexp_substitutions = (r'(/_.*(\d+/))', r'/run\2')
889
+ wf .connect (combiner , 'out_file' ,
890
+ datasink2 , 'resting.parcellations.grayo.@surface' )
713
891
return wf
714
892
715
893
@@ -722,13 +900,14 @@ def merge_files(in1, in2):
722
900
anat_file = glob (os .path .abspath ('%s/EO/anat/anat.nii' % subj_id ))[0 ]
723
901
target_file = fsl .Info .standard_image ('MNI152_T1_2mm_brain.nii.gz' )
724
902
wf = create_workflow (files , anat_file , target_file , subj_id ,
725
- 2.0 , 33 , vol_fwhm = 6.0 ,
903
+ 2.0 , 33 , vol_fwhm = 6.0 , surf_fwhm = 15.0 ,
726
904
lowpass_freq = 0.1 , highpass_freq = 0.01 ,
727
905
subjects_dir = fsdir ,
906
+ target_subject = ['fsaverage4' ],
728
907
sink_directory = os .getcwd (),
729
908
name = 'resting_' + subj_id )
730
909
wf .base_dir = os .getcwd ()
731
- # wf.run(plugin='MultiProc', plugin_args={'nprocs': 4})
732
- wf .write_graph (graph2use = 'colored' )
733
- wf .run ()
910
+ wf .run (plugin = 'MultiProc' , plugin_args = {'nprocs' : 4 })
911
+ # wf.write_graph(graph2use='colored')
912
+ # wf.run()
734
913
0 commit comments