@@ -240,7 +240,7 @@ def combine_hemi(left, right):
                                      rh_data.squeeze()))))
     filename = 'combined_surf.txt'
     np.savetxt(filename, all_data,
-              fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1)))
+               fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1)))
     return os.path.abspath(filename)

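For reference, the fmt argument assembled in this hunk writes the first column (the vertex index) as an integer and every remaining column as a 10-decimal float. A minimal standalone sketch with made-up data:

    import numpy as np

    # Three rows: an integer index column plus two value columns (made-up data).
    all_data = np.column_stack((np.arange(3), np.random.rand(3, 2)))
    fmt = ','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1))
    # fmt is now '%d,%.10f,%.10f'
    np.savetxt('combined_surf.txt', all_data, fmt=fmt)
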
@@ -281,8 +281,8 @@ def create_workflow(files,
     # Run AFNI's despike. This is always run, however, whether this is fed to
     # realign depends on the input configuration
     despiker = MapNode(afni.Despike(outputtype='NIFTI_GZ'),
-                      iterfield=['in_file'],
-                      name='despike')
+                       iterfield=['in_file'],
+                       name='despike')
     #despiker.plugin_args = {'qsub_args': '-l nodes=1:ppn='}

     wf.connect(remove_vol, 'roi_file', despiker, 'in_file')
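The MapNode/iterfield pattern being re-indented here runs one Despike instance per functional run. A sketch of how it behaves, with hypothetical file names:

    from nipype.interfaces import afni
    from nipype.pipeline.engine import MapNode

    # One Despike process is spawned per entry in in_file.
    despiker = MapNode(afni.Despike(outputtype='NIFTI_GZ'),
                       iterfield=['in_file'],
                       name='despike')
    despiker.inputs.in_file = ['run1.nii.gz', 'run2.nii.gz']  # hypothetical inputs
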
@@ -496,12 +496,11 @@ def create_workflow(files,
                sampleaparc, 'segmentation_file')
     wf.connect(bandpass, 'out_file', sampleaparc, 'in_file')

-
     # Sample the time series onto the surface of the target surface. Performs
     # sampling into left and right hemisphere
     target = Node(IdentityInterface(fields=['target_subject']), name='target')
     target.iterables = ('target_subject', filename_to_list(target_subject))
-
+
     samplerlh = MapNode(freesurfer.SampleToSurface(),
                         iterfield=['source_file'],
                         name='sampler_lh')
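The target node's iterables line in the context above is what fans the samplers out over target surfaces: nipype clones the downstream subgraph once per listed value. A sketch, with assumed fsaverage names rather than this script's defaults:

    from nipype.interfaces.utility import IdentityInterface
    from nipype.pipeline.engine import Node

    target = Node(IdentityInterface(fields=['target_subject']), name='target')
    # Everything connected downstream of 'target' runs once per value listed
    # here (the fsaverage names are assumed examples).
    target.iterables = ('target_subject', ['fsaverage3', 'fsaverage4'])
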
@@ -519,7 +518,7 @@ def create_workflow(files,
     wf.connect(bandpass, 'out_file', samplerlh, 'source_file')
     wf.connect(register, 'out_reg_file', samplerlh, 'reg_file')
     wf.connect(target, 'target_subject', samplerlh, 'target_subject')
-
+
     samplerrh.set_input('hemi', 'rh')
     wf.connect(bandpass, 'out_file', samplerrh, 'source_file')
     wf.connect(register, 'out_reg_file', samplerrh, 'reg_file')
@@ -632,7 +631,7 @@ def create_workflow(files,
     datasink.inputs.base_directory = sink_directory
     datasink.inputs.container = subject_id
     datasink.inputs.substitutions = [('_target_subject_', '')]
-    datasink.inputs.regexp_substitutions = (r'(/_.*(\d+/))', r'/run\2')  #(r'(_.*)(\d+/)', r'run\2')
+    datasink.inputs.regexp_substitutions = (r'(/_.*(\d+/))', r'/run\2')
     wf.connect(despiker, 'out_file', datasink, 'resting.qa.despike')
     wf.connect(realign, 'par_file', datasink, 'resting.qa.motion')
     wf.connect(tsnr, 'tsnr_file', datasink, 'resting.qa.tsnr')
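To see what the surviving regexp substitution does (the commented-out alternative is dropped above): DataSink applies the plain substitutions first, then the regexp, which collapses a leading-underscore iterable directory ending in digits into a single runN directory. A standalone check with a hypothetical output path:

    import re

    path = 'resting/timeseries/_target_subject_fsaverage4/_bandpass3/rest.nii.gz'
    path = path.replace('_target_subject_', '')        # plain substitution
    path = re.sub(r'(/_.*(\d+/))', r'/run\2', path)    # regexp substitution
    print(path)  # resting/timeseries/fsaverage4/run3/rest.nii.gz
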
@@ -670,7 +669,7 @@ def create_workflow(files,
     datasink2.inputs.base_directory = sink_directory
     datasink2.inputs.container = subject_id
     datasink2.inputs.substitutions = [('_target_subject_', '')]
-    datasink2.inputs.regexp_substitutions = (r'(/_.*(\d+/))', r'/run\2')  #(r'(_.*)(\d+/)', r'run\2')
+    datasink2.inputs.regexp_substitutions = (r'(/_.*(\d+/))', r'/run\2')
     wf.connect(combiner, 'out_file',
                datasink2, 'resting.parcellations.grayo.@surface')
     return wf
@@ -705,13 +704,13 @@ def create_workflow(files,
                         default='plugin_args=dict()',
                         help="Plugin args")
     parser.add_argument("--field_maps", dest="field_maps", nargs="+",
-                       help="field map niftis")
-    parser.add_argument("--fm_echospacing",dest="echo_spacing", type=float,
-                       help="field map echo spacing")
+                        help="field map niftis")
+    parser.add_argument("--fm_echospacing", dest="echo_spacing", type=float,
+                        help="field map echo spacing")
     parser.add_argument("--fm_TE_diff", dest='TE_diff', type=float,
-                       help="field map echo time difference")
-    parser.add_argument("--fm_sigma", dest='sigma', type=int,
-                       help="field map sigma value")
+                        help="field map echo time difference")
+    parser.add_argument("--fm_sigma", dest='sigma', type=float,
+                        help="field map sigma value")
     args = parser.parse_args()

     TR = args.TR
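Besides the alignment fixes, this hunk corrects --fm_sigma to parse as a float rather than an int, since a field-map smoothing sigma need not be integral. A quick standalone check:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--fm_sigma", dest='sigma', type=float,
                        help="field map sigma value")
    print(parser.parse_args(['--fm_sigma', '2.5']).sigma)  # 2.5
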
@@ -724,11 +723,10 @@ def create_workflow(files,
     from nibabel import load
     img = load(args.files[0])
     slice_thickness = max(img.get_header().get_zooms()[:3])
-    print TR, slice_times, slice_thickness
-

     if args.field_maps:
-        wf = create_workflow([os.path.abspath(filename) for filename in args.files],
+        wf = create_workflow([os.path.abspath(filename) for
+                              filename in args.files],
                              subject_id=args.subject_id,
                              n_vol=args.n_vol,
                              despike=args.despike,
@@ -743,7 +741,8 @@ def create_workflow(files,
                              FM_echo_spacing=args.echo_spacing,
                              FM_sigma=args.sigma)
     else:
-        wf = create_workflow([os.path.abspath(filename) for filename in args.files],
+        wf = create_workflow([os.path.abspath(filename) for
+                              filename in args.files],
                              subject_id=args.subject_id,
                              n_vol=args.n_vol,
                              despike=args.despike,
@@ -761,13 +760,5 @@ def create_workflow(files,
     wf.config['execution'].update(**{'remove_unnecessary_outputs': False})
     wf.base_dir = work_dir
-    #wf.write_graph(graph2use='flat')
     exec args.plugin_args
-    print plugin_args
     wf.run(**plugin_args)
-
-    '''
-    #compute similarity matrix and partial correlation
-    def compute_similarity():
-        return matrix
-    '''
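
For context on the exec line kept in this last hunk: the --plugin_args string is executed as Python source, which binds a plugin_args dict that is then unpacked into wf.run(). A sketch of the mechanism at module scope (the MultiProc value is an assumed example, not this script's default):

    # Default value of --plugin_args; a caller might instead pass something
    # like "plugin_args=dict(plugin='MultiProc')" (assumed example).
    arg_string = 'plugin_args=dict()'
    exec(arg_string)          # binds plugin_args in the module namespace
    print(plugin_args)        # {}
    # wf.run(**plugin_args)   # how the script then consumes it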