@@ -38,7 +38,8 @@ from nipype.interfaces.utility import Merge
from nipype.interfaces.freesurfer import ReconAll
from nipype.interfaces.ants.segmentation import CorticalThickness
from nipype.interfaces.ants import (ApplyTransforms, AntsJointFusion,
-                                    LabelGeometry, Registration)
+                                    LabelGeometry, Registration,
+                                    MultiplyImages)
from nipype.utils.misc import human_order_sorted

@@ -108,7 +109,7 @@ def SegOptions(option):
        raise argparse.ArgumentError('ants_seg value must be one of "quick" or "fusion".')
adv_args.add_argument("--ants_seg", dest="seg",
                      default="quick", type=SegOptions,
-                     help="Use ANTs 'quick' or 'long' to label subcortical structures")
+                     help="Use ANTs 'quick' or 'fusion' to label subcortical structures")
adv_args.add_argument("--ants_segN", dest="segN", type=int,
                      help="Number of images to use for joint fusion (2-20)")
adv_args.add_argument("--mb_num_threads", dest="mb_num_threads",
@@ -129,6 +130,10 @@ TDIR = args.template
if args.skip_freesurfer and args.skip_ants:
    print("Use only one of the skip arguments: --skip_freesurfer, --skip_ants.")

+ # Ensure the provenance configuration is inherited by the workflow
+ if args.prov:
+     config.enable_provenance()
+
# ----------------------------------------------------------------------------
# Initialize workflow inputs and outputs
# ----------------------------------------------------------------------------
@@ -327,12 +332,13 @@ else:
    # -----------------------------------------------------
    merge_transforms = Node(Merge(2), name="merge_transforms")
    transformer_nn = Node(ApplyTransforms(), name="transformer_nn")
-     transformer_nn.inputs.reference_image = IMAGE
    transformer_nn.inputs.dimension = 3
    transformer_nn.inputs.invert_transform_flags = [False, False]
    transformer_nn.inputs.input_image = '/opt/data/OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_OASIS-30_v2.nii.gz'
    transformer_nn.inputs.interpolation = 'NearestNeighbor'

+     mbFlow.connect(corticalthickness, 'BrainSegmentationN4',
+                    transformer_nn, 'reference_image')
    mbFlow.connect(corticalthickness, 'TemplateToSubject1GenericAffine',
                   merge_transforms, 'in1')
    mbFlow.connect(corticalthickness, 'TemplateToSubject0Warp',
@@ -378,22 +384,36 @@ else:
    if args.num_threads and args.num_threads > 1:
        reg.inputs.num_threads = args.num_threads

-     transformer_nn = MapNode(ApplyTransforms(), iterfield=['input_image'],
+     transformer_nn = MapNode(ApplyTransforms(),
+                              iterfield=['input_image', 'transforms'],
                             name="transformer_nn")
-     transformer_nn.inputs.reference_image = IMAGE
    transformer_nn.inputs.dimension = 3
    transformer_nn.inputs.input_image = labels[:N]
    transformer_nn.inputs.interpolation = 'NearestNeighbor'

    labeler = Node(AntsJointFusion(), name='labeler')
    labeler.inputs.dimension = 3
-     labeler.inputs.target_image = [IMAGE]
    labeler.inputs.out_label_fusion = 'label.nii.gz'
    if args.num_threads and args.num_threads > 1:
        labeler.inputs.num_threads = args.num_threads

+     def tolist(x):
+         return [x]
+
+     mask_brain = Node(MultiplyImages(dimension=3,
+                                      output_product_image='brain.nii.gz'
+                                      ),
+                       name='mask_brain')
    mbFlow.connect(corticalthickness, 'BrainSegmentationN4',
+                    mask_brain, 'first_input')
+     mbFlow.connect(corticalthickness, 'BrainExtractionMask', mask_brain,
+                    'second_input')
+     mbFlow.connect(mask_brain, 'output_product_image',
                   reg, 'fixed_image')
+     mbFlow.connect(mask_brain, 'output_product_image',
+                    transformer_nn, 'reference_image')
+     mbFlow.connect(mask_brain, ('output_product_image', tolist),
+                    labeler, 'target_image')
    mbFlow.connect(reg, 'composite_transform', transformer_nn, 'transforms')
    mbFlow.connect(corticalthickness, 'BrainExtractionMask', labeler,
                   'mask_image')
@@ -429,10 +449,8 @@ if __name__ == '__main__':
    time0 = time()

    # --------------------------------------------------------------------
-     # Workflow configuration: provenance tracking, content hashing, etc.:
+     # Workflow configuration: content hashing, crashfiles, etc.:
    # --------------------------------------------------------------------
-     if args.prov:
-         config.enable_provenance()
    mbFlow.config['execution']['hash_method'] = 'content'
    # mbFlow.config['execution']['use_relative_paths'] = True
    mbFlow.config['execution']['crashfile_format'] = 'txt'
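Note: the hunks above all use the same Nipype idiom of wiring one interface's output into another's input with Workflow.connect, optionally passing the value through a small helper (here tolist) when the downstream trait expects a list. Below is a minimal standalone sketch of that pattern with the same MultiplyImages and ApplyTransforms interfaces; the workflow name, base_dir, and input file names are placeholders for illustration only and are not part of this commit.

from nipype import Node, Workflow
from nipype.interfaces.ants import ApplyTransforms, MultiplyImages


def tolist(x):
    # Wrap a single file path in a list for inputs that expect a list of images.
    return [x]


# Placeholder workflow; name and base_dir are illustrative only.
wf = Workflow(name='mask_sketch', base_dir='/tmp')

# output_product_image = first_input * second_input, i.e. the N4-corrected
# brain multiplied by its binary extraction mask.
mask_brain = Node(MultiplyImages(dimension=3,
                                 output_product_image='brain.nii.gz'),
                  name='mask_brain')
mask_brain.inputs.first_input = 'BrainSegmentationN4.nii.gz'    # placeholder
mask_brain.inputs.second_input = 'BrainExtractionMask.nii.gz'   # placeholder

# Resample labels onto the masked brain with nearest-neighbor interpolation.
transformer_nn = Node(ApplyTransforms(dimension=3,
                                      interpolation='NearestNeighbor'),
                      name='transformer_nn')

# Plain connection: one output feeds one input.
wf.connect(mask_brain, 'output_product_image',
           transformer_nn, 'reference_image')

# Tuple form (output_name, function): the function is applied to the value
# before it reaches the destination input, e.g. wrapping a single path in a
# list for an interface like AntsJointFusion, whose target_image takes a list:
# wf.connect(mask_brain, ('output_product_image', tolist),
#            labeler, 'target_image')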