@@ -38,7 +38,7 @@ from nipype.interfaces.utility import Merge
from nipype.interfaces.freesurfer import ReconAll
from nipype.interfaces.ants.segmentation import CorticalThickness
from nipype.interfaces.ants import (ApplyTransforms, AntsJointFusion,
-                                    LabelGeometry)
+                                    LabelGeometry, Registration, RegistrationSynQuick)
from nipype.utils.misc import human_order_sorted

@@ -101,9 +101,21 @@ adv_args.add_argument("--fs_flags", dest="fs_flags",
adv_args.add_argument("--ants_num_threads", dest="num_threads",
                      default=1, type=int,
                      help="Number of threads to use with ANTs")
+def SegOptions(option):
+    if option in ['quick', 'fusion']:
+        return option
+    else:
+        raise argparse.ArgumentTypeError('ants_seg value must be one of "quick" or "fusion".')
+adv_args.add_argument("--ants_seg", dest="seg",
+                      default="quick", type=SegOptions,
+                      help="Use ANTs 'quick' or 'fusion' to label subcortical structures")
+adv_args.add_argument("--ants_segN", dest="segN", type=int,
+                      help="Number of images to use for joint fusion (2-20)")
adv_args.add_argument("--mb_num_threads", dest="mb_num_threads",
                      default=1, type=int,
                      help="Number of threads to use with mindboggle")
+adv_args.add_argument("--prov", action='store_true',
+                      help="Capture provenance")
args = parser.parse_args()

# ----------------------------------------------------------------------------
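
For reference, a minimal standalone sketch of how the new type= validator behaves (mirroring the definitions added above; argparse reports ArgumentTypeError messages as argument errors):

    import argparse

    def SegOptions(option):
        # accept only the two supported subcortical labeling modes
        if option in ['quick', 'fusion']:
            return option
        raise argparse.ArgumentTypeError(
            'ants_seg value must be one of "quick" or "fusion".')

    parser = argparse.ArgumentParser()
    parser.add_argument("--ants_seg", dest="seg", default="quick",
                        type=SegOptions)

    print(parser.parse_args(["--ants_seg", "fusion"]).seg)  # -> fusion
    # parser.parse_args(["--ants_seg", "long"]) exits with:
    #   error: argument --ants_seg: ants_seg value must be one of "quick" or "fusion".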
@@ -204,7 +216,8 @@ if args.num_threads and args.num_threads > 1:
# ----------------------------------------------------------------------------
# Create function to call mindboggle
# ----------------------------------------------------------------------------
-def mindboggle(subjectid, fsdir, antsdir, antsseg, out, args, num_threads=1):
+def mindboggle(subjectid, fsdir, antsdir, antsseg, out, prov, args,
+               num_threads=1):
    """
    Run the mindboggle morphology pipeline (see http://mindboggle.info).

@@ -220,6 +233,8 @@ def mindboggle(subjectid, fsdir, antsdir, antsseg, out, args, num_threads=1):
        name of antsCorticalThickness.sh output segmentation file
    out : string
        path to mindboggle output directory
+    prov : boolean
+        capture provenance
    args : string
        extra arguments
    num_threads
@@ -239,7 +254,9 @@ def mindboggle(subjectid, fsdir, antsdir, antsseg, out, args, num_threads=1):
    ants = os.path.join(antsdir, subjectid, antsseg)

    all_args = ' '.join([DATA, '--out', out, '--ants', ants,
-                        '--working', os.getcwd(), args])
+                        '--working', os.getcwd()] +
+                       (['--prov'] if prov else []) +
+                       [args])

    if num_threads > 1:
        all_args += ' --plugin MultiProc --plugin_args "dict(n_procs={0})"'.\
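
Without the added parentheses, the conditional would bind as (base + ['--prov', prov]) if prov else ([] + [args]), so either args or the whole base command is silently dropped, and ' '.join would also fail on the boolean prov value. A standalone check of the fixed expression (all values below are placeholders):

    import os

    DATA = '/data/t1.nii.gz'  # placeholder input image
    out, ants = '/out', '/ants/subj/BrainSegmentation.nii.gz'
    prov, args = True, '--roygbiv'

    all_args = ' '.join([DATA, '--out', out, '--ants', ants,
                         '--working', os.getcwd()] +
                        (['--prov'] if prov else []) +
                        [args])
    print(all_args)
    # -> /data/t1.nii.gz --out /out --ants /ants/subj/BrainSegmentation.nii.gz
    #    --working <cwd> --prov --roygbiv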
@@ -268,6 +285,7 @@ Mindboggle = Node(name='mindboggle',
                                'antsdir',
                                'antsseg',
                                'out',
+                               'prov',
                                'args',
                                'num_threads'],
                  output_names=['command']))
@@ -287,35 +305,7 @@ else:
                                ['OASIS-TRT_labels_to_OASIS_Atropos_template'])
    T1s = human_order_sorted(glob(os.path.join(IDIR, '*.nii.gz')))
    labels = human_order_sorted(glob(os.path.join(LDIR, '*.nii.gz')))
-
-    # ----------------------------------------------------------------------
-    # Create workflow to label ANTs output with JointFusion and derive stats
-    # ----------------------------------------------------------------------
-    merge_transforms = Node(Merge(2), name="merge_transforms")
-
-    # label_wf = Workflow('labelflow')
-    transformer = MapNode(ApplyTransforms(), iterfield=['input_image'],
-                          name="transformer")
-    transformer.inputs.reference_image = IMAGE
-    transformer.inputs.input_image = T1s
-    transformer.inputs.dimension = 3
-    transformer.inputs.invert_transform_flags = [False, False]
-    transformer.inputs.interpolation = 'BSpline'
-
-    transformer_nn = MapNode(ApplyTransforms(), iterfield=['input_image'],
-                             name="transformer_nn")
-    transformer_nn.inputs.reference_image = IMAGE
-    transformer_nn.inputs.dimension = 3
-    transformer_nn.inputs.invert_transform_flags = [False, False]
-    transformer_nn.inputs.input_image = labels
-    transformer_nn.inputs.interpolation = 'NearestNeighbor'
-
-    labeler = Node(AntsJointFusion(), name='labeler')
-    labeler.inputs.dimension = 3
-    labeler.inputs.target_image = [IMAGE]
-    labeler.inputs.out_label_fusion = 'label.nii.gz'
-    if args.num_threads and args.num_threads > 1:
-        labeler.inputs.num_threads = args.num_threads
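+    # Cap how many atlas/label pairs feed joint fusion: segN if given, else all.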
+    N = args.segN or len(T1s)

    def mask_labels(intensity_image, label_image, output_dir=None):
        import nibabel as nb
@@ -344,23 +334,93 @@ else:
    tocsv.inputs.output_file = os.path.join(ants_output, ID,
                                            'antslabelstats.csv')

-    mbFlow.connect(corticalthickness, 'TemplateToSubject1GenericAffine',
-                   merge_transforms, 'in1')
-    mbFlow.connect(corticalthickness, 'TemplateToSubject0Warp',
-                   merge_transforms, 'in2')
-    mbFlow.connect(merge_transforms, 'out', transformer, 'transforms')
-    mbFlow.connect(merge_transforms, 'out', transformer_nn, 'transforms')
-    mbFlow.connect(corticalthickness, 'BrainExtractionMask', labeler,
-                   'mask_image')
+    if args.seg == "quick":
+        # -----------------------------------------------------
+        # Label ANTs output with labels in template space
+        # -----------------------------------------------------
+        merge_transforms = Node(Merge(2), name="merge_transforms")
+        transformer_nn = Node(ApplyTransforms(), name="transformer_nn")
+        transformer_nn.inputs.reference_image = IMAGE
+        transformer_nn.inputs.dimension = 3
+        transformer_nn.inputs.invert_transform_flags = [False, False]
+        transformer_nn.inputs.input_image = '/opt/data/OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_OASIS-30_v2.nii.gz'
+        transformer_nn.inputs.interpolation = 'NearestNeighbor'
+
+        mbFlow.connect(corticalthickness, 'TemplateToSubject1GenericAffine',
+                       merge_transforms, 'in1')
+        mbFlow.connect(corticalthickness, 'TemplateToSubject0Warp',
+                       merge_transforms, 'in2')
+        mbFlow.connect(merge_transforms, 'out', transformer_nn, 'transforms')
+        mbFlow.connect(transformer_nn, 'output_image', masker, 'label_image')
+    elif args.seg == "fusion":
+        # -----------------------------------------------------
+        # Create workflow to label ANTs output with JointFusion
+        # -----------------------------------------------------
+        reg = MapNode(Registration(), iterfield=['moving_image'],
+                      name="register")
+        reg.inputs.fixed_image = IMAGE
+        reg.inputs.moving_image = T1s[:N]
+        reg.inputs.output_transform_prefix = "output_"
+        reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
+        reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.2, 3.0, 0.0)]
+        reg.inputs.number_of_iterations = ([[10000, 11110, 11110]] * 2 +
+                                           [[100, 30, 20]])
+        reg.inputs.dimension = 3
+        reg.inputs.write_composite_transform = True
+        reg.inputs.collapse_output_transforms = True
+        reg.inputs.initial_moving_transform_com = True
+        reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
+        reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
+        reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
+        reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
+        reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
+        reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
+        reg.inputs.convergence_window_size = [20] * 2 + [5]
+        reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
+        reg.inputs.sigma_units = ['vox'] * 3
+        reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
+        reg.inputs.use_estimate_learning_rate_once = [True] * 3
+        reg.inputs.use_histogram_matching = [False] * 2 + [True]
+        reg.inputs.winsorize_lower_quantile = 0.005
+        reg.inputs.winsorize_upper_quantile = 0.995
+        reg.inputs.float = True
+        reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
+        if args.num_threads and args.num_threads > 1:
+            reg.inputs.num_threads = args.num_threads
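+        # NOTE: these Rigid/Affine/SyN stages mirror the registration setup
+        # in nipype's example ANTs workflows; sane defaults, not parameters
+        # tuned for these atlases.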
+
+        transformer_nn = MapNode(ApplyTransforms(),
+                                 iterfield=['input_image', 'transforms'],
+                                 name="transformer_nn")
+        transformer_nn.inputs.reference_image = IMAGE
+        transformer_nn.inputs.dimension = 3
+        transformer_nn.inputs.invert_transform_flags = [False]
+        transformer_nn.inputs.input_image = labels[:N]
+        transformer_nn.inputs.interpolation = 'NearestNeighbor'
+
+        labeler = Node(AntsJointFusion(), name='labeler')
+        labeler.inputs.dimension = 3
+        labeler.inputs.target_image = [IMAGE]
+        labeler.inputs.out_label_fusion = 'label.nii.gz'
+        if args.num_threads and args.num_threads > 1:
+            labeler.inputs.num_threads = args.num_threads
+
+        mbFlow.connect(reg, 'composite_transform', transformer_nn, 'transforms')
+        mbFlow.connect(corticalthickness, 'BrainExtractionMask', labeler,
+                       'mask_image')
+        mbFlow.connect(reg, 'warped_image', labeler, 'atlas_image')
+        mbFlow.connect(transformer_nn, 'output_image', labeler,
+                       'atlas_segmentation_image')
+        mbFlow.connect(labeler, 'out_label_fusion', masker, 'label_image')
+

    mbFlow.connect(corticalthickness, 'CorticalThickness',
                   tocsv, 'intensity_image')
    mbFlow.connect(corticalthickness, 'CorticalThickness',
                   masker, 'intensity_image')
-
-    mbFlow.connect(transformer, 'output_image', labeler, 'atlas_image')
-    mbFlow.connect(transformer_nn, 'output_image', labeler,
-                   'atlas_segmentation_image')
-    mbFlow.connect(labeler, 'out_label_fusion', masker, 'label_image')
    mbFlow.connect(masker, 'new_label_file', tocsv, 'label_image')

# ----------------------------------------------------------------------------
@@ -369,6 +429,7 @@ else:
    mbFlow.connect(corticalthickness, 'BrainSegmentation',
                   Mindboggle, 'antsseg')
    Mindboggle.inputs.out = mindboggle_output
+    Mindboggle.inputs.prov = args.prov
    Mindboggle.inputs.args = '--roygbiv'  # ' --graph hier'
    if args.mb_num_threads:
        Mindboggle.inputs.num_threads = args.mb_num_threads
@@ -384,7 +445,8 @@ if __name__ == '__main__':
    # --------------------------------------------------------------------
    # Workflow configuration: provenance tracking, content hashing, etc.:
    # --------------------------------------------------------------------
-    # config.enable_provenance()
+    if args.prov:
+        config.enable_provenance()
    mbFlow.config['execution']['hash_method'] = 'content'
    # mbFlow.config['execution']['use_relative_paths'] = True
    mbFlow.config['execution']['crashfile_format'] = 'txt'
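
For context, config.enable_provenance() is nipype's global switch for provenance capture, and it must be called before the workflow runs, which is why it is gated here rather than inside the Mindboggle node. A minimal standalone sketch:

    # Turn on nipype's provenance recording; subsequent node runs should
    # then leave a provenance record alongside their outputs.
    from nipype import config
    config.enable_provenance()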