diff --git a/.pylintrc b/.pylintrc
index 052ca1eecc..41277323fd 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -291,7 +291,7 @@ contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
-generated-members=
+generated-members=CPAC.utils.configuration.configuration.Configuration.*
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
@@ -435,6 +435,7 @@ good-names=c,
ex,
nb,
Run,
+ TR,
v,
wf,
_,
@@ -443,6 +444,7 @@ good-names=c,
# they will always be accepted
good-names-rgxs=^_version_(extra|m[a-n]{2}[or]{2})$, # version parts in info.py
.*EPI.*,
+ .*TE.*,
.*T1.*,
.*T2.*
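
The `generated-members` entry above matters because `CPAC.utils.configuration.configuration.Configuration` sets its attributes dynamically, so pylint's static inference cannot see them and flags every access as E1101. A minimal sketch of the pattern (an illustrative stand-in, not C-PAC's actual class):

```python
# Why dynamically set members trip pylint's E1101: attributes created with
# setattr at runtime are invisible to static analysis.
class Configuration:  # hypothetical stand-in, not C-PAC's real class
    def __init__(self, config_dict):
        for key, value in config_dict.items():
            setattr(self, key, value)  # members only exist at runtime


cfg = Configuration({'pipeline_setup': {'pipeline_name': 'cpac-default'}})
print(cfg.pipeline_setup)  # fine at runtime; E1101 without the exemption
```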
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2bd30ce3f7..283c54383c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,21 +14,33 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## [Unreleased]
+## [unreleased]
### Added
- Added the ability to downsample to 10K or 2K resolution for freesurfer runs
+- Added the ability to ingress `TotalReadoutTime` from epi field map metadata in the JSON sidecars.
+- Added the ability to use `TotalReadoutTime` of epi field maps when calculating FSL topup distortion correction.
+- Added ability to set minimum quality measure thresholds to all registration steps
- Difference method (``-``) for ``CPAC.utils.configuration.Configuration`` instances
+- Added ``fail_fast`` configuration setting and CLI flag
### Changed
+- Added a level of depth to `working` directories to match `log` and `output` directory structure
+- Renamed participant-pipeline-level `output` directory prefix to `pipeline_` to match `log` and `working` paths
- Changed the 1mm atlases chosen in the rbc-options preconfig to the 2mm versions
- For Nilearn-generated correlation matrices, diagonals are now set to all `1`s (were all `0`s)
- Added ability to apply nuisance correction to template-space BOLD images
- Removed ability to run single-step-resampling on motion-corrected BOLD data
- Moved default pipeline config into directory with other preconfigs
- Added crash messages from during and before graph building to logs
+- Added data-config-specific hash string to C-PAC-generated config files
+- Updated `rbc-options` preconfig to use `fmriprep-options` preprocessing
+- Changed `random.log` to `random.tsv` and updated logic to log random seed when not specified
### Fixed
+- Fixed a [bug](https://github.com/FCP-INDI/C-PAC/issues/1795) that was causing `cpac run` to fail when passing a manual random seed via `--random_seed`.
+- Replaced ``DwellTime`` with ``EffectiveEchoSpacing`` where FSL expects the latter
+- Fixed an issue where some epi field maps were not ingressed if their BIDS tags were out of order.
## [v1.8.4] - 2022-06-27
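
For context on the `TotalReadoutTime` entries above: per the BIDS specification, `TotalReadoutTime` can be read directly from an epi field map's JSON sidecar, or derived as `EffectiveEchoSpacing * (ReconMatrixPE - 1)` when only those fields are present. A hedged sketch of that lookup (not C-PAC's actual ingress code):

```python
import json

def total_readout_time(sidecar_path):
    """Hypothetical helper: recover TotalReadoutTime from a BIDS JSON
    sidecar, falling back to the BIDS relation
    TotalReadoutTime = EffectiveEchoSpacing * (ReconMatrixPE - 1)."""
    with open(sidecar_path, encoding='utf-8') as sidecar:
        metadata = json.load(sidecar)
    trt = metadata.get('TotalReadoutTime')
    if trt is None and {'EffectiveEchoSpacing',
                        'ReconMatrixPE'} <= metadata.keys():
        trt = (metadata['EffectiveEchoSpacing']
               * (metadata['ReconMatrixPE'] - 1))
    return trt  # None when the sidecar carries neither form
```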
diff --git a/CPAC/alff/alff.py b/CPAC/alff/alff.py
index 15a09c90f2..e0f692182c 100644
--- a/CPAC/alff/alff.py
+++ b/CPAC/alff/alff.py
@@ -246,8 +246,8 @@ def alff_falff(wf, cfg, strat_pool, pipe_num, opt=None):
"switch": ["run"],
"option_key": "None",
"option_val": "None",
- "inputs": [["desc-cleaned_bold", "desc-brain_bold", "desc-preproc_bold",
- "bold"],
+ "inputs": [["desc-cleanedNofilt_bold", "desc-brain_bold",
+ "desc-preproc_bold", "bold"],
"space-bold_desc-brain_mask"],
"outputs": ["alff",
"falff"]}
@@ -262,7 +262,7 @@ def alff_falff(wf, cfg, strat_pool, pipe_num, opt=None):
alff.get_node('hp_input').iterables = ('hp', alff.inputs.hp_input.hp)
alff.get_node('lp_input').iterables = ('lp', alff.inputs.lp_input.lp)
- node, out = strat_pool.get_data(["desc-cleaned_bold", "desc-brain_bold",
+ node, out = strat_pool.get_data(["desc-cleanedNofilt_bold", "desc-brain_bold",
"desc-preproc_bold", "bold"])
wf.connect(node, out, alff, 'inputspec.rest_res')
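
The list passed to `strat_pool.get_data` above is a precedence list: the first resource present in the pool wins, so the new `desc-cleanedNofilt_bold` takes priority over `desc-brain_bold` and the rest. A toy version of that lookup rule (an assumption about `get_data`'s behavior, not its real implementation):

```python
def first_available(rpool, keys):
    # Toy precedence lookup: return the first key present in the pool.
    for key in keys:
        if key in rpool:
            return key, rpool[key]
    raise LookupError(f'none of {keys} found in resource pool')


rpool = {'desc-preproc_bold': '/data/sub-01_desc-preproc_bold.nii.gz'}
key, path = first_available(rpool, ['desc-cleanedNofilt_bold',
                                    'desc-brain_bold',
                                    'desc-preproc_bold', 'bold'])
assert key == 'desc-preproc_bold'
```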
diff --git a/CPAC/anat_preproc/anat_preproc.py b/CPAC/anat_preproc/anat_preproc.py
index eac127c152..094599baa5 100644
--- a/CPAC/anat_preproc/anat_preproc.py
+++ b/CPAC/anat_preproc/anat_preproc.py
@@ -1,4 +1,21 @@
# -*- coding: utf-8 -*-
+
+# Copyright (C) 2012-2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
import os
from nipype.interfaces import afni
from nipype.interfaces import ants
@@ -15,6 +32,7 @@
wb_command, \
fslmaths_command, \
VolumeRemoveIslands
+from CPAC.registration.guardrails import guardrail_selection
from CPAC.utils.interfaces.fsl import Merge as fslMerge
from CPAC.utils.interfaces.function.seg_preproc import \
pick_tissue_from_labels_file_interface
@@ -41,20 +59,20 @@ def acpc_alignment(config=None, acpc_target='whole-head', mask=False,
name='outputspec')
if config.anatomical_preproc['acpc_alignment']['FOV_crop'] == 'robustfov':
robust_fov = pe.Node(interface=fsl_utils.RobustFOV(),
- name='anat_acpc_1_robustfov')
- robust_fov.inputs.brainsize = config.anatomical_preproc['acpc_alignment']['brain_size']
+ name='anat_acpc_1_robustfov')
+ robust_fov.inputs.brainsize = config.anatomical_preproc[
+ 'acpc_alignment']['brain_size']
robust_fov.inputs.out_transform = 'fov_xfm.mat'
fov, in_file = (robust_fov, 'in_file')
fov, fov_mtx = (robust_fov, 'out_transform')
fov, fov_outfile = (robust_fov, 'out_roi')
-
+
elif config.anatomical_preproc['acpc_alignment']['FOV_crop'] == 'flirt':
# robustfov doesn't work on some monkey data. prefer using flirt.
# ${FSLDIR}/bin/flirt -in "${Input}" -applyxfm -ref "${Input}" -omat "$WD"/roi2full.mat -out "$WD"/robustroi.nii.gz
# adopted from DCAN NHP https://github.com/DCAN-Labs/dcan-macaque-pipeline/blob/master/PreFreeSurfer/scripts/ACPCAlignment.sh#L80-L81
- flirt_fov = pe.Node(interface=fsl.FLIRT(),
- name='anat_acpc_1_fov')
+ flirt_fov = pe.Node(interface=fsl.FLIRT(), name='anat_acpc_1_fov')
flirt_fov.inputs.args = '-applyxfm'
fov, in_file = (flirt_fov, 'in_file')
@@ -78,34 +96,41 @@ def acpc_alignment(config=None, acpc_target='whole-head', mask=False,
name='anat_acpc_2_fov_convertxfm')
convert_fov_xfm.inputs.invert_xfm = True
- preproc.connect(fov, fov_mtx,
- convert_fov_xfm, 'in_file')
+ preproc.connect(fov, fov_mtx, convert_fov_xfm, 'in_file')
- align = pe.Node(interface=fsl.FLIRT(),
- name='anat_acpc_3_flirt')
+ align = pe.Node(interface=fsl.FLIRT(), name='anat_acpc_3_flirt')
align.inputs.interp = 'spline'
align.inputs.searchr_x = [30, 30]
align.inputs.searchr_y = [30, 30]
align.inputs.searchr_z = [30, 30]
-
- preproc.connect(fov, fov_outfile, align, 'in_file')
+    align_nodes, align_guardrails = preproc.nodes_and_guardrails(
+        align, registered='out_file')
+ preproc.connect_retries(align_nodes, [(fov, fov_outfile, 'in_file')])
# align head-to-head to get acpc.mat (for human)
if acpc_target == 'whole-head':
- preproc.connect(inputnode, 'template_head_for_acpc', align,
- 'reference')
+ preproc.connect_retries(
+ align_nodes, [(inputnode, 'template_head_for_acpc', 'reference')])
+ preproc.connect_retries(
+ align_guardrails, [(inputnode, 'template_head_for_acpc',
+ 'reference')])
# align brain-to-brain to get acpc.mat (for monkey)
if acpc_target == 'brain':
- preproc.connect(inputnode, 'template_brain_for_acpc', align,
- 'reference')
+ preproc.connect_retries(
+ align_nodes, [(inputnode, 'template_brain_for_acpc', 'reference')])
+ preproc.connect_retries(
+ align_guardrails, [(inputnode, 'template_brain_for_acpc',
+ 'reference')])
concat_xfm = pe.Node(interface=fsl_utils.ConvertXFM(),
name='anat_acpc_4_concatxfm')
concat_xfm.inputs.concat_xfm = True
preproc.connect(convert_fov_xfm, 'out_file', concat_xfm, 'in_file')
- preproc.connect(align, 'out_matrix_file', concat_xfm, 'in_file2')
+ select_align = guardrail_selection(preproc, *align_nodes,
+ 'out_matrix_file', align_guardrails[0])
+ preproc.connect(select_align, 'out', concat_xfm, 'in_file2')
aff_to_rig_imports = ['import os', 'from numpy import *']
aff_to_rig = pe.Node(util.Function(input_names=['in_xfm', 'out_name'],
@@ -158,7 +183,7 @@ def acpc_alignment(config=None, acpc_target='whole-head', mask=False,
def T2wToT1wReg(wf_name='T2w_to_T1w_reg'):
-
+
# Adapted from DCAN lab
# https://github.com/DCAN-Labs/dcan-macaque-pipeline/blob/master/PreFreeSurfer/scripts/T2wToT1wReg.sh
@@ -171,16 +196,17 @@ def T2wToT1wReg(wf_name='T2w_to_T1w_reg'):
name='inputspec')
outputnode = pe.Node(util.IdentityInterface(fields=['T2w_to_T1w']),
- name='outputspec')
+ name='outputspec')
# ${FSLDIR}/bin/epi_reg --epi="$T2wImageBrain" --t1="$T1wImage" --t1brain="$WD"/"$T1wImageBrainFile" --out="$WD"/T2w2T1w
- T2w2T1w_reg = pe.Node(interface=fsl.EpiReg(),
- name='T2w2T1w_reg')
+ T2w2T1w_reg = pe.Node(interface=fsl.EpiReg(), name='T2w2T1w_reg')
T2w2T1w_reg.inputs.out_base = 'T2w2T1w'
-
- preproc.connect(inputnode, 'T2w_brain', T2w2T1w_reg ,'epi')
- preproc.connect(inputnode, 'T1w', T2w2T1w_reg ,'t1_head')
- preproc.connect(inputnode, 'T1w_brain', T2w2T1w_reg ,'t1_brain')
+ reg_nodes, reg_guardrails = preproc.nodes_and_guardrails(
+ T2w2T1w_reg, registered='out_file')
+ preproc.connect_retries(reg_nodes, [(inputnode, 'T2w_brain', 'epi'),
+ (inputnode, 'T1w', 't1_head'),
+ (inputnode, 'T1w_brain', 't1_brain')])
+ preproc.connect_retries(reg_guardrails, [(inputnode, 'T1w', 'reference')])
# ${FSLDIR}/bin/applywarp --rel --interp=spline --in="$T2wImage" --ref="$T1wImage" --premat="$WD"/T2w2T1w.mat --out="$WD"/T2w2T1w
T2w2T1w = pe.Node(interface=fsl.ApplyWarp(),
@@ -193,9 +219,8 @@ def T2wToT1wReg(wf_name='T2w_to_T1w_reg'):
preproc.connect(T2w2T1w_reg, 'epi2str_mat', T2w2T1w, 'premat')
# ${FSLDIR}/bin/fslmaths "$WD"/T2w2T1w -add 1 "$WD"/T2w2T1w -odt float
- T2w2T1w_final = pe.Node(interface=fsl.ImageMaths(),
- name='T2w2T1w_final')
- T2w2T1w_final.inputs.op_string = "-add 1"
+ T2w2T1w_final = pe.Node(interface=fsl.ImageMaths(), name='T2w2T1w_final')
+ T2w2T1w_final.inputs.op_string = "-add 1"
preproc.connect(T2w2T1w, 'out_file', T2w2T1w_final, 'in_file')
preproc.connect(T2w2T1w_final, 'out_file', outputnode, 'T2w_to_T1w')
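
The `T2w2T1w_reg` rewiring above is the pattern this diff applies to every registration node: `nodes_and_guardrails` yields the original node plus a retry clone, and `connect_retries` fans one connection list out to each attempt. A minimal sketch of that fan-out, assuming `connect_retries` simply loops over the attempts (names illustrative):

```python
def connect_retries(wf, nodes, connections):
    """Assumed behavior of Workflow.connect_retries: apply the same
    (source, source_output, destination_input) triples to each attempt."""
    for node in nodes:
        for source, output, input_name in connections:
            wf.connect(source, output, node, input_name)
```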
@@ -790,11 +815,14 @@ def unet_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
f'brain_{pipe_num}')
native_brain_to_template_brain.inputs.dof = 6
native_brain_to_template_brain.inputs.interp = 'sinc'
- wf.connect(unet_masked_brain, 'out_file',
- native_brain_to_template_brain, 'in_file')
+ brain_nodes, brain_guardrails = wf.nodes_and_guardrails(
+ native_brain_to_template_brain, registered='out_file')
node, out = strat_pool.get_data('T1w-brain-template')
- wf.connect(node, out, native_brain_to_template_brain, 'reference')
+ wf.connect_retries(brain_nodes, [
+ (unet_masked_brain, 'out_file', 'in_file'),
+ (node, out, 'reference')])
+ wf.connect_retries(brain_guardrails, [(node, out, 'reference')])
# flirt -in head.nii.gz -ref NMT_0.5mm.nii.gz -o head_rot2atl -applyxfm -init brain_rot2atl.mat
native_head_to_template_head = pe.Node(interface=fsl.FLIRT(),
@@ -802,23 +830,34 @@ def unet_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
f'head_{pipe_num}')
native_head_to_template_head.inputs.apply_xfm = True
+ head_nodes, head_guardrails = wf.nodes_and_guardrails(
+ native_head_to_template_head, registered='out_file')
+ select_head = guardrail_selection(wf, *head_guardrails)
+
if strat_pool.check_rpool('desc-preproc_T1w') or \
strat_pool.check_rpool('desc-reorient_T1w') or \
strat_pool.check_rpool('T1w'):
- node, out = strat_pool.get_data(['desc-preproc_T1w', 'desc-reorient_T1w','T1w'])
- wf.connect(node, out, native_head_to_template_head, 'in_file')
-
+ node, out = strat_pool.get_data(
+ ['desc-preproc_T1w', 'desc-reorient_T1w','T1w'])
+ wf.connect_retries(head_nodes, [(node, out, 'in_file')])
+
elif strat_pool.check_rpool('desc-preproc_T2w') or \
strat_pool.check_rpool('desc-reorient_T2w') or \
strat_pool.check_rpool('T2w'):
- node, out = strat_pool.get_data(['desc-preproc_T2w', 'desc-reorient_T2w','T2w'])
- wf.connect(node, out, native_head_to_template_head, 'in_file')
+ node, out = strat_pool.get_data([
+ 'desc-preproc_T2w', 'desc-reorient_T2w', 'T2w'])
+ wf.connect_retries(head_nodes, [(node, out, 'in_file')])
- wf.connect(native_brain_to_template_brain, 'out_matrix_file',
+ select_template_brain_matrix = guardrail_selection(wf, *brain_nodes,
+ 'out_matrix_file',
+ brain_guardrails[0])
+
+ wf.connect(select_template_brain_matrix, 'out',
native_head_to_template_head, 'in_matrix_file')
node, out = strat_pool.get_data('T1w-template')
- wf.connect(node, out, native_head_to_template_head, 'reference')
+ wf.connect_retries(head_nodes, [(node, out, 'reference')])
+ wf.connect_retries(head_guardrails, [(node, out, 'reference')])
# fslmaths NMT_SS_0.5mm.nii.gz -bin templateMask.nii.gz
template_brain_mask = pe.Node(interface=fsl.maths.MathsCommand(),
@@ -830,7 +869,7 @@ def unet_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
# ANTS 3 -m CC[head_rot2atl.nii.gz,NMT_0.5mm.nii.gz,1,5] -t SyN[0.25] -r Gauss[3,0] -o atl2T1rot -i 60x50x20 --use-Histogram-Matching --number-of-affine-iterations 10000x10000x10000x10000x10000 --MI-option 32x16000
ants_template_head_to_template = pe.Node(interface=ants.Registration(),
- name=f'template_head_to_'
+ name='template_head_to_'
f'template_{pipe_num}')
ants_template_head_to_template.inputs.metric = ['CC']
ants_template_head_to_template.inputs.metric_weight = [1, 5]
@@ -842,13 +881,22 @@ def unet_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
ants_template_head_to_template.inputs.smoothing_sigmas = [[0.6, 0.2, 0.0]]
ants_template_head_to_template.inputs.shrink_factors = [[4, 2, 1]]
ants_template_head_to_template.inputs.convergence_threshold = [1.e-8]
- wf.connect(native_head_to_template_head, 'out_file',
- ants_template_head_to_template, 'fixed_image')
+
+ athtt_nodes, athtt_guardrails = wf.nodes_and_guardrails(
+ ants_template_head_to_template, registered='warped_image')
+ select_ants_template_head_to_template = guardrail_selection(
+ wf, *athtt_nodes, 'forward_transforms', athtt_guardrails[0])
+
+    wf.connect_retries(athtt_nodes, [(select_head, 'out', 'fixed_image')])
+    wf.connect_retries(athtt_guardrails, [(select_head, 'out', 'reference')])
node, out = strat_pool.get_data('T1w-brain-template')
- wf.connect(node, out, ants_template_head_to_template, 'moving_image')
+ wf.connect_retries(athtt_nodes, [(node, out, 'moving_image')])
- # antsApplyTransforms -d 3 -i templateMask.nii.gz -t atl2T1rotWarp.nii.gz atl2T1rotAffine.txt -r brain_rot2atl.nii.gz -o brain_rot2atl_mask.nii.gz
+ # antsApplyTransforms -d 3 -i templateMask.nii.gz -t atl2T1rotWarp.nii.gz atl2T1rotAffine.txt -r brain_rot2atl.nii.gz -o brain_rot2atl_mask.nii.gz
template_head_transform_to_template = pe.Node(
interface=ants.ApplyTransforms(),
name=f'template_head_transform_to_template_{pipe_num}')
@@ -858,47 +906,48 @@ def unet_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
template_head_transform_to_template, 'input_image')
wf.connect(native_brain_to_template_brain, 'out_file',
template_head_transform_to_template, 'reference_image')
- wf.connect(ants_template_head_to_template, 'forward_transforms',
+ wf.connect(select_ants_template_head_to_template, 'out',
template_head_transform_to_template, 'transforms')
- # convert_xfm -omat brain_rot2native.mat -inverse brain_rot2atl.mat
+ # convert_xfm -omat brain_rot2native.mat -inverse brain_rot2atl.mat
invt = pe.Node(interface=fsl.ConvertXFM(), name='convert_xfm')
invt.inputs.invert_xfm = True
- wf.connect(native_brain_to_template_brain, 'out_matrix_file', invt,
- 'in_file')
+ wf.connect(native_brain_to_template_brain, 'out_matrix_file',
+ invt, 'in_file')
# flirt -in brain_rot2atl_mask.nii.gz -ref brain.nii.gz -o brain_mask.nii.gz -applyxfm -init brain_rot2native.mat
template_brain_to_native_brain = pe.Node(interface=fsl.FLIRT(),
- name=f'template_brain_to_native_'
+ name='template_brain_to_native_'
f'brain_{pipe_num}')
template_brain_to_native_brain.inputs.apply_xfm = True
- wf.connect(template_head_transform_to_template, 'output_image',
- template_brain_to_native_brain, 'in_file')
- wf.connect(unet_masked_brain, 'out_file', template_brain_to_native_brain,
- 'reference')
- wf.connect(invt, 'out_file', template_brain_to_native_brain,
- 'in_matrix_file')
+ tbtnb_nodes, tbtnb_guardrails = wf.nodes_and_guardrails(
+ template_brain_to_native_brain, registered='out_file')
+ wf.connect_retries(tbtnb_nodes, [
+ (template_head_transform_to_template, 'output_image', 'in_file'),
+ (unet_masked_brain, 'out_file', 'reference'),
+ (invt, 'out_file', 'in_matrix_file')])
+ wf.connect_retries(tbtnb_guardrails, [
+ (unet_masked_brain, 'out_file', 'reference')])
+ select_template_brain_to_native_brain = guardrail_selection(
+ wf, *tbtnb_guardrails)
# fslmaths brain_mask.nii.gz -thr .5 -bin brain_mask_thr.nii.gz
- refined_mask = pe.Node(interface=fsl.Threshold(), name=f'refined_mask'
- f'_{pipe_num}')
+ refined_mask = pe.Node(interface=fsl.Threshold(),
+ name=f'refined_mask_{pipe_num}')
refined_mask.inputs.thresh = 0.5
refined_mask.inputs.args = '-bin'
- wf.connect(template_brain_to_native_brain, 'out_file', refined_mask,
- 'in_file')
+ wf.connect(select_template_brain_to_native_brain, 'out',
+ refined_mask, 'in_file')
- outputs = {
- 'space-T1w_desc-brain_mask': (refined_mask, 'out_file')
- }
+ outputs = {'space-T1w_desc-brain_mask': (refined_mask, 'out_file')}
- return (wf, outputs)
+ return wf, outputs
def freesurfer_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
# register FS brain mask to native space
- fs_brain_mask_to_native = pe.Node(
- interface=freesurfer.ApplyVolTransform(),
- name='fs_brain_mask_to_native')
+ fs_brain_mask_to_native = pe.Node(interface=freesurfer.ApplyVolTransform(),
+ name='fs_brain_mask_to_native')
fs_brain_mask_to_native.inputs.reg_header = True
node, out = strat_pool.get_data('space-T1w_desc-brain_mask')
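
Throughout the connector above, `nodes_and_guardrails` is assumed to return two pairs: `(node, retry_clone)` and matching guardrail nodes that each watch the attempt's `registered` output and receive a `reference` to compare against. A self-contained sketch of that shape, with `IdentityInterface` standing in for the real guardrail interface:

```python
import nipype.pipeline.engine as pe
from nipype.interfaces import utility as util


def nodes_and_guardrails(wf, node, registered):
    """Illustrative stand-in for the Workflow method used in this diff:
    pair ``node`` with a retry clone and attach one guardrail per attempt."""
    retry = node.clone(name=f'retry_{node.name}')
    guardrails = []
    for attempt in (node, retry):
        guard = pe.Node(util.IdentityInterface(fields=['registered',
                                                       'reference']),
                        name=f'guardrail_{attempt.name}')
        wf.connect(attempt, registered, guard, 'registered')
        guardrails.append(guard)
    return (node, retry), tuple(guardrails)
```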
@@ -979,10 +1028,12 @@ def freesurfer_abcd_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
wf.connect(wb_command_fill_holes, 'out_file',
binary_filled_mask, 'in_file')
- brain_mask_to_t1_restore = pe.Node(interface=fsl.ApplyWarp(),
- name=f'brain_mask_to_t1_restore_{pipe_num}')
+ brain_mask_to_t1_restore = pe.Node(
+ interface=fsl.ApplyWarp(), name=f'brain_mask_to_t1_restore_{pipe_num}')
brain_mask_to_t1_restore.inputs.interp = 'nn'
- brain_mask_to_t1_restore.inputs.premat = cfg.registration_workflows['anatomical_registration']['registration']['FSL-FNIRT']['identity_matrix']
+ brain_mask_to_t1_restore.inputs.premat = cfg.registration_workflows[
+ 'anatomical_registration'
+ ]['registration']['FSL-FNIRT']['identity_matrix']
wf.connect(binary_filled_mask, 'out_file',
brain_mask_to_t1_restore, 'in_file')
@@ -991,8 +1042,7 @@ def freesurfer_abcd_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
wf.connect(node, out, brain_mask_to_t1_restore, 'ref_file')
outputs = {
- 'space-T1w_desc-brain_mask': (brain_mask_to_t1_restore, 'out_file')
- }
+ 'space-T1w_desc-brain_mask': (brain_mask_to_t1_restore, 'out_file')}
return (wf, outputs)
@@ -1002,10 +1052,11 @@ def freesurfer_fsl_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
node_id = f'{opt.lower()}_{pipe_num}'
# mri_convert -it mgz ${SUBJECTS_DIR}/${subject}/mri/brainmask.mgz -ot nii brainmask.nii.gz
- convert_fs_brainmask_to_nifti = pe.Node(util.Function(input_names=['in_file'],
- output_names=['out_file'],
- function=mri_convert),
- name=f'convert_fs_brainmask_to_nifti_{node_id}')
+ convert_fs_brainmask_to_nifti = pe.Node(
+ util.Function(input_names=['in_file'],
+ output_names=['out_file'],
+ function=mri_convert),
+ name=f'convert_fs_brainmask_to_nifti_{node_id}')
node, out = strat_pool.get_data('brainmask')
wf.connect(node, out, convert_fs_brainmask_to_nifti, 'in_file')
@@ -1052,8 +1103,8 @@ def freesurfer_fsl_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
# flirt -in head_fs.nii.gz -ref ${FSLDIR}/data/standard/MNI152_T1_1mm.nii.gz \
# -out tmp_head_fs2standard.nii.gz -omat tmp_head_fs2standard.mat -bins 256 -cost corratio \
# -searchrx -90 90 -searchry -90 90 -searchrz -90 90 -dof 12 -interp trilinear
- convert_head_to_template = pe.Node(interface=fsl.FLIRT(),
- name=f'convert_head_to_template_{node_id}')
+ convert_head_to_template = pe.Node(
+ interface=fsl.FLIRT(), name=f'convert_head_to_template_{node_id}')
convert_head_to_template.inputs.cost = 'corratio'
convert_head_to_template.inputs.interp = 'trilinear'
convert_head_to_template.inputs.bins = 256
@@ -1061,23 +1112,29 @@ def freesurfer_fsl_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
convert_head_to_template.inputs.searchr_x = [-90, 90]
convert_head_to_template.inputs.searchr_y = [-90, 90]
convert_head_to_template.inputs.searchr_z = [-90, 90]
-
- wf.connect(reorient_fs_T1, 'out_file',
- convert_head_to_template, 'in_file')
+ (head_to_template_nodes,
+ head_to_template_guardrails) = wf.nodes_and_guardrails(
+ convert_head_to_template, registered='out_file')
node, out = strat_pool.get_data('T1w-ACPC-template')
- wf.connect(node, out, convert_head_to_template, 'reference')
+ wf.connect_retries(head_to_template_nodes, [
+ (reorient_fs_T1, 'out_file', 'in_file'),
+ (node, out, 'reference')])
+ wf.connect_retries(head_to_template_guardrails, [(node, out, 'reference')])
+ head_to_template = guardrail_selection(wf, *head_to_template_guardrails)
+ head_to_template_matrix = guardrail_selection(
+ wf, *head_to_template_nodes, 'out_matrix_file',
+ head_to_template_guardrails[0])
# convert_xfm -omat tmp_standard2head_fs.mat -inverse tmp_head_fs2standard.mat
convert_xfm = pe.Node(interface=fsl_utils.ConvertXFM(),
- name=f'convert_xfm_{node_id}')
+ name=f'convert_xfm_{node_id}')
convert_xfm.inputs.invert_xfm = True
- wf.connect(convert_head_to_template, 'out_matrix_file',
- convert_xfm, 'in_file')
+ wf.connect(head_to_template_matrix, 'out', convert_xfm, 'in_file')
# bet tmp_head_fs2standard.nii.gz tmp.nii.gz -f ${bet_thr_tight} -m
- skullstrip = pe.Node(interface=fsl.BET(),
+ skullstrip = pe.Node(interface=fsl.BET(),
name=f'anat_BET_skullstrip_{node_id}')
skullstrip.inputs.output_type = 'NIFTI_GZ'
skullstrip.inputs.mask=True
@@ -1087,38 +1144,40 @@ def freesurfer_fsl_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
elif opt == 'FreeSurfer-BET-Loose':
skullstrip.inputs.frac=0.1
- wf.connect(convert_head_to_template, 'out_file',
- skullstrip, 'in_file')
-
+ wf.connect(head_to_template, 'out', skullstrip, 'in_file')
+
# fslmaths tmp_mask.nii.gz -mas ${CCSDIR}/templates/MNI152_T1_1mm_first_brain_mask.nii.gz tmp_mask.nii.gz
apply_mask = pe.Node(interface=fsl.maths.ApplyMask(),
name=f'apply_mask_{node_id}')
- wf.connect(skullstrip, 'out_file',
- apply_mask, 'in_file')
+ wf.connect(skullstrip, 'out_file', apply_mask, 'in_file')
node, out = strat_pool.get_data('T1w-brain-template-mask-ccs')
wf.connect(node, out, apply_mask, 'mask_file')
# flirt -in tmp_mask.nii.gz -applyxfm -init tmp_standard2head_fs.mat -out brain_fsl_mask_tight.nii.gz \
# -paddingsize 0.0 -interp nearestneighbour -ref head_fs.nii.gz
- convert_template_mask_to_native = pe.Node(interface=fsl.FLIRT(),
- name=f'convert_template_mask_to_native_{node_id}')
+ convert_template_mask_to_native = pe.Node(
+ interface=fsl.FLIRT(),
+ name=f'convert_template_mask_to_native_{node_id}')
convert_template_mask_to_native.inputs.apply_xfm = True
convert_template_mask_to_native.inputs.padding_size = 0
convert_template_mask_to_native.inputs.interp = 'nearestneighbour'
-
- wf.connect(apply_mask, 'out_file',
- convert_template_mask_to_native, 'in_file')
-
- wf.connect(convert_xfm, 'out_file',
- convert_template_mask_to_native, 'in_matrix_file')
-
- wf.connect(reorient_fs_T1, 'out_file',
- convert_template_mask_to_native, 'reference')
+ (templatemask_to_native_nodes,
+ templatemask_to_native_guardrails) = wf.nodes_and_guardrails(
+ convert_template_mask_to_native, registered='out_file')
+
+ wf.connect_retries(templatemask_to_native_nodes, [
+ (apply_mask, 'out_file', 'in_file'),
+ (convert_xfm, 'out_file', 'in_matrix_file'),
+ (reorient_fs_T1, 'out_file', 'reference')])
+ wf.connect_retries(templatemask_to_native_guardrails, [
+ (reorient_fs_T1, 'out_file', 'reference')])
+ template_mask_to_native = guardrail_selection(
+ wf, *templatemask_to_native_guardrails)
# fslmaths brain_fs_mask.nii.gz -add brain_fsl_mask_tight.nii.gz -bin brain_mask_tight.nii.gz
- # BinaryMaths doesn't use -bin!
+ # BinaryMaths doesn't use -bin!
combine_mask = pe.Node(interface=fsl.BinaryMaths(),
name=f'combine_mask_{node_id}')
@@ -1127,27 +1186,24 @@ def freesurfer_fsl_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
elif opt == 'FreeSurfer-BET-Loose':
combine_mask.inputs.operation = 'mul'
- wf.connect(binarize_fs_brain, 'out_file',
- combine_mask, 'in_file')
-
- wf.connect(convert_template_mask_to_native, 'out_file',
- combine_mask, 'operand_file')
+ wf.connect(binarize_fs_brain, 'out_file', combine_mask, 'in_file')
+ wf.connect(template_mask_to_native, 'out', combine_mask, 'operand_file')
binarize_combined_mask = pe.Node(interface=fsl.maths.MathsCommand(),
name=f'binarize_combined_mask_{node_id}')
binarize_combined_mask.inputs.args = '-bin'
- wf.connect(combine_mask, 'out_file',
- binarize_combined_mask, 'in_file')
+ wf.connect(combine_mask, 'out_file', binarize_combined_mask, 'in_file')
# CCS brain mask is in FS space, transfer it back to native T1 space
- fs_fsl_brain_mask_to_native = pe.Node(interface=freesurfer.ApplyVolTransform(),
- name=f'fs_fsl_brain_mask_to_native_{node_id}')
+ fs_fsl_brain_mask_to_native = pe.Node(
+ interface=freesurfer.ApplyVolTransform(),
+ name=f'fs_fsl_brain_mask_to_native_{node_id}')
fs_fsl_brain_mask_to_native.inputs.reg_header = True
fs_fsl_brain_mask_to_native.inputs.interp = 'nearest'
- wf.connect(binarize_combined_mask, 'out_file',
- fs_fsl_brain_mask_to_native, 'source_file')
+ wf.connect(binarize_combined_mask, 'out_file',
+ fs_fsl_brain_mask_to_native, 'source_file')
node, out = strat_pool.get_data('raw-average')
wf.connect(node, out, fs_fsl_brain_mask_to_native, 'target_file')
@@ -1157,14 +1213,14 @@ def freesurfer_fsl_brain_connector(wf, cfg, strat_pool, pipe_num, opt):
if opt == 'FreeSurfer-BET-Tight':
outputs = {
- 'space-T1w_desc-tight_brain_mask': (fs_fsl_brain_mask_to_native, 'transformed_file')
- }
+ 'space-T1w_desc-tight_brain_mask': (fs_fsl_brain_mask_to_native,
+ 'transformed_file')}
elif opt == 'FreeSurfer-BET-Loose':
outputs = {
- 'space-T1w_desc-loose_brain_mask': (fs_fsl_brain_mask_to_native, 'transformed_file')
- }
+ 'space-T1w_desc-loose_brain_mask': (fs_fsl_brain_mask_to_native,
+ 'transformed_file')}
- return (wf, outputs)
+ return wf, outputs
def mask_T2(wf_name='mask_T2'):
@@ -1195,25 +1251,36 @@ def mask_T2(wf_name='mask_T2'):
# t1w2t2w_rigid = 'flirt -dof 6 -cost mutualinfo -in {t1w} -ref {t2w} ' \
# '-omat {t1w2t2w}'.format(**kwargs)
- t1w2t2w_rigid = pe.Node(interface=fsl.FLIRT(),
- name='t1w2t2w_rigid')
-
+ t1w2t2w_rigid = pe.Node(interface=fsl.FLIRT(), name='t1w2t2w_rigid')
t1w2t2w_rigid.inputs.dof = 6
t1w2t2w_rigid.inputs.cost = 'mutualinfo'
- preproc.connect(inputnode, 'T1w', t1w2t2w_rigid, 'in_file')
- preproc.connect(inputnode, 'T2w', t1w2t2w_rigid, 'reference')
+ rigid_nodes, rigid_guardrails = preproc.nodes_and_guardrails(
+ t1w2t2w_rigid, registered='out_file')
+ preproc.connect_retries(rigid_nodes, [
+ (inputnode, 'T1w', 'in_file'),
+ (inputnode, 'T2w', 'reference')])
+ preproc.connect_retries(rigid_guardrails, [
+ (inputnode, 'T2w', 'reference')])
+ rigid_matrix = guardrail_selection(preproc, *rigid_nodes,
+ 'out_matrix_file', rigid_guardrails[0])
# t1w2t2w_mask = 'flirt -in {t1w_mask_edit} -interp nearestneighbour -ref {' \
# 't2w} -o {t2w_brain_mask} -applyxfm -init {' \
# 't1w2t2w}'.format(**kwargs)
- t1w2t2w_mask = pe.Node(interface=fsl.FLIRT(),
- name='t1w2t2w_mask')
+ t1w2t2w_mask = pe.Node(interface=fsl.FLIRT(), name='t1w2t2w_mask')
t1w2t2w_mask.inputs.apply_xfm = True
t1w2t2w_mask.inputs.interp = 'nearestneighbour'
-
- preproc.connect(inputnode, 'T1w_mask', t1w2t2w_mask, 'in_file')
- preproc.connect(inputnode, 'T2w', t1w2t2w_mask, 'reference')
- preproc.connect(t1w2t2w_rigid, 'out_matrix_file', t1w2t2w_mask, 'in_matrix_file')
+ mask_nodes, mask_guardrails = preproc.nodes_and_guardrails(
+ t1w2t2w_mask, registered='out_file')
+
+ preproc.connect_retries(mask_nodes, [
+ (inputnode, 'T1w_mask', 'in_file'),
+ (inputnode, 'T2w', 'reference'),
+ (rigid_matrix, 'out', 'in_matrix_file')])
+ preproc.connect_retries(mask_guardrails, [
+ (inputnode, 'T2w', 'reference')])
+ # pylint: disable=no-value-for-parameter
+ select_mask = guardrail_selection(preproc, *mask_guardrails)
# mask_t2w = 'fslmaths {t2w} -mas {t2w_brain_mask} ' \
# '{t2w_brain}'.format(**kwargs)
@@ -1222,11 +1289,10 @@ def mask_T2(wf_name='mask_T2'):
mask_t2w.inputs.op_string = "-mas %s "
preproc.connect(inputnode, 'T2w', mask_t2w, 'in_file')
- preproc.connect(t1w2t2w_mask, 'out_file', mask_t2w, 'operand_files')
-
+ preproc.connect(select_mask, 'out', mask_t2w, 'operand_files')
preproc.connect(mask_t1w, 'out_file', outputnode, 'T1w_brain')
preproc.connect(mask_t2w, 'out_file', outputnode, 'T2w_brain')
- preproc.connect(t1w2t2w_mask, 'out_file', outputnode, 'T2w_mask')
+ preproc.connect(select_mask, 'out', outputnode, 'T2w_mask')
return preproc
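
`select_mask` above comes from `guardrail_selection`, which, as used in this diff, picks whichever attempt satisfied its guardrail's quality check and forwards that attempt's output; the four-argument call form forwards a matching secondary output such as `out_matrix_file`. A toy of the assumed selection rule:

```python
def select_first_passing(attempt_outputs, qc_passed):
    """Toy stand-in for guardrail_selection's choice logic (an assumption,
    not the real code): return the first attempt whose guardrail check
    passed, falling back to the last attempt if none did."""
    for output, passed in zip(attempt_outputs, qc_passed):
        if passed:
            return output
    return attempt_outputs[-1]


assert select_first_passing(['try1.mat', 'try2.mat'],
                            qc_passed=[False, True]) == 'try2.mat'
```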
@@ -1438,11 +1504,12 @@ def acpc_align_brain_with_mask(wf, cfg, strat_pool, pipe_num, opt=None):
outputs = {
'desc-preproc_T1w': (acpc_align, 'outputspec.acpc_aligned_head'),
'desc-acpcbrain_T1w': (acpc_align, 'outputspec.acpc_aligned_brain'),
- 'space-T1w_desc-brain_mask': (acpc_align, 'outputspec.acpc_brain_mask'),
- 'space-T1w_desc-prebrain_mask': (strat_pool.get_data('space-T1w_desc-brain_mask'))
- }
+ 'space-T1w_desc-brain_mask': (acpc_align,
+ 'outputspec.acpc_brain_mask'),
+ 'space-T1w_desc-prebrain_mask': (
+ strat_pool.get_data('space-T1w_desc-brain_mask'))}
- return (wf, outputs)
+ return wf, outputs
def registration_T2w_to_T1w(wf, cfg, strat_pool, pipe_num, opt=None):
@@ -1475,11 +1542,9 @@ def registration_T2w_to_T1w(wf, cfg, strat_pool, pipe_num, opt=None):
node, out = strat_pool.get_data(['desc-acpcbrain_T2w'])
wf.connect(node, out, T2_to_T1_reg, 'inputspec.T2w_brain')
- outputs = {
- 'desc-preproc_T2w': (T2_to_T1_reg, 'outputspec.T2w_to_T1w')
- }
+ outputs = {'desc-preproc_T2w': (T2_to_T1_reg, 'outputspec.T2w_to_T1w')}
- return (wf, outputs)
+ return wf, outputs
def non_local_means(wf, cfg, strat_pool, pipe_num, opt=None):
@@ -2731,13 +2796,13 @@ def fnirt_based_brain_extraction(config=None, wf_name='fnirt_based_brain_extract
preproc = pe.Workflow(name=wf_name)
- inputnode = pe.Node(util.IdentityInterface(fields=['anat_data',
- 'template-ref-mask-res-2',
- 'template_skull_for_anat',
- 'template_skull_for_anat_2mm',
- 'template_brain_mask_for_anat']),
- name='inputspec')
-
+ inputnode = pe.Node(util.IdentityInterface(
+ fields=['anat_data',
+ 'template-ref-mask-res-2',
+ 'template_skull_for_anat',
+ 'template_skull_for_anat_2mm',
+ 'template_brain_mask_for_anat']), name='inputspec')
+
outputnode = pe.Node(util.IdentityInterface(fields=['anat_brain',
'anat_brain_mask']),
name='outputspec')
@@ -2750,12 +2815,17 @@ def fnirt_based_brain_extraction(config=None, wf_name='fnirt_based_brain_extract
linear_reg.inputs.dof = 12
linear_reg.inputs.interp = 'spline'
linear_reg.inputs.no_search = True
-
- preproc.connect(inputnode, 'anat_data',
- linear_reg, 'in_file')
-
- preproc.connect(inputnode, 'template_skull_for_anat_2mm',
- linear_reg, 'reference')
+ lreg_nodes, lreg_guardrails = preproc.nodes_and_guardrails(
+ linear_reg, registered='out_file')
+
+ preproc.connect_retries(lreg_nodes, [
+ (inputnode, 'anat_data', 'in_file'),
+ (inputnode, 'template_skull_for_anat_2mm', 'reference')])
+ preproc.connect_retries(lreg_guardrails, [
+ (inputnode, 'template_skull_for_anat_2mm', 'reference')])
+ linear_reg_matrix = guardrail_selection(preproc, *lreg_nodes,
+ 'out_matrix_file',
+ lreg_guardrails[0])
# non-linear registration to 2mm reference
# fnirt --in="$Input" --ref="$Reference2mm" --aff="$WD"/roughlin.mat --refmask="$Reference2mmMask" \
@@ -2763,9 +2833,7 @@ def fnirt_based_brain_extraction(config=None, wf_name='fnirt_based_brain_extract
# --refout="$WD"/IntensityModulatedT1.nii.gz --iout="$WD"/"$BaseName"_to_MNI_nonlin.nii.gz \
# --logout="$WD"/NonlinearReg.txt --intout="$WD"/NonlinearIntensities.nii.gz \
# --cout="$WD"/NonlinearReg.nii.gz --config="$FNIRTConfig"
- non_linear_reg = pe.Node(interface=fsl.FNIRT(),
- name='non_linear_reg')
-
+ non_linear_reg = pe.Node(interface=fsl.FNIRT(), name='non_linear_reg')
non_linear_reg.inputs.field_file = True # --fout
non_linear_reg.inputs.jacobian_file = True # --jout
non_linear_reg.inputs.modulatedref_file = True # --refout
@@ -2775,36 +2843,30 @@ def fnirt_based_brain_extraction(config=None, wf_name='fnirt_based_brain_extract
non_linear_reg.inputs.fieldcoeff_file = True # --cout
non_linear_reg.inputs.config_file = config.registration_workflows[
'anatomical_registration']['registration']['FSL-FNIRT']['fnirt_config']
-
- preproc.connect(inputnode, 'anat_data',
- non_linear_reg, 'in_file')
-
- preproc.connect(inputnode, 'template_skull_for_anat_2mm',
- non_linear_reg, 'ref_file')
-
- preproc.connect(linear_reg, 'out_matrix_file',
- non_linear_reg, 'affine_file')
-
- preproc.connect(inputnode, 'template-ref-mask-res-2',
- non_linear_reg, 'refmask_file')
+ nlreg_nodes, nlreg_guardrails = preproc.nodes_and_guardrails(
+ non_linear_reg, registered='warped_file')
+
+ preproc.connect_retries(nlreg_nodes, [
+ (inputnode, 'anat_data', 'in_file'),
+ (inputnode, 'template_skull_for_anat_2mm', 'ref_file'),
+ (linear_reg_matrix, 'out', 'affine_file'),
+ (inputnode, 'template-ref-mask-res-2', 'refmask_file')])
+ preproc.connect_retries(nlreg_guardrails, [
+ (inputnode, 'template_skull_for_anat_2mm', 'reference')])
+ field_file = guardrail_selection(preproc, *nlreg_nodes, 'field_file',
+ nlreg_guardrails[0])
# Overwrite the image output from FNIRT with a spline interpolated highres version
# creating spline interpolated hires version
# applywarp --rel --interp=spline --in="$Input" --ref="$Reference" -w "$WD"/str2standard.nii.gz --out="$WD"/"$BaseName"_to_MNI_nonlin.nii.gz
- apply_warp = pe.Node(interface=fsl.ApplyWarp(),
- name='apply_warp')
-
+ apply_warp = pe.Node(interface=fsl.ApplyWarp(), name='apply_warp')
apply_warp.inputs.interp = 'spline'
apply_warp.inputs.relwarp = True
- preproc.connect(inputnode, 'anat_data',
- apply_warp, 'in_file')
-
+ preproc.connect(inputnode, 'anat_data', apply_warp, 'in_file')
preproc.connect(inputnode, 'template_skull_for_anat',
apply_warp, 'ref_file')
-
- preproc.connect(non_linear_reg, 'field_file',
- apply_warp, 'field_file')
+ preproc.connect(field_file, 'out', apply_warp, 'field_file')
# Invert warp and transform dilated brain mask back into native space, and use it to mask input image
# Input and reference spaces are the same, using 2mm reference to save time
@@ -2814,49 +2876,35 @@ def fnirt_based_brain_extraction(config=None, wf_name='fnirt_based_brain_extract
preproc.connect(inputnode, 'template_skull_for_anat_2mm',
inverse_warp, 'reference')
-
- preproc.connect(non_linear_reg, 'field_file',
- inverse_warp, 'warp')
+    preproc.connect(field_file, 'out', inverse_warp, 'warp')
# Apply inverse warp
# applywarp --rel --interp=nn --in="$ReferenceMask" --ref="$Input" -w "$WD"/standard2str.nii.gz -o "$OutputBrainMask"
- apply_inv_warp = pe.Node(interface=fsl.ApplyWarp(),
- name='apply_inv_warp')
+ apply_inv_warp = pe.Node(interface=fsl.ApplyWarp(), name='apply_inv_warp')
apply_inv_warp.inputs.interp = 'nn'
apply_inv_warp.inputs.relwarp = True
preproc.connect(inputnode, 'template_brain_mask_for_anat',
apply_inv_warp, 'in_file')
+ preproc.connect(inputnode, 'anat_data', apply_inv_warp, 'ref_file')
+ preproc.connect(inverse_warp, 'inverse_warp', apply_inv_warp, 'field_file')
+ preproc.connect(apply_inv_warp, 'out_file', outputnode, 'anat_brain_mask')
- preproc.connect(inputnode, 'anat_data',
- apply_inv_warp, 'ref_file')
-
- preproc.connect(inverse_warp, 'inverse_warp',
- apply_inv_warp, 'field_file')
-
- preproc.connect(apply_inv_warp, 'out_file',
- outputnode, 'anat_brain_mask')
-
# Apply mask to create brain
# fslmaths "$Input" -mas "$OutputBrainMask" "$OutputBrainExtractedImage"
apply_mask = pe.Node(interface=fsl.MultiImageMaths(),
name='apply_mask')
apply_mask.inputs.op_string = '-mas %s'
- preproc.connect(inputnode, 'anat_data',
- apply_mask, 'in_file')
+ preproc.connect(inputnode, 'anat_data', apply_mask, 'in_file')
+ preproc.connect(apply_inv_warp, 'out_file', apply_mask, 'operand_files')
+ preproc.connect(apply_mask, 'out_file', outputnode, 'anat_brain')
- preproc.connect(apply_inv_warp, 'out_file',
- apply_mask, 'operand_files')
-
- preproc.connect(apply_mask, 'out_file',
- outputnode, 'anat_brain')
-
return preproc
-def fast_bias_field_correction(config=None, wf_name='fast_bias_field_correction'):
-
+def fast_bias_field_correction(config=None,
+ wf_name='fast_bias_field_correction'):
### ABCD Harmonization - FAST bias field correction ###
# Ref: https://github.com/DCAN-Labs/DCAN-HCP/blob/master/PreFreeSurfer/PreFreeSurferPipeline.sh#L688-L694
@@ -2881,10 +2929,8 @@ def fast_bias_field_correction(config=None, wf_name='fast_bias_field_correction'
preproc.connect(inputnode, 'anat_brain',
fast_bias_field_correction, 'in_files')
-
preproc.connect(fast_bias_field_correction, 'restored_image',
outputnode, 'anat_brain_restore')
-
preproc.connect(fast_bias_field_correction, 'bias_field',
outputnode, 'bias_field')
@@ -2894,7 +2940,7 @@ def fast_bias_field_correction(config=None, wf_name='fast_bias_field_correction'
# fslmaths ${T1wFolder}/T1w_acpc_brain_mask.nii.gz -mul -1 -add 1 ${T1wFolder}/T1w_acpc_inverse_brain_mask.nii.gz
inverse_brain_mask = pe.Node(interface=fsl.ImageMaths(),
- name='inverse_brain_mask')
+ name='inverse_brain_mask')
inverse_brain_mask.inputs.op_string = '-mul -1 -add 1'
preproc.connect(inputnode, 'anat_brain_mask',
@@ -2905,25 +2951,19 @@ def fast_bias_field_correction(config=None, wf_name='fast_bias_field_correction'
name='apply_mask')
apply_mask.inputs.op_string = '-mul %s'
- preproc.connect(inputnode, 'anat_data',
- apply_mask, 'in_file')
-
+ preproc.connect(inputnode, 'anat_data', apply_mask, 'in_file')
preproc.connect(inverse_brain_mask, 'out_file',
apply_mask, 'operand_files')
# fslmaths ${T1wFolder}/T1w_fast_restore.nii.gz -add ${T1wFolder}/T1w_acpc_dc_skull.nii.gz ${T1wFolder}/${T1wImage}_acpc_dc_restore
anat_restore = pe.Node(interface=fsl.MultiImageMaths(),
- name='get_anat_restore')
+ name='get_anat_restore')
anat_restore.inputs.op_string = '-add %s'
preproc.connect(fast_bias_field_correction, 'restored_image',
anat_restore, 'in_file')
-
- preproc.connect(apply_mask, 'out_file',
- anat_restore, 'operand_files')
-
- preproc.connect(anat_restore, 'out_file',
- outputnode, 'anat_restore')
+ preproc.connect(apply_mask, 'out_file', anat_restore, 'operand_files')
+ preproc.connect(anat_restore, 'out_file', outputnode, 'anat_restore')
return preproc
@@ -2964,34 +3004,40 @@ def freesurfer_abcd_preproc(wf, cfg, strat_pool, pipe_num, opt=None):
'''
# fnirt-based brain extraction
- brain_extraction = fnirt_based_brain_extraction(config=cfg,
- wf_name=f'fnirt_based_brain_extraction_{pipe_num}')
+ fn_brain_extraction = fnirt_based_brain_extraction(
+ config=cfg, wf_name=f'fnirt_based_brain_extraction_{pipe_num}')
node, out = strat_pool.get_data('desc-preproc_T1w')
- wf.connect(node, out, brain_extraction, 'inputspec.anat_data')
+ wf.connect(node, out, fn_brain_extraction, 'inputspec.anat_data')
node, out = strat_pool.get_data('template-ref-mask-res-2')
- wf.connect(node, out, brain_extraction, 'inputspec.template-ref-mask-res-2')
+ wf.connect(node, out,
+ fn_brain_extraction, 'inputspec.template-ref-mask-res-2')
node, out = strat_pool.get_data('T1w-template')
- wf.connect(node, out, brain_extraction, 'inputspec.template_skull_for_anat')
+ wf.connect(node, out,
+ fn_brain_extraction, 'inputspec.template_skull_for_anat')
node, out = strat_pool.get_data('T1w-template-res-2')
- wf.connect(node, out, brain_extraction, 'inputspec.template_skull_for_anat_2mm')
+ wf.connect(node, out,
+ fn_brain_extraction, 'inputspec.template_skull_for_anat_2mm')
node, out = strat_pool.get_data('T1w-brain-template-mask')
- wf.connect(node, out, brain_extraction, 'inputspec.template_brain_mask_for_anat')
+ wf.connect(node, out,
+ fn_brain_extraction, 'inputspec.template_brain_mask_for_anat')
# fast bias field correction
- fast_correction = fast_bias_field_correction(config=cfg,
- wf_name=f'fast_bias_field_correction_{pipe_num}')
+ fast_correction = fast_bias_field_correction(
+ config=cfg, wf_name=f'fast_bias_field_correction_{pipe_num}')
node, out = strat_pool.get_data('desc-preproc_T1w')
wf.connect(node, out, fast_correction, 'inputspec.anat_data')
- wf.connect(brain_extraction, 'outputspec.anat_brain', fast_correction, 'inputspec.anat_brain')
+ wf.connect(fn_brain_extraction, 'outputspec.anat_brain',
+ fast_correction, 'inputspec.anat_brain')
- wf.connect(brain_extraction, 'outputspec.anat_brain_mask', fast_correction, 'inputspec.anat_brain_mask')
+ wf.connect(fn_brain_extraction, 'outputspec.anat_brain_mask',
+ fast_correction, 'inputspec.anat_brain_mask')
### ABCD Harmonization ###
# Ref: https://github.com/DCAN-Labs/DCAN-HCP/blob/master/FreeSurfer/FreeSurferPipeline.sh#L140-L144
@@ -3001,40 +3047,44 @@ def freesurfer_abcd_preproc(wf, cfg, strat_pool, pipe_num, opt=None):
name=f'resample_anat_head_1mm_{pipe_num}')
resample_head_1mm.inputs.interp = 'spline'
resample_head_1mm.inputs.apply_isoxfm = 1
+ head_1mm_nodes, head_1mm_guardrails = wf.nodes_and_guardrails(
+ resample_head_1mm, registered='out_file')
node, out = strat_pool.get_data('desc-preproc_T1w')
- wf.connect(node, out, resample_head_1mm, 'in_file')
-
- wf.connect(node, out, resample_head_1mm, 'reference')
-
+ wf.connect_retries(head_1mm_nodes, [(node, out, 'in_file'),
+ (node, out, 'reference')])
+ wf.connect_retries(head_1mm_guardrails, [(node, out, 'reference')])
+ head_1mm = guardrail_selection(wf, *head_1mm_guardrails)
# applywarp --rel --interp=spline -i "$T1wImage" -r "$T1wImageFile"_1mm.nii.gz --premat=$FSLDIR/etc/flirtsch/ident.mat -o "$T1wImageFile"_1mm.nii.gz
- applywarp_head_to_head_1mm = pe.Node(interface=fsl.ApplyWarp(),
- name=f'applywarp_head_to_head_1mm_{pipe_num}')
+ applywarp_head_to_head_1mm = pe.Node(
+ interface=fsl.ApplyWarp(),
+ name=f'applywarp_head_to_head_1mm_{pipe_num}')
applywarp_head_to_head_1mm.inputs.relwarp = True
applywarp_head_to_head_1mm.inputs.interp = 'spline'
- applywarp_head_to_head_1mm.inputs.premat = cfg.registration_workflows['anatomical_registration']['registration']['FSL-FNIRT']['identity_matrix']
+ applywarp_head_to_head_1mm.inputs.premat = cfg.registration_workflows[
+ 'anatomical_registration'
+ ]['registration']['FSL-FNIRT']['identity_matrix']
wf.connect(node, out, applywarp_head_to_head_1mm, 'in_file')
-
- wf.connect(resample_head_1mm, 'out_file',
- applywarp_head_to_head_1mm, 'ref_file')
+ wf.connect(head_1mm, 'out', applywarp_head_to_head_1mm, 'ref_file')
# applywarp --rel --interp=nn -i "$T1wImageBrain" -r "$T1wImageFile"_1mm.nii.gz --premat=$FSLDIR/etc/flirtsch/ident.mat -o "$T1wImageBrainFile"_1mm.nii.gz
- applywarp_brain_to_head_1mm = pe.Node(interface=fsl.ApplyWarp(),
- name=f'applywarp_brain_to_head_1mm_{pipe_num}')
+ applywarp_brain_to_head_1mm = pe.Node(
+ interface=fsl.ApplyWarp(),
+ name=f'applywarp_brain_to_head_1mm_{pipe_num}')
applywarp_brain_to_head_1mm.inputs.relwarp = True
applywarp_brain_to_head_1mm.inputs.interp = 'nn'
- applywarp_brain_to_head_1mm.inputs.premat = cfg.registration_workflows['anatomical_registration']['registration']['FSL-FNIRT']['identity_matrix']
+ applywarp_brain_to_head_1mm.inputs.premat = cfg.registration_workflows[
+ 'anatomical_registration'
+ ]['registration']['FSL-FNIRT']['identity_matrix']
wf.connect(fast_correction, 'outputspec.anat_brain_restore',
- applywarp_brain_to_head_1mm, 'in_file')
-
- wf.connect(resample_head_1mm, 'out_file',
- applywarp_brain_to_head_1mm, 'ref_file')
+ applywarp_brain_to_head_1mm, 'in_file')
+ wf.connect(head_1mm, 'out', applywarp_brain_to_head_1mm, 'ref_file')
# fslstats $T1wImageBrain -M
average_brain = pe.Node(interface=fsl.ImageStats(),
- name=f'average_brain_{pipe_num}')
+ name=f'average_brain_{pipe_num}')
average_brain.inputs.op_string = '-M'
average_brain.inputs.output_type = 'NIFTI_GZ'
@@ -3042,17 +3092,16 @@ def freesurfer_abcd_preproc(wf, cfg, strat_pool, pipe_num, opt=None):
average_brain, 'in_file')
# fslmaths "$T1wImageFile"_1mm.nii.gz -div $Mean -mul 150 -abs "$T1wImageFile"_1mm.nii.gz
- normalize_head = pe.Node(util.Function(input_names=['in_file', 'number', 'out_file_suffix'],
+ normalize_head = pe.Node(util.Function(input_names=['in_file', 'number',
+ 'out_file_suffix'],
output_names=['out_file'],
function=fslmaths_command),
name=f'normalize_head_{pipe_num}')
normalize_head.inputs.out_file_suffix = '_norm'
- wf.connect(applywarp_head_to_head_1mm, 'out_file',
+ wf.connect(applywarp_head_to_head_1mm, 'out_file',
normalize_head, 'in_file')
-
- wf.connect(average_brain, 'out_stat',
- normalize_head, 'number')
+ wf.connect(average_brain, 'out_stat', normalize_head, 'number')
### recon-all -all step ###
reconall = pe.Node(interface=freesurfer.ReconAll(),
@@ -3060,10 +3109,9 @@ def freesurfer_abcd_preproc(wf, cfg, strat_pool, pipe_num, opt=None):
mem_gb=2.7)
sub_dir = cfg.pipeline_setup['working_directory']['path']
- freesurfer_subject_dir = os.path.join(sub_dir,
- 'cpac_'+cfg['subject_id'],
- f'anat_preproc_freesurfer_{pipe_num}',
- 'anat_freesurfer')
+ freesurfer_subject_dir = os.path.join(
+ sub_dir, 'cpac_'+cfg['subject_id'],
+ f'anat_preproc_freesurfer_{pipe_num}', 'anat_freesurfer')
# create the directory for FreeSurfer node
if not os.path.exists(freesurfer_subject_dir):
@@ -3071,10 +3119,10 @@ def freesurfer_abcd_preproc(wf, cfg, strat_pool, pipe_num, opt=None):
reconall.inputs.directive = 'all'
reconall.inputs.subjects_dir = freesurfer_subject_dir
- reconall.inputs.openmp = cfg.pipeline_setup['system_config']['num_OMP_threads']
+ reconall.inputs.openmp = cfg.pipeline_setup['system_config'][
+ 'num_OMP_threads']
- wf.connect(normalize_head, 'out_file',
- reconall, 'T1_files')
+ wf.connect(normalize_head, 'out_file', reconall, 'T1_files')
wf, hemisphere_outputs = freesurfer_hemispheres(wf, reconall, pipe_num)
@@ -3088,17 +3136,18 @@ def freesurfer_abcd_preproc(wf, cfg, strat_pool, pipe_num, opt=None):
**hemisphere_outputs
}
- return (wf, outputs)
+ return wf, outputs
-def correct_restore_brain_intensity_abcd(wf, cfg, strat_pool, pipe_num, opt=None):
+def correct_restore_brain_intensity_abcd(wf, cfg, strat_pool, pipe_num,
+ opt=None):
'''
{"name": "correct_restore_brain_intensity_abcd",
"config": ["anatomical_preproc", "brain_extraction"],
"switch": "None",
"option_key": "using",
"option_val": "FreeSurfer-ABCD",
- "inputs": [(["desc-preproc_T1w", "desc-reorient_T1w", "T1w"],
+ "inputs": [("desc-preproc_T1w",
"desc-n4_T1w",
"desc-restore-brain_T1w",
"space-T1w_desc-brain_mask",
@@ -3124,23 +3173,23 @@ def correct_restore_brain_intensity_abcd(wf, cfg, strat_pool, pipe_num, opt=None
merge_t1_acpc.inputs.dimension = 't'
- wf.connect(merge_t1_acpc_to_list, 'out',
- merge_t1_acpc, 'in_files')
+ wf.connect(merge_t1_acpc_to_list, 'out', merge_t1_acpc, 'in_files')
# fslmaths ${T1wFolder}/xfms/${T1wImage}_dc -mul 0 ${T1wFolder}/xfms/${T1wImage}_dc
- multiply_t1_acpc_by_zero = pe.Node(interface=fsl.ImageMaths(),
- name=f'multiply_t1_acpc_by_zero_{pipe_num}')
-
+ multiply_t1_acpc_by_zero = pe.Node(
+ interface=fsl.ImageMaths(),
+ name=f'multiply_t1_acpc_by_zero_{pipe_num}')
multiply_t1_acpc_by_zero.inputs.op_string = '-mul 0'
- wf.connect(merge_t1_acpc, 'merged_file',
- multiply_t1_acpc_by_zero, 'in_file')
+ wf.connect(merge_t1_acpc, 'merged_file',
+ multiply_t1_acpc_by_zero, 'in_file')
# Ref: https://github.com/DCAN-Labs/DCAN-HCP/blob/master/PostFreeSurfer/PostFreeSurferPipeline.sh#L157
# convertwarp --relout --rel --ref="$T1wFolder"/"$T1wImageBrainMask" --premat="$T1wFolder"/xfms/"$InitialT1wTransform" \
# --warp1="$T1wFolder"/xfms/"$dcT1wTransform" --out="$T1wFolder"/xfms/"$OutputOrigT1wToT1w"
- convertwarp_orig_t1_to_t1 = pe.Node(interface=fsl.ConvertWarp(),
- name=f'convertwarp_orig_t1_to_t1_{pipe_num}')
+ convertwarp_orig_t1_to_t1 = pe.Node(
+ interface=fsl.ConvertWarp(),
+ name=f'convertwarp_orig_t1_to_t1_{pipe_num}')
convertwarp_orig_t1_to_t1.inputs.out_relwarp = True
convertwarp_orig_t1_to_t1.inputs.relwarp = True
@@ -3148,14 +3197,15 @@ def correct_restore_brain_intensity_abcd(wf, cfg, strat_pool, pipe_num, opt=None
node, out = strat_pool.get_data('space-T1w_desc-brain_mask')
wf.connect(node, out, convertwarp_orig_t1_to_t1, 'reference')
- node, out = strat_pool.get_data('from-T1w_to-ACPC_mode-image_desc-aff2rig_xfm')
+ node, out = strat_pool.get_data(
+ 'from-T1w_to-ACPC_mode-image_desc-aff2rig_xfm')
wf.connect(node, out, convertwarp_orig_t1_to_t1, 'premat')
wf.connect(multiply_t1_acpc_by_zero, 'out_file',
- convertwarp_orig_t1_to_t1, 'warp1')
+ convertwarp_orig_t1_to_t1, 'warp1')
# Ref: https://github.com/DCAN-Labs/DCAN-HCP/blob/master/PostFreeSurfer/scripts/CreateMyelinMaps.sh#L72-L73
# applywarp --rel --interp=spline -i "$BiasField" -r "$T1wImageBrain" -w "$AtlasTransform" -o "$BiasFieldOutput"
- applywarp_biasfield = pe.Node(interface=fsl.ApplyWarp(),
+ applywarp_biasfield = pe.Node(interface=fsl.ApplyWarp(),
name=f'applywarp_biasfield_{pipe_num}')
applywarp_biasfield.inputs.relwarp = True
@@ -3175,29 +3225,27 @@ def correct_restore_brain_intensity_abcd(wf, cfg, strat_pool, pipe_num, opt=None
name=f'threshold_biasfield_{pipe_num}')
threshold_biasfield.inputs.op_string = '-thr 0.1'
- wf.connect(applywarp_biasfield, 'out_file',
- threshold_biasfield, 'in_file')
+ wf.connect(applywarp_biasfield, 'out_file', threshold_biasfield, 'in_file')
# Ref: https://github.com/DCAN-Labs/DCAN-HCP/blob/master/PostFreeSurfer/scripts/CreateMyelinMaps.sh#L67-L70
# applywarp --rel --interp=spline -i "$OrginalT1wImage" -r "$T1wImageBrain" -w "$OutputOrigT1wToT1w" -o "$OutputT1wImage"
- applywarp_t1 = pe.Node(interface=fsl.ApplyWarp(),
+ applywarp_t1 = pe.Node(interface=fsl.ApplyWarp(),
name=f'applywarp_t1_{pipe_num}')
-
+
applywarp_t1.inputs.relwarp = True
applywarp_t1.inputs.interp = 'spline'
-
+
node, out = strat_pool.get_data('desc-n4_T1w')
wf.connect(node, out, applywarp_t1, 'in_file')
-
+
node, out = strat_pool.get_data('space-T1w_desc-brain_mask')
wf.connect(node, out, applywarp_t1, 'ref_file')
-
+
wf.connect(convertwarp_orig_t1_to_t1, 'out_file',
- applywarp_t1, 'field_file')
+ applywarp_t1, 'field_file')
# fslmaths "$OutputT1wImage" -abs "$OutputT1wImage" -odt float
- abs_t1 = pe.Node(interface=fsl.ImageMaths(),
- name=f'abs_t1_{pipe_num}')
+ abs_t1 = pe.Node(interface=fsl.ImageMaths(), name=f'abs_t1_{pipe_num}')
abs_t1.inputs.op_string = '-abs'
wf.connect(applywarp_t1, 'out_file', abs_t1, 'in_file')
@@ -3217,14 +3265,11 @@ def correct_restore_brain_intensity_abcd(wf, cfg, strat_pool, pipe_num, opt=None
apply_mask = pe.Node(interface=fsl.maths.ApplyMask(),
name=f'get_restored_corrected_brain_{pipe_num}')
- wf.connect(div_t1_by_biasfield, 'out_file',
- apply_mask, 'in_file')
+ wf.connect(div_t1_by_biasfield, 'out_file', apply_mask, 'in_file')
node, out = strat_pool.get_data('space-T1w_desc-brain_mask')
wf.connect(node, out, apply_mask, 'mask_file')
- outputs = {
- 'desc-restore-brain_T1w': (apply_mask, 'out_file')
- }
+ outputs = {'desc-restore-brain_T1w': (apply_mask, 'out_file')}
- return (wf, outputs)
+ return wf, outputs
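
The changelog's "minimum quality measure thresholds" implies each guardrail scores the registered image against its reference. One plausible measure is Dice overlap between binarized masks; this sketch is illustrative only, since the metrics C-PAC actually computes are not shown in this diff:

```python
import numpy as np

def dice_coefficient(mask_a, mask_b):
    """Dice overlap between two binary masks; a registration guardrail
    could reject an attempt scoring below a configured threshold."""
    a = np.asarray(mask_a, dtype=bool)
    b = np.asarray(mask_b, dtype=bool)
    denominator = a.sum() + b.sum()
    if denominator == 0:
        return 1.0  # two empty masks overlap trivially
    return 2.0 * np.logical_and(a, b).sum() / denominator
```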
diff --git a/CPAC/anat_preproc/ants.py b/CPAC/anat_preproc/ants.py
index 368d4b847d..868b981ac2 100644
--- a/CPAC/anat_preproc/ants.py
+++ b/CPAC/anat_preproc/ants.py
@@ -17,23 +17,21 @@
from packaging.version import parse as parseversion, Version
# nipype
+# pylint: disable=wrong-import-order
import CPAC.pipeline.nipype_pipeline_engine as pe
from nipype.interfaces import utility as niu
from nipype.interfaces.fsl.maths import ApplyMask
-from nipype.interfaces.ants import N4BiasFieldCorrection, Atropos, MultiplyImages
-
-from ..utils.misc import get_template_specs
+from nipype.interfaces.ants import (N4BiasFieldCorrection, Atropos,
+ MultiplyImages)
+from CPAC.registration.guardrails import guardrail_selection, retry_clone
# niworkflows
-from ..utils.interfaces.ants import (
- ImageMath,
- ResampleImageBySpacing,
- AI,
- ThresholdImage,
-)
+from CPAC.utils.interfaces.ants import (ImageMath,
+ ResampleImageBySpacing,
+ AI,
+ ThresholdImage)
from CPAC.utils.interfaces.fixes import (
FixHeaderRegistration as Registration,
- FixHeaderApplyTransforms as ApplyTransforms,
-)
+ FixHeaderApplyTransforms as ApplyTransforms)
from CPAC.utils.interfaces.utils import CopyXForm
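
`retry_clone`, imported here and used below for `init_aff`, is assumed to wrap nipype's `Node.clone` so a failed registration can be re-run as a distinct node (e.g. under a different random seed). A plausible one-liner, labeled as an assumption:

```python
def retry_clone(node):
    # Assumed implementation: nipype's Node.clone gives the copy its own
    # name so both attempts can coexist in one workflow.
    return node.clone(name=f'retry_{node.name}')
```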
@@ -114,8 +112,9 @@ def init_brain_extraction_wf(tpl_target_path,
Estimated peak memory consumption of the most hungry nodes
in the workflow
bids_suffix : str
- Sequence type of the first input image. For a list of acceptable values
- see https://bids-specification.readthedocs.io/en/latest/\
+ Sequence type of the first input image. For a list of
+ acceptable values see
+ https://bids-specification.readthedocs.io/en/latest/\
04-modality-specific-files/01-magnetic-resonance-imaging-data.html#anatomy-imaging-data
atropos_refine : bool
Enables or disables the whole ATROPOS sub-workflow
@@ -146,12 +145,14 @@ def init_brain_extraction_wf(tpl_target_path,
computation to a specific region.
**Outputs**
out_file
- Skull-stripped and :abbr:`INU (intensity non-uniformity)`-corrected ``in_files``
+        Skull-stripped and :abbr:`INU (intensity non-uniformity)`-corrected
+        ``in_files``
out_mask
Calculated brain mask
bias_corrected
- The ``in_files`` input images, after :abbr:`INU (intensity non-uniformity)`
- correction, before skull-stripping.
+ The ``in_files`` input images, after
+ :abbr:`INU (intensity non-uniformity)` correction, before
+ skull-stripping.
bias_image
The :abbr:`INU (intensity non-uniformity)` field estimated for each
input in ``in_files``
@@ -187,7 +188,8 @@ def init_brain_extraction_wf(tpl_target_path,
mem_gb=1.3, mem_x=(3811976743057169 / 302231454903657293676544,
'hdr_file'))
- trunc = pe.MapNode(ImageMath(operation='TruncateImageIntensity', op2='0.01 0.999 256'),
+ trunc = pe.MapNode(ImageMath(operation='TruncateImageIntensity',
+ op2='0.01 0.999 256'),
name='truncate_images', iterfield=['op1'])
inu_n4 = pe.MapNode(
N4BiasFieldCorrection(
@@ -226,6 +228,7 @@ def init_brain_extraction_wf(tpl_target_path,
_ants_version = Registration().version
if _ants_version and parseversion(_ants_version) >= Version('2.3.0'):
init_aff.inputs.search_grid = (40, (0, 40, 40))
+ init_aff_nodes = (init_aff, retry_clone(init_aff))
# Set up spatial normalization
settings_file = 'antsBrainExtraction_%s.json' if use_laplacian \
@@ -241,10 +244,12 @@ def init_brain_extraction_wf(tpl_target_path,
if _ants_version and parseversion(_ants_version) >= Version('2.2.0'):
fixed_mask_trait += 's'
+ norm_nodes, norm_guardrails = wf.nodes_and_guardrails(
+ norm, registered='warped_image')
+
map_brainmask = pe.Node(
ApplyTransforms(interpolation='Gaussian', float=True),
- name='map_brainmask'
- )
+ name='map_brainmask')
map_brainmask.inputs.input_image = str(tpl_mask_path)
thr_brainmask = pe.Node(ThresholdImage(
@@ -267,24 +272,34 @@ def init_brain_extraction_wf(tpl_target_path,
n_procs=omp_nthreads, name='inu_n4_final', iterfield=['input_image'])
# Apply mask
- apply_mask = pe.MapNode(ApplyMask(), iterfield=['in_file'], name='apply_mask')
-
+ apply_mask = pe.MapNode(ApplyMask(), iterfield=['in_file'],
+ name='apply_mask')
+
+ wf.connect_retries(init_aff_nodes, [
+ (inputnode, 'in_mask', 'fixed_image_mask'),
+ (res_tmpl, 'output_image', 'fixed_image'),
+ (res_target, 'output_image', 'moving_image')])
+ for i, node in enumerate(norm_nodes):
+ wf.connect(init_aff_nodes[i], 'output_transform',
+ node, 'initial_moving_transform')
+ wf.connect_retries(norm_nodes, [
+ (inputnode, 'in_mask', fixed_mask_trait)])
+ norm_rtransforms = guardrail_selection(wf, *norm_nodes,
+ 'reverse_transforms',
+ norm_guardrails[0])
+ norm_rinvert_flags = guardrail_selection(wf, *norm_nodes,
+ 'reverse_invert_flags',
+ norm_guardrails[0])
wf.connect([
(inputnode, trunc, [('in_files', 'op1')]),
(inputnode, copy_xform, [(('in_files', _pop), 'hdr_file')]),
(inputnode, inu_n4_final, [('in_files', 'input_image')]),
- (inputnode, init_aff, [('in_mask', 'fixed_image_mask')]),
- (inputnode, norm, [('in_mask', fixed_mask_trait)]),
(inputnode, map_brainmask, [(('in_files', _pop), 'reference_image')]),
(trunc, inu_n4, [('output_image', 'input_image')]),
- (inu_n4, res_target, [
- (('output_image', _pop), 'input_image')]),
- (res_tmpl, init_aff, [('output_image', 'fixed_image')]),
- (res_target, init_aff, [('output_image', 'moving_image')]),
- (init_aff, norm, [('output_transform', 'initial_moving_transform')]),
- (norm, map_brainmask, [
- ('reverse_transforms', 'transforms'),
- ('reverse_invert_flags', 'invert_transform_flags')]),
+ (inu_n4, res_target, [(('output_image', _pop), 'input_image')]),
+ (norm_rtransforms, map_brainmask, [('out', 'transforms')]),
+ (norm_rinvert_flags, map_brainmask, [
+ ('out', 'invert_transform_flags')]),
(map_brainmask, thr_brainmask, [('output_image', 'input_image')]),
(thr_brainmask, dil_brainmask, [('output_image', 'op1')]),
(dil_brainmask, get_brainmask, [('output_image', 'op1')]),
@@ -294,12 +309,10 @@ def init_brain_extraction_wf(tpl_target_path,
(apply_mask, copy_xform, [('out_file', 'out_file')]),
(inu_n4_final, copy_xform, [('output_image', 'bias_corrected'),
('bias_image', 'bias_image')]),
- (copy_xform, outputnode, [
- ('out_file', 'out_file'),
- ('out_mask', 'out_mask'),
- ('bias_corrected', 'bias_corrected'),
- ('bias_image', 'bias_image')]),
- ])
+ (copy_xform, outputnode, [('out_file', 'out_file'),
+ ('out_mask', 'out_mask'),
+ ('bias_corrected', 'bias_corrected'),
+ ('bias_image', 'bias_image')])])
if use_laplacian:
lap_tmpl = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
@@ -311,29 +324,28 @@ def init_brain_extraction_wf(tpl_target_path,
mrg_tmpl.inputs.in1 = tpl_target_path
mrg_target = pe.Node(niu.Merge(2), name='mrg_target')
wf.connect([
- (inu_n4, lap_target, [
- (('output_image', _pop), 'op1')]),
+ (inu_n4, lap_target, [(('output_image', _pop), 'op1')]),
(lap_tmpl, mrg_tmpl, [('output_image', 'in2')]),
(inu_n4, mrg_target, [('output_image', 'in1')]),
- (lap_target, mrg_target, [('output_image', 'in2')]),
- (mrg_tmpl, norm, [('out', 'fixed_image')]),
- (mrg_target, norm, [('out', 'moving_image')]),
- ])
+ (lap_target, mrg_target, [('output_image', 'in2')])])
+ wf.connect_retries(norm_nodes, [(mrg_tmpl, 'out', 'fixed_image'),
+ (mrg_target, 'out', 'moving_image')])
+ wf.connect_retries(norm_guardrails, [(mrg_tmpl, 'out', 'reference')])
else:
- norm.inputs.fixed_image = tpl_target_path
- wf.connect([
- (inu_n4, norm, [
- (('output_image', _pop), 'moving_image')]),
- ])
+ for i, node in enumerate(norm_nodes):
+ node.inputs.fixed_image = tpl_target_path
+ norm_guardrails[i].inputs.reference = tpl_target_path
+ wf.connect_retries(norm_nodes, [
+ (inu_n4, ('output_image', _pop), 'moving_image')])
if atropos_refine:
- atropos_model = atropos_model or list(ATROPOS_MODELS[bids_suffix].values())
+ atropos_model = atropos_model or list(
+ ATROPOS_MODELS[bids_suffix].values())
atropos_wf = init_atropos_wf(
use_random_seed=atropos_use_random_seed,
omp_nthreads=omp_nthreads,
mem_gb=mem_gb,
- in_segmentation_model=atropos_model,
- )
+ in_segmentation_model=atropos_model)
sel_wm = pe.Node(niu.Select(index=atropos_model[-1] - 1),
name='sel_wm',
run_without_submitting=True,
@@ -343,24 +355,19 @@ def init_brain_extraction_wf(tpl_target_path,
wf.disconnect([
(get_brainmask, apply_mask, [('output_image', 'mask_file')]),
- (copy_xform, outputnode, [('out_mask', 'out_mask')]),
- ])
+ (copy_xform, outputnode, [('out_mask', 'out_mask')])])
wf.connect([
- (inu_n4, atropos_wf, [
- ('output_image', 'inputnode.in_files')]),
+ (inu_n4, atropos_wf, [('output_image', 'inputnode.in_files')]),
(thr_brainmask, atropos_wf, [
('output_image', 'inputnode.in_mask')]),
(get_brainmask, atropos_wf, [
('output_image', 'inputnode.in_mask_dilated')]),
(atropos_wf, sel_wm, [('outputnode.out_tpms', 'inlist')]),
(sel_wm, inu_n4_final, [('out', 'weight_image')]),
- (atropos_wf, apply_mask, [
- ('outputnode.out_mask', 'mask_file')]),
- (atropos_wf, outputnode, [
- ('outputnode.out_mask', 'out_mask'),
- ('outputnode.out_segm', 'out_segm'),
- ('outputnode.out_tpms', 'out_tpms')]),
- ])
+ (atropos_wf, apply_mask, [('outputnode.out_mask', 'mask_file')]),
+ (atropos_wf, outputnode, [('outputnode.out_mask', 'out_mask'),
+ ('outputnode.out_segm', 'out_segm'),
+ ('outputnode.out_tpms', 'out_tpms')])])
return wf
diff --git a/CPAC/cwas/cwas.py b/CPAC/cwas/cwas.py
index 5264b07bd0..5513ed8cac 100644
--- a/CPAC/cwas/cwas.py
+++ b/CPAC/cwas/cwas.py
@@ -3,6 +3,9 @@
import nibabel as nb
import numpy as np
import pandas as pd
+from scipy.stats import t
+from numpy import inf
from CPAC.cwas.mdmr import mdmr
from CPAC.utils import correlation
@@ -35,7 +38,6 @@ def joint_mask(subjects, mask_file=None):
mask_file = os.path.join(os.getcwd(), 'joint_mask.nii.gz')
create_merged_copefile(files, cope_file)
create_merge_mask(cope_file, mask_file)
-
return mask_file
@@ -48,7 +50,6 @@ def calc_mdmrs(D, regressor, cols, permutations):
def calc_subdists(subjects_data, voxel_range):
subjects, voxels, _ = subjects_data.shape
D = np.zeros((len(voxel_range), subjects, subjects))
-
for i, v in enumerate(voxel_range):
profiles = np.zeros((subjects, voxels))
for si in range(subjects):
@@ -67,6 +68,12 @@ def calc_cwas(subjects_data, regressor, regressor_selected_cols, permutations, v
D, regressor, regressor_selected_cols, permutations)
return F_set, p_set
+def pval_to_zval(p_set, permu):
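+    # convert permutation p-values to z-values via the inverse CDF of a
+    # t-distribution with len(p_set) - 1 degrees of freedom; values that
+    # would be infinite are clamped based on the permutation count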
+ inv_pval = 1 - p_set
+ zvals = t.ppf(inv_pval, (len(p_set) - 1))
+ zvals[zvals == -inf] = permu / (permu + 1)
+ zvals[zvals == inf] = permu / (permu + 1)
+ return zvals
def nifti_cwas(subjects, mask_file, regressor_file, participant_column,
columns_string, permutations, voxel_range):
@@ -130,7 +137,7 @@ def nifti_cwas(subjects, mask_file, regressor_file, participant_column,
for sub_id in subject_ids:
if str(sub_id).lstrip('0') == str(pheno_sub_id):
regressor_data.at[index, participant_column] = str(sub_id)
-
+
regressor_data.index = regressor_data[participant_column]
# Keep only data from specific subjects
@@ -144,33 +151,27 @@ def nifti_cwas(subjects, mask_file, regressor_file, participant_column,
if len(regressor_selected_cols) == 0:
regressor_selected_cols = [i for i, c in enumerate(regressor_cols)]
regressor_selected_cols = np.array(regressor_selected_cols)
-
# Remove participant id column from the dataframe and convert it to a numpy matrix
regressor = ordered_regressor_data \
.drop(columns=[participant_column]) \
.reset_index(drop=True) \
.values \
.astype(np.float64)
-
if len(regressor.shape) == 1:
regressor = regressor[:, np.newaxis]
elif len(regressor.shape) != 2:
raise ValueError('Bad regressor shape: %s' % str(regressor.shape))
-
if len(subject_files) != regressor.shape[0]:
raise ValueError('Number of subjects does not match regressor size')
-
- mask = nb.load(mask_file).get_data().astype('bool')
+ mask = nb.load(mask_file).get_fdata().astype('bool')
mask_indices = np.where(mask)
-
subjects_data = np.array([
- nb.load(subject_file).get_data().astype('float64')[mask_indices].T
+ nb.load(subject_file).get_fdata().astype('float64')[mask_indices]
for subject_file in subject_files
])
F_set, p_set = calc_cwas(subjects_data, regressor, regressor_selected_cols,
permutations, voxel_range)
-
cwd = os.getcwd()
F_file = os.path.join(cwd, 'pseudo_F.npy')
p_file = os.path.join(cwd, 'significance_p.npy')
@@ -182,7 +183,7 @@ def nifti_cwas(subjects, mask_file, regressor_file, participant_column,
def create_cwas_batches(mask_file, batches):
- mask = nb.load(mask_file).get_data().astype('bool')
+ mask = nb.load(mask_file).get_fdata().astype('bool')
voxels = mask.sum(dtype=int)
return np.array_split(np.arange(voxels), batches)
@@ -198,7 +199,7 @@ def volumize(mask_image, data):
)
-def merge_cwas_batches(cwas_batches, mask_file):
+def merge_cwas_batches(cwas_batches, mask_file, z_score, permutations):
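+    # z_score is list-like from the inputspec; if it contains 1, a
+    # z-statistic volume is also produced via pval_to_zval and zstat_image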
_, _, voxel_range = zip(*cwas_batches)
voxels = np.array(np.concatenate(voxel_range))
@@ -211,18 +212,37 @@ def merge_cwas_batches(cwas_batches, mask_file):
p_set[voxel_range] = np.load(p_file)
log_p_set = -np.log10(p_set)
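+    # also write a 1-p volume, so higher voxel values correspond to
+    # stronger significance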
+ one_p_set = 1 - p_set
F_vol = volumize(mask_image, F_set)
p_vol = volumize(mask_image, p_set)
log_p_vol = volumize(mask_image, log_p_set)
+ one_p_vol = volumize(mask_image, one_p_set)
cwd = os.getcwd()
F_file = os.path.join(cwd, 'pseudo_F_volume.nii.gz')
p_file = os.path.join(cwd, 'p_significance_volume.nii.gz')
log_p_file = os.path.join(cwd, 'neglog_p_significance_volume.nii.gz')
+ one_p_file = os.path.join(cwd, 'one_minus_p_values.nii.gz')
F_vol.to_filename(F_file)
p_vol.to_filename(p_file)
log_p_vol.to_filename(log_p_file)
+ one_p_vol.to_filename(one_p_file)
+
-    return F_file, p_file, log_p_file
+    z_file = None
+    if 1 in z_score:
+        zvals = pval_to_zval(p_set, permutations)
+        z_file = zstat_image(zvals, mask_file)
+
+    return F_file, p_file, log_p_file, one_p_file, z_file
+
+
+def zstat_image(zvals, mask_file):
+    mask_image = nb.load(mask_file)
+    z_vol = volumize(mask_image, zvals)
+
+ cwd = os.getcwd()
+ z_file = os.path.join(cwd, 'zstat.nii.gz')
+
+ z_vol.to_filename(z_file)
+ return z_file
diff --git a/CPAC/cwas/mdmr.py b/CPAC/cwas/mdmr.py
index 5aed0505ca..80524ff4d2 100644
--- a/CPAC/cwas/mdmr.py
+++ b/CPAC/cwas/mdmr.py
@@ -83,15 +83,15 @@ def mdmr(D, X, columns, permutations):
Gs[:, di] = gower(D[di]).flatten()
X1 = np.hstack((np.ones((subjects, 1)), X))
- columns = columns.copy() + 1
+    columns = columns.copy()  # removed a +1
regressors = X1.shape[1]
-
- permutation_indexes = np.zeros((permutations + 1, subjects), dtype=np.int)
+
+ permutation_indexes = np.zeros((permutations, subjects), dtype=np.int)
permutation_indexes[0, :] = range(subjects)
- for i in range(1, permutations + 1):
+ for i in range(1, permutations):
permutation_indexes[i,:] = np.random.permutation(subjects)
-
+
H2perms = gen_h2_perms(X1, columns, permutation_indexes)
IHperms = gen_ih_perms(X1, columns, permutation_indexes)
diff --git a/CPAC/cwas/pipeline.py b/CPAC/cwas/pipeline.py
index 401c7fa336..5dcab83340 100644
--- a/CPAC/cwas/pipeline.py
+++ b/CPAC/cwas/pipeline.py
@@ -2,6 +2,7 @@
import os
from CPAC.pipeline import nipype_pipeline_engine as pe
import nipype.interfaces.utility as util
+from nipype import config
from CPAC.utils.interfaces.function import Function
@@ -11,6 +12,7 @@
create_cwas_batches,
merge_cwas_batches,
nifti_cwas,
+ zstat_image,
)
@@ -54,6 +56,8 @@ def create_cwas(name='cwas', working_dir=None, crash_dir=None):
Pseudo F values of CWAS
outputspec.p_map : string (nifti file)
Significance p values calculated from permutation tests
+    outputspec.one_p_map : string (nifti file)
+        1 - p values from the permutation tests
+    outputspec.z_map : string (nifti file)
+        Significance p values converted to z-scores
CWAS Procedure:
@@ -83,10 +87,9 @@ def create_cwas(name='cwas', working_dir=None, crash_dir=None):
References
----------
- .. [1] Shehzad Z, Kelly C, Reiss PT, Emerson JW, McMahon K, Copland DA, Castellanos FX, Milham MP. An Analytic Framework for Connectome-Wide Association Studies. Under Review.
+ .. [1] Shehzad Z, Kelly C, Reiss PT, Cameron Craddock R, Emerson JW, McMahon K, Copland DA, Castellanos FX, Milham MP. A multivariate distance-based analytic framework for connectome-wide association studies. Neuroimage. 2014 Jun;93 Pt 1(0 1):74-94. doi: 10.1016/j.neuroimage.2014.02.024. Epub 2014 Feb 28. PMID: 24583255; PMCID: PMC4138049.
"""
-
if not working_dir:
working_dir = os.path.join(os.getcwd(), 'MDMR_work_dir')
if not crash_dir:
@@ -95,7 +98,8 @@ def create_cwas(name='cwas', working_dir=None, crash_dir=None):
workflow = pe.Workflow(name=name)
workflow.base_dir = working_dir
workflow.config['execution'] = {'hash_method': 'timestamp',
- 'crashdump_dir': os.path.abspath(crash_dir)}
+ 'crashdump_dir': os.path.abspath(crash_dir),
+ 'crashfile_format': 'txt'}
inputspec = pe.Node(util.IdentityInterface(fields=['roi',
'subjects',
@@ -103,12 +107,15 @@ def create_cwas(name='cwas', working_dir=None, crash_dir=None):
'participant_column',
'columns',
'permutations',
- 'parallel_nodes']),
+ 'parallel_nodes',
+ 'z_score']),
name='inputspec')
outputspec = pe.Node(util.IdentityInterface(fields=['F_map',
'p_map',
- 'neglog_p_map']),
+ 'neglog_p_map',
+ 'one_p_map',
+ 'z_map']),
name='outputspec')
ccb = pe.Node(Function(input_names=['mask_file',
@@ -139,10 +146,14 @@ def create_cwas(name='cwas', working_dir=None, crash_dir=None):
name='joint_mask')
mcwasb = pe.Node(Function(input_names=['cwas_batches',
- 'mask_file'],
+ 'mask_file',
+ 'z_score',
+ 'permutations'],
output_names=['F_file',
'p_file',
- 'neglog_p_file'],
+ 'neglog_p_file',
+ 'one_p_file',
+ 'z_file'],
function=merge_cwas_batches,
as_module=True),
name='cwas_volumes')
@@ -181,9 +192,15 @@ def create_cwas(name='cwas', working_dir=None, crash_dir=None):
mcwasb, 'cwas_batches')
workflow.connect(jmask, 'joint_mask',
mcwasb, 'mask_file')
+ workflow.connect(inputspec, 'z_score',
+ mcwasb, 'z_score')
+ workflow.connect(inputspec, 'permutations',
+ mcwasb, 'permutations')
workflow.connect(mcwasb, 'F_file', outputspec, 'F_map')
workflow.connect(mcwasb, 'p_file', outputspec, 'p_map')
workflow.connect(mcwasb, 'neglog_p_file', outputspec, 'neglog_p_map')
+ workflow.connect(mcwasb, 'one_p_file', outputspec, 'one_p_map')
+ workflow.connect(mcwasb, 'z_file', outputspec, 'z_map')
return workflow
diff --git a/CPAC/distortion_correction/distortion_correction.py b/CPAC/distortion_correction/distortion_correction.py
index 33ff232b58..4b981678e9 100644
--- a/CPAC/distortion_correction/distortion_correction.py
+++ b/CPAC/distortion_correction/distortion_correction.py
@@ -1,24 +1,38 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
+# Copyright (C) 2017-2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
import os
import subprocess
import nibabel as nb
-
+# pylint: disable=ungrouped-imports,wrong-import-order
from CPAC.pipeline import nipype_pipeline_engine as pe
-from nipype.interfaces import afni, fsl
-import nipype.interfaces.utility as util
-import nipype.interfaces.ants as ants
-import nipype.interfaces.afni.preprocess as preprocess
-import nipype.interfaces.afni.utils as afni_utils
+from nipype.interfaces import afni, ants, fsl, utility as util
+from nipype.interfaces.afni import preprocess, utils as afni_utils
-from CPAC.pipeline.engine import wrap_block
+# from CPAC.pipeline.engine import wrap_block
+from CPAC.registration.guardrails import guardrail_selection
from CPAC.utils import function
from CPAC.utils.interfaces.function import Function
from CPAC.utils.datasource import match_epi_fmaps
-from CPAC.func_preproc.func_preproc import bold_mask_afni, bold_masking
+# from CPAC.func_preproc.func_preproc import bold_mask_afni, bold_masking
from CPAC.distortion_correction.utils import run_convertwarp, \
phase_encode, \
@@ -104,8 +118,8 @@ def distcor_phasediff_fsl_fugue(wf, cfg, strat_pool, pipe_num, opt=None):
"inputs": ["diffphase",
"diffmag",
"deltaTE",
- "diffphase-dwell",
- "dwell-asym-ratio"],
+ "effectiveEchoSpacing",
+ "ees-asym-ratio"],
"outputs": ["despiked-fieldmap",
"fieldmap-mask"]}
'''
@@ -209,10 +223,10 @@ def distcor_phasediff_fsl_fugue(wf, cfg, strat_pool, pipe_num, opt=None):
wf.connect(fslmath_mask, 'out_file', fugue1, 'mask_file')
- node, out = strat_pool.get_data('diffphase-dwell')
+ # FSL calls EffectiveEchoSpacing "dwell_time"
+ node, out = strat_pool.get_data('effectiveEchoSpacing')
wf.connect(node, out, fugue1, 'dwell_time')
-
- node, out = strat_pool.get_data('dwell-asym-ratio')
+ node, out = strat_pool.get_data('ees-asym-ratio')
wf.connect(node, out, fugue1, 'dwell_to_asym_ratio')
wf.connect(prepare, 'out_fieldmap', fugue1, 'fmap_in_file')
@@ -382,17 +396,22 @@ def distcor_blip_afni_qwarp(wf, cfg, strat_pool, pipe_num, opt=None):
func_edge_detect.inputs.expr = 'a*b'
func_edge_detect.inputs.outputtype = 'NIFTI_GZ'
-
- wf.connect(match_epi_fmaps_node, 'opposite_pe_epi', func_edge_detect, 'in_file_a')
+
+ wf.connect(match_epi_fmaps_node, 'opposite_pe_epi',
+ func_edge_detect, 'in_file_a')
wf.connect(func_get_brain_mask, 'out_file', func_edge_detect, 'in_file_b')
- opp_pe_to_func = pe.Node(interface=fsl.FLIRT(), name='opp_pe_to_func')
- opp_pe_to_func.inputs.cost = 'corratio'
-
- wf.connect(func_edge_detect, 'out_file', opp_pe_to_func, 'in_file')
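+    # run FLIRT and a retry clone under registration guardrails;
+    # guardrail_selection() forwards the output of whichever attempt passes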
+ _opp_pe_to_func = pe.Node(interface=fsl.FLIRT(), name='opp_pe_to_func')
+ _opp_pe_to_func.inputs.cost = 'corratio'
+ optf_nodes, optf_guardrails = wf.nodes_and_guardrails(
+ _opp_pe_to_func, registered='out_file')
node, out = strat_pool.get_data('desc-mean_bold')
- wf.connect(node, out, opp_pe_to_func, 'reference')
+ wf.connect_retries(optf_nodes, [(func_edge_detect, 'out_file', 'in_file'),
+ (node, out, 'reference')])
+ wf.connect_retries(optf_guardrails, [
+ (node, out, 'reference')])
+ opp_pe_to_func = guardrail_selection(wf, *optf_guardrails)
prep_qwarp_input_imports = ['import os', 'import subprocess']
prep_qwarp_input = \
@@ -417,7 +436,7 @@ def distcor_blip_afni_qwarp(wf, cfg, strat_pool, pipe_num, opt=None):
imports=calculate_blip_warp_imports),
name='calc_blip_warp')
- wf.connect(opp_pe_to_func, 'out_file', calc_blip_warp, 'opp_pe')
+ wf.connect(opp_pe_to_func, 'out', calc_blip_warp, 'opp_pe')
wf.connect(prep_qwarp_input, 'qwarp_input', calc_blip_warp, 'same_pe')
convert_afni_warp_imports = ['import os', 'import nibabel as nb']
@@ -467,8 +486,8 @@ def distcor_blip_afni_qwarp(wf, cfg, strat_pool, pipe_num, opt=None):
}
return (wf, outputs)
-
-
+
+
def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
'''Execute FSL TOPUP to calculate the distortion "unwarp" for
phase encoding direction EPI field map distortion correction.
@@ -479,18 +498,19 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
"switch": ["run"],
"option_key": "using",
"option_val": "Blip-FSL-TOPUP",
- "inputs": [(["desc-preproc_bold", "bold"],
- "space-bold_desc-brain_mask"),
+ "inputs": [("desc-mean_bold", "space-bold_desc-brain_mask"),
"pe-direction",
"epi-1",
"epi-1-pedir",
"epi-1-TE",
"epi-1-dwell",
+ "epi-1-total-readout",
"epi-2",
"epi-2-pedir",
"epi-2-TE",
- "epi-2-dwell"],
- "outputs": ["desc-reginput_bold",
+ "epi-2-dwell",
+ "epi-2-total-readout"],
+ "outputs": ["desc-mean_bold",
"space-bold_desc-brain_mask",
"blip-warp"]}
'''
@@ -531,12 +551,12 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
else:
'''
-
+
create_list = pe.Node(interface=util.Merge(2), name="create_list")
node, out = strat_pool.get_data('epi-1')
wf.connect(node, out, create_list, 'in1')
-
+
node, out = strat_pool.get_data('epi-2')
wf.connect(node, out, create_list, 'in2')
@@ -550,26 +570,28 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
Mask.inputs.operand_value = 0
Mask.inputs.operation = "mul"
Mask.inputs.args = "-add 1"
-
+
node, out = strat_pool.get_data('epi-1')
wf.connect(node, out, Mask, 'in_file')
- #zpad_phases = z_pad("zpad_phases")
- #wf.connect(merge_image, "merged_file", zpad_phases, "inputspec.input_image")
+ # zpad_phases = z_pad("zpad_phases")
+ # wf.connect(merge_image, "merged_file", zpad_phases, "inputspec.input_image")
- #zpad_mask = z_pad("zpad_mask")
- #wf.connect(Mask, "out_file", zpad_mask, "inputspec.input_image")
+ # zpad_mask = z_pad("zpad_mask")
+ # wf.connect(Mask, "out_file", zpad_mask, "inputspec.input_image")
# extrapolate existing values beyond the mask
- extrap_vals = pe.Node(interface=fsl.maths.BinaryMaths(),
+ extrap_vals = pe.Node(interface=fsl.maths.BinaryMaths(),
name="extrap_vals")
extrap_vals.inputs.operation = "add"
extrap_vals.inputs.operand_value = 1
extrap_vals.inputs.args = "-abs -dilM -dilM -dilM -dilM -dilM"
-
- #wf.connect(zpad_phases, "outputspec.output_image", extrap_vals, "in_file")
- #wf.connect(zpad_mask, "outputspec.output_image", extrap_vals, "operand_file")
-
+
+ # wf.connect(zpad_phases, "outputspec.output_image",
+ # extrap_vals, "in_file")
+ # wf.connect(zpad_mask, "outputspec.output_image",
+ # extrap_vals, "operand_file")
+
wf.connect(merge_image, "merged_file", extrap_vals, "in_file")
wf.connect(Mask, "out_file", extrap_vals, "operand_file")
@@ -588,7 +610,9 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
"phase_one",
"phase_two",
"dwell_time_one",
- "dwell_time_two"
+ "dwell_time_two",
+ "ro_time_one",
+ "ro_time_two"
],
output_names=["acq_params"],
function=phase_encode,
@@ -598,18 +622,30 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
)
node, out = strat_pool.get_data('epi-1')
wf.connect(node, out, phase_encoding, 'phase_one')
-
+
node, out = strat_pool.get_data('epi-2')
wf.connect(node, out, phase_encoding, 'phase_two')
-
+
node, out = strat_pool.get_data('pe-direction')
wf.connect(node, out, phase_encoding, 'unwarp_dir')
-
- node, out = strat_pool.get_data('epi-1-dwell')
- wf.connect(node, out, phase_encoding, 'dwell_time_one')
- node, out = strat_pool.get_data('epi-2-dwell')
- wf.connect(node, out, phase_encoding, 'dwell_time_two')
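+    # connect whichever timing metadata the field-map sidecars provide;
+    # phase_encode() raises if neither DwellTime nor TotalReadoutTime is
+    # available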
+ if strat_pool.check_rpool('epi-1-dwell') and \
+ strat_pool.check_rpool('epi-2-dwell'):
+
+ node, out = strat_pool.get_data('epi-1-dwell')
+ wf.connect(node, out, phase_encoding, 'dwell_time_one')
+
+ node, out = strat_pool.get_data('epi-2-dwell')
+ wf.connect(node, out, phase_encoding, 'dwell_time_two')
+
+ if strat_pool.check_rpool('epi-1-total-readout') and \
+ strat_pool.check_rpool('epi-2-total-readout'):
+
+ node, out = strat_pool.get_data('epi-1-total-readout')
+ wf.connect(node, out, phase_encoding, 'ro_time_one')
+
+ node, out = strat_pool.get_data('epi-2-total-readout')
+ wf.connect(node, out, phase_encoding, 'ro_time_two')
topup_imports = ["import os",
"import subprocess"]
@@ -631,24 +667,23 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
wf.connect(phase_encoding, "acq_params", run_topup, "acqparams")
choose_phase = pe.Node(
- util.Function(
- input_names=["phase_imgs",
+ util.Function(
+ input_names=["phase_imgs",
"unwarp_dir"],
output_names=["out_phase_image",
"vnum"],
function=choose_phase_image
- ),
- name="choose_phase",
+ ), name="choose_phase",
)
-
+
wf.connect(create_list, 'out', choose_phase, 'phase_imgs')
node, out = strat_pool.get_data("pe-direction")
wf.connect(node, out, choose_phase, "unwarp_dir")
vnum_base = pe.Node(
- util.Function(
- input_names=["vnum",
+ util.Function(
+ input_names=["vnum",
"motion_mat_list",
"jac_matrix_list",
"warp_field_list"],
@@ -656,72 +691,75 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
"out_jacobian",
"out_warp_field"],
function=find_vnum_base
- ),
- name="Motion_Jac_Warp_matrices",
- )
+ ), name="Motion_Jac_Warp_matrices",
+ )
wf.connect(choose_phase, 'vnum', vnum_base, 'vnum')
wf.connect(run_topup, 'out_xfms', vnum_base, 'motion_mat_list')
wf.connect(run_topup, 'out_jacs', vnum_base, 'jac_matrix_list')
wf.connect(run_topup, 'out_warps', vnum_base, 'warp_field_list')
- create_scout = pe.Node(interface=afni_utils.Calc(),
- name="topupwf_create_scout")
- create_scout.inputs.set(
- expr='a',
- single_idx=0,
- outputtype='NIFTI_GZ'
- )
+ # create_scout = pe.Node(interface=afni_utils.Calc(),
+ # name="topupwf_create_scout")
+ # create_scout.inputs.set(
+ # expr='a',
+ # single_idx=0,
+ # outputtype='NIFTI_GZ'
+ # )
- node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
- wf.connect(node, out, create_scout, 'in_file_a')
+ mean_bold = strat_pool.node_data("desc-mean_bold")
+ # node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
+ # wf.connect(node, out, create_scout, 'in_file_a')
flirt = pe.Node(interface=fsl.FLIRT(), name="flirt")
flirt.inputs.dof = 6
flirt.inputs.interp = 'spline'
flirt.inputs.out_matrix_file = 'SBRef2PhaseTwo_gdc.mat'
-
- wf.connect(create_scout, 'out_file', flirt, 'in_file')
- wf.connect(choose_phase, 'out_phase_image', flirt, 'reference')
-
- #fsl_convert_xfm
+ flirt_nodes, flirt_guardrails = wf.nodes_and_guardrails(
+ flirt, registered='out_file')
+
+ wf.connect_retries(flirt_nodes, [
+ (mean_bold.node, mean_bold.out, 'in_file'),
+ (choose_phase, 'out_phase_image', 'reference')])
+ wf.connect_retries(flirt_guardrails, [
+ (choose_phase, 'out_phase_image', 'reference')])
+ flirt_matrix = guardrail_selection(wf, *flirt_nodes, 'out_matrix_file',
+ flirt_guardrails[0])
+
+ # fsl_convert_xfm
convert_xfm = pe.Node(interface=fsl.ConvertXFM(), name="convert_xfm")
convert_xfm.inputs.concat_xfm = True
convert_xfm.inputs.out_file = 'SBRef2WarpField.mat'
- wf.connect(flirt, 'out_matrix_file', convert_xfm,'in_file')
- wf.connect(vnum_base, 'out_motion_mat', convert_xfm,'in_file2')
+ wf.connect(flirt_matrix, 'out', convert_xfm, 'in_file')
+ wf.connect(vnum_base, 'out_motion_mat', convert_xfm, 'in_file2')
- #fsl_convert_warp
- convert_warp = pe.Node(interface=fsl.ConvertWarp(),
- name = "convert_warp")
+ # fsl_convert_warp
+ convert_warp = pe.Node(interface=fsl.ConvertWarp(), name="convert_warp")
convert_warp.inputs.relwarp = True
convert_warp.inputs.out_relwarp = True
convert_warp.inputs.out_file = 'WarpField.nii.gz'
wf.connect(choose_phase, 'out_phase_image', convert_warp, 'reference')
wf.connect(vnum_base, 'out_warp_field', convert_warp, 'warp1')
- wf.connect(convert_xfm, 'out_file' ,convert_warp, 'premat')
+ wf.connect(convert_xfm, 'out_file', convert_warp, 'premat')
- out_convert_warp = (convert_warp,'out_file')
-
- VolumeNumber = 1+1
+ VolumeNumber = 1 + 1
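+    # PhaseTwo is the second volume, so its zero-padded index is "02"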
vnum = str(VolumeNumber).zfill(2)
name = "PhaseTwo_aw"
vnum_base_two = pe.Node(
- util.Function(
+ util.Function(
input_names=["vnum",
"motion_mat_list",
"jac_matrix_list",
"warp_field_list"],
- output_names=["out_motion_mat",
- "out_jacobian",
+ output_names=["out_motion_mat",
+ "out_jacobian",
"out_warp_field"],
function=find_vnum_base
- ),
- name=f"Motion_Jac_Warp_matrices_{name}",
- )
+ ), name=f"Motion_Jac_Warp_matrices_{name}",
+ )
vnum_base_two.inputs.vnum = vnum
wf.connect(run_topup, 'out_xfms', vnum_base_two, 'motion_mat_list')
@@ -732,7 +770,7 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
aw_two = pe.Node(interface=fsl.ApplyWarp(), name="aw_two")
aw_two.inputs.relwarp = True
aw_two.inputs.interp = 'spline'
-
+
node, out = strat_pool.get_data('epi-2')
wf.connect(node, out, aw_two, 'in_file')
wf.connect(node, out, aw_two, 'ref_file')
@@ -746,14 +784,15 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
wf.connect(aw_two, 'out_file', mul_phase_two, 'in_file')
wf.connect(vnum_base_two, 'out_jacobian', mul_phase_two, 'operand_file')
-
- # PhaseOne (first vol) - warp and Jacobian modulate to get distortion corrected output
- VolumeNumber= 0 + 1
+
+ # PhaseOne (first vol) - warp and Jacobian modulate to get
+ # distortion corrected output
+ VolumeNumber = 0 + 1
vnum = str(VolumeNumber).zfill(2)
name = "PhaseOne_aw"
vnum_base_one = pe.Node(
- util.Function(
+ util.Function(
input_names=["vnum",
"motion_mat_list",
"jac_matrix_list",
@@ -762,9 +801,8 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
"out_jacobian",
"out_warp_field"],
function=find_vnum_base
- ),
- name=f"Motion_Jac_Warp_matrices_{name}",
- )
+ ), name=f"Motion_Jac_Warp_matrices_{name}",
+ )
vnum_base_one.inputs.vnum = vnum
wf.connect(run_topup, 'out_xfms', vnum_base_one, 'motion_mat_list')
@@ -772,7 +810,7 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
wf.connect(run_topup, 'out_warps', vnum_base_one, 'warp_field_list')
# fsl_applywarp to phaseOne
- aw_one = pe.Node(interface=fsl.ApplyWarp(),name = "aw_one")
+ aw_one = pe.Node(interface=fsl.ApplyWarp(), name="aw_one")
aw_one.inputs.relwarp = True
aw_one.inputs.interp = 'spline'
@@ -783,7 +821,7 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
wf.connect(vnum_base_one, 'out_motion_mat', aw_one, 'premat')
wf.connect(vnum_base_one, 'out_warp_field', aw_one, 'field_file')
- mul_phase_one = pe.Node(interface = fsl.BinaryMaths(), name="mul_phase_one")
+ mul_phase_one = pe.Node(interface=fsl.BinaryMaths(), name="mul_phase_one")
mul_phase_one.inputs.operation = 'mul'
wf.connect(aw_one, 'out_file', mul_phase_one, 'in_file')
@@ -794,19 +832,20 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
aw_jac.inputs.relwarp = True
aw_jac.inputs.interp = 'spline'
- wf.connect(create_scout, 'out_file', aw_jac, 'in_file') #SBRef.nii.gz
- wf.connect(create_scout, 'out_file', aw_jac, 'ref_file') #SBRef.nii.gz
+ wf.connect(mean_bold.node, mean_bold.out, aw_jac, 'in_file') # SBRef.nii.gz
+ wf.connect(mean_bold.node, mean_bold.out,
+ aw_jac, 'ref_file') # SBRef.nii.gz
wf.connect(convert_warp, 'out_file', aw_jac, 'field_file')
- mul_jac = pe.Node(interface = fsl.BinaryMaths(),name = "mul_jac")
+ mul_jac = pe.Node(interface=fsl.BinaryMaths(), name="mul_jac")
mul_jac.inputs.operation = 'mul'
mul_jac.inputs.out_file = "SBRef_dc_jac.nii.gz"
wf.connect(aw_jac, 'out_file', mul_jac, 'in_file')
wf.connect(vnum_base, 'out_jacobian', mul_jac, 'operand_file')
- #Calculate Equivalent Field Map
- tp_field_map = pe.Node(interface = fsl.BinaryMaths(),name = "tp_field_map")
+ # Calculate Equivalent Field Map
+ tp_field_map = pe.Node(interface=fsl.BinaryMaths(), name="tp_field_map")
tp_field_map.inputs.operation = 'mul'
tp_field_map.inputs.operand_value = 6.283
@@ -819,18 +858,17 @@ def distcor_blip_fsl_topup(wf, cfg, strat_pool, pipe_num, opt=None):
wf.connect(run_topup, 'corrected_outfile', mag_field_map, 'in_file')
- #fsl_bet
- bet = pe.Node(interface = fsl.BET(), name="bet")
+ # fsl_bet
+ bet = pe.Node(interface=fsl.BET(), name="bet")
bet.inputs.frac = 0.35
bet.inputs.mask = True
wf.connect(mag_field_map, 'out_file', bet, 'in_file')
outputs = {
- 'desc-reginput_bold': (mul_jac, 'out_file'),
+ 'desc-mean_bold': (mul_jac, 'out_file'),
'space-bold_desc-brain_mask': (bet, 'out_file'),
'blip-warp': (convert_warp, 'out_file')
}
return (wf, outputs)
-
diff --git a/CPAC/distortion_correction/utils.py b/CPAC/distortion_correction/utils.py
index e617bd6eb4..8df4ff6f6e 100644
--- a/CPAC/distortion_correction/utils.py
+++ b/CPAC/distortion_correction/utils.py
@@ -140,11 +140,9 @@ def gradient_distortion_correction(wf, inp_image, name):
return (wf, out_warpmask, out_applywarp)
-def phase_encode(unwarp_dir, phase_one, phase_two, dwell_time_one,
- dwell_time_two):
- """
-
- Calculate readout time and populate parameter file
+def phase_encode(unwarp_dir, phase_one, phase_two, dwell_time_one=None,
+ dwell_time_two=None, ro_time_one=None, ro_time_two=None):
+ """Calculate readout time and populate parameter file
Parameters
__________
@@ -159,8 +157,10 @@ def phase_encode(unwarp_dir, phase_one, phase_two, dwell_time_one,
echo spacing of phase one
dwell_time_two
echo spacing of phase two
- fsl_dir
- FSL directory
+ ro_time_one
+ total readout time of phase one
+ ro_time_two
+ total readout time of phase two
Returns
_______
@@ -170,29 +170,44 @@ def phase_encode(unwarp_dir, phase_one, phase_two, dwell_time_one,
"""
+ meta_data = [dwell_time_one, dwell_time_two,
+ ro_time_one, ro_time_two]
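+    # TotalReadoutTime (ro_time_*) is used as-is when provided; when
+    # DwellTime (dwell_time_*) is present, the readout time is recomputed
+    # from it below and takes precedence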
+ if not any(meta_data):
+ raise Exception("\n[!] Blip-FSL-TOPUP workflow: neither "
+ "TotalReadoutTime nor DwellTime is present in the "
+ "epi field map meta-data.")
+
# create text file
acq_params = os.path.join(os.getcwd(), "acqparams.txt")
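+    # each row of acqparams.txt is "<x> <y> <z> <TotalReadoutTime>" (one row
+    # per phase-encoding direction), the format FSL topup expects,
+    # e.g. "0 -1 0 0.0342" for a j- encoded image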
if isinstance(unwarp_dir, bytes):
unwarp_dir = unwarp_dir.decode()
- if unwarp_dir in ["x", "x-", "-x","i","-i","i-"]:
- dim = nibabel.load(phase_one).shape[0]
- n_PE_steps = dim - 1
- ro_time_one = np.round(dwell_time_one * n_PE_steps, 6)
- ro_time_two = np.round(dwell_time_two * n_PE_steps, 6)
- ro_times = [f"-1 0 0 {ro_time_one}", f"1 0 0 {ro_time_two}"]
- elif unwarp_dir in ["y", "y-", "-y","j","-j","j-"]:
- dim = nibabel.load(phase_one).shape[1]
- n_PE_steps = dim - 1
- ro_time_one = np.round(dwell_time_one * n_PE_steps, 6)
- ro_time_two = np.round(dwell_time_two * n_PE_steps, 6)
- ro_times = [f"0 -1 0 {ro_time_one}", f"0 1 0 {ro_time_two}"]
+ if unwarp_dir in ["x", "x-", "-x", "i", "-i", "i-"]:
+ if dwell_time_one and dwell_time_two:
+ dim = nibabel.load(phase_one).shape[0]
+ n_PE_steps = dim - 1
+ ro_time_one = np.round(dwell_time_one * n_PE_steps, 6)
+ ro_time_two = np.round(dwell_time_two * n_PE_steps, 6)
+ if ro_time_one and ro_time_two:
+ ro_times = [f"-1 0 0 {ro_time_one}", f"1 0 0 {ro_time_two}"]
+ else:
+ raise Exception("[!] No dwell time or total readout time "
+ "present for the acq-fMRI EPI field maps.")
+ elif unwarp_dir in ["y", "y-", "-y", "j", "-j", "j-"]:
+ if dwell_time_one and dwell_time_two:
+ dim = nibabel.load(phase_one).shape[1]
+ n_PE_steps = dim - 1
+ ro_time_one = np.round(dwell_time_one * n_PE_steps, 6)
+ ro_time_two = np.round(dwell_time_two * n_PE_steps, 6)
+ if ro_time_one and ro_time_two:
+ ro_times = [f"0 -1 0 {ro_time_one}", f"0 1 0 {ro_time_two}"]
+ else:
+ raise Exception("[!] No dwell time or total readout time "
+ "present for the acq-fMRI EPI field maps.")
else:
raise Exception(f"unwarp_dir={unwarp_dir} is unsupported.")
-
-
# get number of volumes
dims = [
int(subprocess.check_output([f"fslval", phase_one, "dim4"]).decode(sys.stdout.encoding)),
diff --git a/CPAC/func_preproc/func_ingress.py b/CPAC/func_preproc/func_ingress.py
index a7c46824c1..d34758ff58 100644
--- a/CPAC/func_preproc/func_ingress.py
+++ b/CPAC/func_preproc/func_ingress.py
@@ -1,26 +1,28 @@
-from nipype import logging
-logger = logging.getLogger('nipype.workflow')
+# Copyright (C) 2020-2022 C-PAC Developers
-from CPAC.pipeline import nipype_pipeline_engine as pe
+# This file is part of C-PAC.
-import nipype.interfaces.afni as afni
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
-from CPAC.utils.interfaces.function import Function
-from CPAC.utils.utils import (
- get_scan_params
-)
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+from nipype import logging
from CPAC.utils.datasource import (
create_func_datasource,
- create_fmap_datasource,
- get_fmap_phasediff_metadata,
- calc_deltaTE_and_asym_ratio
-)
+ ingress_func_metadata)
+logger = logging.getLogger('nipype.workflow')
def connect_func_ingress(workflow, strat_list, c, sub_dict, subject_id,
input_creds_path, unique_id=None):
-
for num_strat, strat in enumerate(strat_list):
if 'func' in sub_dict:
@@ -49,168 +51,8 @@ def connect_func_ingress(workflow, strat_list, c, sub_dict, subject_id,
'scan': (func_wf, 'outputspec.scan')
})
- # Grab field maps
- diff = False
- blip = False
- fmap_rp_list = []
- fmap_TE_list = []
-
- if "fmap" in sub_dict:
- for key in sub_dict["fmap"]:
-
- gather_fmap = create_fmap_datasource(sub_dict["fmap"],
- "fmap_gather_"
- "{0}".format(key))
- gather_fmap.inputs.inputnode.set(
- subject=subject_id,
- creds_path=input_creds_path,
- dl_dir=c.pipeline_setup['working_directory']['path']
- )
- gather_fmap.inputs.inputnode.scan = key
- strat.update_resource_pool({
- key: (gather_fmap, 'outputspec.rest'),
- "{0}_scan_params".format(key): (gather_fmap,
- 'outputspec.scan_params')
- })
-
- fmap_rp_list.append(key)
-
- if key == "diff_phase" or key == "diff_mag_one" or \
- key == "diff_mag_two":
- diff = True
-
- get_fmap_metadata_imports = ['import json']
- get_fmap_metadata = pe.Node(Function(
- input_names=['data_config_scan_params'],
- output_names=['echo_time',
- 'dwell_time',
- 'pe_direction'],
- function=get_fmap_phasediff_metadata,
- imports=get_fmap_metadata_imports),
- name='{0}_get_metadata_{1}'.format(key,
- num_strat))
-
- node, out_file = strat["{}_scan_params".format(key)]
- workflow.connect(node, out_file, get_fmap_metadata,
- 'data_config_scan_params')
-
- strat.update_resource_pool({
- "{}_TE".format(key): (get_fmap_metadata,
- 'echo_time'),
- "{}_dwell".format(key): (get_fmap_metadata,
- 'dwell_time'),
- "{}_pedir".format(key): (get_fmap_metadata,
- 'pe_direction')
- })
- fmap_TE_list.append("{}_TE".format(key))
-
- if "epi_" in key:
- blip = True
-
- if diff:
- calc_delta_ratio = pe.Node(Function(
- input_names=['dwell_time',
- 'echo_time_one',
- 'echo_time_two',
- 'echo_time_three'],
- output_names=['deltaTE',
- 'dwell_asym_ratio'],
- function=calc_deltaTE_and_asym_ratio),
- name='diff_distcor_calc_delta_{}'.format(num_strat))
-
- node, out_file = strat['diff_phase_dwell']
- workflow.connect(node, out_file, calc_delta_ratio,
- 'dwell_time')
-
- node, out_file = strat[fmap_TE_list[0]]
- workflow.connect(node, out_file, calc_delta_ratio,
- 'echo_time_one')
-
- node, out_file = strat[fmap_TE_list[1]]
- workflow.connect(node, out_file, calc_delta_ratio,
- 'echo_time_two')
-
- if len(fmap_TE_list) > 2:
- node, out_file = strat[fmap_TE_list[2]]
- workflow.connect(node, out_file, calc_delta_ratio,
- 'echo_time_three')
-
- strat.update_resource_pool({
- 'deltaTE': (calc_delta_ratio, 'deltaTE'),
- 'dwell_asym_ratio': (calc_delta_ratio,
- 'dwell_asym_ratio')
- })
-
- # Add in nodes to get parameters from configuration file
- # a node which checks if scan_parameters are present for each scan
- if unique_id is None:
- workflow_name=f'scan_params_{num_strat}'
- else:
- workflow_name=f'scan_params_{unique_id}_{num_strat}'
-
- scan_params = \
- pe.Node(Function(
- input_names=['data_config_scan_params',
- 'subject_id',
- 'scan',
- 'pipeconfig_tr',
- 'pipeconfig_tpattern',
- 'pipeconfig_start_indx',
- 'pipeconfig_stop_indx'],
- output_names=['tr',
- 'tpattern',
- 'ref_slice',
- 'start_indx',
- 'stop_indx',
- 'pe_direction'],
- function=get_scan_params,
- as_module=True
- ), name=workflow_name)
-
- if "Selected Functional Volume" in c.functional_registration['1-coregistration']['func_input_prep']['input']:
- get_func_volume = pe.Node(interface=afni.Calc(),
- name='get_func_volume_{0}'.format(
- num_strat))
-
- get_func_volume.inputs.set(
- expr='a',
- single_idx=c.functional_registration['1-coregistration']['func_input_prep']['Selected Functional Volume']['func_reg_input_volume'],
- outputtype='NIFTI_GZ'
- )
- workflow.connect(func_wf, 'outputspec.rest',
- get_func_volume, 'in_file_a')
-
- # wire in the scan parameter workflow
- workflow.connect(func_wf, 'outputspec.scan_params',
- scan_params, 'data_config_scan_params')
-
- workflow.connect(func_wf, 'outputspec.subject',
- scan_params, 'subject_id')
-
- workflow.connect(func_wf, 'outputspec.scan',
- scan_params, 'scan')
-
- # connect in constants
- scan_params.inputs.set(
- pipeconfig_start_indx=c.functional_preproc['truncation']['start_tr'],
- pipeconfig_stop_indx=c.functional_preproc['truncation']['stop_tr']
- )
-
- strat.update_resource_pool({
- 'raw_functional': (func_wf, 'outputspec.rest'),
- 'scan_id': (func_wf, 'outputspec.scan'),
- 'tr': (scan_params, 'tr'),
- 'tpattern': (scan_params, 'tpattern'),
- 'start_idx': (scan_params, 'start_indx'),
- 'stop_idx': (scan_params, 'stop_indx'),
- 'pe_direction': (scan_params, 'pe_direction'),
- })
-
- strat.set_leaf_properties(func_wf, 'outputspec.rest')
-
- if "Selected Functional Volume" in c.functional_registration['1-coregistration']['func_input_prep']['input']:
- strat.update_resource_pool({
- 'selected_func_volume': (get_func_volume, 'out_file')
- })
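+        # field-map ingress, metadata extraction and scan-parameter handling
+        # are now consolidated in CPAC.utils.datasource.ingress_func_metadata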
+ (workflow, strat.rpool, diff, blip, fmap_rp_list
+ ) = ingress_func_metadata(workflow, c, strat.rpool, sub_dict,
+ subject_id, input_creds_path, unique_id)
return (workflow, diff, blip, fmap_rp_list)
diff --git a/CPAC/func_preproc/func_preproc.py b/CPAC/func_preproc/func_preproc.py
index c8661b7e8c..5d50c0eb68 100644
--- a/CPAC/func_preproc/func_preproc.py
+++ b/CPAC/func_preproc/func_preproc.py
@@ -1,6 +1,8 @@
# pylint: disable=ungrouped-imports,wrong-import-order,wrong-import-position
from nipype import logging
from nipype.interfaces import afni, ants, fsl, utility as util
+
+from CPAC.registration.guardrails import guardrail_selection
logger = logging.getLogger('nipype.workflow')
from CPAC.pipeline import nipype_pipeline_engine as pe
from nipype.interfaces.afni import preprocess
@@ -104,20 +106,23 @@ def anat_refined_mask(init_bold_mask=True, wf_name='init_bold_mask'):
name='func_to_anat_linear_reg')
linear_reg_func_to_anat.inputs.cost = 'mutualinfo'
linear_reg_func_to_anat.inputs.dof = 6
-
- wf.connect(func_tmp_brain, 'out_file',
- linear_reg_func_to_anat, 'in_file')
-
- wf.connect(input_node, 'anat_brain',
- linear_reg_func_to_anat, 'reference')
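+    # registration guardrails: FLIRT and a retry clone both run, and the
+    # transform is taken from whichever attempt the guardrail accepts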
+ linear_reg_nodes, linear_reg_guardrails = wf.nodes_and_guardrails(
+ linear_reg_func_to_anat, registered='out_file')
+ wf.connect_retries(linear_reg_nodes, [
+ (func_tmp_brain, 'out_file', 'in_file'),
+ (input_node, 'anat_brain', 'reference')])
+ wf.connect_retries(linear_reg_guardrails, [
+ (input_node, 'anat_brain', 'reference')])
+ linear_reg_matrix = guardrail_selection(wf, *linear_reg_nodes,
+ 'out_matrix_file',
+ linear_reg_guardrails[0])
# 3.2 Inverse func to anat affine
inv_func_to_anat_affine = pe.Node(interface=fsl.ConvertXFM(),
name='inv_func2anat_affine')
inv_func_to_anat_affine.inputs.invert_xfm = True
- wf.connect(linear_reg_func_to_anat, 'out_matrix_file',
- inv_func_to_anat_affine, 'in_file')
+ wf.connect(linear_reg_matrix, 'out', inv_func_to_anat_affine, 'in_file')
# 4. anat mask to func space
# Transform anatomical mask to functional space to get BOLD mask
@@ -127,38 +132,37 @@ def anat_refined_mask(init_bold_mask=True, wf_name='init_bold_mask'):
reg_anat_mask_to_func.inputs.cost = 'mutualinfo'
reg_anat_mask_to_func.inputs.dof = 6
reg_anat_mask_to_func.inputs.interp = 'nearestneighbour'
-
- wf.connect(input_node, 'anatomical_brain_mask',
- reg_anat_mask_to_func, 'in_file')
-
- wf.connect(func_tmp_brain, 'out_file',
- reg_anat_mask_to_func, 'reference')
-
- wf.connect(inv_func_to_anat_affine, 'out_file',
- reg_anat_mask_to_func, 'in_matrix_file')
+ ramtf_nodes, ramtf_guardrails = wf.nodes_and_guardrails(
+ reg_anat_mask_to_func, registered='out_file')
+ wf.connect_retries(ramtf_nodes, [
+ (input_node, 'anatomical_brain_mask', 'in_file'),
+ (func_tmp_brain, 'out_file', 'reference'),
+ (inv_func_to_anat_affine, 'out_file', 'in_matrix_file')])
+ wf.connect_retries(ramtf_guardrails, [
+ (func_tmp_brain, 'out_file', 'reference')])
+ # pylint: disable=no-value-for-parameter
+ anat_mask_to_func = guardrail_selection(wf, *ramtf_guardrails)
# 5. get final func mask: refine func tmp mask with anat_mask_in_func mask
func_mask = pe.Node(interface=fsl.MultiImageMaths(), name='func_mask')
func_mask.inputs.op_string = "-mul %s"
- wf.connect(reg_anat_mask_to_func, 'out_file',
- func_mask, 'operand_files')
+ wf.connect(anat_mask_to_func, 'out', func_mask, 'operand_files')
- if init_bold_mask == True:
- wf.connect(func_tmp_brain_mask_dil, 'out_file',
- func_mask, 'in_file')
+ if init_bold_mask is True:
+ wf.connect(func_tmp_brain_mask_dil, 'out_file', func_mask, 'in_file')
else:
- wf.connect(input_node, 'init_func_brain_mask',
- func_mask, 'in_file')
+ wf.connect(input_node, 'init_func_brain_mask', func_mask, 'in_file')
- wf.connect(func_mask, 'out_file',
- output_node, 'func_brain_mask')
+ wf.connect(func_mask, 'out_file', output_node, 'func_brain_mask')
return wf
+
def anat_based_mask(wf_name='bold_mask'):
-# reference DCAN lab BOLD mask
-# https://github.com/DCAN-Labs/DCAN-HCP/blob/master/fMRIVolume/scripts/DistortionCorrectionAndEPIToT1wReg_FLIRTBBRAndFreeSurferBBRbased.sh
+ # pylint: disable=line-too-long
+ # reference DCAN lab BOLD mask
+ # https://github.com/DCAN-Labs/DCAN-HCP/blob/master/fMRIVolume/scripts/DistortionCorrectionAndEPIToT1wReg_FLIRTBBRAndFreeSurferBBRbased.sh
wf = pe.Workflow(name=wf_name)
input_node = pe.Node(util.IdentityInterface(fields=['func',
@@ -167,11 +171,11 @@ def anat_based_mask(wf_name='bold_mask'):
name='inputspec')
output_node = pe.Node(util.IdentityInterface(fields=['func_brain_mask']),
- name='outputspec')
+ name='outputspec')
- # 0. Take single volume of func
+ # 0. Take single volume of func
func_single_volume = pe.Node(interface=afni.Calc(),
- name='func_single_volume')
+ name='func_single_volume')
func_single_volume.inputs.set(
expr='a',
@@ -179,59 +183,54 @@ def anat_based_mask(wf_name='bold_mask'):
outputtype='NIFTI_GZ'
)
- wf.connect(input_node, 'func',
- func_single_volume, 'in_file_a')
+ wf.connect(input_node, 'func', func_single_volume, 'in_file_a')
# 1. Register func head to anat head to get func2anat matrix
linear_reg_func_to_anat = pe.Node(interface=fsl.FLIRT(),
- name='func_to_anat_linear_reg')
+ name='func_to_anat_linear_reg')
linear_reg_func_to_anat.inputs.dof = 6
linear_reg_func_to_anat.inputs.interp = 'spline'
linear_reg_func_to_anat.inputs.searchr_x = [30, 30]
linear_reg_func_to_anat.inputs.searchr_y = [30, 30]
linear_reg_func_to_anat.inputs.searchr_z = [30, 30]
+ func_to_anat_nodes, func_to_anat_guardrails = wf.nodes_and_guardrails(
+ linear_reg_func_to_anat, registered='out_file')
+ wf.connect_retries(func_to_anat_nodes, [
+ (func_single_volume, 'out_file', 'in_file'),
+ (input_node, 'anat_head', 'reference')])
+ wf.connect_retries(func_to_anat_guardrails, [
+ (input_node, 'anat_head', 'reference')])
+ func_to_anat_matrix = guardrail_selection(wf, *func_to_anat_nodes,
+ 'out_matrix_file',
+ func_to_anat_guardrails[0])
- wf.connect(func_single_volume, 'out_file',
- linear_reg_func_to_anat, 'in_file')
-
- wf.connect(input_node, 'anat_head',
- linear_reg_func_to_anat, 'reference')
-
- # 2. Inverse func to anat affine, to get anat-to-func transform
+ # 2. Inverse func to anat affine, to get anat-to-func transform
inv_func_to_anat_affine = pe.Node(interface=fsl.ConvertXFM(),
- name='inv_func2anat_affine')
+ name='inv_func2anat_affine')
inv_func_to_anat_affine.inputs.invert_xfm = True
- wf.connect(linear_reg_func_to_anat, 'out_matrix_file',
- inv_func_to_anat_affine, 'in_file')
+ wf.connect(func_to_anat_matrix, 'out', inv_func_to_anat_affine, 'in_file')
# 3. get BOLD mask
- # 3.1 Apply anat-to-func transform to transfer anatomical brain to functional space
+ # 3.1 Apply anat-to-func transform to transfer anatomical brain to
+ # functional space
reg_anat_brain_to_func = pe.Node(interface=fsl.ApplyWarp(),
- name='reg_anat_brain_to_func')
+ name='reg_anat_brain_to_func')
reg_anat_brain_to_func.inputs.interp = 'nn'
reg_anat_brain_to_func.inputs.relwarp = True
- wf.connect(input_node, 'anat_brain',
- reg_anat_brain_to_func, 'in_file')
-
- wf.connect(input_node, 'func',
- reg_anat_brain_to_func, 'ref_file')
-
- wf.connect(inv_func_to_anat_affine, 'out_file',
- reg_anat_brain_to_func, 'premat')
+ wf.connect(input_node, 'anat_brain', reg_anat_brain_to_func, 'in_file')
+ wf.connect(input_node, 'func', reg_anat_brain_to_func, 'ref_file')
+ wf.connect(inv_func_to_anat_affine, 'out_file',
+ reg_anat_brain_to_func, 'premat')
- # 3.2 Binarize transfered image and fill holes to get BOLD mask.
+ # 3.2 Binarize transfered image and fill holes to get BOLD mask.
# Binarize
- func_mask_bin = pe.Node(interface=fsl.ImageMaths(),
- name='func_mask')
+ func_mask_bin = pe.Node(interface=fsl.ImageMaths(), name='func_mask')
func_mask_bin.inputs.op_string = '-bin'
- wf.connect(reg_anat_brain_to_func, 'out_file',
- func_mask_bin, 'in_file')
-
- wf.connect(func_mask_bin, 'out_file',
- output_node, 'func_brain_mask')
+ wf.connect(reg_anat_brain_to_func, 'out_file', func_mask_bin, 'in_file')
+ wf.connect(func_mask_bin, 'out_file', output_node, 'func_brain_mask')
return wf
@@ -1915,20 +1914,25 @@ def bold_mask_anatomical_based(wf, cfg, strat_pool, pipe_num, opt=None):
linear_reg_func_to_anat.inputs.searchr_y = [30, 30]
linear_reg_func_to_anat.inputs.searchr_z = [30, 30]
- wf.connect(func_single_volume, 'out_file',
- linear_reg_func_to_anat, 'in_file')
+ func_to_anat_nodes, func_to_anat_guardrails = wf.nodes_and_guardrails(
+ linear_reg_func_to_anat, registered='out_file')
node, out = strat_pool.get_data(["desc-preproc_T1w", "desc-reorient_T1w",
"T1w"])
- wf.connect(node, out, linear_reg_func_to_anat, 'reference')
+ wf.connect_retries(func_to_anat_nodes, [
+ (func_single_volume, 'out_file', 'in_file'),
+ (node, out, 'reference')])
+ wf.connect_retries(func_to_anat_guardrails, [(node, out, 'reference')])
+ func_to_anat_matrix = guardrail_selection(wf, *func_to_anat_nodes,
+ 'out_matrix_file',
+ func_to_anat_guardrails[0])
# 2. Inverse func to anat affine, to get anat-to-func transform
inv_func_to_anat_affine = pe.Node(interface=fsl.ConvertXFM(),
name='inv_func2anat_affine')
inv_func_to_anat_affine.inputs.invert_xfm = True
- wf.connect(linear_reg_func_to_anat, 'out_matrix_file',
- inv_func_to_anat_affine, 'in_file')
+ wf.connect(func_to_anat_matrix, 'out', inv_func_to_anat_affine, 'in_file')
# 3. get BOLD mask
# 3.1 Apply anat-to-func transform to transfer anatomical brain to functional space
@@ -1940,16 +1944,14 @@ def bold_mask_anatomical_based(wf, cfg, strat_pool, pipe_num, opt=None):
node, out = strat_pool.get_data("desc-brain_T1w")
wf.connect(node, out, reg_anat_brain_to_func, 'in_file')
- node, out = strat_pool.get_data(["desc-preproc_bold",
- "bold"])
+ node, out = strat_pool.get_data(["desc-preproc_bold", "bold"])
wf.connect(node, out, reg_anat_brain_to_func, 'ref_file')
wf.connect(inv_func_to_anat_affine, 'out_file',
reg_anat_brain_to_func, 'premat')
# 3.2 Binarize transfered image
- func_mask_bin = pe.Node(interface=fsl.ImageMaths(),
- name='func_mask_bin')
+ func_mask_bin = pe.Node(interface=fsl.ImageMaths(), name='func_mask_bin')
func_mask_bin.inputs.op_string = '-abs -bin'
wf.connect(reg_anat_brain_to_func, 'out_file',
@@ -2060,7 +2062,6 @@ def bold_mask_ccs(wf, cfg, strat_pool, pipe_num, opt=None):
"outputs": ["space-bold_desc-brain_mask",
"desc-ROIbrain_bold"]}
'''
-
# Run 3dAutomask to generate func initial mask
func_tmp_brain_mask = pe.Node(interface=preprocess.Automask(),
name=f'func_tmp_brain_mask_AFNI_{pipe_num}')
@@ -2078,7 +2079,7 @@ def bold_mask_ccs(wf, cfg, strat_pool, pipe_num, opt=None):
func_roi.inputs.t_min = 7
func_roi.inputs.t_size = 1
- node, out = strat_pool.get_data(["desc-motion_bold",
+ node, out = strat_pool.get_data(["desc-motion_bold",
"desc-preproc_bold",
"bold"])
wf.connect(node, out, func_roi, 'in_file')
@@ -2100,49 +2101,56 @@ def bold_mask_ccs(wf, cfg, strat_pool, pipe_num, opt=None):
reg_func_to_anat.inputs.cost = 'corratio'
reg_func_to_anat.inputs.dof = 6
- wf.connect(func_tmp_brain, 'out_file',
- reg_func_to_anat, 'in_file')
+ func_to_anat_nodes, func_to_anat_guardrails = wf.nodes_and_guardrails(
+ reg_func_to_anat, registered='out_file')
node, out = strat_pool.get_data("desc-brain_T1w")
- wf.connect(node, out, reg_func_to_anat, 'reference')
+ wf.connect_retries(func_to_anat_nodes, [
+ (func_tmp_brain, 'out_file', 'in_file'),
+ (node, out, 'reference')])
+ wf.connect_retries(func_to_anat_guardrails, [(node, out, 'reference')])
+ func_to_anat_matrix = guardrail_selection(wf, *func_to_anat_nodes,
+ 'out_matrix_file',
+ func_to_anat_guardrails[0])
# Inverse func2anat matrix
inv_func_to_anat_affine = pe.Node(interface=fsl.ConvertXFM(),
name=f'inv_func2anat_affine_{pipe_num}')
inv_func_to_anat_affine.inputs.invert_xfm = True
- wf.connect(reg_func_to_anat, 'out_matrix_file',
- inv_func_to_anat_affine, 'in_file')
+ wf.connect(func_to_anat_matrix, 'out', inv_func_to_anat_affine, 'in_file')
# Transform anat brain to func space
reg_anat_brain_to_func = pe.Node(interface=fsl.FLIRT(),
name=f'reg_anat_brain_to_func_{pipe_num}')
reg_anat_brain_to_func.inputs.apply_xfm = True
reg_anat_brain_to_func.inputs.interp = 'trilinear'
+ (anat_brain_to_func_nodes,
+ anat_brain_to_func_guardrails) = wf.nodes_and_guardrails(
+ reg_anat_brain_to_func, registered='out_file')
node, out = strat_pool.get_data("desc-brain_T1w")
- wf.connect(node, out, reg_anat_brain_to_func, 'in_file')
-
- wf.connect(func_roi, 'roi_file',
- reg_anat_brain_to_func, 'reference')
-
- wf.connect(inv_func_to_anat_affine, 'out_file',
- reg_anat_brain_to_func, 'in_matrix_file')
+ wf.connect_retries(anat_brain_to_func_nodes, [
+ (node, out, 'in_file'),
+ (func_roi, 'roi_file', 'reference'),
+ (inv_func_to_anat_affine, 'out_file', 'in_matrix_file')])
+ wf.connect_retries(anat_brain_to_func_guardrails, [
+ (func_roi, 'roi_file', 'reference')])
+ anat_brain_to_func = guardrail_selection(wf,
+ *anat_brain_to_func_guardrails)
# Binarize and dilate anat brain in func space
bin_anat_brain_in_func = pe.Node(interface=fsl.ImageMaths(),
name=f'bin_anat_brain_in_func_{pipe_num}')
bin_anat_brain_in_func.inputs.op_string = '-bin -dilM'
- wf.connect(reg_anat_brain_to_func, 'out_file',
- bin_anat_brain_in_func, 'in_file')
+ wf.connect(anat_brain_to_func, 'out', bin_anat_brain_in_func, 'in_file')
# Binarize detectable func signals
- bin_func = pe.Node(interface=fsl.ImageMaths(),
- name=f'bin_func_{pipe_num}')
+ bin_func = pe.Node(interface=fsl.ImageMaths(), name=f'bin_func_{pipe_num}')
bin_func.inputs.op_string = '-Tstd -bin'
- node, out = strat_pool.get_data(["desc-motion_bold",
+ node, out = strat_pool.get_data(["desc-motion_bold",
"desc-preproc_bold",
"bold"])
wf.connect(node, out, bin_func, 'in_file')
@@ -2162,22 +2170,16 @@ def bold_mask_ccs(wf, cfg, strat_pool, pipe_num, opt=None):
intersect_mask.inputs.op_string = '-mul %s -mul %s'
intersect_mask.inputs.output_datatype = 'char'
- wf.connect(bin_func, 'out_file',
- intersect_mask, 'in_file')
-
- wf.connect(merge_func_mask, 'out',
- intersect_mask, 'operand_files')
+ wf.connect(bin_func, 'out_file', intersect_mask, 'in_file')
+ wf.connect(merge_func_mask, 'out', intersect_mask, 'operand_files')
# this is the func input for coreg in ccs
# TODO evaluate if it's necessary to use this brain
example_func_brain = pe.Node(interface=fsl.maths.ApplyMask(),
name=f'get_example_func_brain_{pipe_num}')
- wf.connect(func_roi, 'roi_file',
- example_func_brain, 'in_file')
-
- wf.connect(intersect_mask, 'out_file',
- example_func_brain, 'mask_file')
+ wf.connect(func_roi, 'roi_file', example_func_brain, 'in_file')
+ wf.connect(intersect_mask, 'out_file', example_func_brain, 'mask_file')
outputs = {
'space-bold_desc-brain_mask': (intersect_mask, 'out_file'),
diff --git a/CPAC/image_utils/__init__.py b/CPAC/image_utils/__init__.py
index 3bb29507ac..6d79f91bd8 100644
--- a/CPAC/image_utils/__init__.py
+++ b/CPAC/image_utils/__init__.py
@@ -1,2 +1,22 @@
-from .spatial_smoothing import *
-from .statistical_transforms import *
+# Copyright (C) 2018-2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+from .spatial_smoothing import set_gauss, spatial_smoothing
+from .statistical_transforms import calc_avg, fisher_z_score_standardize, \
+ z_score_standardize
+
+__all__ = ['calc_avg', 'fisher_z_score_standardize', 'set_gauss',
+ 'spatial_smoothing', 'z_score_standardize']
diff --git a/CPAC/image_utils/spatial_smoothing.py b/CPAC/image_utils/spatial_smoothing.py
index de5aa81fe3..f14dff8d87 100644
--- a/CPAC/image_utils/spatial_smoothing.py
+++ b/CPAC/image_utils/spatial_smoothing.py
@@ -1,8 +1,22 @@
-import nipype.interfaces.fsl as fsl
+# Copyright (C) 2018-2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+from nipype.interfaces import fsl, utility as util
from nipype.interfaces.afni import preprocess as afni
from CPAC.pipeline import nipype_pipeline_engine as pe
-import nipype.interfaces.utility as util
-from CPAC.utils import Outputs
def set_gauss(fwhm):
@@ -64,17 +78,17 @@ def spatial_smoothing(wf_name, fwhm, input_image_type='func_derivative',
elif opt == 'AFNI':
if input_image_type == 'func_derivative_multi':
- output_smooth = pe.MapNode(interface= afni.BlurToFWHM(),
+ output_smooth = pe.MapNode(interface=afni.BlurToFWHM(),
name='smooth_multi',
iterfield=['in_file'])
else:
- output_smooth = pe.Node(interface= afni.BlurToFWHM(),
+ output_smooth = pe.Node(interface=afni.BlurToFWHM(),
name='smooth',
iterfield=['in_file'])
output_smooth.inputs.outputtype = 'NIFTI_GZ'
- if opt =='FSL':
- # wire in the resource to be smoothed
+ if opt == 'FSL':
+ # wire in the resource to be smoothed
wf.connect(inputnode, 'in_file', output_smooth, 'in_file')
# get the parameters for fwhm
wf.connect(inputnode_fwhm, ('fwhm', set_gauss),
diff --git a/CPAC/image_utils/statistical_transforms.py b/CPAC/image_utils/statistical_transforms.py
index 490fe593e4..c3b989931c 100644
--- a/CPAC/image_utils/statistical_transforms.py
+++ b/CPAC/image_utils/statistical_transforms.py
@@ -1,7 +1,22 @@
-from CPAC.pipeline import nipype_pipeline_engine as pe
-import nipype.interfaces.utility as util
-from nipype.interfaces.afni import preprocess
+# Copyright (C) 2018-2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+from nipype.interfaces import utility as util
+from nipype.interfaces.afni import preprocess
+from CPAC.pipeline import nipype_pipeline_engine as pe
from CPAC.utils import function
from CPAC.utils.utils import (
extract_output_mean,
diff --git a/CPAC/info.py b/CPAC/info.py
index 8e43b41c4a..e3c966a850 100644
--- a/CPAC/info.py
+++ b/CPAC/info.py
@@ -185,6 +185,7 @@ def get_cpac_gitversion():
"nose==1.3.7",
"numpy==1.16.4",
"pandas==0.23.4",
+ "pathvalidate==2.5.2",
"patsy==0.5.0",
"prov==1.5.2",
"psutil==5.4.6",
diff --git a/CPAC/longitudinal_pipeline/longitudinal_workflow.py b/CPAC/longitudinal_pipeline/longitudinal_workflow.py
index 08f399c4ef..6cab874aa3 100644
--- a/CPAC/longitudinal_pipeline/longitudinal_workflow.py
+++ b/CPAC/longitudinal_pipeline/longitudinal_workflow.py
@@ -1,4 +1,20 @@
# -*- coding: utf-8 -*-
+# Copyright (C) 2020-2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
import os
import copy
import time
@@ -26,32 +42,24 @@
build_segmentation_stack
from CPAC.pipeline.engine import initiate_rpool, ingress_output_dir
-from CPAC.registration import (
+from CPAC.registration.registration import (
+ apply_transform,
create_fsl_flirt_linear_reg,
create_fsl_fnirt_nonlinear_reg,
- create_wf_calculate_ants_warp
-)
-
-from CPAC.registration.registration import apply_transform
+ create_wf_calculate_ants_warp)
-from CPAC.utils.datasource import (
- resolve_resolution,
- create_anat_datasource,
- create_check_for_s3_node
-)
+from CPAC.utils.datasource import resolve_resolution
from CPAC.longitudinal_pipeline.longitudinal_preproc import (
subject_specific_template
)
-from CPAC.utils import Strategy, find_files, function, Outputs
-from CPAC.utils.utils import check_prov_for_regtool
-
+from CPAC.utils import find_files, function
+from CPAC.utils.outputs import Outputs
+from CPAC.utils.strategy import Strategy
from CPAC.utils.utils import (
check_config_resources,
- check_system_deps,
- get_scan_params,
- get_tr
+ check_prov_for_regtool
)
logger = logging.getLogger('nipype.workflow')
@@ -446,7 +454,7 @@ def anat_longitudinal_wf(subject_id, sub_list, config):
workflow.run()
- cpac_dir = os.path.join(out_dir, f'cpac_{orig_pipe_name}',
+ cpac_dir = os.path.join(out_dir, f'pipeline_{orig_pipe_name}',
f'{subject_id}_{unique_id}')
cpac_dirs.append(os.path.join(cpac_dir, 'anat'))
diff --git a/CPAC/nuisance/nuisance.py b/CPAC/nuisance/nuisance.py
index 82e75969bd..6699358cd5 100644
--- a/CPAC/nuisance/nuisance.py
+++ b/CPAC/nuisance/nuisance.py
@@ -14,7 +14,6 @@
from nipype.interfaces import afni
from nipype.interfaces.afni import utils as afni_utils
from scipy.fftpack import fft, ifft
-from CPAC import utils
from CPAC.utils.interfaces.function import Function
from CPAC.utils.interfaces.masktool import MaskTool
from CPAC.utils.interfaces.pc import PC
@@ -2453,7 +2452,8 @@ def nuisance_regression(wf, cfg, strat_pool, pipe_num, opt, space):
f'_{space}_{opt["Name"]}'
f'_{pipe_num}')
- desc_keys = ('desc-preproc_bold', 'desc-cleaned_bold')
+ desc_keys = ('desc-preproc_bold', 'desc-cleaned_bold',
+ 'desc-denoisedNofilt_bold')
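+    # 'desc-denoisedNofilt_bold' keeps the regression output even when the
+    # other two keys point to the frequency-filtered result (see the
+    # ``outputs`` mappings below)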
if space != 'native':
desc_keys = tuple(f'space-{space}_{key}' for key in desc_keys)
@@ -2507,6 +2507,7 @@ def nuisance_regression(wf, cfg, strat_pool, pipe_num, opt, space):
outputs = {
desc_keys[0]: (filt, 'outputspec.residual_file_path'),
desc_keys[1]: (filt, 'outputspec.residual_file_path'),
+ desc_keys[2]: (nuis, 'outputspec.residual_file_path'),
'regressors': (filt, 'outputspec.residual_regressor')
}
@@ -2531,6 +2532,7 @@ def nuisance_regression(wf, cfg, strat_pool, pipe_num, opt, space):
outputs = {
desc_keys[0]: (nuis, 'outputspec.residual_file_path'),
desc_keys[1]: (nuis, 'outputspec.residual_file_path'),
+ desc_keys[2]: (nuis, 'outputspec.residual_file_path')
}
return (wf, outputs)
@@ -2553,10 +2555,13 @@ def nuisance_regression_native(wf, cfg, strat_pool, pipe_num, opt=None):
"dvars"),
"TR"],
"outputs": {"desc-preproc_bold": {
- "Description": "Preprocessed BOLD image that was nusiance-"
+ "Description": "Preprocessed BOLD image that was nuisance-"
"regressed in native space"},
"desc-cleaned_bold": {
- "Description": "Preprocessed BOLD image that was nusiance-"
+ "Description": "Preprocessed BOLD image that was nuisance-"
+ "regressed in native space"},
+ "desc-denoisedNofilt_bold": {
+ "Description": "Preprocessed BOLD image that was nuisance-"
"regressed in native space"},
"regressors": {
"Description": "Regressors that were applied in native space"}}}
@@ -2582,10 +2587,13 @@ def nuisance_regression_template(wf, cfg, strat_pool, pipe_num, opt=None):
"dvars"),
"TR"],
"outputs": {"space-template_desc-preproc_bold": {
- "Description": "Preprocessed BOLD image that was nusiance-"
+ "Description": "Preprocessed BOLD image that was nuisance-"
"regressed in template space"},
"space-template_desc-cleaned_bold": {
- "Description": "Preprocessed BOLD image that was nusiance-"
+ "Description": "Preprocessed BOLD image that was nuisance-"
+ "regressed in template space"},
+ "space-template_desc-denoisedNofilt_bold": {
+ "Description": "Preprocessed BOLD image that was nuisance-"
"regressed in template space"},
"regressors": {
"Description": "Regressors that were applied in template space"}}}
diff --git a/CPAC/pipeline/__init__.py b/CPAC/pipeline/__init__.py
index 7fde15df3e..34d05e7728 100644
--- a/CPAC/pipeline/__init__.py
+++ b/CPAC/pipeline/__init__.py
@@ -28,4 +28,5 @@
if preconfig != 'benchmark-ANTS' and
not preconfig.startswith('regtest-')]
+
__all__ = ['ALL_PIPELINE_CONFIGS', 'AVAILABLE_PIPELINE_CONFIGS']
diff --git a/CPAC/pipeline/check_outputs.py b/CPAC/pipeline/check_outputs.py
index d35f5a30cc..380fc5783b 100644
--- a/CPAC/pipeline/check_outputs.py
+++ b/CPAC/pipeline/check_outputs.py
@@ -48,7 +48,7 @@ def check_outputs(output_dir, log_dir, pipe_name, unique_id):
"""
outputs_logger = getLogger(f'{unique_id}_expectedOutputs')
missing_outputs = ExpectedOutputs()
- container = os.path.join(f'cpac_{pipe_name}', unique_id)
+ container = os.path.join(f'pipeline_{pipe_name}', unique_id)
if (
isinstance(outputs_logger, (Logger, MockLogger)) and
len(outputs_logger.handlers)
diff --git a/CPAC/pipeline/cpac_group_runner.py b/CPAC/pipeline/cpac_group_runner.py
index 3e8b4082a2..5c27f0141f 100644
--- a/CPAC/pipeline/cpac_group_runner.py
+++ b/CPAC/pipeline/cpac_group_runner.py
@@ -16,7 +16,6 @@
 License along with C-PAC. If not, see <https://www.gnu.org/licenses/>."""
import os
import fnmatch
-import pandas
from CPAC.pipeline.nipype_pipeline_engine.plugins import MultiProcPlugin
from CPAC.utils.monitoring import log_nodes_cb
@@ -34,14 +33,13 @@ def load_config_yml(config_file, individual=False):
import os
import yaml
import yamlordereddictloader
- from CPAC.utils import Configuration
try:
config_path = os.path.realpath(config_file)
config_dict = yaml.safe_load(open(config_path, 'r'))
- config = Configuration(config_dict)
+ config = config_dict
except Exception as e:
err = "\n\n[!] CPAC says: Could not load or read the configuration " \
@@ -49,10 +47,10 @@ def load_config_yml(config_file, individual=False):
raise Exception(err)
if individual:
- config.pipeline_setup['log_directory']['path'] = os.path.abspath(config.pipeline_setup['log_directory']['path'])
- config.pipeline_setup['working_directory']['path'] = os.path.abspath(config.pipeline_setup['working_directory']['path'])
- config.pipeline_setup['output_directory']['path'] = os.path.abspath(config.pipeline_setup['output_directory']['path'])
- config.pipeline_setup['crash_log_directory']['path'] = os.path.abspath(config.pipeline_setup['crash_log_directory']['path'])
+        config["pipeline_setup"]["log_directory"]["path"] = os.path.abspath(config["pipeline_setup"]["log_directory"]["path"])
+        config["pipeline_setup"]["working_directory"]["path"] = os.path.abspath(config["pipeline_setup"]["working_directory"]["path"])
+        config["pipeline_setup"]["output_directory"]["output_path"] = os.path.abspath(config["pipeline_setup"]["output_directory"]["output_path"])
+        config["pipeline_setup"]["crash_log_directory"]["path"] = os.path.abspath(config["pipeline_setup"]["crash_log_directory"]["path"])
return config
@@ -133,29 +131,26 @@ def gather_nifti_globs(pipeline_output_folder, resource_list,
import glob
import pandas as pd
import pkg_resources as p
- from __builtin__ import any as b_any
- ext = ".nii"
+ exts = ".nii"
nifti_globs = []
- keys_csv = p.resource_filename('CPAC', 'resources/cpac_outputs.csv')
+ keys_tsv = p.resource_filename('CPAC', 'resources/cpac_outputs.tsv')
try:
- keys = pd.read_csv(keys_csv)
+ keys = pd.read_csv(keys_tsv, delimiter='\t')
except Exception as e:
- err = "\n[!] Could not access or read the cpac_outputs.csv " \
- "resource file:\n{0}\n\nError details {1}\n".format(keys_csv, e)
+ err = "\n[!] Could not access or read the cpac_outputs.tsv " \
+ "resource file:\n{0}\n\nError details {1}\n".format(keys_tsv, e)
raise Exception(err)
derivative_list = list(
- keys[keys['Derivative'] == 'yes'][keys['Space'] == 'template'][
- keys['Values'] == 'z-score']['Resource'])
+ keys[keys['Sub-Directory'] == 'func']['Resource'])
derivative_list = derivative_list + list(
- keys[keys['Derivative'] == 'yes'][keys['Space'] == 'template'][
- keys['Values'] == 'z-stat']['Resource'])
+ keys[keys['Sub-Directory'] == 'anat']['Resource'])
if pull_func:
derivative_list = derivative_list + list(
- keys[keys['Functional timeseries'] == 'yes']['Resource'])
+ keys[keys['Space'] == 'functional']['Resource'])
if len(resource_list) == 0:
err = "\n\n[!] No derivatives selected!\n\n"
@@ -176,25 +171,21 @@ def gather_nifti_globs(pipeline_output_folder, resource_list,
dirs_to_grab.append(derivative_name)
# grab MeanFD_Jenkinson just in case
- dirs_to_grab.append("power_params")
+ dirs_to_grab.append("framewise-displacement-jenkinson")
for resource_name in dirs_to_grab:
- glob_string = os.path.join(pipeline_output_folder, "*",
- resource_name, "*", "*")
-
+ glob_string = os.path.join(pipeline_output_folder, "*", "*",
+ f"*{resource_name}*")
# get all glob strings that result in a list of paths where every path
# ends with a NIFTI file
-
prog_string = ".."
-
while len(glob.glob(glob_string)) != 0:
- if b_any(ext in x for x in glob.glob(glob_string)) == True:
+            if any(exts in x for x in glob.glob(glob_string)):
nifti_globs.append(glob_string)
glob_string = os.path.join(glob_string, "*")
prog_string = prog_string + "."
- print(prog_string)
if len(nifti_globs) == 0:
err = "\n\n[!] No output filepaths found in the pipeline output " \
@@ -339,23 +330,22 @@ def create_output_dict_list(nifti_globs, pipeline_output_folder,
if derivatives is None:
- keys_csv = p.resource_filename('CPAC', 'resources/cpac_outputs.csv')
+ keys_tsv = p.resource_filename('CPAC', 'resources/cpac_outputs.tsv')
try:
- keys = pd.read_csv(keys_csv)
+        keys = pd.read_csv(keys_tsv, delimiter='\t')
except Exception as e:
err = "\n[!] Could not access or read the cpac_outputs.csv " \
- "resource file:\n{0}\n\nError details {1}\n".format(keys_csv, e)
+ "resource file:\n{0}\n\nError details {1}\n".format(keys_tsv, e)
raise Exception(err)
derivatives = list(
- keys[keys['Derivative'] == 'yes'][keys['Space'] == 'template'][
- keys['Values'] == 'z-score']['Resource'])
+ keys[keys['Sub-Directory'] == 'func']['Resource'])
derivatives = derivatives + list(
- keys[keys['Derivative'] == 'yes'][keys['Space'] == 'template'][
- keys['Values'] == 'z-stat']['Resource'])
+ keys[keys['Sub-Directory'] == 'anat']['Resource'])
if pull_func:
- derivatives = derivatives + list(keys[keys['Functional timeseries'] == 'yes']['Resource'])
+ derivatives = derivatives + list(
+ keys[keys['Space'] == 'functional']['Resource'])
# remove any extra /'s
pipeline_output_folder = pipeline_output_folder.rstrip("/")
@@ -381,7 +371,7 @@ def create_output_dict_list(nifti_globs, pipeline_output_folder,
'''
# grab MeanFD_Jenkinson just in case
- search_dirs += ["power_params"]
+ search_dirs += ["framewise-displacement-jenkinson"]
exts = ['.' + ext.lstrip('.') for ext in exts]
@@ -392,23 +382,21 @@ def create_output_dict_list(nifti_globs, pipeline_output_folder,
for filename in files:
filepath = os.path.join(root, filename)
-
if not any(fnmatch.fnmatch(filepath, pattern) for pattern in nifti_globs):
continue
if not any(filepath.endswith(ext) for ext in exts):
continue
-
relative_filepath = filepath.split(pipeline_output_folder)[1]
filepath_pieces = [_f for _f in relative_filepath.split("/") if _f]
- resource_id = filepath_pieces[1]
+ resource_id = '_'.join(filepath_pieces[2].split(".")[0].split("_")[3:])
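+            # e.g. a (hypothetical) filename like
+            # 'sub-01_ses-1_task-rest_space-template_desc-preproc_bold.nii.gz'
+            # yields resource_id 'space-template_desc-preproc_bold'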
if resource_id not in search_dirs:
continue
- series_id_string = filepath_pieces[2]
- strat_info = "_".join(filepath_pieces[3:])[:-len(ext)]
+ series_id_string = filepath_pieces[2].split("_")[1]
+ strat_info = "_".join(filepath_pieces[2].split("_")[2:3])
unique_resource_id = (resource_id, strat_info)
@@ -429,7 +417,7 @@ def create_output_dict_list(nifti_globs, pipeline_output_folder,
new_row_dict["Filepath"] = filepath
print('{0} - {1} - {2}'.format(
- unique_id,
+ unique_id.split("_")[0],
series_id,
resource_id
))
@@ -525,14 +513,12 @@ def pheno_sessions_to_repeated_measures(pheno_df, sessions_list):
More info:
https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FEAT/
UserGuide#Paired_Two-Group_Difference_.28Two-Sample_Paired_T-Test.29
-
Sample input:
pheno_df
sub01
sub02
sessions_list
[ses01, ses02]
-
Expected output:
pheno_df Sessions participant_sub01 participant_sub02
sub01 ses01 1 0
@@ -718,12 +704,12 @@ def prep_feat_inputs(group_config_file):
import pandas as pd
import pkg_resources as p
- keys_csv = p.resource_filename('CPAC', 'resources/cpac_outputs.csv')
+ keys_tsv = p.resource_filename('CPAC', 'resources/cpac_outputs.tsv')
try:
- keys = pd.read_csv(keys_csv)
+ keys = pd.read_csv(keys_tsv, delimiter='\t')
except Exception as e:
- err = "\n[!] Could not access or read the cpac_outputs.csv " \
- "resource file:\n{0}\n\nError details {1}\n".format(keys_csv, e)
+ err = "\n[!] Could not access or read the cpac_outputs.tsv " \
+ "resource file:\n{0}\n\nError details {1}\n".format(keys_tsv, e)
raise Exception(err)
derivatives = list(keys[keys['Derivative'] == 'yes'][keys['Space'] == 'template'][keys['Values'] == 'z-score']['Resource'])
@@ -1113,9 +1099,9 @@ def run_feat(group_config_file, feat=True):
# get group pipeline config loaded
c = load_config_yml(group_config_file)
- pipeline_dir = c.pipeline_dir
- model_name = c.model_name
- out_dir = c.output_dir
+ pipeline_dir = c["pipeline_setup"]["output_directory"]["source_outputs_path"]
+ model_name = c["fsl_feat"]["model_name"]
+ out_dir = c["pipeline_setup"]["output_directory"]["output_path"]
pipeline_name = pipeline_dir.rstrip('/').split('/')[-1]
@@ -1178,7 +1164,7 @@ def run_feat(group_config_file, feat=True):
models[id_tuple]['dir_path'].replace(out_dir, '').lstrip('/'))
work_dir = work_dir.replace('cpac_group_analysis', 'cpac_group_analysis_workdir')
work_dir = work_dir.replace('model_files/', '')
- log_dir = os.path.join(c.log_dir,
+ log_dir = os.path.join(c["pipeline_setup"]["log_directory"]["path"],
models[id_tuple]['dir_path'].replace(out_dir, '').lstrip('/'))
log_dir = log_dir.replace('cpac_group_analysis', 'cpac_group_analysis_logdir')
log_dir = log_dir.replace('model_files/', '')
@@ -1204,9 +1190,9 @@ def run_feat(group_config_file, feat=True):
design_matrix.columns,
None, None,
custom_contrasts_csv,
- None, c.group_sep,
+ None, c["fsl_feat"]["group_sep"],
grp_vector,
- c.coding_scheme,
+ c["fsl_feat"]["coding_scheme"],
model_name,
id_tuple[0],
input_files_dir)
@@ -1245,17 +1231,18 @@ def run_feat(group_config_file, feat=True):
f_test, mat, con, grp, model_out_dir,
work_dir, log_dir, model_name, fts)))
- manage_processes(procss, out_dir, c.num_models_at_once)
+ manage_processes(procss, out_dir, c["fsl_feat"]["num_models_at_once"])
def run_cwas_group(pipeline_dir, out_dir, working_dir, crash_dir, roi_file,
regressor_file, participant_column, columns,
- permutations, parallel_nodes, inclusion=None):
+ permutations, parallel_nodes, plugin_args, z_score, inclusion=None):
import os
import numpy as np
from multiprocessing import pool
from CPAC.cwas.pipeline import create_cwas
+ from nipype import config
pipeline_dir = os.path.abspath(pipeline_dir)
@@ -1269,12 +1256,13 @@ def run_cwas_group(pipeline_dir, out_dir, working_dir, crash_dir, roi_file,
os.path.basename(pipeline_dir))
inclusion_list = None
+
if inclusion:
inclusion_list = load_text_file(inclusion, "MDMR participant "
"inclusion list")
output_df_dct = gather_outputs(pipeline_dir,
- ["functional_to_standard"],
+ ['space-template_desc-preproc_bold'],
inclusion_list, False, False,
get_func=True)
@@ -1293,7 +1281,6 @@ def run_cwas_group(pipeline_dir, out_dir, working_dir, crash_dir, roi_file,
df_dct[strat_scan] = strat_df[strat_df["Series"] == strat_scan]
else:
df_dct[list(set(strat_df["Series"]))[0]] = strat_df
-
for df_scan in df_dct.keys():
func_paths = {
p.split("_")[0]: f
@@ -1303,7 +1290,12 @@ def run_cwas_group(pipeline_dir, out_dir, working_dir, crash_dir, roi_file,
df_dct[df_scan].Filepath
)
}
-
+
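+        # Nipype's 'Linear' plugin runs the workflow serially; use it when
+        # only a single process is allotted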
+ if plugin_args['n_procs'] == 1:
+ plugin = 'Linear'
+ else:
+ plugin = 'MultiProc'
+
cwas_wf = create_cwas(name="MDMR_{0}".format(df_scan),
working_dir=working_dir,
crash_dir=crash_dir)
@@ -1314,7 +1306,8 @@ def run_cwas_group(pipeline_dir, out_dir, working_dir, crash_dir, roi_file,
cwas_wf.inputs.inputspec.columns = columns
cwas_wf.inputs.inputspec.permutations = permutations
cwas_wf.inputs.inputspec.parallel_nodes = parallel_nodes
- cwas_wf.run()
+ cwas_wf.inputs.inputspec.z_score = z_score
+ cwas_wf.run(plugin=plugin, plugin_args=plugin_args)
def run_cwas(pipeline_config):
@@ -1325,37 +1318,44 @@ def run_cwas(pipeline_config):
pipeline_config = os.path.abspath(pipeline_config)
pipeconfig_dct = yaml.safe_load(open(pipeline_config, 'r'))
-
- pipeline = pipeconfig_dct["pipeline_dir"]
- output_dir = pipeconfig_dct["output_dir"]
- working_dir = pipeconfig_dct["work_dir"]
- crash_dir = pipeconfig_dct["log_dir"]
-
- roi_file = pipeconfig_dct["mdmr_roi_file"]
- regressor_file = pipeconfig_dct["mdmr_regressor_file"]
- participant_column = pipeconfig_dct["mdmr_regressor_participant_column"]
- columns = pipeconfig_dct["mdmr_regressor_columns"]
- permutations = pipeconfig_dct["mdmr_permutations"]
- parallel_nodes = pipeconfig_dct["mdmr_parallel_nodes"]
- inclusion = pipeconfig_dct["participant_list"]
+
+ num_cpus = pipeconfig_dct["pipeline_setup"]["system_config"]["num_cpus"]
+ mem_gb = pipeconfig_dct["pipeline_setup"]["system_config"]["num_memory"]
+
+    plugin_args = {'n_procs': num_cpus, 'memory_gb': mem_gb}
+
+ pipeline = pipeconfig_dct["pipeline_setup"]["output_directory"]["source_outputs_path"]
+ output_dir = pipeconfig_dct["pipeline_setup"]["output_directory"]["output_path"]
+ working_dir = pipeconfig_dct["pipeline_setup"]["working_directory"]["path"]
+ crash_dir = pipeconfig_dct["pipeline_setup"]["log_directory"]["path"]
+
+ roi_file = pipeconfig_dct["mdmr"]["roi_file"]
+ regressor_file = pipeconfig_dct["mdmr"]["regressor_file"]
+ participant_column = pipeconfig_dct["mdmr"]["regressor_participant_column"]
+ columns = pipeconfig_dct["mdmr"]["regressor_columns"]
+ permutations = pipeconfig_dct["mdmr"]["permutations"]
+ parallel_nodes = pipeconfig_dct["mdmr"]["parallel_nodes"]
+ inclusion = pipeconfig_dct["mdmr"]["inclusion_list"]
+ z_score = pipeconfig_dct["mdmr"]["zscore"]
if not inclusion or "None" in inclusion or "none" in inclusion:
inclusion = None
run_cwas_group(pipeline, output_dir, working_dir, crash_dir, roi_file,
regressor_file, participant_column, columns,
- permutations, parallel_nodes,
+ permutations, parallel_nodes, plugin_args, z_score,
inclusion=inclusion)
def find_other_res_template(template_path, new_resolution):
- """Find the same template/standard file in another resolution, if it
+ """
+ Find the same template/standard file in another resolution, if it
exists.
-
template_path: file path to the template NIfTI file
+
new_resolution: (int) the resolution of the template file you need
-
NOTE: Makes an assumption regarding the filename format of the files.
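+
+    e.g. (illustrative) a ``template_path`` containing ``1mm``, with
+    ``new_resolution=2``, resolves to the corresponding ``2mm`` filepath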
+
"""
# TODO: this is assuming there is a mm resolution in the file path - not
@@ -1373,8 +1373,8 @@ def find_other_res_template(template_path, new_resolution):
template_parts[0] = str(new_resolution).join(template_parts[0].rsplit(template_parts[0][-1], 1))
ref_file = "{0}{1}".format(template_parts[0], template_parts[1])
- elif "${func_resolution}" in template_path:
- ref_file = template_path.replace("${func_resolution}",
+ elif "${resolution_for_func_preproc}" in template_path:
+ ref_file = template_path.replace("${resolution_for_func_preproc}",
"{0}mm".format(new_resolution))
if ref_file:
@@ -1474,11 +1474,11 @@ def launch_PyBASC(pybasc_config):
def run_basc(pipeline_config):
- """Run the PyBASC module.
-
+ """
+ Run the PyBASC module.
+
PyBASC is a separate Python package built and maintained by Aki Nikolaidis
which implements the BASC analysis via Python.
-
PyBASC is based off of the following work:
- Garcia-Garcia, M., Nikolaidis, A., Bellec, P., Craddock, R. C., Cheung, B., Castellanos, F. X., & Milham, M. P. (2017).
Detecting stable individual differences in the functional organization of the human basal ganglia. NeuroImage.
@@ -1486,16 +1486,12 @@ def run_basc(pipeline_config):
Multi-level bootstrap analysis of stable clusters in resting-state fMRI. Neuroimage, 51(3), 1126-1139.
- Bellec, P., Marrelec, G., & Benali, H. (2008).
A bootstrap test to investigate changes in brain connectivity for functional MRI. Statistica Sinica, 1253-1268.
-
PyBASC GitHub repository:
https://github.com/AkiNikolaidis/PyBASC
-
PyBASC author:
https://www.researchgate.net/profile/Aki_Nikolaidis
-
Inputs
pipeline_config: path to C-PAC pipeline configuration YAML file
-
Steps (of the C-PAC interface for PyBASC, not PyBASC itself)
1. Read in the PyBASC-relevant pipeline config items and create a new
PyBASC config dictionary.
@@ -1508,7 +1504,7 @@ def run_basc(pipeline_config):
selected to run PyBASC for (preprocessed and template-space
functional time series are pulled from each pipeline output
directory, for input into PyBASC).
- 6. Gather functional_to_standard outputs from each pipeline.
+ 6. Gather space-template_bold outputs from each pipeline.
7. Create further sub-directories for each nuisance regression
strategy and functional scan within each C-PAC pipeline, and
separate the functional outputs by strategy and scan as well.
@@ -1518,7 +1514,6 @@ def run_basc(pipeline_config):
into a config YAML file for each pipeline-strategy-scan we are
running.
10. Launch PyBASC for each configuration generated.
-
"""
import os
@@ -1530,27 +1525,28 @@ def run_basc(pipeline_config):
pipeconfig_dct = yaml.safe_load(open(pipeline_config, 'r'))
- output_dir = os.path.abspath(pipeconfig_dct["output_dir"])
- working_dir = os.path.abspath(pipeconfig_dct['work_dir'])
- if pipeconfig_dct['pipeline_setup']['Amazon-AWS']['aws_output_bucket_credentials']:
- creds_path = os.path.abspath(pipeconfig_dct['pipeline_setup']['Amazon-AWS']['aws_output_bucket_credentials'])
+ output_dir = os.path.abspath(pipeconfig_dct["pipeline_setup"]["output_directory"]["output_path"])
+ working_dir = os.path.abspath(pipeconfig_dct["pipeline_setup"]["working_directory"]["path"])
+ if pipeconfig_dct["pipeline_setup"]["Amazon-AWS"]['aws_output_bucket_credentials']:
+ creds_path = os.path.abspath(
+ pipeconfig_dct["pipeline_setup"]["Amazon-AWS"]['aws_output_bucket_credentials'])
- func_template = pipeconfig_dct["template_brain_only_for_func"]
+ func_template = pipeconfig_dct["basc"]["template_brain_only_for_func"]
if '$FSLDIR' in func_template:
if os.environ.get('FSLDIR'):
func_template = func_template.replace('$FSLDIR',
os.environ['FSLDIR'])
- basc_inclusion = pipeconfig_dct["participant_list"]
- basc_scan_inclusion = pipeconfig_dct["basc_scan_inclusion"]
- basc_resolution = pipeconfig_dct["basc_resolution"]
+ basc_inclusion = pipeconfig_dct["pipeline_setup"]["output_directory"]["participant_list"]
+ basc_scan_inclusion = pipeconfig_dct["basc"]["scan_inclusion"]
+ basc_resolution = pipeconfig_dct["basc"]["resolution"]
basc_config_dct = {'run': True,
'reruns': 1}
for key in pipeconfig_dct.keys():
if 'basc' in key:
- basc_config_dct[key.replace('basc_', '')] = pipeconfig_dct[key]
+ basc_config_dct = pipeconfig_dct[key]
iterables = ['dataset_bootstrap_list', 'timeseries_bootstrap_list',
'blocklength_list', 'n_clusters_list', 'output_sizes']
@@ -1610,7 +1606,8 @@ def run_basc(pipeline_config):
roi_file_two = resample_cpac_output_image(roi_two_cmd_args)
basc_config_dct['cross_cluster_mask_file'] = roi_file_two
- pipeline_dir = os.path.abspath(pipeconfig_dct["pipeline_dir"])
+ pipeline_dir = os.path.abspath(pipeconfig_dct["pipeline_setup"]
+ ["output_directory"]["source_outputs_path"])
out_dir = os.path.join(output_dir, 'cpac_group_analysis', 'PyBASC',
'{0}mm_resolution'.format(basc_resolution),
@@ -1638,8 +1635,7 @@ def run_basc(pipeline_config):
# - each dataframe will contain output filepaths and their associated
# information, and each dataframe will include ALL SERIES/SCANS
output_df_dct = gather_outputs(pipeline_dir,
- ["functional_to_standard",
- "functional_mni"],
+ ["space-template_bold"],
inclusion_list, False, False,
get_func=True)
@@ -1671,8 +1667,8 @@ def run_basc(pipeline_config):
if df_scan not in scan_inclusion:
continue
- basc_config_dct['analysis_ID'] = '{0}_{1}'.format(os.path.basename(pipeline_dir),
- df_scan)
+ basc_config_dct['analysis_ID'] = '{0}_{1}'.format(
+ os.path.basename(pipeline_dir), df_scan)
# add scan label and nuisance regression strategy label to the
# output directory path
@@ -1688,7 +1684,7 @@ def run_basc(pipeline_config):
# affinity threshold is an iterable, and must match the number of
# functional file paths for the MapNodes
- affinity_thresh = pipeconfig_dct['basc_affinity_thresh'] * len(func_paths)
+ affinity_thresh = pipeconfig_dct["basc"]["affinity_thresh"] * len(func_paths)
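+        # e.g. a configured [0.5] with three functional paths becomes
+        # [0.5, 0.5, 0.5] (one threshold per MapNode iteration)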
# resampling if necessary
# each run should take the file, resample it and write it
@@ -1746,11 +1742,11 @@ def run_isc_group(pipeline_dir, out_dir, working_dir, crash_dir,
output_df_dct = gather_outputs(
pipeline_dir,
- ["functional_to_standard", "roi_timeseries"],
+ ["space-template_bold", "desc-Mean_timeseries"],
inclusion_list=None,
get_motion=False, get_raw_score=False, get_func=True,
- derivatives=["functional_to_standard", "roi_timeseries"],
- exts=['nii', 'nii.gz', 'csv']
+ derivatives=["space-template_bold", "desc-Mean_timeseries"],
+        # exts=['nii', 'nii.gz', 'csv']
)
iteration_ids = []
@@ -1759,13 +1755,13 @@ def run_isc_group(pipeline_dir, out_dir, working_dir, crash_dir,
derivative, _ = preproc_strat
- if "voxel" not in levels and derivative == "functional_to_standard":
+ if "voxel" not in levels and derivative == "space-template_bold":
continue
- if "roi" not in levels and derivative == "roi_timeseries":
+ if "roi" not in levels and derivative == "desc-Mean_timeseries":
continue
- if derivative == "roi_timeseries":
+ if derivative == "desc-Mean_timeseries":
if roi_inclusion:
# backwards because ROI labels embedded as substrings
for roi_label in roi_inclusion:
@@ -1776,7 +1772,6 @@ def run_isc_group(pipeline_dir, out_dir, working_dir, crash_dir,
"{1}/{2}\n".format(roi_label, derivative, _))
continue
-
df_dct = {}
strat_df = output_df_dct[preproc_strat]
@@ -1803,10 +1798,12 @@ def run_isc_group(pipeline_dir, out_dir, working_dir, crash_dir,
)
}
- unique_out_dir = os.path.join(out_dir, "ISC", derivative, _, df_scan)
+ unique_out_dir = os.path.join(out_dir, "ISC", derivative, _,
+ df_scan)
it_id = "ISC_{0}_{1}_{2}".format(df_scan, derivative,
- _.replace('.', '').replace('+', ''))
+ _.replace('.', '').replace(
+ '+', ''))
isc_wf = create_isc(name=it_id,
output_dir=unique_out_dir,
@@ -1816,10 +1813,8 @@ def run_isc_group(pipeline_dir, out_dir, working_dir, crash_dir,
isc_wf.inputs.inputspec.permutations = permutations
isc_wf.inputs.inputspec.std = std_filter
isc_wf.inputs.inputspec.collapse_subj = False
- plugin_args = {'n_procs': num_cpus,
- 'status_callback': log_nodes_cb}
- isc_wf.run(plugin=MultiProcPlugin(plugin_args),
- plugin_args=plugin_args)
+ isc_wf.run(plugin='MultiProc',
+ plugin_args={'n_procs': num_cpus})
if isfc:
for df_scan in df_dct.keys():
@@ -1835,10 +1830,12 @@ def run_isc_group(pipeline_dir, out_dir, working_dir, crash_dir,
)
}
- unique_out_dir = os.path.join(out_dir, "ISFC", derivative, _, df_scan)
+ unique_out_dir = os.path.join(out_dir, "ISFC", derivative, _,
+ df_scan)
it_id = "ISFC_{0}_{1}_{2}".format(df_scan, derivative,
- _.replace('.', '').replace('+', ''))
+ _.replace('.', '').replace(
+ '+', ''))
isfc_wf = create_isfc(name=it_id,
output_dir=unique_out_dir,
@@ -1848,10 +1845,8 @@ def run_isc_group(pipeline_dir, out_dir, working_dir, crash_dir,
isfc_wf.inputs.inputspec.permutations = permutations
isfc_wf.inputs.inputspec.std = std_filter
isfc_wf.inputs.inputspec.collapse_subj = False
- plugin_args = {'n_procs': num_cpus,
- 'status_callback': log_nodes_cb}
- isfc_wf.run(plugin=MultiProcPlugin(plugin_args),
- plugin_args=plugin_args)
+ isfc_wf.run(plugin='MultiProc',
+ plugin_args={'n_procs': num_cpus})
def run_isc(pipeline_config):
@@ -1863,23 +1858,23 @@ def run_isc(pipeline_config):
pipeconfig_dct = yaml.safe_load(open(pipeline_config, 'r'))
- pipeline_dir = pipeconfig_dct["pipeline_dir"]
+ pipeline_dir = pipeconfig_dct["pipeline_setup"]["output_directory"]["source_outputs_path"]
- output_dir = pipeconfig_dct["output_dir"]
- working_dir = pipeconfig_dct["work_dir"]
- crash_dir = pipeconfig_dct["log_dir"]
+ output_dir = pipeconfig_dct["pipeline_setup"]["output_directory"]["output_path"]
+ working_dir = pipeconfig_dct["pipeline_setup"]["working_directory"]["path"]
+ crash_dir = pipeconfig_dct["pipeline_setup"]["log_directory"]["path"]
scan_inclusion = None
if "scan_inclusion" in pipeconfig_dct.keys():
- scan_inclusion = pipeconfig_dct["scan_inclusion"]
+ scan_inclusion = pipeconfig_dct["pipeline_setup"]["system_config"]["scan_inclusion"]
roi_inclusion = None
if "isc_roi_inclusion" in pipeconfig_dct.keys():
- roi_inclusion = pipeconfig_dct["isc_roi_inclusion"]
+ roi_inclusion = pipeconfig_dct["isc_isfc"]["roi_inclusion"]
num_cpus = 1
if "num_cpus" in pipeconfig_dct.keys():
- num_cpus = pipeconfig_dct["num_cpus"]
+ num_cpus = pipeconfig_dct["pipeline_setup"]["system_config"]["num_cpus"]
isc = 1 in pipeconfig_dct.get("runISC", [])
isfc = 1 in pipeconfig_dct.get("runISFC", [])
@@ -1933,12 +1928,12 @@ def run_qpp(group_config_file):
c = load_config_yml(group_config_file)
- pipeline_dir = os.path.abspath(c.pipeline_dir)
- out_dir = os.path.join(c.output_dir, 'cpac_group_analysis', 'QPP',
+ pipeline_dir = os.path.abspath(c["pipeline_setup"]["output_directory"]["source_outputs_path"])
+ out_dir = os.path.join(c["pipeline_setup"]["output_directory"]["output_path"], 'cpac_group_analysis', 'QPP',
os.path.basename(pipeline_dir))
- working_dir = os.path.join(c.work_dir, 'cpac_group_analysis', 'QPP',
+ working_dir = os.path.join(c["pipeline_setup"]["working_directory"]["path"], 'cpac_group_analysis', 'QPP',
os.path.basename(pipeline_dir))
- crash_dir = os.path.join(c.log_dir, 'cpac_group_analysis', 'QPP',
+ crash_dir = os.path.join(c["pipeline_setup"]["crash_log_directory"]["path"], 'cpac_group_analysis', 'QPP',
os.path.basename(pipeline_dir))
try:
@@ -1950,28 +1945,29 @@ def run_qpp(group_config_file):
outputs = gather_outputs(
pipeline_dir,
- ["functional_to_standard"],
- inclusion_list=c.participant_list,
+ ["space-template_bold"],
+ inclusion_list=c["pipeline_setup"]["output_directory"]
+ ["participant_list"],
get_motion=False, get_raw_score=False, get_func=True,
- derivatives=["functional_to_standard"],
- exts=['nii', 'nii.gz']
+ derivatives=["space-template_bold"],
+        # exts=['nii', 'nii.gz']
)
- if c.qpp_stratification == 'Scan':
+ if c["qpp"]["stratification"] == 'Scan':
qpp_stratification = ['Series']
- elif c.qpp_stratification == 'Session':
+ elif c["qpp"]["stratification"] == 'Session':
qpp_stratification = ['Sessions']
- elif c.qpp_stratification in ['Session and Scan', 'Scan and Session']:
+ elif c["qpp"]["stratification"] in ['Session and Scan', 'Scan and Session']:
qpp_stratification = ['Sessions', 'Series']
else:
qpp_stratification = []
for (resource_id, strat_info), output_df in outputs.items():
- if c.qpp_session_inclusion:
- output_df = output_df[output_df["Sessions"].isin(c.qpp_session_inclusion)]
- if c.qpp_scan_inclusion:
- output_df = output_df[output_df["Series"].isin(c.qpp_scan_inclusion)]
+ if c["qpp"]["session_inclusion"]:
+ output_df = output_df[output_df["Sessions"].isin(c["qpp"]["session_inclusion"])]
+ if c["qpp"]["scan_inclusion"]:
+ output_df = output_df[output_df["Series"].isin(c["qpp"]["scan_inclusion"])]
if qpp_stratification:
output_df_groups = output_df.groupby(by=qpp_stratification)
@@ -2000,12 +1996,12 @@ def run_qpp(group_config_file):
wf = create_qpp(name="QPP", working_dir=group_working_dir, crash_dir=group_crash_dir)
- wf.inputs.inputspec.window_length = c.qpp_window
- wf.inputs.inputspec.permutations = c.qpp_permutations
- wf.inputs.inputspec.lower_correlation_threshold = c.qpp_initial_threshold
- wf.inputs.inputspec.higher_correlation_threshold = c.qpp_final_threshold
- wf.inputs.inputspec.iterations = c.qpp_iterations
- wf.inputs.inputspec.correlation_threshold_iteration = c.qpp_initial_threshold_iterations
+ wf.inputs.inputspec.window_length = c["qpp"]["window"]
+ wf.inputs.inputspec.permutations = c["qpp"]["permutations"]
+ wf.inputs.inputspec.lower_correlation_threshold = c["qpp"]["initial_threshold"]
+ wf.inputs.inputspec.higher_correlation_threshold = c["qpp"]["final_threshold"]
+ wf.inputs.inputspec.iterations = c["qpp"]["iterations"]
+ wf.inputs.inputspec.correlation_threshold_iteration = c["qpp"]["initial_threshold_iterations"]
wf.inputs.inputspec.convergence_iterations = 1
wf.inputs.inputspec.datasets = output_df_group.Filepath.tolist()
@@ -2069,25 +2065,25 @@ def run(config_file):
c = load_config_yml(config_file)
# Run MDMR, if selected
- if 1 in c.runMDMR:
+ if 1 in c["mdmr"]["run"]:
run_cwas(config_file)
# Run ISC, if selected
- if 1 in c.runISC or 1 in c.runISFC:
+ if 1 in c["isc_isfc"]["runISC"] or 1 in c["isc_isfc"]["runISFC"]:
run_isc(config_file)
# Run PyBASC, if selected
- if 1 in c.run_basc:
+ if 1 in c["basc"]["run"]:
run_basc(config_file)
# Run FSL FEAT group analysis, if selected
- if 1 in c.run_fsl_feat:
+ if 1 in c["fsl_feat"]["run"]:
run_feat(config_file)
# Run randomise, if selected
- if 1 in c.run_randomise:
+ if 1 in c["fsl_randomise"]["run"]:
run_feat(config_file, feat=False)
    # Run QPP, if selected
- if 1 in c.run_qpp:
- run_qpp(config_file)
+ if 1 in c["qpp"]["run"]:
+ run_qpp(config_file)
\ No newline at end of file
diff --git a/CPAC/pipeline/cpac_pipeline.py b/CPAC/pipeline/cpac_pipeline.py
index 58ba949961..993473c1d6 100644
--- a/CPAC/pipeline/cpac_pipeline.py
+++ b/CPAC/pipeline/cpac_pipeline.py
@@ -197,10 +197,9 @@
)
from CPAC.pipeline.random_state import set_up_random_state_logger
-from CPAC.utils.datasource import bidsier_prefix, gather_extraction_maps
from CPAC.pipeline.schema import valid_options
from CPAC.utils.trimmer import the_trimmer
-from CPAC.utils import Configuration
+from CPAC.utils import Configuration, set_subject
from CPAC.qc.pipeline import create_qc_workflow
from CPAC.qc.xcp import qc_xcp
@@ -248,6 +247,8 @@ def run_workflow(sub_dict, c, run, pipeline_timing_info=None, p_name=None,
the prepared nipype workflow object containing the parameters
specified in the config
'''
+ from CPAC.utils.datasource import bidsier_prefix
+
if plugin is not None and not isinstance(plugin, str):
raise TypeError(
'CPAC.pipeline.cpac_pipeline.run_workflow requires a '
@@ -258,18 +259,9 @@ def run_workflow(sub_dict, c, run, pipeline_timing_info=None, p_name=None,
# Assure that changes on config will not affect other parts
c = copy.copy(c)
- subject_id = sub_dict['subject_id']
- if sub_dict['unique_id']:
- subject_id += "_" + sub_dict['unique_id']
-
+ subject_id, p_name, log_dir = set_subject(sub_dict, c)
c['subject_id'] = subject_id
- log_dir = os.path.join(c.pipeline_setup['log_directory']['path'],
- f'pipeline_{c.pipeline_setup["pipeline_name"]}',
- subject_id)
- if not os.path.exists(log_dir):
- os.makedirs(os.path.join(log_dir))
-
set_up_logger(f'{subject_id}_expectedOutputs',
filename=f'{bidsier_prefix(c["subject_id"])}_'
'expectedOutputs.yml',
@@ -286,9 +278,9 @@ def run_workflow(sub_dict, c, run, pipeline_timing_info=None, p_name=None,
},
'execution': {
'crashfile_format': 'txt',
- 'resource_monitor_frequency': 0.2
- }
- })
+ 'resource_monitor_frequency': 0.2,
+ 'stop_on_first_crash': c['pipeline_setup', 'system_config',
+ 'fail_fast']}})
config.enable_resource_monitor()
logging.update_logging(config)
@@ -430,8 +422,9 @@ def run_workflow(sub_dict, c, run, pipeline_timing_info=None, p_name=None,
check_centrality_lfcd=check_centrality_lfcd)
# absolute paths of the dirs
- c.pipeline_setup['working_directory']['path'] = os.path.abspath(
- c.pipeline_setup['working_directory']['path'])
+ c.pipeline_setup['working_directory']['path'] = os.path.join(
+ os.path.abspath(c.pipeline_setup['working_directory']['path']),
+ p_name)
if 's3://' not in c.pipeline_setup['output_directory']['path']:
c.pipeline_setup['output_directory']['path'] = os.path.abspath(
c.pipeline_setup['output_directory']['path'])
@@ -447,6 +440,23 @@ def run_workflow(sub_dict, c, run, pipeline_timing_info=None, p_name=None,
logger.exception('Building workflow failed')
raise exception
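+
+    # optionally render the built workflow with Graphviz: one dotfile per
+    # requested view (graph2use), exported in each requested format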
+ wf_graph = c['pipeline_setup', 'log_directory', 'graphviz',
+ 'entire_workflow']
+ if wf_graph.get('generate'):
+ for graph2use in wf_graph.get('graph2use'):
+ dotfilename = os.path.join(log_dir, f'{p_name}_{graph2use}.dot')
+ for graph_format in wf_graph.get('format'):
+ try:
+ workflow.write_graph(dotfilename=dotfilename,
+ graph2use=graph2use,
+ format=graph_format,
+ simple_form=wf_graph.get(
+ 'simple_form', True))
+ except Exception as exception:
+ raise RuntimeError(f'Failed to visualize {p_name} ('
+ f'{graph2use}, {graph_format})'
+ ) from exception
+
if test_config:
logger.info('This has been a test of the pipeline configuration '
'file, the pipeline was built successfully, but was '
@@ -1088,6 +1098,7 @@ def connect_pipeline(wf, cfg, rpool, pipeline_blocks):
def build_workflow(subject_id, sub_dict, cfg, pipeline_name=None,
num_ants_cores=1):
+ from CPAC.utils.datasource import gather_extraction_maps
# Workflow setup
wf = initialize_nipype_wf(cfg, sub_dict)
diff --git a/CPAC/pipeline/cpac_runner.py b/CPAC/pipeline/cpac_runner.py
index def55e9e96..7191942aa7 100644
--- a/CPAC/pipeline/cpac_runner.py
+++ b/CPAC/pipeline/cpac_runner.py
@@ -17,18 +17,15 @@
import os
import sys
import warnings
-import yaml
from multiprocessing import Process
from time import strftime
+import yaml
from voluptuous.error import Invalid
-from CPAC.utils.configuration import Configuration
+from CPAC.utils.configuration import check_pname, Configuration, set_subject
from CPAC.utils.ga import track_run
from CPAC.utils.monitoring import failed_to_start, log_nodes_cb
-from CPAC.longitudinal_pipeline.longitudinal_workflow import (
- anat_longitudinal_wf,
- func_preproc_longitudinal_wf,
- func_longitudinal_template_wf
-)
+from CPAC.longitudinal_pipeline.longitudinal_workflow import \
+ anat_longitudinal_wf
from CPAC.utils.yaml_template import upgrade_pipeline_to_1_8
@@ -324,10 +321,11 @@ def run(subject_list_file, config_file=None, p_name=None, plugin=None,
warnings.warn("We recommend that the working directory full path "
"should have less then 70 characters. "
"Long paths might not work in your operating system.")
- warnings.warn("Current working directory: %s" % c.pipeline_setup['working_directory']['path'])
+ warnings.warn("Current working directory: "
+ f"{c.pipeline_setup['working_directory']['path']}")
# Get the pipeline name
- p_name = p_name or c.pipeline_setup['pipeline_name']
+ p_name = check_pname(p_name, c)
# Load in subject list
try:
@@ -577,82 +575,89 @@ def replace_index(target1, target2, file_path):
p_name, plugin, plugin_args, test_config)
except Exception as exception: # pylint: disable=broad-except
exitcode = 1
- failed_to_start(c['pipeline_setup', 'log_directory',
- 'path'], exception)
+ failed_to_start(set_subject(sub, c)[2], exception)
return exitcode
- pid = open(os.path.join(
- c.pipeline_setup['working_directory']['path'], 'pid.txt'
- ), 'w')
-
# Init job queue
job_queue = []
# Allocate processes
- processes = [
- Process(target=run_workflow,
- args=(sub, c, True, pipeline_timing_info,
- p_name, plugin, plugin_args, test_config))
- for sub in sublist
- ]
-
- # If we're allocating more processes than are subjects, run them all
- if len(sublist) <= c.pipeline_setup['system_config']['num_participants_at_once']:
- for p in processes:
- try:
- p.start()
- print(p.pid, file=pid)
- except Exception: # pylint: disable=broad-except
- exitcode = 1
- failed_to_start(c['pipeline_setup', 'log_directory',
- 'path'])
-
- # Otherwise manage resources to run processes incrementally
- else:
- idx = 0
- while idx < len(sublist):
- # If the job queue is empty and we haven't started indexing
- if len(job_queue) == 0 and idx == 0:
- # Init subject process index
- idc = idx
- # Launch processes (one for each subject)
- for p in processes[idc: idc + c.pipeline_setup[
- 'system_config']['num_participants_at_once']]:
- try:
- p.start()
- print(p.pid, file=pid)
- job_queue.append(p)
- idx += 1
- except Exception: # pylint: disable=broad-except
- exitcode = 1
- failed_to_start(c['pipeline_setup',
- 'log_directory', 'path'])
- # Otherwise, jobs are running - check them
- else:
- # Check every job in the queue's status
- for job in job_queue:
- # If the job is not alive
- if not job.is_alive():
- # Find job and delete it from queue
- print('found dead job ', job)
- loc = job_queue.index(job)
- del job_queue[loc]
- # ...and start the next available process
- # (subject)
+ processes = [Process(target=run_workflow,
+ args=(sub, c, True, pipeline_timing_info, p_name,
+ plugin, plugin_args, test_config)) for
+ sub in sublist]
+ working_dir = os.path.join(c['pipeline_setup', 'working_directory',
+ 'path'], p_name)
+ # Create pipeline-specific working dir if not exists
+ if not os.path.exists(working_dir):
+ os.makedirs(working_dir)
+ # Set PID context to pipeline-specific file
+ with open(os.path.join(working_dir, 'pid.txt'), 'w', encoding='utf-8'
+ ) as pid:
+ # If we're allocating more processes than are subjects, run
+ # them all
+ if len(sublist) <= c.pipeline_setup['system_config'][
+ 'num_participants_at_once']:
+ for i, _p in enumerate(processes):
+ try:
+ _p.start()
+ print(_p.pid, file=pid)
+ # pylint: disable=broad-except
+ except Exception as exception:
+ exitcode = 1
+ failed_to_start(set_subject(sublist[i], c)[2],
+ exception)
+ # Otherwise manage resources to run processes incrementally
+ else:
+ idx = 0
+ while idx < len(sublist):
+ # If the job queue is empty and we haven't started indexing
+ if len(job_queue) == 0 and idx == 0:
+ # Init subject process index
+ idc = idx
+ # Launch processes (one for each subject)
+ for _p in processes[idc: idc + c.pipeline_setup[
+ 'system_config']['num_participants_at_once']]:
try:
- processes[idx].start()
- # Append this to job queue and increment index
- job_queue.append(processes[idx])
+ _p.start()
+ print(_p.pid, file=pid)
+ job_queue.append(_p)
idx += 1
- except Exception: # pylint: disable=broad-except
+ # pylint: disable=broad-except
+ except Exception as exception:
exitcode = 1
- failed_to_start(c['pipeline_setup',
- 'log_directory', 'path'])
- # Add sleep so while loop isn't consuming 100% of CPU
- time.sleep(2)
- # set exitcode to 1 if any exception
- if hasattr(pid, 'exitcode'):
- exitcode = exitcode or pid.exitcode
- # Close PID txt file to indicate finish
- pid.close()
+ failed_to_start(set_subject(sublist[idx],
+ c)[2], exception)
+ # Otherwise, jobs are running - check them
+ else:
+ # Check every job in the queue's status
+ for job in job_queue:
+ # If the job is not alive
+ if not job.is_alive():
+ # Find job and delete it from queue
+ print('found dead job ', job)
+ loc = job_queue.index(job)
+ del job_queue[loc]
+ # ...and start the next available
+ # process (subject)
+ try:
+ processes[idx].start()
+ # Append this to job queue and
+ # increment index
+ # pylint: disable=modified-iterating-list
+ job_queue.append(processes[idx])
+ idx += 1
+ # pylint: disable=broad-except
+ except Exception as exception:
+ exitcode = 1
+ failed_to_start(set_subject(sublist[idx],
+ c)[2],
+ exception)
+ # Add sleep so while loop isn't consuming 100% of CPU
+ time.sleep(2)
+ # set exitcode to 1 if any exception
+ if hasattr(pid, 'exitcode'):
+ exitcode = exitcode or pid.exitcode
+ # Close PID txt file to indicate finish
+ pid.close()
sys.exit(exitcode)
diff --git a/CPAC/pipeline/engine.py b/CPAC/pipeline/engine.py
index e836aa61d4..a4a1a19f4f 100644
--- a/CPAC/pipeline/engine.py
+++ b/CPAC/pipeline/engine.py
@@ -1,19 +1,19 @@
-"""Copyright (C) 2022 C-PAC Developers
+# Copyright (C) 2021-2022 C-PAC Developers
-This file is part of C-PAC.
+# This file is part of C-PAC.
-C-PAC is free software: you can redistribute it and/or modify it under
-the terms of the GNU Lesser General Public License as published by the
-Free Software Foundation, either version 3 of the License, or (at your
-option) any later version.
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
-C-PAC is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
-License for more details.
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
-You should have received a copy of the GNU Lesser General Public
-License along with C-PAC. If not, see <https://www.gnu.org/licenses/>."""
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
import ast
import logging
import os
@@ -30,8 +30,7 @@
from CPAC.image_utils.statistical_transforms import z_score_standardize, \
fisher_z_score_standardize
from CPAC.pipeline.check_outputs import ExpectedOutputs
-from CPAC.registration.registration import transform_derivative
-from CPAC.utils import Outputs
+from CPAC.utils.outputs import Outputs
from CPAC.utils.datasource import (
create_anat_datasource,
create_func_datasource,
@@ -617,6 +616,7 @@ def get_strats(self, resources, debug=False):
def derivative_xfm(self, wf, label, connection, json_info, pipe_idx,
pipe_x):
+ from CPAC.registration.registration import transform_derivative
if label in self.xfm:
@@ -853,7 +853,7 @@ def gather_pipes(self, wf, cfg, all=False, add_incl=None, add_excl=None):
out_dir = cfg.pipeline_setup['output_directory']['path']
pipe_name = cfg.pipeline_setup['pipeline_name']
- container = os.path.join(f'cpac_{pipe_name}', unique_id)
+ container = os.path.join(f'pipeline_{pipe_name}', unique_id)
filename = f'{unique_id}_{resource}'
out_path = os.path.join(out_dir, container, subdir, filename)
@@ -1580,9 +1580,9 @@ def ingress_output_dir(cfg, rpool, unique_id, creds_path=None):
print(f"\nOutput directory {out_dir} does not exist yet, "
f"initializing.")
return rpool
-
- cpac_dir = os.path.join(out_dir,
- f'cpac_{cfg.pipeline_setup["pipeline_name"]}',
+
+ cpac_dir = os.path.join(out_dir, 'pipeline_'
+ f'{cfg.pipeline_setup["pipeline_name"]}',
unique_id)
else:
if os.path.isdir(out_dir):
@@ -1947,9 +1947,6 @@ def initiate_rpool(wf, cfg, data_paths=None, part_id=None):
ingress_raw_func_data(wf, rpool, cfg, data_paths, unique_id,
part_id, ses_id)
- # grab already-processed data from the output directory
- rpool = ingress_output_dir(cfg, rpool, unique_id, creds_path)
-
# grab any file paths from the pipeline config YAML
rpool = ingress_pipeconfig_paths(cfg, rpool, unique_id, creds_path)
@@ -1959,7 +1956,7 @@ def initiate_rpool(wf, cfg, data_paths=None, part_id=None):
def run_node_blocks(blocks, data_paths, cfg=None):
import os
from CPAC.pipeline import nipype_pipeline_engine as pe
- from CPAC.utils.strategy import NodeBlock
+ from CPAC.pipeline.engine import NodeBlock
if not cfg:
cfg = {
diff --git a/CPAC/pipeline/nipype_pipeline_engine/__init__.py b/CPAC/pipeline/nipype_pipeline_engine/__init__.py
index 48b445241b..fc346b5068 100644
--- a/CPAC/pipeline/nipype_pipeline_engine/__init__.py
+++ b/CPAC/pipeline/nipype_pipeline_engine/__init__.py
@@ -1,35 +1,34 @@
-'''Module to import Nipype Pipeline engine and override some Classes.
-See https://fcp-indi.github.io/docs/developer/nodes
-for C-PAC-specific documentation.
-See https://nipype.readthedocs.io/en/latest/api/generated/nipype.pipeline.engine.html
-for Nipype's documentation.
-
-Copyright (C) 2022 C-PAC Developers
+# Copyright (C) 2022 C-PAC Developers
-This file is part of C-PAC.
+# This file is part of C-PAC.
-C-PAC is free software: you can redistribute it and/or modify it under
-the terms of the GNU Lesser General Public License as published by the
-Free Software Foundation, either version 3 of the License, or (at your
-option) any later version.
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
-C-PAC is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
-License for more details.
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
-You should have received a copy of the GNU Lesser General Public
-License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.''' # noqa: E501
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+'''Module to import Nipype Pipeline engine and override some Classes.
+See https://fcp-indi.github.io/docs/developer/nodes
+for C-PAC-specific documentation.
+See https://nipype.readthedocs.io/en/latest/api/generated/nipype.pipeline.engine.html
+for Nipype's documentation.''' # noqa: E501 # pylint: disable=line-too-long
from nipype.pipeline import engine as pe
# import everything in nipype.pipeline.engine.__all__
from nipype.pipeline.engine import * # noqa: F401,F403
# import our DEFAULT_MEM_GB and override Node, MapNode
-from .engine import DEFAULT_MEM_GB, get_data_size, Node, MapNode, \
- UNDEFINED_SIZE, Workflow
+from .engine import DEFAULT_MEM_GB, export_graph, get_data_size, Node, \
+ MapNode, UNDEFINED_SIZE, Workflow
__all__ = [
interface for interface in dir(pe) if not interface.startswith('_')
-] + ['DEFAULT_MEM_GB', 'get_data_size', 'Node', 'MapNode', 'UNDEFINED_SIZE',
- 'Workflow']
+] + ['DEFAULT_MEM_GB', 'export_graph', 'get_data_size', 'Node', 'MapNode',
+ 'UNDEFINED_SIZE', 'Workflow']
del pe
diff --git a/CPAC/pipeline/nipype_pipeline_engine/engine.py b/CPAC/pipeline/nipype_pipeline_engine/engine.py
index 12e8808f1f..c74dd86c45 100644
--- a/CPAC/pipeline/nipype_pipeline_engine/engine.py
+++ b/CPAC/pipeline/nipype_pipeline_engine/engine.py
@@ -1,48 +1,72 @@
-'''Module to import Nipype Pipeline engine and override some Classes.
-See https://fcp-indi.github.io/docs/developer/nodes
-for C-PAC-specific documentation.
-See https://nipype.readthedocs.io/en/latest/api/generated/nipype.pipeline.engine.html
-for Nipype's documentation.
+# STATEMENT OF CHANGES:
+# This file is derived from sources licensed under the Apache-2.0 terms,
+# and this file has been changed.
-STATEMENT OF CHANGES:
- This file is derived from sources licensed under the Apache-2.0 terms,
- and this file has been changed.
+# CHANGES:
+# * Supports just-in-time dynamic memory allocation
+# * Skips doctests that require files that we haven't copied over
+# * Applies a random seed
+# * Supports overriding memory estimates via a log file and a buffer
+# * Adds quotation marks around strings in dotfiles
-CHANGES:
- * Supports just-in-time dynamic memory allocation
- * Skips doctests that require files that we haven't copied over
- * Applies a random seed
- * Supports overriding memory estimates via a log file and a buffer
+# ORIGINAL WORK'S ATTRIBUTION NOTICE:
+# Copyright (c) 2009-2016, Nipype developers
-ORIGINAL WORK'S ATTRIBUTION NOTICE:
- Copyright (c) 2009-2016, Nipype developers
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
- http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
+# Prior to release 0.12, Nipype was licensed under a BSD license.
- Prior to release 0.12, Nipype was licensed under a BSD license.
+# Modifications Copyright (C) 2022 C-PAC Developers
-Modifications Copyright (C) 2022 C-PAC Developers
+# This file is part of C-PAC.
-This file is part of C-PAC.''' # noqa: E501
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+'''Module to import Nipype Pipeline engine and override some Classes.
+See https://fcp-indi.github.io/docs/developer/nodes
+for C-PAC-specific documentation.
+See https://nipype.readthedocs.io/en/latest/api/generated/nipype.pipeline.engine.html
+for Nipype's documentation.''' # noqa: E501 # pylint: disable=line-too-long
import os
import re
-from logging import getLogger
+from copy import deepcopy
from inspect import Parameter, Signature, signature
+from logging import getLogger
+from typing import Iterable, Tuple, Union
from nibabel import load
from nipype import logging
from nipype.interfaces.utility import Function
from nipype.pipeline import engine as pe
-from nipype.pipeline.engine.utils import load_resultfile as _load_resultfile
+from nipype.pipeline.engine.utils import (
+ _create_dot_graph,
+ format_dot,
+ generate_expanded_graph,
+ get_print_name,
+ load_resultfile as _load_resultfile,
+ _replacefunk,
+ _run_dot
+)
+from nipype.utils.filemanip import fname_presuffix
from nipype.utils.functions import getsource
from numpy import prod
from traits.trait_base import Undefined
@@ -53,6 +77,7 @@
UNDEFINED_SIZE = (42, 42, 42, 1200)
random_state_logger = getLogger('random')
+logger = getLogger("nipype.workflow")
def _check_mem_x_path(mem_x_path):
@@ -399,10 +424,9 @@ def run(self, updatehash=False):
if self.seed is not None:
self._apply_random_seed()
if self.seed_applied:
- random_state_logger.info('%s',
- '%s # (Atropos constant)' %
- self.name if 'atropos' in
- self.name else self.name)
+ random_state_logger.info('%s\t%s', '# (Atropos constant)' if
+ 'atropos' in self.name else
+ str(self.seed), self.name)
return super().run(updatehash)
@@ -483,6 +507,153 @@ def _configure_exec_nodes(self, graph):
TypeError):
self._handle_just_in_time_exception(node)
+ def connect_retries(self, nodes: Iterable['Node'],
+ connections: Iterable[Tuple['Node', Union[str, tuple],
+ str]]) -> None:
+ """Method to generalize making the same connections to try and
+ retry nodes.
+
+ For each 3-tuple (``conn``) in ``connections``, will do
+ ``wf.connect(conn[0], conn[1], node, conn[2])`` for each ``node``
+ in ``nodes``
+
+ Parameters
+ ----------
+ nodes : iterable of Nodes
+
+ connections : iterable of 3-tuples of (Node, str or tuple, str)
+ """
+ wrong_conn_type_msg = (r'connect_retries `connections` argument '
+ 'must be an iterable of (Node, str or '
+ 'tuple, str) tuples.')
+ if not isinstance(connections, (list, tuple)):
+ raise TypeError(f'{wrong_conn_type_msg}: Given {connections}')
+ for node in nodes:
+ if not isinstance(node, Node):
+ raise TypeError('connect_retries requires an iterable '
+ r'of nodes for the `nodes` parameter: '
+ f'Given {node}')
+ for conn in connections:
+ if not all((isinstance(conn, (list, tuple)), len(conn) == 3,
+ isinstance(conn[0], Node),
+ isinstance(conn[1], (tuple, str)),
+ isinstance(conn[2], str))):
+ raise TypeError(f'{wrong_conn_type_msg}: Given {conn}')
+ self.connect(*conn[:2], node, conn[2])
+
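
A minimal sketch of how ``connect_retries`` is meant to be called, assuming
C-PAC is importable; the nodes here are illustrative stand-ins, and real
callers appear in ``CPAC/registration/registration.py`` later in this diff:

    from nipype.interfaces.utility import IdentityInterface
    from CPAC.pipeline import nipype_pipeline_engine as pe

    wf = pe.Workflow(name='example_wf')
    inputspec = pe.Node(IdentityInterface(fields=['in_file']),
                        name='inputspec')
    try_node = pe.Node(IdentityInterface(fields=['in_file']), name='try')
    retry_node = try_node.clone('retry_try')
    # each 3-tuple is (source node, source field, destination field);
    # the same connection is made to every node in the first argument
    wf.connect_retries([try_node, retry_node],
                       [(inputspec, 'in_file', 'in_file')])
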
+ def _get_dot(
+ self, prefix=None, hierarchy=None, colored=False, simple_form=True,
+ level=0
+ ):
+ """Create a dot file with connection info"""
+ # pylint: disable=invalid-name,protected-access
+ import networkx as nx
+
+ if prefix is None:
+ prefix = " "
+ if hierarchy is None:
+ hierarchy = []
+ colorset = [
+ "#FFFFC8", # Y
+ "#0000FF",
+ "#B4B4FF",
+ "#E6E6FF", # B
+ "#FF0000",
+ "#FFB4B4",
+ "#FFE6E6", # R
+ "#00A300",
+ "#B4FFB4",
+ "#E6FFE6", # G
+ "#0000FF",
+ "#B4B4FF",
+ ] # loop B
+ if level > len(colorset) - 2:
+ level = 3 # Loop back to blue
+ quoted_prefix = f'"{prefix}"' if len(prefix.strip()) else prefix
+ dotlist = [f'{quoted_prefix}label="{self.name}";']
+ for node in nx.topological_sort(self._graph):
+ fullname = ".".join(hierarchy + [node.fullname])
+ nodename = fullname.replace(".", "_")
+ if not isinstance(node, Workflow):
+ node_class_name = get_print_name(node, simple_form=simple_form)
+ if not simple_form:
+ node_class_name = ".".join(node_class_name.split(".")[1:])
+ if hasattr(node, "iterables") and node.iterables:
+ dotlist.append(f'"{nodename}"[label="{node_class_name}", '
+ "shape=box3d, style=filled, color=black, "
+ "colorscheme=greys7 fillcolor=2];")
+ else:
+ if colored:
+ dotlist.append(f'"{nodename}"[label="'
+ f'{node_class_name}", style=filled,'
+ f' fillcolor="{colorset[level]}"];')
+ else:
+ dotlist.append(f'"{nodename}"[label="'
+ f'{node_class_name}"];')
+
+ for node in nx.topological_sort(self._graph):
+ if isinstance(node, Workflow):
+ fullname = ".".join(hierarchy + [node.fullname])
+ nodename = fullname.replace(".", "_")
+ dotlist.append(f"subgraph \"cluster_{nodename}\" {{")
+ if colored:
+ dotlist.append(f'{prefix}{prefix}edge [color="'
+ f'{colorset[level + 1]}"];')
+ dotlist.append(f"{prefix}{prefix}style=filled;")
+ dotlist.append(f'{prefix}{prefix}fillcolor='
+ f'"{colorset[level + 2]}";')
+ dotlist.append(
+ node._get_dot(
+ prefix=prefix + prefix,
+ hierarchy=hierarchy + [self.name],
+ colored=colored,
+ simple_form=simple_form,
+ level=level + 3,
+ )
+ )
+ dotlist.append("}")
+ else:
+ for subnode in self._graph.successors(node):
+ if node._hierarchy != subnode._hierarchy:
+ continue
+ if not isinstance(subnode, Workflow):
+ nodefullname = ".".join(hierarchy + [node.fullname])
+ subnodefullname = ".".join(
+ hierarchy + [subnode.fullname])
+ nodename = nodefullname.replace(".", "_")
+ subnodename = subnodefullname.replace(".", "_")
+ for _ in self._graph.get_edge_data(
+ node, subnode
+ )["connect"]:
+ dotlist.append(f'"{nodename}" -> "{subnodename}";')
+ logger.debug("connection: %s", dotlist[-1])
+ # add between workflow connections
+ for u, v, d in self._graph.edges(data=True):
+ uname = ".".join(hierarchy + [u.fullname])
+ vname = ".".join(hierarchy + [v.fullname])
+ for src, dest in d["connect"]:
+ uname1 = uname
+ vname1 = vname
+ if isinstance(src, tuple):
+ srcname = src[0]
+ else:
+ srcname = src
+ if "." in srcname:
+ uname1 += "." + ".".join(srcname.split(".")[:-1])
+ if "." in dest and "@" not in dest:
+ if not isinstance(v, Workflow):
+ if "datasink" not in str(
+ v._interface.__class__
+ ).lower():
+ vname1 += "." + ".".join(dest.split(".")[:-1])
+ else:
+ vname1 += "." + ".".join(dest.split(".")[:-1])
+ if uname1.split(".")[:-1] != vname1.split(".")[:-1]:
+ dotlist.append(f'"{uname1.replace(".", "_")}" -> '
+ f'"{vname1.replace(".", "_")}";')
+ logger.debug("cross connection: %s", dotlist[-1])
+ return ("\n" + prefix).join(dotlist)
+
def _handle_just_in_time_exception(self, node):
# pylint: disable=protected-access
if hasattr(self, '_local_func_scans'):
@@ -492,6 +663,102 @@ def _handle_just_in_time_exception(self, node):
# TODO: handle S3 files
node._apply_mem_x(UNDEFINED_SIZE) # noqa: W0212
+ def nodes_and_guardrails(self, *nodes, registered, add_clones=True):
+ """Returns a two tuples of Nodes: (try, retry) and their
+ respective guardrails
+
+ Parameters
+ ----------
+ nodes : any number of Nodes
+
+ Returns
+ -------
+ nodes : tuple of Nodes
+
+ guardrails : tuple of Nodes
+ """
+ from CPAC.registration.guardrails import registration_guardrail_node, \
+ retry_clone
+ nodes = list(nodes)
+ if add_clones is True:
+ retries = [retry_clone(node) for node in nodes]
+ nodes.extend(retries)
+ guardrails = [None] * len(nodes)
+ for i, node in enumerate(nodes):
+ guardrails[i] = registration_guardrail_node(
+ f'guardrail_{node.name}', i)
+ self.connect(node, registered, guardrails[i], 'registered')
+ return tuple(nodes), tuple(guardrails)
+
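
A hedged sketch of the guardrail pattern this method enables, mirroring the
``create_fsl_flirt_linear_reg`` changes later in this diff (FLIRT stands in
for any registration interface):

    from nipype.interfaces import fsl
    from CPAC.pipeline import nipype_pipeline_engine as pe
    from CPAC.registration.guardrails import guardrail_selection

    wf = pe.Workflow(name='guardrailed_reg')
    flirt = pe.Node(fsl.FLIRT(), name='linear_reg_0')
    # (try, retry) nodes plus a guardrail node for each;
    # 'out_file' is the field each guardrail checks
    nodes, guardrails = wf.nodes_and_guardrails(flirt, registered='out_file')
    # Select node keyed on the first guardrail's `failed_qc` output
    registered = guardrail_selection(wf, *guardrails)
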
+ def write_graph(
+ self,
+ dotfilename="graph.dot",
+ graph2use="hierarchical",
+ format="png",
+ simple_form=True,
+ ):
+ graphtypes = ["orig", "flat", "hierarchical", "exec", "colored"]
+ if graph2use not in graphtypes:
+ raise ValueError(
+ "Unknown graph2use keyword. Must be one of: " + str(graphtypes)
+ )
+ base_dir, dotfilename = os.path.split(dotfilename)
+ if base_dir == "":
+ if self.base_dir:
+ base_dir = self.base_dir
+ if self.name:
+ base_dir = os.path.join(base_dir, self.name)
+ else:
+ base_dir = os.getcwd()
+ os.makedirs(base_dir, exist_ok=True)
+ if graph2use in ["hierarchical", "colored"]:
+            # hierarchical/colored graphs break if the workflow name
+            # starts with a digit
+            if self.name[:1].isdigit():
+ raise ValueError(f"{graph2use} graph failed, workflow name "
+ "cannot begin with a number")
+ dotfilename = os.path.join(base_dir, dotfilename)
+ self.write_hierarchical_dotfile(
+ dotfilename=dotfilename,
+ colored=graph2use == "colored",
+ simple_form=simple_form,
+ )
+ outfname = format_dot(dotfilename, format=format)
+ else:
+ graph = self._graph
+ if graph2use in ["flat", "exec"]:
+ graph = self._create_flat_graph()
+ if graph2use == "exec":
+ graph = generate_expanded_graph(deepcopy(graph))
+ outfname = export_graph(
+ graph,
+ base_dir,
+ dotfilename=dotfilename,
+ format=format,
+ simple_form=simple_form,
+ )
+
+ logger.info("Generated workflow graph: %s "
+ "(graph2use=%s, simple_form=%s).",
+ outfname, graph2use, simple_form)
+ return outfname
+
+ write_graph.__doc__ = pe.Workflow.write_graph.__doc__
+
+ def write_hierarchical_dotfile(
+ self, dotfilename=None, colored=False, simple_form=True
+ ):
+ # pylint: disable=invalid-name
+ dotlist = [f"digraph \"{self.name}\"{{"]
+ dotlist.append(self._get_dot(prefix=" ", colored=colored,
+ simple_form=simple_form))
+ dotlist.append("}")
+ dotstr = "\n".join(dotlist)
+ if dotfilename:
+            with open(dotfilename, "wt", encoding="utf-8") as fp:
+                fp.writelines(dotstr)
+ else:
+ logger.info(dotstr)
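
Assuming a workflow built with these overrides, generating the hierarchical
graph might look like the following (filenames illustrative; the available
``graph2use`` and ``format`` values match the new
``pipeline_setup.log_directory.graphviz`` schema entries later in this diff,
and the PNG conversion relies on the ``dot`` command-line tool being
installed):

    from nipype.interfaces.utility import IdentityInterface
    from CPAC.pipeline import nipype_pipeline_engine as pe

    wf = pe.Workflow(name='example_wf', base_dir='/tmp')
    wf.add_nodes([pe.Node(IdentityInterface(fields=['x']), name='n1')])
    # writes example_wf.dot via write_hierarchical_dotfile, then
    # converts it to PNG with `dot`
    wf.write_graph(dotfilename='example_wf.dot', graph2use='hierarchical',
                   format='png', simple_form=True)
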
+
def get_data_size(filepath, mode='xyzt'):
"""Function to return the size of a functional image (x * y * z * t)
@@ -527,3 +794,140 @@ def get_data_size(filepath, mode='xyzt'):
if mode == 'xyz':
return prod(data_shape[0:3]).item()
return prod(data_shape).item()
+
+
+def export_graph(
+ graph_in,
+ base_dir=None,
+ show=False,
+ use_execgraph=False,
+ show_connectinfo=False,
+ dotfilename="graph.dot",
+ format="png",
+ simple_form=True,
+):
+ """Displays the graph layout of the pipeline
+ This function requires that pygraphviz and matplotlib are available on
+ the system.
+ Parameters
+ ----------
+ show : boolean
+        Indicate whether to generate pygraphviz output from
+ networkx. default [False]
+ use_execgraph : boolean
+ Indicates whether to use the specification graph or the
+ execution graph. default [False]
+    show_connectinfo : boolean
+ Indicates whether to show the edge data on the graph. This
+ makes the graph rather cluttered. default [False]
+ """
+ import networkx as nx
+
+ graph = deepcopy(graph_in)
+ if use_execgraph:
+ graph = generate_expanded_graph(graph)
+ logger.debug("using execgraph")
+ else:
+ logger.debug("using input graph")
+ if base_dir is None:
+ base_dir = os.getcwd()
+
+ os.makedirs(base_dir, exist_ok=True)
+ out_dot = fname_presuffix(dotfilename, suffix="_detailed.dot",
+ use_ext=False, newpath=base_dir)
+ _write_detailed_dot(graph, out_dot)
+
+ # Convert .dot if format != 'dot'
+ outfname, res = _run_dot(out_dot, format_ext=format)
+ if res is not None and res.runtime.returncode:
+ logger.warning("dot2png: %s", res.runtime.stderr)
+
+ pklgraph = _create_dot_graph(graph, show_connectinfo, simple_form)
+ simple_dot = fname_presuffix(dotfilename, suffix=".dot", use_ext=False,
+ newpath=base_dir)
+ nx.drawing.nx_pydot.write_dot(pklgraph, simple_dot)
+
+ # Convert .dot if format != 'dot'
+ simplefname, res = _run_dot(simple_dot, format_ext=format)
+ if res is not None and res.runtime.returncode:
+ logger.warning("dot2png: %s", res.runtime.stderr)
+
+ if show:
+ pos = nx.graphviz_layout(pklgraph, prog="dot")
+ nx.draw(pklgraph, pos)
+ if show_connectinfo:
+ nx.draw_networkx_edge_labels(pklgraph, pos)
+
+ return simplefname if simple_form else outfname
+
+
+def _write_detailed_dot(graph, dotfilename):
+ r"""
+ Create a dot file with connection info ::
+ digraph structs {
+ node [shape=record];
+        struct1 [label="<f0> left|<f1> middle|<f2> right"];
+        struct2 [label="<f0> one|<f1> two"];
+        struct3 [label="hello\nworld |{ b |{c|<here> d|e}| f}| g | h"];
+ struct1:f1 -> struct2:f0;
+ struct1:f0 -> struct2:f1;
+ struct1:f2 -> struct3:here;
+ }
+ """
+ # pylint: disable=invalid-name
+ import networkx as nx
+
+ text = ["digraph structs {", "node [shape=record];"]
+ # write nodes
+ edges = []
+ for n in nx.topological_sort(graph):
+ nodename = n.itername
+ inports = []
+ for u, v, d in graph.in_edges(nbunch=n, data=True):
+ for cd in d["connect"]:
+ if isinstance(cd[0], (str, bytes)):
+ outport = cd[0]
+ else:
+ outport = cd[0][0]
+ inport = cd[1]
+ ipstrip = f"in{_replacefunk(inport)}"
+ opstrip = f"out{_replacefunk(outport)}"
+ edges.append(f'"{u.itername.replace(".", "")}":'
+ f'"{opstrip}":e -> '
+ f'"{v.itername.replace(".", "")}":'
+ f'"{ipstrip}":w;')
+ if inport not in inports:
+ inports.append(inport)
+            inputstr = (["{IN"]
+                        + [f"|<in{_replacefunk(ip)}> {ip}" for
+                           ip in sorted(inports)] + ["}"])
+ outports = []
+ for u, v, d in graph.out_edges(nbunch=n, data=True):
+ for cd in d["connect"]:
+ if isinstance(cd[0], (str, bytes)):
+ outport = cd[0]
+ else:
+ outport = cd[0][0]
+ if outport not in outports:
+ outports.append(outport)
+            outputstr = (
+                ["{OUT"]
+                + [f"|<out{_replacefunk(oport)}> {oport}" for
+                   oport in sorted(outports)] + ["}"])
+ srcpackage = ""
+ if hasattr(n, "_interface"):
+ pkglist = n.interface.__class__.__module__.split(".")
+ if len(pkglist) > 2:
+ srcpackage = pkglist[2]
+ srchierarchy = ".".join(nodename.split(".")[1:-1])
+ nodenamestr = (f"{{ {nodename.split('.')[-1]} | {srcpackage} | "
+ f"{srchierarchy} }}")
+ text += [f'"{nodename.replace(".", "")}" [label='
+ f'"{"".join(inputstr)}|{nodenamestr}|{"".join(outputstr)}"];']
+ # write edges
+ for edge in sorted(edges):
+ text.append(edge)
+ text.append("}")
+ with open(dotfilename, "wt", encoding="utf-8") as filep:
+ filep.write("\n".join(text))
+ return text
diff --git a/CPAC/pipeline/nipype_pipeline_engine/utils.py b/CPAC/pipeline/nipype_pipeline_engine/utils.py
new file mode 100644
index 0000000000..d49be0c5ca
--- /dev/null
+++ b/CPAC/pipeline/nipype_pipeline_engine/utils.py
@@ -0,0 +1,27 @@
+# Copyright (C) 2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+"""Custom nipype utilities"""
+
+
+def connect_from_spec(spec, original_spec, exclude=None):
+ """Function to connect all original inputs to a new spec"""
+ for _item, _value in original_spec.items():
+ if isinstance(exclude, (list, tuple)):
+ if _item not in exclude:
+ setattr(spec.inputs, _item, _value)
+ elif _item != exclude:
+ setattr(spec.inputs, _item, _value)
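
Despite the name, ``connect_from_spec`` sets static inputs rather than making
graph connections. A minimal sketch, assuming C-PAC is importable (names
illustrative):

    from nipype.interfaces.utility import IdentityInterface
    from CPAC.pipeline import nipype_pipeline_engine as pe
    from CPAC.pipeline.nipype_pipeline_engine.utils import connect_from_spec

    node = pe.Node(IdentityInterface(fields=['a', 'b']), name='new_node')
    # copy every item except 'b' onto the new node's inputs
    connect_from_spec(node, {'a': 1, 'b': 2}, exclude='b')
    assert node.inputs.a == 1
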
diff --git a/CPAC/pipeline/random_state/__init__.py b/CPAC/pipeline/random_state/__init__.py
index 5956f33416..b590912417 100644
--- a/CPAC/pipeline/random_state/__init__.py
+++ b/CPAC/pipeline/random_state/__init__.py
@@ -1,6 +1,6 @@
'''Random state for C-PAC'''
-from .seed import random_seed, random_seed_flags, set_up_random_state, \
- set_up_random_state_logger
+from .seed import MAX_SEED, random_seed, random_seed_flags, \
+ set_up_random_state, set_up_random_state_logger
-__all__ = ['random_seed', 'random_seed_flags', 'set_up_random_state',
- 'set_up_random_state_logger']
+__all__ = ['MAX_SEED', 'random_seed', 'random_seed_flags',
+ 'set_up_random_state', 'set_up_random_state_logger']
diff --git a/CPAC/pipeline/random_state/seed.py b/CPAC/pipeline/random_state/seed.py
index 370a9427dd..7373f938db 100644
--- a/CPAC/pipeline/random_state/seed.py
+++ b/CPAC/pipeline/random_state/seed.py
@@ -1,5 +1,20 @@
-'''Functions to set, check, and log random seed'''
-import os
+# Copyright (C) 2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+"""Functions to set, check, and log random seed"""
import random
from logging import getLogger
@@ -10,13 +25,29 @@
from nipype.interfaces.fsl.maths import MathsCommand
from nipype.interfaces.fsl.utils import ImageMaths
-from CPAC.registration.utils import hardcoded_reg
from CPAC.utils.interfaces.ants import AI
from CPAC.utils.monitoring.custom_logging import set_up_logger
+MAX_SEED = np.iinfo(np.int32).max
_seed = {'seed': None}
+def increment_seed(node: 'Node') -> 'Node':
+ """Increment the random seed for a given node
+
+ Parameters
+ ----------
+ node : Node
+
+ Returns
+ -------
+ node : Node
+ """
+ if isinstance(node.seed, int):
+ node.seed = seed_plus_1()
+ return node
+
+
def random_random_seed():
    '''Returns a random positive integer up to 2147483647
@@ -30,10 +61,10 @@ def random_random_seed():
Examples
--------
- >>> 0 < random_random_seed() <= np.iinfo(np.int32).max
+ >>> 0 < random_random_seed() <= MAX_SEED
True
'''
- return random.randint(1, np.iinfo(np.int32).max)
+ return random.randint(1, MAX_SEED)
def random_seed():
@@ -47,7 +78,7 @@ def random_seed():
-------
seed : int or None
'''
- if _seed['seed'] == 'random':
+ if _seed['seed'] in ['random', None]:
_seed['seed'] = random_random_seed()
return _seed['seed']
@@ -92,6 +123,7 @@ def random_seed_flags():
... 'functions', 'interfaces']])
True
'''
+ from CPAC.registration.utils import hardcoded_reg
seed = random_seed()
if seed is None:
return {'functions': {}, 'interfaces': {}}
@@ -137,6 +169,24 @@ def _reusable_flags():
}
+def seed_plus_1(seed=None):
+ '''Increment seed, looping back to 1 at MAX_SEED
+
+ Parameters
+ ----------
+ seed : int, optional
+ Uses configured seed if not specified
+
+ Returns
+ -------
+ int
+ '''
+ seed = random_seed() if seed is None else int(seed)
+ if seed < MAX_SEED: # increment random seed
+ return seed + 1
+ return 1 # loop back to 1
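
The wraparound can be sanity-checked directly (``MAX_SEED`` is 2147483647,
the maximum 32-bit signed integer, per the definition above):

    from CPAC.pipeline.random_state.seed import MAX_SEED, seed_plus_1

    assert seed_plus_1(41) == 42       # simple increment
    assert seed_plus_1(MAX_SEED) == 1  # loops back to 1 at the maximum
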
+
+
def set_up_random_state(seed):
'''Set global random seed
@@ -160,22 +210,23 @@ def set_up_random_state(seed):
>>> set_up_random_state(0)
Traceback (most recent call last):
ValueError: Valid random seeds are positive integers up to 2147483647, "random", or None, not 0
- >>> set_up_random_state(None)
-
+ >>> 1 <= set_up_random_state(None) <= MAX_SEED
+ True
''' # noqa: E501 # pylint: disable=line-too-long
if seed is not None:
if seed == 'random':
seed = random_random_seed()
- if (seed != 'random' and not (
- isinstance(seed, int) and
- (0 < int(seed) <= np.iinfo(np.int32).max)
- )):
- raise ValueError('Valid random seeds are positive integers up to '
- f'2147483647, "random", or None, not {seed}')
- try:
- _seed['seed'] = int(seed)
- except (TypeError, ValueError):
- _seed['seed'] = seed
+ else:
+ try:
+ seed = int(seed)
+ assert 0 < seed <= MAX_SEED
+ except (ValueError, TypeError, AssertionError) as error:
+ raise ValueError(
+ 'Valid random seeds are positive integers up '
+ f'to {MAX_SEED}, "random", or None, not {seed}'
+ ) from error
+
+ _seed['seed'] = seed
return random_seed()
@@ -186,5 +237,6 @@ def set_up_random_state_logger(log_dir):
----------
log_dir : str
'''
- set_up_logger('random', level='info', log_dir=log_dir)
- getLogger('random').info('seed: %s', random_seed())
+ set_up_logger('random', filename='random.tsv', level='info',
+ log_dir=log_dir)
+ getLogger('random').info('seed\tnode')
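
With the header above and the tab-separated logging added in ``engine.py``,
``random.tsv`` entries look something like the following (node names
illustrative; columns are tab-separated, and Atropos nodes log a comment in
place of the seed):

    seed    node
    42      anat_mni_ants_register_0
    # (Atropos constant)    seg_preproc_atropos
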
diff --git a/CPAC/pipeline/schema.py b/CPAC/pipeline/schema.py
index 6aa1898f96..e19ffaa094 100644
--- a/CPAC/pipeline/schema.py
+++ b/CPAC/pipeline/schema.py
@@ -1,31 +1,31 @@
-'''Validation schema for C-PAC pipeline configurations
+# Copyright (C) 2022 C-PAC Developers
-Copyright (C) 2022 C-PAC Developers
+# This file is part of C-PAC.
-This file is part of C-PAC.
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
-C-PAC is free software: you can redistribute it and/or modify it under
-the terms of the GNU Lesser General Public License as published by the
-Free Software Foundation, either version 3 of the License, or (at your
-option) any later version.
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
-C-PAC is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
-License for more details.
-
-You should have received a copy of the GNU Lesser General Public
-License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.'''
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+"""Validation schema for C-PAC pipeline configurations"""
# pylint: disable=too-many-lines
+import re
from itertools import chain, permutations
-import numpy as np
-from voluptuous import All, ALLOW_EXTRA, Any, Capitalize, Coerce, \
+from pathvalidate import sanitize_filename
+from voluptuous import All, ALLOW_EXTRA, Any, Capitalize, Coerce, Equal, \
ExactSequence, ExclusiveInvalid, In, Length, Lower, \
Match, Maybe, Optional, Range, Required, Schema
from CPAC import docs_prefix
+from CPAC.pipeline.random_state.seed import MAX_SEED
from CPAC.utils.datatypes import ListFromItem
-from CPAC.utils.utils import delete_nested_value, lookup_nested_value, \
- set_nested_value
+from CPAC.utils.utils import YAML_BOOLS
# 1 or more digits, optional decimal, 'e', optional '-', 1 or more digits
scientific_notation_str_regex = r'^([0-9]+(\.[0-9]*)*(e)-{0,1}[0-9]+)*$'
@@ -38,6 +38,30 @@
r'(x[0-9]+(\.[0-9]*){0,1}[a-z]*)*$'
Number = Any(float, int, All(str, Match(scientific_notation_str_regex)))
+
+
+def str_to_bool1_1(x): # pylint: disable=invalid-name
+    '''Convert strings to Booleans for YAML1.1 syntax
+
+    Ref https://yaml.org/type/bool.html
+
+    Parameters
+    ----------
+    x : any
+
+    Returns
+    -------
+    bool
+ '''
+ if isinstance(x, str):
+ try:
+ x = float(x)
+ if x == 0:
+ return False
+ except ValueError:
+ pass
+ x = (True if str(x).lower() in YAML_BOOLS[True] else
+ False if str(x).lower() in YAML_BOOLS[False] else x)
+ return bool(x)
+
+
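
Expected conversions, assuming ``YAML_BOOLS`` follows the YAML 1.1 boolean
spec referenced above (e.g. 'yes'/'on' truthy, 'no'/'off' falsy):

    assert str_to_bool1_1('On') is True    # YAML 1.1 boolean string
    assert str_to_bool1_1('no') is False
    assert str_to_bool1_1('0') is False    # numeric string equal to zero
    assert str_to_bool1_1(1) is True       # non-string falls back to bool()
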
forkable = All(Coerce(ListFromItem), [bool], Length(max=2))
valid_options = {
'acpc': {
@@ -202,170 +226,15 @@ def permutation_message(key, options):
'''
-def _combine_labels(config_dict, list_to_combine, new_key):
- '''
- Helper function to combine formerly separate keys into a
- combined key.
-
- Parameters
- ----------
- config_dict: dict
-
- key_sequence: iterable of lists or tuples
-
- new_key: list or tuple
-
- Returns
- -------
- updated_config_dict: dict
- '''
- new_value = []
- any_old_values = False
- for _to_combine in list_to_combine:
- try:
- old_value = lookup_nested_value(config_dict, _to_combine)
- except KeyError:
- old_value = None
- if old_value is not None:
- any_old_values = True
- if isinstance(old_value, (list, set, tuple)):
- for value in old_value:
- new_value.append(value)
- else:
- new_value.append(old_value)
- config_dict = delete_nested_value(config_dict, _to_combine)
- if any_old_values:
- return set_nested_value(config_dict, new_key, new_value)
- return config_dict
-
-
-def _now_runswitch(config_dict, key_sequence):
- '''
- Helper function to convert a formerly forkable value to a
- runswitch.
-
- Parameters
- ----------
- config_dict: dict
-
- key_sequence: list or tuple
-
- Returns
- -------
- updated_config_dict: dict
- '''
- try:
- old_forkable = lookup_nested_value(config_dict, key_sequence)
- except KeyError:
- return config_dict
- if isinstance(old_forkable, bool) or isinstance(old_forkable, list):
- return set_nested_value(
- config_dict, key_sequence, {'run': old_forkable})
- return config_dict
-
-
-def _changes_1_8_0_to_1_8_1(config_dict):
- '''
- Examples
- --------
- Starting with 1.8.0
- >>> zero = {'anatomical_preproc': {
- ... 'non_local_means_filtering': True,
- ... 'n4_bias_field_correction': True
- ... }, 'functional_preproc': {
- ... 'motion_estimates_and_correction': {
- ... 'calculate_motion_first': False
- ... }
- ... }, 'segmentation': {
- ... 'tissue_segmentation': {
- ... 'ANTs_Prior_Based': {
- ... 'CSF_label': 0,
- ... 'left_GM_label': 1,
- ... 'right_GM_label': 2,
- ... 'left_WM_label': 3,
- ... 'right_WM_label': 4}}}}
- >>> updated_apb = _changes_1_8_0_to_1_8_1(zero)[
- ... 'segmentation']['tissue_segmentation']['ANTs_Prior_Based']
- >>> updated_apb['CSF_label']
- [0]
- >>> updated_apb['GM_label']
- [1, 2]
- >>> updated_apb['WM_label']
- [3, 4]
-
- Starting with 1.8.1
- >>> one = {'anatomical_preproc': {
- ... 'non_local_means_filtering': True,
- ... 'n4_bias_field_correction': True
- ... }, 'functional_preproc': {
- ... 'motion_estimates_and_correction': {
- ... 'calculate_motion_first': False
- ... }
- ... }, 'segmentation': {
- ... 'tissue_segmentation': {
- ... 'ANTs_Prior_Based': {
- ... 'CSF_label': [0],
- ... 'GM_label': [1, 2],
- ... 'WM_label': [3, 4]}}}}
- >>> updated_apb = _changes_1_8_0_to_1_8_1(one)[
- ... 'segmentation']['tissue_segmentation']['ANTs_Prior_Based']
- >>> updated_apb['CSF_label']
- [0]
- >>> updated_apb['GM_label']
- [1, 2]
- >>> updated_apb['WM_label']
- [3, 4]
- '''
- for key_sequence in {
- ('anatomical_preproc', 'non_local_means_filtering'),
- ('anatomical_preproc', 'n4_bias_field_correction')
- }:
- config_dict = _now_runswitch(config_dict, key_sequence)
- for combiners in {
- ((
- ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
- 'CSF_label'),
- ), ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
- 'CSF_label')),
- ((
- ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
- 'left_GM_label'),
- ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
- 'right_GM_label')
- ), ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
- 'GM_label')),
- ((
- ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
- 'left_WM_label'),
- ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
- 'right_WM_label')
- ), ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
- 'WM_label'))
- }:
- config_dict = _combine_labels(config_dict, *combiners)
- try:
- calculate_motion_first = lookup_nested_value(
- config_dict,
- ['functional_preproc', 'motion_estimates_and_correction',
- 'calculate_motion_first']
- )
- except KeyError:
- calculate_motion_first = None
- if calculate_motion_first is not None:
- del config_dict['functional_preproc'][
- 'motion_estimates_and_correction']['calculate_motion_first']
- config_dict = set_nested_value(config_dict, [
- 'functional_preproc', 'motion_estimates_and_correction',
- 'motion_estimates', 'calculate_motion_first'
- ], calculate_motion_first)
-
- return config_dict
+def sanitize(filename):
+    '''Sanitize a filename and replace whitespace with underscores'''
+ return re.sub(r'\s+', '_', sanitize_filename(filename))
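
The new test in ``test_schema_validation.py`` below exercises exactly this
behavior:

    >>> sanitize(':va:lid name')
    'valid_name'
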
latest_schema = Schema({
'FROM': Maybe(str),
'pipeline_setup': {
- 'pipeline_name': All(str, Length(min=1)),
+ 'pipeline_name': All(str, Length(min=1), sanitize),
'output_directory': {
'path': str,
'source_outputs_dir': Maybe(str),
@@ -385,11 +254,22 @@ def _changes_1_8_0_to_1_8_1(config_dict):
'log_directory': {
'run_logging': bool,
'path': str,
+ 'graphviz': {
+ 'entire_workflow': {
+ 'generate': bool,
+ 'graph2use': Maybe(All(Coerce(ListFromItem),
+ [All(Lower,
+ In(('orig', 'hierarchical', 'flat',
+ 'exec', 'colored')))])),
+ 'format': Maybe(All(Coerce(ListFromItem),
+ [All(Lower, In(('png', 'svg')))])),
+ 'simple_form': Maybe(bool)}}
},
'crash_log_directory': {
'path': Maybe(str),
},
'system_config': {
+ 'fail_fast': bool,
'FSLDIR': Maybe(str),
'on_grid': {
'run': bool,
@@ -407,7 +287,7 @@ def _changes_1_8_0_to_1_8_1(config_dict):
'num_participants_at_once': int,
'random_seed': Maybe(Any(
'random',
- All(int, Range(min=1, max=np.iinfo(np.int32).max)))),
+ All(int, Range(min=1, max=MAX_SEED)))),
'observed_usage': {
'callback_log': Maybe(str),
'buffer': Number,
@@ -592,6 +472,9 @@ def _changes_1_8_0_to_1_8_1(config_dict):
},
},
'registration_workflows': {
+ 'quality_thresholds': {
+ metric: Maybe(float) for
+ metric in ('Dice', 'Jaccard', 'CrossCorr', 'Coverage')},
'anatomical_registration': {
'run': bool,
'resolution_for_anat': All(str, Match(resolution_regex)),
@@ -634,7 +517,6 @@ def _changes_1_8_0_to_1_8_1(config_dict):
'interpolation': In({'trilinear', 'sinc', 'spline'}),
'using': str,
'input': str,
- 'interpolation': str,
'cost': str,
'dof': int,
'arguments': Maybe(str),
@@ -652,11 +534,14 @@ def _changes_1_8_0_to_1_8_1(config_dict):
},
},
'boundary_based_registration': {
- 'run': forkable,
+ 'run': All(Coerce(ListFromItem),
+ [Any(bool, All(Lower, Equal('fallback')))],
+ Length(max=3)),
'bbr_schedule': str,
- 'bbr_wm_map': In({'probability_map', 'partial_volume_map'}),
+ 'bbr_wm_map': In(('probability_map',
+ 'partial_volume_map')),
'bbr_wm_mask_args': str,
- 'reference': In({'whole-head', 'brain'})
+ 'reference': In(('whole-head', 'brain'))
},
},
'EPI_registration': {
@@ -1104,6 +989,7 @@ def schema(config_dict):
-------
dict
'''
+ from CPAC.utils.utils import _changes_1_8_0_to_1_8_1
partially_validated = latest_schema(_changes_1_8_0_to_1_8_1(config_dict))
try:
if (partially_validated['registration_workflows'][
diff --git a/CPAC/pipeline/test/test_schema_validation.py b/CPAC/pipeline/test/test_schema_validation.py
index 97f3946313..092f51c6b7 100644
--- a/CPAC/pipeline/test/test_schema_validation.py
+++ b/CPAC/pipeline/test/test_schema_validation.py
@@ -1,8 +1,7 @@
'''Tests for schema.py'''
import pytest
-
-from CPAC.utils.configuration import Configuration
from voluptuous.error import Invalid
+from CPAC.utils.configuration import Configuration
@pytest.mark.parametrize('run_value', [
@@ -26,3 +25,9 @@ def test_motion_estimates_and_correction(run_value):
assert "func#motion_estimate_filter_valid_options" in str(e.value)
else:
Configuration(d)
+
+
+def test_pipeline_name():
+    '''Test that pipeline_name successfully sanitizes'''
+ c = Configuration({'pipeline_setup': {'pipeline_name': ':va:lid name'}})
+ assert c['pipeline_setup', 'pipeline_name'] == 'valid_name'
diff --git a/CPAC/qc/__init__.py b/CPAC/qc/__init__.py
index 75ee654fec..810d06aedc 100644
--- a/CPAC/qc/__init__.py
+++ b/CPAC/qc/__init__.py
@@ -1,2 +1,22 @@
-from .utils import *
-from .qc import *
+# Copyright (C) 2013-2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+"""Quality control utilities for C-PAC"""
+from CPAC.qc.globals import registration_guardrail_thresholds, \
+ update_thresholds
+from CPAC.qc.qcmetrics import qc_masks
+__all__ = ['qc_masks', 'registration_guardrail_thresholds',
+ 'update_thresholds']
diff --git a/CPAC/qc/globals.py b/CPAC/qc/globals.py
new file mode 100644
index 0000000000..e4a05d8d9d
--- /dev/null
+++ b/CPAC/qc/globals.py
@@ -0,0 +1,42 @@
+# Copyright (C) 2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+"""Global QC values"""
+_REGISTRATION_GUARDRAIL_THRESHOLDS = {'thresholds': {}}
+
+
+def registration_guardrail_thresholds() -> dict:
+ """Get registration guardrail thresholds
+
+ Returns
+ -------
+ dict
+ """
+ return _REGISTRATION_GUARDRAIL_THRESHOLDS['thresholds']
+
+
+def update_thresholds(thresholds) -> None:
+ """Set a registration guardrail threshold
+
+ Parameters
+ ----------
+ thresholds : dict of {str: float or int}
+
+ Returns
+ -------
+ None
+ """
+ _REGISTRATION_GUARDRAIL_THRESHOLDS['thresholds'].update(thresholds)
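
A usage sketch: thresholds are stored module-globally and read back by the
registration guardrails, with keys matching the new
``registration_workflows.quality_thresholds`` schema entries (Dice, Jaccard,
CrossCorr, Coverage); values here are illustrative:

    from CPAC.qc.globals import (registration_guardrail_thresholds,
                                 update_thresholds)

    update_thresholds({'Dice': 0.8, 'Coverage': 0.8})
    assert registration_guardrail_thresholds()['Dice'] == 0.8
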
diff --git a/CPAC/qc/qcmetrics.py b/CPAC/qc/qcmetrics.py
index 6db977c495..b45430020c 100644
--- a/CPAC/qc/qcmetrics.py
+++ b/CPAC/qc/qcmetrics.py
@@ -1,24 +1,88 @@
+# Modifications: Copyright (C) 2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+
+# Original code: BSD 3-Clause License
+
+# Copyright (c) 2020, Lifespan Informatics and Neuroimaging Center
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+
+# 3. Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
"""QC metrics from XCP-D v0.0.9
Ref: https://github.com/PennLINC/xcp_d/tree/0.0.9
"""
+# LGPL-3.0-or-later: Module docstring and lint exclusions
# pylint: disable=invalid-name, redefined-outer-name
+# BSD-3-Clause: imports and unspecified sections
import nibabel as nb
import numpy as np
-def regisQ(bold2t1w_mask, t1w_mask, bold2template_mask, template_mask):
- reg_qc = {'coregDice': [dc(bold2t1w_mask, t1w_mask)],
- 'coregJaccard': [jc(bold2t1w_mask, t1w_mask)],
- 'coregCrossCorr': [crosscorr(bold2t1w_mask, t1w_mask)],
- 'coregCoverage': [coverage(bold2t1w_mask, t1w_mask)],
- 'normDice': [dc(bold2template_mask, template_mask)],
- 'normJaccard': [jc(bold2template_mask, template_mask)],
- 'normCrossCorr': [crosscorr(bold2template_mask, template_mask)],
- 'normCoverage': [coverage(bold2template_mask, template_mask)]}
- return reg_qc
+# BSD-3-Clause
+def coverage(input1, input2):
+ """Estimate the coverage between two masks."""
+ input1 = nb.load(input1).get_fdata()
+ input2 = nb.load(input2).get_fdata()
+ input1 = np.atleast_1d(input1.astype(np.bool))
+ input2 = np.atleast_1d(input2.astype(np.bool))
+ intsec = np.count_nonzero(input1 & input2)
+ if np.sum(input1) > np.sum(input2):
+ smallv = np.sum(input2)
+ else:
+ smallv = np.sum(input1)
+ cov = float(intsec)/float(smallv)
+ return cov
+
+
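
A worked example of the coverage formula (intersection over the smaller
mask), using in-memory arrays in place of the NIfTI paths the function
itself takes:

    import numpy as np

    mask_a = np.array([1, 1, 1, 0], dtype=bool)  # 3 voxels
    mask_b = np.array([1, 1, 0, 0], dtype=bool)  # 2 voxels (smaller mask)
    intsec = np.count_nonzero(mask_a & mask_b)   # 2 overlapping voxels
    assert intsec / min(mask_a.sum(), mask_b.sum()) == 1.0
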
+# BSD-3-Clause
+def crosscorr(input1, input2):
+ r"""cross correlation: compute cross correction bewteen input masks"""
+ input1 = nb.load(input1).get_fdata()
+ input2 = nb.load(input2).get_fdata()
+ input1 = np.atleast_1d(input1.astype(np.bool)).flatten()
+ input2 = np.atleast_1d(input2.astype(np.bool)).flatten()
+ cc = np.corrcoef(input1, input2)[0][1]
+ return cc
+# BSD-3-Clause
def dc(input1, input2):
r"""
Dice coefficient
@@ -71,6 +135,7 @@ def dc(input1, input2):
return dc
+# BSD-3-Clause
def jc(input1, input2):
r"""
Jaccard coefficient
@@ -106,26 +171,62 @@ def jc(input1, input2):
return jc
-def crosscorr(input1, input2):
- r"""cross correlation: compute cross correction bewteen input masks"""
- input1 = nb.load(input1).get_fdata()
- input2 = nb.load(input2).get_fdata()
- input1 = np.atleast_1d(input1.astype(np.bool)).flatten()
- input2 = np.atleast_1d(input2.astype(np.bool)).flatten()
- cc = np.corrcoef(input1, input2)[0][1]
- return cc
+# LGPL-3.0-or-later
+def _prefix_regqc_keys(qc_dict: dict, prefix: str) -> dict:
+    """Prepend string to each key in a qc dict
+
+    Parameters
+ ----------
+ qc_dict : dict
+ output of ``qc_masks``
-def coverage(input1, input2):
- """Estimate the coverage between two masks."""
- input1 = nb.load(input1).get_fdata()
- input2 = nb.load(input2).get_fdata()
- input1 = np.atleast_1d(input1.astype(np.bool))
- input2 = np.atleast_1d(input2.astype(np.bool))
- intsec = np.count_nonzero(input1 & input2)
- if np.sum(input1) > np.sum(input2):
- smallv = np.sum(input2)
- else:
- smallv = np.sum(input1)
- cov = float(intsec)/float(smallv)
- return cov
+ prefix : str
+ string to prepend
+
+ Returns
+ -------
+ dict
+ """
+ return {f'{prefix}{_key}': _value for _key, _value in qc_dict.items()}
+
+
+# BSD-3-Clause: logic
+# LGPL-3.0-or-later: docstring and refactored function
+def qc_masks(registered_mask: str, native_mask: str) -> dict:
+ """Return QC measures for coregistration
+
+ Parameters
+ ----------
+ registered_mask : str
+ path to registered mask
+
+ native_mask : str
+ path to native-space mask
+
+ Returns
+ -------
+ dict
+ """
+ return {'Dice': [dc(registered_mask, native_mask)],
+ 'Jaccard': [jc(registered_mask, native_mask)],
+ 'CrossCorr': [crosscorr(registered_mask, native_mask)],
+ 'Coverage': [coverage(registered_mask, native_mask)]}
+
+
+# BSD-3-Clause: name and signature
+# LGPL-3.0-or-later: docstring and refactored function
+def regisQ(bold2t1w_mask: str, t1w_mask: str, bold2template_mask: str,
+ template_mask: str) -> dict:
+ """Collect coregistration QC measures
+
+ Parameters
+ ----------
+ bold2t1w_mask, t1w_mask, bold2template_mask, template_mask : str
+
+ Returns
+ -------
+ dict
+ """
+ return {**_prefix_regqc_keys(qc_masks(bold2t1w_mask, t1w_mask), 'coreg'),
+ **_prefix_regqc_keys(qc_masks(bold2template_mask, template_mask),
+ 'norm')}
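
The prefixing reproduces the original flat key layout; for example (metric
value illustrative):

    from CPAC.qc.qcmetrics import _prefix_regqc_keys

    assert (_prefix_regqc_keys({'Dice': [0.9]}, 'coreg')
            == {'coregDice': [0.9]})
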
diff --git a/CPAC/qc/tests/test_qc.py b/CPAC/qc/tests/test_qc.py
index 9407da8004..0abf56132d 100644
--- a/CPAC/qc/tests/test_qc.py
+++ b/CPAC/qc/tests/test_qc.py
@@ -1,14 +1,13 @@
import os
import pytest
-
+from nipype.interfaces import utility as util
from CPAC.pipeline import nipype_pipeline_engine as pe
-import nipype.interfaces.utility as util
-
+from CPAC.pipeline.cpac_group_runner import gather_outputs
from CPAC.qc.pipeline import create_qc_workflow
from CPAC.qc.utils import generate_qc_pages
-from CPAC.utils import Configuration, Strategy, Outputs
-
-from CPAC.pipeline.cpac_group_runner import gather_outputs
+from CPAC.utils.configuration import Configuration
+from CPAC.utils.outputs import Outputs
+from CPAC.utils.strategy import Strategy
def file_node(path):
diff --git a/CPAC/qc/xcp.py b/CPAC/qc/xcp.py
index fbb672c9f5..05429fcce8 100644
--- a/CPAC/qc/xcp.py
+++ b/CPAC/qc/xcp.py
@@ -1,3 +1,19 @@
+# Copyright (C) 2021-2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
"""
.. seealso::
diff --git a/CPAC/randomise/randomise.py b/CPAC/randomise/randomise.py
index 77ee89e7c4..188d0fd436 100644
--- a/CPAC/randomise/randomise.py
+++ b/CPAC/randomise/randomise.py
@@ -1,6 +1,5 @@
from CPAC.pipeline import nipype_pipeline_engine as pe
-from CPAC.pipeline.cpac_group_runner import load_config_yml
def select(input_list):
@@ -122,6 +121,7 @@ def run(group_config_path):
import sys
import pickle
import yaml
+ from CPAC.pipeline.cpac_group_runner import load_config_yml
group_config_obj = load_config_yml(group_config_path)
pipeline_output_folder = group_config_obj.pipeline_dir
diff --git a/CPAC/registration/__init__.py b/CPAC/registration/__init__.py
index 2d5b934e1c..38bcac9bcf 100644
--- a/CPAC/registration/__init__.py
+++ b/CPAC/registration/__init__.py
@@ -1,17 +1,3 @@
-from .registration import create_fsl_flirt_linear_reg, \
- create_fsl_fnirt_nonlinear_reg, \
- create_fsl_fnirt_nonlinear_reg_nhp, \
- create_register_func_to_anat, \
- create_register_func_to_anat_use_T2, \
- create_bbregister_func_to_anat, \
- create_wf_calculate_ants_warp
-
from .output_func_to_standard import output_func_to_standard
-__all__ = ['create_fsl_flirt_linear_reg', \
- 'create_fsl_fnirt_nonlinear_reg', \
- 'create_fsl_fnirt_nonlinear_reg_nhp', \
- 'create_register_func_to_anat', \
- 'create_register_func_to_anat_use_T2', \
- 'create_bbregister_func_to_anat', \
- 'create_wf_calculate_ants_warp']
\ No newline at end of file
+__all__ = ['output_func_to_standard']
diff --git a/CPAC/registration/exceptions.py b/CPAC/registration/exceptions.py
new file mode 100644
index 0000000000..d962ddfa30
--- /dev/null
+++ b/CPAC/registration/exceptions.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+"""Custom registration exceptions"""
+
+
+class BadRegistrationError(ValueError):
+ """Exception for when a QC measure for a registration falls below a
+ specified threshold"""
+ def __init__(self, *args, metric=None, value=None, threshold=None,
+ **kwargs):
+ """
+ Parameters
+ ----------
+ metric : str
+ QC metric
+
+ value : float
+ calculated QC value
+
+ threshold : float
+ specified threshold
+ """
+ msg = "Registration failed quality control"
+ if all(arg is not None for arg in (metric, value, threshold)):
+ msg += f" ({metric}: {value} < {threshold})"
+ msg += "."
+ super().__init__(msg, *args, **kwargs)
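
The resulting message formatting (values illustrative):

    >>> from CPAC.registration.exceptions import BadRegistrationError
    >>> str(BadRegistrationError(metric='Dice', value=0.6, threshold=0.8))
    'Registration failed quality control (Dice: 0.6 < 0.8).'
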
diff --git a/CPAC/registration/guardrails.py b/CPAC/registration/guardrails.py
new file mode 100644
index 0000000000..1ba3db6d91
--- /dev/null
+++ b/CPAC/registration/guardrails.py
@@ -0,0 +1,208 @@
+# Copyright (C) 2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+"""Guardrails to protect against bad registrations"""
+import logging
+from typing import Tuple
+from nipype.interfaces.utility import Function, Merge, Select
+# pylint: disable=unused-import
+from CPAC.pipeline.nipype_pipeline_engine import Node, Workflow
+from CPAC.pipeline.random_state.seed import increment_seed
+from CPAC.qc import qc_masks, registration_guardrail_thresholds
+from CPAC.registration.exceptions import BadRegistrationError
+from CPAC.registration.utils import hardcoded_reg
+from CPAC.utils.docs import retry_docstring
+
+
+def guardrail_selection(wf: 'Workflow', node1: 'Node', node2: 'Node',
+ output_key: str = 'registered',
+ guardrail_node: 'Node' = None) -> Node:
+ """Generate requisite Nodes for choosing a path through the graph
+ with retries.
+
+ Takes two nodes to choose an output from. These nodes are assumed
+ to be guardrail nodes if `output_key` and `guardrail_node` are not
+ specified.
+
+ A ``nipype.interfaces.utility.Merge`` is generated, connecting
+ ``output_key`` from ``node1`` and ``node2`` in that order.
+
+ A ``nipype.interfaces.utility.Select`` node is generated taking the
+ output from the generated ``Merge`` and using the ``failed_qc``
+ output of ``guardrail_node`` (``node1`` if ``guardrail_node`` is
+ unspecified).
+
+ All relevant connections are made in the given Workflow.
+
+ The ``Select`` node is returned; its output is keyed ``out`` and
+ contains the value of the given ``output_key`` (``registered`` if
+ unspecified).
+
+ Parameters
+ ----------
+ wf : Workflow
+
+ node1, node2 : Node
+ first try, retry
+
+ output_key : str
+ field to choose
+
+ guardrail_node : Node
+ guardrail to collect 'failed_qc' from if not node1
+
+ Returns
+ -------
+ select : Node
+ """
+ # pylint: disable=redefined-outer-name,reimported,unused-import
+ # noqa: F401
+ from CPAC.pipeline.nipype_pipeline_engine import Node, Workflow
+ if guardrail_node is None:
+ guardrail_node = node1
+ name = node1.name
+ if output_key != 'registered':
+ name = f'{name}_{output_key}'
+ choices = Node(Merge(2), run_without_submitting=True,
+ name=f'{name}_choices')
+ select = Node(Select(), run_without_submitting=True,
+ name=f'choose_{name}')
+ wf.connect([(node1, choices, [(output_key, 'in1')]),
+ (node2, choices, [(output_key, 'in2')]),
+ (choices, select, [('out', 'inlist')]),
+ (guardrail_node, select, [('failed_qc', 'index')])])
+ return select
+
+
+def registration_guardrail(registered: str, reference: str,
+ retry: bool = False, retry_num: int = 0
+ ) -> Tuple[str, int]:
+ """Check QC metrics post-registration and throw an exception if
+ metrics are below given thresholds.
+
+ If inputs point to images that are not masks, images will be
+ binarized before being compared.
+
+ .. seealso::
+
+ :py:mod:`CPAC.qc.qcmetrics`
+ Documentation of the :py:mod:`CPAC.qc.qcmetrics` module.
+
+ Parameters
+ ----------
+ registered, reference : str
+ path to mask
+
+ retry : bool, optional
+ can retry?
+
+ retry_num : int, optional
+ how many previous tries?
+
+ Returns
+ -------
+ registered_mask : str
+ path to mask
+
+    failed_qc : int
+        0 if all metrics met their specified thresholds, 1 otherwise;
+        used as the index for selecting outputs
+
+        .. seealso::
+
+            :py:func:`guardrail_selection`
+ """
+ logger = logging.getLogger('nipype.workflow')
+ qc_metrics = qc_masks(registered, reference)
+ failed_qc = 0
+ for metric, threshold in registration_guardrail_thresholds().items():
+ if threshold is not None:
+ value = qc_metrics.get(metric)
+ if isinstance(value, list):
+ value = value[0]
+ if value < threshold:
+ failed_qc = 1
+ with open(f'{registered}.failed_qc', 'w',
+ encoding='utf-8') as _f:
+ _f.write(f'{metric}: {value} < {threshold}')
+ if retry:
+ registered = f'{registered}-failed'
+ else:
+ bad_registration = BadRegistrationError(
+ metric=metric, value=value, threshold=threshold)
+ logger.error(str(bad_registration))
+ if retry_num:
+ # if we've already retried, raise the error
+ raise bad_registration
+ return registered, failed_qc
+
+
+def registration_guardrail_node(name=None, retry_num=0):
+ """Convenience method to get a new registration_guardrail Node
+
+ Parameters
+ ----------
+ name : str, optional
+
+ retry_num : int, optional
+ how many previous tries?
+
+ Returns
+ -------
+ Node
+ """
+ if name is None:
+ name = 'registration_guardrail'
+ node = Node(Function(input_names=['registered', 'reference', 'retry_num'],
+ output_names=['registered', 'failed_qc'],
+ imports=['import logging',
+ 'from typing import Tuple',
+ 'from CPAC.qc import qc_masks, '
+ 'registration_guardrail_thresholds',
+ 'from CPAC.registration.guardrails '
+ 'import BadRegistrationError'],
+ function=registration_guardrail), name=name)
+ if retry_num:
+ node.inputs.retry_num = retry_num
+ return node
+
+
+def retry_clone(node: 'Node') -> 'Node':
+ """Function to clone a node, name the clone, and increment its
+ random seed
+
+ Parameters
+ ----------
+ node : Node
+
+ Returns
+ -------
+ Node
+ """
+ return increment_seed(node.clone(f'retry_{node.name}'))
+
+
+# pylint: disable=missing-function-docstring,too-many-arguments
+@retry_docstring(hardcoded_reg)
+def retry_hardcoded_reg(moving_brain, reference_brain, moving_skull,
+ reference_skull, ants_para, moving_mask=None,
+ reference_mask=None, fixed_image_mask=None,
+ interp=None, reg_with_skull=0, previous_failure=False):
+ if not previous_failure:
+ return [], None
+ return hardcoded_reg(moving_brain, reference_brain, moving_skull,
+ reference_skull, ants_para, moving_mask,
+ reference_mask, fixed_image_mask, interp,
+ reg_with_skull)
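
The gate means a retry node only performs work after a prior failure; a
quick behavioral sketch (the positional arguments are unused when
``previous_failure`` is ``False``):

    from CPAC.registration.guardrails import retry_hardcoded_reg

    outputs, xfm = retry_hardcoded_reg(None, None, None, None, None,
                                       previous_failure=False)
    assert (outputs, xfm) == ([], None)
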
diff --git a/CPAC/registration/registration.py b/CPAC/registration/registration.py
index 7902673a6d..8df41a6b20 100644
--- a/CPAC/registration/registration.py
+++ b/CPAC/registration/registration.py
@@ -1,28 +1,30 @@
-"""Copyright (C) 2012-2022 C-PAC Developers
+# Copyright (C) 2012-2022 C-PAC Developers
-This file is part of C-PAC.
+# This file is part of C-PAC.
-C-PAC is free software: you can redistribute it and/or modify it under
-the terms of the GNU Lesser General Public License as published by the
-Free Software Foundation, either version 3 of the License, or (at your
-option) any later version.
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
-C-PAC is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
-License for more details.
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
-You should have received a copy of the GNU Lesser General Public
-License along with C-PAC. If not, see <https://www.gnu.org/licenses/>."""
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+"""Registration functions"""
# pylint: disable=too-many-lines,ungrouped-imports,wrong-import-order
-# TODO: replace Tuple with tuple, Union with |, once Python >= 3.9, 3.10
from typing import Optional, Tuple, Union
from CPAC.pipeline import nipype_pipeline_engine as pe
from nipype.interfaces import afni, ants, c3, fsl, utility as util
from nipype.interfaces.afni import utils as afni_utils
-
from CPAC.anat_preproc.lesion_preproc import create_lesion_preproc
from CPAC.func_preproc.utils import chunk_ts, split_ts_chunks
+from CPAC.pipeline.random_state.seed import increment_seed
+from CPAC.registration.guardrails import guardrail_selection, \
+ registration_guardrail_node
from CPAC.registration.utils import seperate_warps_list, \
check_transforms, \
generate_inverse_transform_flags, \
@@ -55,7 +57,7 @@ def apply_transform(wf_name, reg_tool, time_series=False, multi_input=False,
name='inputspec')
outputNode = pe.Node(
- util.IdentityInterface(fields=['output_image']),
+ util.IdentityInterface(fields=['output_image', 'failed_qc']),
name='outputspec')
if int(num_cpus) > 1 and time_series:
@@ -94,7 +96,7 @@ def apply_transform(wf_name, reg_tool, time_series=False, multi_input=False,
'reg_tool'],
output_names=['interpolation'],
function=interpolation_string),
- name=f'interp_string',
+ name='interp_string',
mem_gb=2.5)
interp_string.inputs.reg_tool = reg_tool
@@ -106,7 +108,7 @@ def apply_transform(wf_name, reg_tool, time_series=False, multi_input=False,
pe.Node(util.Function(input_names=['transform'],
output_names=['transform_list'],
function=single_ants_xfm_to_list),
- name=f'single_ants_xfm_to_list',
+ name='single_ants_xfm_to_list',
mem_gb=2.5)
wf.connect(inputNode, 'transform', ants_xfm_list, 'transform')
@@ -120,13 +122,13 @@ def apply_transform(wf_name, reg_tool, time_series=False, multi_input=False,
chunk = pe.Node(util.Function(input_names=['func_file',
'n_chunks',
'chunk_size'],
- output_names=['TR_ranges'],
- function=chunk_ts,
- imports=chunk_imports),
+ output_names=['TR_ranges'],
+ function=chunk_ts,
+ imports=chunk_imports),
name=f'chunk_{wf_name}',
mem_gb=2.5)
- #chunk.inputs.n_chunks = int(num_cpus)
+ # chunk.inputs.n_chunks = int(num_cpus)
# 10-TR sized chunks
chunk.inputs.chunk_size = 10
@@ -136,9 +138,9 @@ def apply_transform(wf_name, reg_tool, time_series=False, multi_input=False,
split_imports = ['import os', 'import subprocess']
split = pe.Node(util.Function(input_names=['func_file',
'tr_ranges'],
- output_names=['split_funcs'],
- function=split_ts_chunks,
- imports=split_imports),
+ output_names=['split_funcs'],
+ function=split_ts_chunks,
+ imports=split_imports),
name=f'split_{wf_name}',
mem_gb=2.5)
@@ -153,7 +155,6 @@ def apply_transform(wf_name, reg_tool, time_series=False, multi_input=False,
func_concat.inputs.outputtype = 'NIFTI_GZ'
wf.connect(apply_warp, 'output_image', func_concat, 'in_files')
-
wf.connect(func_concat, 'out_file', outputNode, 'output_image')
else:
@@ -205,7 +206,7 @@ def apply_transform(wf_name, reg_tool, time_series=False, multi_input=False,
name=f'chunk_{wf_name}',
mem_gb=2.5)
- #chunk.inputs.n_chunks = int(num_cpus)
+ # chunk.inputs.n_chunks = int(num_cpus)
# 10-TR sized chunks
chunk.inputs.chunk_size = 10
@@ -215,9 +216,9 @@ def apply_transform(wf_name, reg_tool, time_series=False, multi_input=False,
split_imports = ['import os', 'import subprocess']
split = pe.Node(util.Function(input_names=['func_file',
'tr_ranges'],
- output_names=['split_funcs'],
- function=split_ts_chunks,
- imports=split_imports),
+ output_names=['split_funcs'],
+ function=split_ts_chunks,
+ imports=split_imports),
name=f'split_{wf_name}',
mem_gb=2.5)
@@ -231,7 +232,6 @@ def apply_transform(wf_name, reg_tool, time_series=False, multi_input=False,
func_concat.inputs.outputtype = 'NIFTI_GZ'
wf.connect(apply_warp, 'out_file', func_concat, 'in_files')
-
wf.connect(func_concat, 'out_file', outputNode, 'output_image')
else:
@@ -246,6 +246,7 @@ def transform_derivative(wf_name, label, reg_tool, num_cpus, num_ants_cores,
'''Transform output derivatives to template space.
This function is designed for use with the NodeBlock connection engine.
+    This function is not guardrailed; it passes ``failed_qc`` through
+    from the inner ``apply_transform`` workflow.
'''
wf = pe.Workflow(name=wf_name)
@@ -278,10 +279,13 @@ def transform_derivative(wf_name, label, reg_tool, num_cpus, num_ants_cores,
wf.connect(inputnode, 'reference', apply_xfm, 'inputspec.reference')
wf.connect(inputnode, 'transform', apply_xfm, 'inputspec.transform')
- outputnode = pe.Node(util.IdentityInterface(fields=['out_file']),
+ outputnode = pe.Node(util.IdentityInterface(fields=['out_file',
+ 'failed_qc']),
name='outputspec')
- wf.connect(apply_xfm, 'outputspec.output_image', outputnode, 'out_file')
+ wf.connect([
+ (apply_xfm, outputnode, [('outputspec.output_image', 'out_file'),
+ ('outputspec.failed_qc', 'failed_qc')])])
return wf
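
# A minimal sketch of consuming the new ``failed_qc`` output that
# ``transform_derivative`` now exposes alongside ``out_file``. The outer
# workflow and sink node here are illustrative stand-ins, and keyword
# arguments not shown are assumed to take their defaults:
from nipype.interfaces import utility as util
from CPAC.pipeline import nipype_pipeline_engine as pe
from CPAC.registration.registration import transform_derivative

outer = pe.Workflow(name='derivative_sketch')
deriv = transform_derivative('xfm_reho', 'reho', reg_tool='fsl',
                             num_cpus=1, num_ants_cores=1)
sink = pe.Node(util.IdentityInterface(fields=['derivative', 'failed_qc']),
               name='sink')
# out_file carries the transformed derivative; failed_qc carries the
# guardrail verdict passed through from the inner apply_transform workflow
outer.connect(deriv, 'outputspec.out_file', sink, 'derivative')
outer.connect(deriv, 'outputspec.failed_qc', sink, 'failed_qc')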
@@ -322,31 +326,28 @@ def create_fsl_flirt_linear_reg(name='fsl_flirt_linear_reg'):
linear_reg = pe.Node(interface=fsl.FLIRT(), name='linear_reg_0')
linear_reg.inputs.cost = 'corratio'
+ nodes, guardrails = linear_register.nodes_and_guardrails(
+ linear_reg, registered='out_file')
+ linear_register.connect_retries(
+ guardrails, [(inputspec, 'reference_brain', 'reference')])
inv_flirt_xfm = pe.Node(interface=fsl.utils.ConvertXFM(),
name='inv_linear_reg0_xfm')
inv_flirt_xfm.inputs.invert_xfm = True
- linear_register.connect(inputspec, 'input_brain',
- linear_reg, 'in_file')
-
- linear_register.connect(inputspec, 'reference_brain',
- linear_reg, 'reference')
-
- linear_register.connect(inputspec, 'interp',
- linear_reg, 'interp')
-
- linear_register.connect(linear_reg, 'out_file',
- outputspec, 'output_brain')
-
- linear_register.connect(linear_reg, 'out_matrix_file',
- inv_flirt_xfm, 'in_file')
-
+ linear_register.connect_retries(nodes, [
+ (inputspec, 'input_brain', 'in_file'),
+ (inputspec, 'reference_brain', 'reference'),
+ (inputspec, 'interp', 'interp')])
+ # pylint: disable=no-value-for-parameter
+ registered = guardrail_selection(linear_register, *guardrails)
+ linear_register.connect(registered, 'out', outputspec, 'output_brain')
+ matrix = guardrail_selection(linear_register, *nodes, 'out_matrix_file',
+ guardrails[0])
+ linear_register.connect(matrix, 'out', inv_flirt_xfm, 'in_file')
linear_register.connect(inv_flirt_xfm, 'out_file',
outputspec, 'invlinear_xfm')
-
- linear_register.connect(linear_reg, 'out_matrix_file',
- outputspec, 'linear_xfm')
+ linear_register.connect(matrix, 'out', outputspec, 'linear_xfm')
return linear_register
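
# A minimal sketch of the guardrail pattern applied above, assuming the
# nodes_and_guardrails / connect_retries / guardrail_selection API
# introduced with CPAC.registration.guardrails; node names here are
# illustrative:
from nipype.interfaces import fsl, utility as util
from CPAC.pipeline import nipype_pipeline_engine as pe
from CPAC.registration.guardrails import guardrail_selection

wf = pe.Workflow(name='guardrail_sketch')
inputspec = pe.Node(util.IdentityInterface(fields=['input_brain',
                                                   'reference_brain']),
                    name='inputspec')
flirt = pe.Node(interface=fsl.FLIRT(), name='linear_reg_0')
# wrap the registration node, returning it plus a retry clone and one
# quality-check guardrail per attempt
nodes, guardrails = wf.nodes_and_guardrails(flirt, registered='out_file')
# guardrails judge each attempt against the registration reference
wf.connect_retries(guardrails,
                   [(inputspec, 'reference_brain', 'reference')])
# identical inputs fan out to the original node and its retry clone
wf.connect_retries(nodes, [(inputspec, 'input_brain', 'in_file'),
                           (inputspec, 'reference_brain', 'reference')])
# select the passing attempt: the registered image from the guardrails,
# and any secondary output (here the affine matrix) from the nodes
registered = guardrail_selection(wf, *guardrails)
matrix = guardrail_selection(wf, *nodes, 'out_matrix_file', guardrails[0])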
@@ -393,14 +394,24 @@ def create_fsl_fnirt_nonlinear_reg(name='fsl_fnirt_nonlinear_reg'):
transformation (affine only) from the space of the reference
file to the input file.
+ .. exec::
+ from CPAC.registration.registration import \
+ create_fsl_fnirt_nonlinear_reg
+ wf = create_fsl_fnirt_nonlinear_reg()
+ wf.write_graph(
+ graph2use='orig',
+ dotfilename='./images/generated/nonlinear_register.dot'
+ )
+
Workflow Graph:
- .. image:: ../images/nonlinear_register.dot.png
- :width: 500
+ .. image:: ../../images/generated/nonlinear_register.png
+ :width: 500
Detailed Workflow Graph:
- .. image:: ../images/nonlinear_register_detailed.dot.png
- :width: 500
+
+ .. image:: ../../images/generated/nonlinear_register_detailed.png
+ :width: 500
"""
nonlinear_register = pe.Workflow(name=name)
@@ -421,45 +432,30 @@ def create_fsl_fnirt_nonlinear_reg(name='fsl_fnirt_nonlinear_reg'):
nonlinear_reg = pe.Node(interface=fsl.FNIRT(),
name='nonlinear_reg_1')
-
nonlinear_reg.inputs.fieldcoeff_file = True
nonlinear_reg.inputs.jacobian_file = True
-
- brain_warp = pe.Node(interface=fsl.ApplyWarp(),
- name='brain_warp')
-
- nonlinear_register.connect(inputspec, 'input_skull',
- nonlinear_reg, 'in_file')
-
- nonlinear_register.connect(inputspec, 'reference_skull',
- nonlinear_reg, 'ref_file')
-
- nonlinear_register.connect(inputspec, 'interp',
- brain_warp, 'interp')
-
- nonlinear_register.connect(inputspec, 'ref_mask',
- nonlinear_reg, 'refmask_file')
-
- # FNIRT parameters are specified by FSL config file
- # ${FSLDIR}/etc/flirtsch/TI_2_MNI152_2mm.cnf (or user-specified)
- nonlinear_register.connect(inputspec, 'fnirt_config',
- nonlinear_reg, 'config_file')
-
- nonlinear_register.connect(inputspec, 'linear_aff',
- nonlinear_reg, 'affine_file')
-
- nonlinear_register.connect(nonlinear_reg, 'fieldcoeff_file',
- outputspec, 'nonlinear_xfm')
-
- nonlinear_register.connect(inputspec, 'input_brain',
- brain_warp, 'in_file')
-
- nonlinear_register.connect(nonlinear_reg, 'fieldcoeff_file',
- brain_warp, 'field_file')
-
- nonlinear_register.connect(inputspec, 'reference_brain',
- brain_warp, 'ref_file')
-
+ nodes, guardrails = nonlinear_register.nodes_and_guardrails(
+ nonlinear_reg, registered='warped_file')
+ nonlinear_register.connect_retries(
+ guardrails, [(inputspec, 'reference_skull', 'reference')])
+ nonlinear_register.connect_retries(nodes, [
+ (inputspec, 'input_skull', 'in_file'),
+ (inputspec, 'reference_skull', 'ref_file'),
+ (inputspec, 'ref_mask', 'refmask_file'),
+ # FNIRT parameters are specified by FSL config file
+        # ${FSLDIR}/etc/flirtsch/T1_2_MNI152_2mm.cnf (or user-specified)
+ (inputspec, 'fnirt_config', 'config_file'),
+ (inputspec, 'linear_aff', 'affine_file')])
+
+ brain_warp = pe.Node(interface=fsl.ApplyWarp(), name='brain_warp')
+ nonlinear_register.connect([
+ (inputspec, brain_warp, [('interp', 'interp'),
+ ('input_brain', 'in_file'),
+ ('reference_brain', 'ref_file')])])
+ field_coeff = guardrail_selection(nonlinear_register, *nodes,
+ 'fieldcoeff_file', guardrails[0])
+ nonlinear_register.connect(field_coeff, 'out', outputspec, 'nonlinear_xfm')
+ nonlinear_register.connect(field_coeff, 'out', brain_warp, 'field_file')
nonlinear_register.connect(brain_warp, 'out_file',
outputspec, 'output_brain')
@@ -510,14 +506,24 @@ def create_fsl_fnirt_nonlinear_reg_nhp(name='fsl_fnirt_nonlinear_reg_nhp'):
transformation (affine only) from the space of the reference
file to the input file.
+ .. exec::
+ from CPAC.registration.registration import \
+ create_fsl_fnirt_nonlinear_reg_nhp
+ wf = create_fsl_fnirt_nonlinear_reg_nhp()
+ wf.write_graph(
+ graph2use='orig',
+ dotfilename='./images/generated/nonlinear_register_nhp.dot'
+ )
+
Workflow Graph:
- .. image:: ../images/nonlinear_register.dot.png
- :width: 500
+ .. image:: ../../images/generated/nonlinear_register_nhp.png
+ :width: 500
Detailed Workflow Graph:
- .. image:: ../images/nonlinear_register_detailed.dot.png
- :width: 500
+
+ .. image:: ../../images/generated/nonlinear_register_nhp_detailed.png
+ :width: 500
"""
nonlinear_register = pe.Workflow(name=name)
@@ -542,99 +548,59 @@ def create_fsl_fnirt_nonlinear_reg_nhp(name='fsl_fnirt_nonlinear_reg_nhp'):
nonlinear_reg = pe.Node(interface=fsl.FNIRT(),
name='nonlinear_reg_1')
-
nonlinear_reg.inputs.fieldcoeff_file = True
nonlinear_reg.inputs.jacobian_file = True
nonlinear_reg.inputs.field_file = True
-
- nonlinear_register.connect(inputspec, 'input_skull',
- nonlinear_reg, 'in_file')
-
- nonlinear_register.connect(inputspec, 'reference_skull',
- nonlinear_reg, 'ref_file')
-
- nonlinear_register.connect(inputspec, 'ref_mask',
- nonlinear_reg, 'refmask_file')
-
- nonlinear_register.connect(inputspec, 'fnirt_config',
- nonlinear_reg, 'config_file')
-
- nonlinear_register.connect(inputspec, 'linear_aff',
- nonlinear_reg, 'affine_file')
-
- brain_warp = pe.Node(interface=fsl.ApplyWarp(),
- name='brain_warp')
+ nodes, guardrails = nonlinear_register.nodes_and_guardrails(
+ nonlinear_reg, registered='warped_file')
+ nonlinear_register.connect_retries(
+ guardrails, [(inputspec, 'reference_skull', 'reference')])
+ fieldcoeff_file = guardrail_selection(nonlinear_register, *nodes,
+ 'fieldcoeff_file', guardrails[0])
+ field_file = guardrail_selection(nonlinear_register, *nodes, 'field_file',
+ guardrails[0])
+
+ brain_warp = pe.Node(interface=fsl.ApplyWarp(), name='brain_warp')
brain_warp.inputs.interp = 'nn'
brain_warp.inputs.relwarp = True
- nonlinear_register.connect(inputspec, 'input_brain',
- brain_warp, 'in_file')
-
- nonlinear_register.connect(nonlinear_reg, 'field_file',
- brain_warp, 'field_file')
-
- nonlinear_register.connect(inputspec, 'reference_skull',
- brain_warp, 'ref_file')
-
- head_warp = pe.Node(interface=fsl.ApplyWarp(),
- name='head_warp')
+ head_warp = pe.Node(interface=fsl.ApplyWarp(), name='head_warp')
head_warp.inputs.interp = 'spline'
head_warp.inputs.relwarp = True
- nonlinear_register.connect(inputspec, 'input_brain',
- head_warp, 'in_file')
-
- nonlinear_register.connect(nonlinear_reg, 'field_file',
- head_warp, 'field_file')
-
- nonlinear_register.connect(inputspec, 'reference_skull',
- head_warp, 'ref_file')
-
- mask_warp = pe.Node(interface=fsl.ApplyWarp(),
- name='mask_warp')
+ mask_warp = pe.Node(interface=fsl.ApplyWarp(), name='mask_warp')
mask_warp.inputs.interp = 'nn'
mask_warp.inputs.relwarp = True
- nonlinear_register.connect(inputspec, 'input_brain',
- mask_warp, 'in_file')
-
- nonlinear_register.connect(nonlinear_reg, 'field_file',
- mask_warp, 'field_file')
-
- nonlinear_register.connect(inputspec, 'reference_skull',
- mask_warp, 'ref_file')
-
- biasfield_warp = pe.Node(interface=fsl.ApplyWarp(),
- name='biasfield_warp')
+ biasfield_warp = pe.Node(interface=fsl.ApplyWarp(), name='biasfield_warp')
biasfield_warp.inputs.interp = 'spline'
biasfield_warp.inputs.relwarp = True
- nonlinear_register.connect(inputspec, 'input_brain',
- biasfield_warp, 'in_file')
-
- nonlinear_register.connect(nonlinear_reg, 'field_file',
- biasfield_warp, 'field_file')
-
- nonlinear_register.connect(inputspec, 'reference_skull',
- biasfield_warp, 'ref_file')
-
- nonlinear_register.connect(nonlinear_reg, 'fieldcoeff_file',
- outputspec, 'nonlinear_xfm')
-
- nonlinear_register.connect(nonlinear_reg, 'field_file',
- outputspec, 'nonlinear_warp')
-
- nonlinear_register.connect(brain_warp, 'out_file',
- outputspec, 'output_brain')
-
- nonlinear_register.connect(head_warp, 'out_file',
- outputspec, 'output_head')
-
- nonlinear_register.connect(mask_warp, 'out_file',
- outputspec, 'output_mask')
-
- nonlinear_register.connect(biasfield_warp, 'out_file',
- outputspec, 'output_biasfield')
+ nonlinear_register.connect_retries(nodes, [
+ (inputspec, 'input_skull', 'in_file'),
+ (inputspec, 'reference_skull', 'ref_file'),
+ (inputspec, 'ref_mask', 'refmask_file'),
+ (inputspec, 'fnirt_config', 'config_file'),
+ (inputspec, 'linear_aff', 'affine_file')])
+ nonlinear_register.connect([
+ (inputspec, brain_warp, [('input_brain', 'in_file'),
+ ('reference_skull', 'ref_file')]),
+ (field_file, brain_warp, [('out', 'field_file')]),
+ (inputspec, head_warp, [('input_brain', 'in_file'),
+ ('reference_skull', 'ref_file')]),
+ (field_file, head_warp, [('out', 'field_file')]),
+ (inputspec, mask_warp, [('input_brain', 'in_file'),
+ ('reference_skull', 'ref_file')]),
+ (field_file, mask_warp, [('out', 'field_file')]),
+ (inputspec, biasfield_warp, [('input_brain', 'in_file'),
+ ('reference_skull', 'ref_file')]),
+ (field_file, biasfield_warp, [('out', 'field_file')]),
+        (fieldcoeff_file, outputspec, [('out', 'nonlinear_xfm')]),
+ (field_file, outputspec, [('out', 'nonlinear_warp')]),
+ (brain_warp, outputspec, [('out_file', 'output_brain')]),
+ (head_warp, outputspec, [('out_file', 'output_head')]),
+ (mask_warp, outputspec, [('out_file', 'output_mask')]),
+ (biasfield_warp, outputspec, [('out_file', 'output_biasfield')])])
return nonlinear_register
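
# Once FNIRT is guardrailed, its field outputs are read from selection
# nodes rather than straight off the node; each selection's single ``out``
# port can then fan out to any number of consumers. A hedged sketch under
# the same assumed guardrails API:
from nipype.interfaces import fsl, utility as util
from CPAC.pipeline import nipype_pipeline_engine as pe
from CPAC.registration.guardrails import guardrail_selection

wf = pe.Workflow(name='fnirt_selection_sketch')
inputspec = pe.Node(util.IdentityInterface(fields=['input_skull',
                                                   'reference_skull']),
                    name='inputspec')
fnirt = pe.Node(interface=fsl.FNIRT(), name='nonlinear_reg_1')
fnirt.inputs.fieldcoeff_file = True
nodes, guardrails = wf.nodes_and_guardrails(fnirt,
                                            registered='warped_file')
wf.connect_retries(guardrails,
                   [(inputspec, 'reference_skull', 'reference')])
wf.connect_retries(nodes, [(inputspec, 'input_skull', 'in_file'),
                           (inputspec, 'reference_skull', 'ref_file')])
# one selection node per secondary output
field_coeff = guardrail_selection(wf, *nodes, 'fieldcoeff_file',
                                  guardrails[0])
brain_warp = pe.Node(interface=fsl.ApplyWarp(), name='brain_warp')
head_warp = pe.Node(interface=fsl.ApplyWarp(), name='head_warp')
for warp in (brain_warp, head_warp):
    wf.connect(field_coeff, 'out', warp, 'field_file')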
@@ -701,45 +667,48 @@ def create_register_func_to_anat(config, phase_diff_distcor=False,
fields=['func_to_anat_linear_xfm_nobbreg', 'anat_func_nobbreg']),
name='outputspec')
- linear_reg = pe.Node(interface=fsl.FLIRT(),
- name='linear_func_to_anat')
-
- linear_reg.inputs.interp = config.registration_workflows['functional_registration']['coregistration']['interpolation']
- linear_reg.inputs.cost = config.registration_workflows['functional_registration']['coregistration']['cost']
- linear_reg.inputs.dof = config.registration_workflows['functional_registration']['coregistration']['dof']
- if config.registration_workflows['functional_registration']['coregistration']['arguments'] is not None:
- linear_reg.inputs.args = config.registration_workflows['functional_registration']['coregistration']['arguments']
+ linear_reg = pe.Node(interface=fsl.FLIRT(), name='linear_func_to_anat')
+ linear_reg.inputs.interp = config.registration_workflows[
+ 'functional_registration']['coregistration']['interpolation']
+ linear_reg.inputs.cost = config.registration_workflows[
+ 'functional_registration']['coregistration']['cost']
+ linear_reg.inputs.dof = config.registration_workflows[
+ 'functional_registration']['coregistration']['dof']
+ if config.registration_workflows['functional_registration'][
+ 'coregistration']['arguments'] is not None:
+ linear_reg.inputs.args = config.registration_workflows[
+ 'functional_registration']['coregistration']['arguments']
+ nodes, guardrails = register_func_to_anat.nodes_and_guardrails(
+ linear_reg, registered='out_file')
if phase_diff_distcor:
- register_func_to_anat.connect(
- inputNode_pedir, ('pedir', convert_pedir),
- linear_reg, 'pedir')
- register_func_to_anat.connect(inputspec, 'fieldmap',
- linear_reg, 'fieldmap')
- register_func_to_anat.connect(inputspec, 'fieldmapmask',
- linear_reg, 'fieldmapmask')
- register_func_to_anat.connect(inputNode_echospacing, 'echospacing',
- linear_reg, 'echospacing')
-
- register_func_to_anat.connect(inputspec, 'func', linear_reg, 'in_file')
-
- register_func_to_anat.connect(inputspec, 'anat', linear_reg, 'reference')
-
- register_func_to_anat.connect(inputspec, 'dof', linear_reg, 'dof')
-
- register_func_to_anat.connect(inputspec, 'interp', linear_reg, 'interp')
-
- register_func_to_anat.connect(linear_reg, 'out_matrix_file',
- outputspec,
- 'func_to_anat_linear_xfm_nobbreg')
-
- register_func_to_anat.connect(linear_reg, 'out_file',
+ register_func_to_anat.connect_retries(nodes, [
+ (inputNode_pedir, ('pedir', convert_pedir), 'pedir'),
+ (inputspec, 'fieldmap', 'fieldmap'),
+ (inputspec, 'fieldmapmask', 'fieldmapmask'),
+ (inputNode_echospacing, 'echospacing', 'echospacing')])
+
+ register_func_to_anat.connect_retries(nodes, [
+ (inputspec, 'func', 'in_file'),
+ (inputspec, 'anat', 'reference'),
+ (inputspec, 'dof', 'dof'),
+ (inputspec, 'interp', 'interp')])
+ register_func_to_anat.connect_retries(guardrails, [
+ (inputspec, 'anat', 'reference')])
+ select_matrix = guardrail_selection(register_func_to_anat, *nodes,
+ 'out_matrix_file', guardrails[0])
+ register_func_to_anat.connect(
+ select_matrix, 'out',
+ outputspec, 'func_to_anat_linear_xfm_nobbreg')
+ # pylint: disable=no-value-for-parameter
+ select_reg = guardrail_selection(register_func_to_anat, *guardrails)
+ register_func_to_anat.connect(select_reg, 'out',
outputspec, 'anat_func_nobbreg')
return register_func_to_anat
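
# The ``(inputNode_pedir, ('pedir', convert_pedir), 'pedir')`` entries above
# use Nipype's source-modifier syntax: the named output is passed through a
# function on its way to the destination input. A generic, runnable
# illustration with a toy modifier standing in for ``convert_pedir``:
from nipype.interfaces import utility as util
from nipype.pipeline import engine as pe


def _negate_pedir(pedir):
    """Toy stand-in for convert_pedir."""
    return pedir.replace('-', '') if '-' in pedir else f'{pedir}-'


wf = pe.Workflow(name='modifier_sketch')
src = pe.Node(util.IdentityInterface(fields=['pedir']), name='src')
src.inputs.pedir = 'y-'
dst = pe.Node(util.IdentityInterface(fields=['pedir']), name='dst')
# the modifier runs on 'pedir' before the value reaches dst.pedir
wf.connect(src, ('pedir', _negate_pedir), dst, 'pedir')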
-def create_register_func_to_anat_use_T2(config, name='register_func_to_anat_use_T2'):
+def create_register_func_to_anat_use_T2(name='register_func_to_anat_use_T2'):
# for monkey data
# ref: https://github.com/DCAN-Labs/dcan-macaque-pipeline/blob/master/fMRIVolume/GenericfMRIVolumeProcessingPipeline.sh#L287-L295
# https://github.com/HechengJin0/dcan-macaque-pipeline/blob/master/fMRIVolume/GenericfMRIVolumeProcessingPipeline.sh#L524-L535
@@ -750,8 +719,6 @@ def create_register_func_to_anat_use_T2(config, name='register_func_to_anat_use_
Parameters
----------
- config : configuration, mandatory
- Pipeline configuration.
name : string, optional
Name of the workflow.
@@ -776,8 +743,6 @@ def create_register_func_to_anat_use_T2(config, name='register_func_to_anat_use_
outputspec.anat_func_nobbreg : string (nifti file)
Functional scan registered to anatomical space
"""
-
-
register_func_to_anat_use_T2 = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=['func',
@@ -786,69 +751,90 @@ def create_register_func_to_anat_use_T2(config, name='register_func_to_anat_use_
'T2_brain']),
name='inputspec')
- outputspec = pe.Node(util.IdentityInterface(fields=['func_to_anat_linear_xfm_nobbreg',
- 'func_to_anat_linear_warp_nobbreg',
+ outputspec = pe.Node(util.IdentityInterface(fields=['func_to_anat_linear_'
+ 'xfm_nobbreg',
+ 'func_to_anat_linear_'
+ 'warp_nobbreg',
'anat_func_nobbreg']),
- name='outputspec')
+ name='outputspec')
# ${FSLDIR}/bin/flirt -interp spline -dof 6 -in ${fMRIFolder}/${ScoutName}_gdc -ref ${T1wFolder}/${T2wRestoreImage} -omat "$fMRIFolder"/Scout2T2w.mat -out ${fMRIFolder}/Scout2T2w.nii.gz -searchrx -30 30 -searchry -30 30 -searchrz -30 30 -cost mutualinfo
linear_reg_func_to_t2 = pe.Node(interface=fsl.FLIRT(),
- name='linear_reg_func_to_t2')
+ name='linear_reg_func_to_t2')
linear_reg_func_to_t2.inputs.interp = 'spline'
linear_reg_func_to_t2.inputs.cost = 'mutualinfo'
linear_reg_func_to_t2.inputs.dof = 6
linear_reg_func_to_t2.inputs.searchr_x = [30, 30]
linear_reg_func_to_t2.inputs.searchr_y = [30, 30]
linear_reg_func_to_t2.inputs.searchr_z = [30, 30]
-
- register_func_to_anat_use_T2.connect(inputspec, 'func', linear_reg_func_to_t2, 'in_file')
-
- register_func_to_anat_use_T2.connect(inputspec, 'T2_head', linear_reg_func_to_t2, 'reference')
+ nodes, guardrails = register_func_to_anat_use_T2.nodes_and_guardrails(
+ linear_reg_func_to_t2, registered='out_file')
+ register_func_to_anat_use_T2.connect_retries(nodes, [
+ (inputspec, 'func', 'in_file'), (inputspec, 'T2_head', 'reference')])
+ register_func_to_anat_use_T2.connect_retries(guardrails, [
+ (inputspec, 'T2_head', 'reference')])
+ linear_reg_func_to_t2_matrix = guardrail_selection(
+ register_func_to_anat_use_T2, *nodes, 'out_matrix_file', guardrails[0])
# ${FSLDIR}/bin/convert_xfm -omat "$fMRIFolder"/T2w2Scout.mat -inverse "$fMRIFolder"/Scout2T2w.mat
invt = pe.Node(interface=fsl.ConvertXFM(), name='convert_xfm')
invt.inputs.invert_xfm = True
- register_func_to_anat_use_T2.connect(linear_reg_func_to_t2, 'out_matrix_file', invt, 'in_file')
+ register_func_to_anat_use_T2.connect(linear_reg_func_to_t2_matrix, 'out',
+ invt, 'in_file')
# ${FSLDIR}/bin/applywarp --interp=nn -i ${T1wFolder}/${T2wRestoreImageBrain} -r ${fMRIFolder}/${ScoutName}_gdc --premat="$fMRIFolder"/T2w2Scout.mat -o ${fMRIFolder}/Scout_brain_mask.nii.gz
anat_to_func = pe.Node(interface=fsl.ApplyWarp(),
name='anat_to_func')
anat_to_func.inputs.interp = 'nn'
- register_func_to_anat_use_T2.connect(inputspec, 'T2_brain', anat_to_func, 'in_file')
- register_func_to_anat_use_T2.connect(inputspec, 'func', anat_to_func, 'ref_file')
- register_func_to_anat_use_T2.connect(invt, 'out_file', anat_to_func, 'premat')
+ register_func_to_anat_use_T2.connect(inputspec, 'T2_brain',
+ anat_to_func, 'in_file')
+ register_func_to_anat_use_T2.connect(inputspec, 'func',
+ anat_to_func, 'ref_file')
+ register_func_to_anat_use_T2.connect(invt, 'out_file',
+ anat_to_func, 'premat')
# ${FSLDIR}/bin/fslmaths ${fMRIFolder}/Scout_brain_mask.nii.gz -bin ${fMRIFolder}/Scout_brain_mask.nii.gz
func_brain_mask = pe.Node(interface=fsl.maths.MathsCommand(),
- name=f'func_brain_mask')
+ name='func_brain_mask')
func_brain_mask.inputs.args = '-bin'
- register_func_to_anat_use_T2.connect(anat_to_func, 'out_file', func_brain_mask, 'in_file')
+ register_func_to_anat_use_T2.connect(anat_to_func, 'out_file',
+ func_brain_mask, 'in_file')
# ${FSLDIR}/bin/fslmaths ${fMRIFolder}/${ScoutName}_gdc -mas ${fMRIFolder}/Scout_brain_mask.nii.gz ${fMRIFolder}/Scout_brain_dc.nii.gz
func_brain = pe.Node(interface=fsl.MultiImageMaths(),
- name='func_brain')
+ name='func_brain')
func_brain.inputs.op_string = "-mas %s "
- register_func_to_anat_use_T2.connect(inputspec, 'func', func_brain, 'in_file')
- register_func_to_anat_use_T2.connect(func_brain_mask, 'out_file', func_brain, 'operand_files')
+ register_func_to_anat_use_T2.connect(inputspec, 'func',
+ func_brain, 'in_file')
+ register_func_to_anat_use_T2.connect(func_brain_mask, 'out_file',
+ func_brain, 'operand_files')
    # ## re-registering the masked brain to the T1 brain:
# ${FSLDIR}/bin/flirt -interp spline -dof 6 -in ${fMRIFolder}/Scout_brain_dc.nii.gz -ref ${T1wFolder}/${T1wRestoreImageBrain} -omat "$fMRIFolder"/${ScoutName}_gdc2T1w_init.mat -out ${fMRIFolder}/${ScoutName}_gdc2T1w_brain_init -searchrx -30 30 -searchry -30 30 -searchrz -30 30 -cost mutualinfo
linear_reg_func_to_t1 = pe.Node(interface=fsl.FLIRT(),
- name='linear_reg_func_to_t1')
+ name='linear_reg_func_to_t1')
linear_reg_func_to_t1.inputs.interp = 'spline'
linear_reg_func_to_t1.inputs.cost = 'mutualinfo'
linear_reg_func_to_t1.inputs.dof = 6
linear_reg_func_to_t1.inputs.searchr_x = [30, 30]
linear_reg_func_to_t1.inputs.searchr_y = [30, 30]
linear_reg_func_to_t1.inputs.searchr_z = [30, 30]
-
- register_func_to_anat_use_T2.connect(func_brain, 'out_file', linear_reg_func_to_t1, 'in_file')
-
- register_func_to_anat_use_T2.connect(inputspec, 'T1_brain', linear_reg_func_to_t1, 'reference')
+ nodes, guardrails = register_func_to_anat_use_T2.nodes_and_guardrails(
+ linear_reg_func_to_t1, registered='out_file')
+ register_func_to_anat_use_T2.connect_retries(nodes, [
+ (func_brain, 'out_file', 'in_file'),
+ (inputspec, 'T1_brain', 'reference')])
+ register_func_to_anat_use_T2.connect_retries(guardrails, [
+ (inputspec, 'T1_brain', 'reference')])
+ # pylint: disable=no-value-for-parameter
+ select_linear_reg_func_to_t1 = guardrail_selection(
+ register_func_to_anat_use_T2, *guardrails)
+ linear_reg_func_to_t1_matrix = guardrail_selection(
+ register_func_to_anat_use_T2, *nodes, 'out_matrix_file', guardrails[0])
    # # warpfield omitted here, as it is not generated without a fieldmap.
# ${FSLDIR}/bin/convertwarp --relout --rel -r ${T1wFolder}/${T2wRestoreImage} --postmat=${fMRIFolder}/${ScoutName}_gdc2T1w_init.mat -o ${fMRIFolder}/${ScoutName}_gdc2T1w_init_warp
@@ -857,28 +843,25 @@ def create_register_func_to_anat_use_T2(config, name='register_func_to_anat_use_
convert_warp.inputs.out_relwarp = True
convert_warp.inputs.relwarp = True
- register_func_to_anat_use_T2.connect(linear_reg_func_to_t1, 'out_matrix_file', convert_warp, 'postmat')
-
- register_func_to_anat_use_T2.connect(inputspec, 'T2_head', convert_warp, 'reference')
-
-
- register_func_to_anat_use_T2.connect(linear_reg_func_to_t1, 'out_matrix_file',
- outputspec,
- 'func_to_anat_linear_xfm_nobbreg')
-
+ register_func_to_anat_use_T2.connect(linear_reg_func_to_t1_matrix, 'out',
+ convert_warp, 'postmat')
+ register_func_to_anat_use_T2.connect(inputspec, 'T2_head',
+ convert_warp, 'reference')
+ register_func_to_anat_use_T2.connect(
+ linear_reg_func_to_t1_matrix, 'out',
+ outputspec, 'func_to_anat_linear_xfm_nobbreg')
register_func_to_anat_use_T2.connect(convert_warp, 'out_file',
outputspec,
'func_to_anat_linear_warp_nobbreg')
-
- register_func_to_anat_use_T2.connect(linear_reg_func_to_t1, 'out_file',
+ register_func_to_anat_use_T2.connect(select_linear_reg_func_to_t1, 'out',
outputspec, 'anat_func_nobbreg')
return register_func_to_anat_use_T2
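
# The quoted shell lines map one-to-one onto the Nipype nodes above; in
# particular, ``convert_xfm -inverse`` followed by ``applywarp --premat``
# is the invert-then-apply pattern that brings the T2 brain into
# functional space. A self-contained sketch of just that pattern:
from nipype.interfaces import fsl, utility as util
from CPAC.pipeline import nipype_pipeline_engine as pe

wf = pe.Workflow(name='invert_premat_sketch')
inputspec = pe.Node(util.IdentityInterface(fields=['func', 'T2_brain',
                                                   'func_to_t2_mat']),
                    name='inputspec')
invt = pe.Node(interface=fsl.ConvertXFM(), name='convert_xfm')
invt.inputs.invert_xfm = True                # convert_xfm -inverse
anat_to_func = pe.Node(interface=fsl.ApplyWarp(), name='anat_to_func')
anat_to_func.inputs.interp = 'nn'            # applywarp --interp=nn
wf.connect(inputspec, 'func_to_t2_mat', invt, 'in_file')
wf.connect(inputspec, 'T2_brain', anat_to_func, 'in_file')
wf.connect(inputspec, 'func', anat_to_func, 'ref_file')
wf.connect(invt, 'out_file', anat_to_func, 'premat')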
def create_bbregister_func_to_anat(phase_diff_distcor=False,
- name='bbregister_func_to_anat'):
-
+ name='bbregister_func_to_anat',
+ retry=False):
"""
Registers a functional scan in native space to structural. This is
meant to be used after create_nonlinear_register() has been run and
@@ -891,6 +874,8 @@ def create_bbregister_func_to_anat(phase_diff_distcor=False,
take in the appropriate field map-related inputs.
name : string, optional
Name of the workflow.
+ retry : bool
+        Whether this is a retry (a second registration attempt).
Returns
-------
@@ -919,7 +904,6 @@ def create_bbregister_func_to_anat(phase_diff_distcor=False,
outputspec.anat_func : string (nifti file)
Functional data in anatomical space
"""
-
register_bbregister_func_to_anat = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=['func',
@@ -948,7 +932,6 @@ def create_bbregister_func_to_anat(phase_diff_distcor=False,
register_bbregister_func_to_anat.connect(
inputspec, 'bbr_wm_mask_args',
wm_bb_mask, 'op_string')
-
register_bbregister_func_to_anat.connect(inputspec,
'anat_wm_segmentation',
wm_bb_mask, 'in_file')
@@ -959,55 +942,43 @@ def bbreg_args(bbreg_target):
bbreg_func_to_anat = pe.Node(interface=fsl.FLIRT(),
name='bbreg_func_to_anat')
bbreg_func_to_anat.inputs.dof = 6
-
- register_bbregister_func_to_anat.connect(
- inputspec, 'bbr_schedule',
- bbreg_func_to_anat, 'schedule')
-
- register_bbregister_func_to_anat.connect(
- wm_bb_mask, ('out_file', bbreg_args),
- bbreg_func_to_anat, 'args')
-
- register_bbregister_func_to_anat.connect(
- inputspec, 'func',
- bbreg_func_to_anat, 'in_file')
-
- register_bbregister_func_to_anat.connect(
- inputspec, 'anat',
- bbreg_func_to_anat, 'reference')
-
- register_bbregister_func_to_anat.connect(
- inputspec, 'linear_reg_matrix',
- bbreg_func_to_anat, 'in_matrix_file')
-
+ nodes, guardrails = register_bbregister_func_to_anat.nodes_and_guardrails(
+ bbreg_func_to_anat, registered='out_file', add_clones=bool(retry))
+ register_bbregister_func_to_anat.connect_retries(nodes, [
+ (inputspec, 'bbr_schedule', 'schedule'),
+ (wm_bb_mask, ('out_file', bbreg_args), 'args'),
+ (inputspec, 'func', 'in_file'),
+ (inputspec, 'anat', 'reference'),
+ (inputspec, 'linear_reg_matrix', 'in_matrix_file')])
if phase_diff_distcor:
+ register_bbregister_func_to_anat.connect_retries(nodes, [
+ (inputNode_pedir, ('pedir', convert_pedir), 'pedir'),
+ (inputspec, 'fieldmap', 'fieldmap'),
+ (inputspec, 'fieldmapmask', 'fieldmapmask'),
+ (inputNode_echospacing, 'echospacing', 'echospacing')])
+ register_bbregister_func_to_anat.connect_retries(guardrails, [
+ (inputspec, 'anat', 'reference')])
+ if retry:
+ # pylint: disable=no-value-for-parameter
+ outfile = guardrail_selection(register_bbregister_func_to_anat,
+ *guardrails)
+ matrix = guardrail_selection(register_bbregister_func_to_anat, *nodes,
+ 'out_matrix_file', guardrails[0])
register_bbregister_func_to_anat.connect(
- inputNode_pedir, ('pedir', convert_pedir),
- bbreg_func_to_anat, 'pedir')
- register_bbregister_func_to_anat.connect(
- inputspec, 'fieldmap',
- bbreg_func_to_anat, 'fieldmap')
- register_bbregister_func_to_anat.connect(
- inputspec, 'fieldmapmask',
- bbreg_func_to_anat, 'fieldmapmask')
+ matrix, 'out', outputspec, 'func_to_anat_linear_xfm')
+ register_bbregister_func_to_anat.connect(outfile, 'out',
+ outputspec, 'anat_func')
+ else:
register_bbregister_func_to_anat.connect(
- inputNode_echospacing, 'echospacing',
- bbreg_func_to_anat, 'echospacing')
-
- register_bbregister_func_to_anat.connect(
- bbreg_func_to_anat, 'out_matrix_file',
- outputspec, 'func_to_anat_linear_xfm')
-
- register_bbregister_func_to_anat.connect(
- bbreg_func_to_anat, 'out_file',
- outputspec, 'anat_func')
-
+ bbreg_func_to_anat, 'out_matrix_file',
+ outputspec, 'func_to_anat_linear_xfm')
+ register_bbregister_func_to_anat.connect(guardrails[0], 'registered',
+ outputspec, 'anat_func')
return register_bbregister_func_to_anat
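
# A hedged usage sketch of the new ``retry`` flag: the default builds a
# single guardrailed attempt, while ``retry=True`` adds clones
# (``add_clones=bool(retry)`` above) so the passing attempt can be
# selected downstream:
from CPAC.registration.registration import create_bbregister_func_to_anat

# single attempt: outputs come from the one node and its guardrail
bbreg = create_bbregister_func_to_anat(name='bbreg_func_to_anat')
# retry-enabled: outputs come from guardrail_selection over both attempts
bbreg_retry = create_bbregister_func_to_anat(
    name='bbreg_func_to_anat_retry', retry=True)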
-def create_wf_calculate_ants_warp(
- name='create_wf_calculate_ants_warp', num_threads=1, reg_ants_skull=1
-):
+def create_wf_calculate_ants_warp(name='create_wf_calculate_ants_warp',
+ num_threads=1, reg_ants_skull=1):
'''
Calculates the nonlinear ANTS registration transform. This workflow
employs the antsRegistration tool:
@@ -1118,7 +1089,8 @@ def create_wf_calculate_ants_warp(
1. Calculates a nonlinear anatomical-to-template registration.
.. exec::
- from CPAC.registration import create_wf_calculate_ants_warp
+ from CPAC.registration.registration import \
+ create_wf_calculate_ants_warp
wf = create_wf_calculate_ants_warp()
wf.write_graph(
graph2use='orig',
@@ -1126,28 +1098,23 @@ def create_wf_calculate_ants_warp(
)
Workflow Graph:
- .. image::
- :width: 500
+
+ .. image:: ../../images/generated/calculate_ants_warp.png
+ :width: 500
Detailed Workflow Graph:
- .. image::
- :width: 500
+ .. image:: ../../images/generated/calculate_ants_warp_detailed.png
+ :width: 500
'''
-
+ from CPAC.registration.guardrails import retry_hardcoded_reg
calc_ants_warp_wf = pe.Workflow(name=name)
- inputspec = pe.Node(util.IdentityInterface(
- fields=['moving_brain',
- 'reference_brain',
- 'moving_skull',
- 'reference_skull',
- 'reference_mask',
- 'moving_mask',
- 'fixed_image_mask',
- 'ants_para',
- 'interp']),
- name='inputspec')
+ warp_inputs = ['moving_brain', 'reference_brain', 'moving_skull',
+ 'reference_skull', 'ants_para', 'moving_mask',
+ 'reference_mask', 'fixed_image_mask', 'interp']
+ inputspec = pe.Node(util.IdentityInterface(fields=warp_inputs),
+ name='inputspec')
outputspec = pe.Node(util.IdentityInterface(
fields=['ants_initial_xfm',
@@ -1168,155 +1135,117 @@ def create_wf_calculate_ants_warp(
calculate_ants_warp.inputs.initial_moving_transform_com = 0
'''
reg_imports = ['import os', 'import subprocess']
- calculate_ants_warp = \
- pe.Node(interface=util.Function(input_names=['moving_brain',
- 'reference_brain',
- 'moving_skull',
- 'reference_skull',
- 'ants_para',
- 'moving_mask',
- 'reference_mask',
- 'fixed_image_mask',
- 'interp',
- 'reg_with_skull'],
- output_names=['warp_list',
- 'warped_image'],
- function=hardcoded_reg,
- imports=reg_imports),
- name='calc_ants_warp',
- mem_gb=2.8,
- mem_x=(2e-7, 'moving_brain', 'xyz'))
-
+ warp_inputs += ['reg_with_skull']
+ warp_outputs = ['warp_list', 'warped_image']
+ calculate_ants_warp = pe.Node(
+ interface=util.Function(input_names=warp_inputs,
+ output_names=warp_outputs,
+ function=hardcoded_reg,
+ imports=reg_imports),
+ name='calc_ants_warp', mem_gb=2.8,
+ mem_x=(2e-7, 'moving_brain', 'xyz'))
+ retry_calculate_ants_warp = increment_seed(pe.Node(
+ interface=util.Function(input_names=[*warp_inputs, 'previous_failure'],
+ output_names=warp_outputs,
+ function=retry_hardcoded_reg,
+ imports=[*reg_imports,
+ 'from CPAC.registration.utils '
+ 'import hardcoded_reg',
+ 'from CPAC.utils.docs import '
+ 'retry_docstring']),
+ name='retry_calc_ants_warp', mem_gb=2.8,
+ mem_x=(2e-7, 'moving_brain', 'xyz')))
calculate_ants_warp.interface.num_threads = num_threads
+ retry_calculate_ants_warp.interface.num_threads = num_threads
+ nodes, guardrails = calc_ants_warp_wf.nodes_and_guardrails(
+ calculate_ants_warp, retry_calculate_ants_warp,
+ registered='warped_image', add_clones=False)
select_forward_initial = pe.Node(util.Function(
input_names=['warp_list', 'selection'],
output_names=['selected_warp'],
function=seperate_warps_list), name='select_forward_initial')
-
select_forward_initial.inputs.selection = "Initial"
select_forward_rigid = pe.Node(util.Function(
input_names=['warp_list', 'selection'],
output_names=['selected_warp'],
function=seperate_warps_list), name='select_forward_rigid')
-
select_forward_rigid.inputs.selection = "Rigid"
select_forward_affine = pe.Node(util.Function(
input_names=['warp_list', 'selection'],
output_names=['selected_warp'],
function=seperate_warps_list), name='select_forward_affine')
-
select_forward_affine.inputs.selection = "Affine"
select_forward_warp = pe.Node(util.Function(
input_names=['warp_list', 'selection'],
output_names=['selected_warp'],
function=seperate_warps_list), name='select_forward_warp')
-
select_forward_warp.inputs.selection = "Warp"
select_inverse_warp = pe.Node(util.Function(
input_names=['warp_list', 'selection'],
output_names=['selected_warp'],
function=seperate_warps_list), name='select_inverse_warp')
-
select_inverse_warp.inputs.selection = "Inverse"
- calc_ants_warp_wf.connect(
- inputspec, 'moving_brain',
- calculate_ants_warp, 'moving_brain')
-
- calc_ants_warp_wf.connect(
- inputspec, 'reference_brain',
- calculate_ants_warp, 'reference_brain')
+ calc_ants_warp_wf.connect_retries(nodes, [
+ (inputspec, 'moving_brain', 'moving_brain'),
+ (inputspec, 'reference_brain', 'reference_brain')])
if reg_ants_skull == 1:
-
calculate_ants_warp.inputs.reg_with_skull = 1
-
- calc_ants_warp_wf.connect(
- inputspec, 'moving_skull',
- calculate_ants_warp, 'moving_skull')
-
- calc_ants_warp_wf.connect(
- inputspec, 'reference_skull',
- calculate_ants_warp, 'reference_skull')
-
+ retry_calculate_ants_warp.inputs.reg_with_skull = 1
+ calc_ants_warp_wf.connect_retries(nodes, [
+ (inputspec, 'moving_skull', 'moving_skull'),
+ (inputspec, 'reference_skull', 'reference_skull')])
+ calc_ants_warp_wf.connect_retries(guardrails, [
+ (inputspec, 'reference_skull', 'reference')])
else:
- calc_ants_warp_wf.connect(
- inputspec, 'moving_brain',
- calculate_ants_warp, 'moving_skull')
-
- calc_ants_warp_wf.connect(
- inputspec, 'reference_brain',
- calculate_ants_warp, 'reference_skull')
-
- calc_ants_warp_wf.connect(
- inputspec, 'fixed_image_mask',
- calculate_ants_warp, 'fixed_image_mask')
-
- calc_ants_warp_wf.connect(inputspec, 'reference_mask',
- calculate_ants_warp, 'reference_mask')
-
- calc_ants_warp_wf.connect(inputspec, 'moving_mask',
- calculate_ants_warp, 'moving_mask')
-
- calc_ants_warp_wf.connect(inputspec, 'ants_para',
- calculate_ants_warp, 'ants_para')
-
- calc_ants_warp_wf.connect(
- inputspec, 'interp',
- calculate_ants_warp, 'interp')
-
+ calc_ants_warp_wf.connect_retries(nodes, [
+ (inputspec, 'moving_brain', 'moving_skull'),
+ (inputspec, 'reference_brain', 'reference_skull')])
+ calc_ants_warp_wf.connect_retries(guardrails, [
+ (inputspec, 'reference_brain', 'reference')])
+
+ calc_ants_warp_wf.connect_retries(nodes, [
+ (inputspec, 'fixed_image_mask', 'fixed_image_mask'),
+ (inputspec, 'reference_mask', 'reference_mask'),
+ (inputspec, 'moving_mask', 'moving_mask'),
+ (inputspec, 'ants_para', 'ants_para'),
+ (inputspec, 'interp', 'interp')])
# inter-workflow connections
-
- calc_ants_warp_wf.connect(
- calculate_ants_warp, 'warp_list',
- select_forward_initial, 'warp_list')
-
- calc_ants_warp_wf.connect(
- calculate_ants_warp, 'warp_list',
- select_forward_rigid, 'warp_list')
-
- calc_ants_warp_wf.connect(
- calculate_ants_warp, 'warp_list',
- select_forward_affine, 'warp_list')
-
- calc_ants_warp_wf.connect(
- calculate_ants_warp, 'warp_list',
- select_forward_warp, 'warp_list')
-
- calc_ants_warp_wf.connect(
- calculate_ants_warp, 'warp_list',
- select_inverse_warp, 'warp_list')
-
+ # pylint: disable=no-value-for-parameter
+ select = guardrail_selection(calc_ants_warp_wf, *guardrails)
+ warp_list = guardrail_selection(calc_ants_warp_wf, *nodes, 'warp_list',
+ guardrails[0])
+ calc_ants_warp_wf.connect(guardrails[0], 'failed_qc',
+ retry_calculate_ants_warp, 'previous_failure')
+ calc_ants_warp_wf.connect(warp_list, 'out',
+ select_forward_initial, 'warp_list')
+ calc_ants_warp_wf.connect(warp_list, 'out',
+ select_forward_rigid, 'warp_list')
+ calc_ants_warp_wf.connect(warp_list, 'out',
+ select_forward_affine, 'warp_list')
+ calc_ants_warp_wf.connect(warp_list, 'out',
+ select_forward_warp, 'warp_list')
+ calc_ants_warp_wf.connect(warp_list, 'out',
+ select_inverse_warp, 'warp_list')
# connections to outputspec
-
- calc_ants_warp_wf.connect(
- select_forward_initial, 'selected_warp',
- outputspec, 'ants_initial_xfm')
-
- calc_ants_warp_wf.connect(
- select_forward_rigid, 'selected_warp',
- outputspec, 'ants_rigid_xfm')
-
- calc_ants_warp_wf.connect(
- select_forward_affine, 'selected_warp',
- outputspec, 'ants_affine_xfm')
-
- calc_ants_warp_wf.connect(
- select_forward_warp, 'selected_warp',
- outputspec, 'warp_field')
-
- calc_ants_warp_wf.connect(
- select_inverse_warp, 'selected_warp',
- outputspec, 'inverse_warp_field')
-
- calc_ants_warp_wf.connect(
- calculate_ants_warp, 'warped_image',
- outputspec, 'normalized_output_brain')
+ calc_ants_warp_wf.connect(select_forward_initial, 'selected_warp',
+ outputspec, 'ants_initial_xfm')
+ calc_ants_warp_wf.connect(select_forward_rigid, 'selected_warp',
+ outputspec, 'ants_rigid_xfm')
+ calc_ants_warp_wf.connect(select_forward_affine, 'selected_warp',
+ outputspec, 'ants_affine_xfm')
+ calc_ants_warp_wf.connect(select_forward_warp, 'selected_warp',
+ outputspec, 'warp_field')
+ calc_ants_warp_wf.connect(select_inverse_warp, 'selected_warp',
+ outputspec, 'inverse_warp_field')
+ calc_ants_warp_wf.connect(select, 'out',
+ outputspec, 'normalized_output_brain')
return calc_ants_warp_wf
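
# A minimal sketch of how the ANTs retry node is gated, assuming
# ``retry_hardcoded_reg`` re-runs ``hardcoded_reg`` only when
# ``previous_failure`` is True and otherwise passes the first attempt
# through; ``increment_seed`` keeps the retry from being a bitwise repeat
# of the first attempt:
from nipype.interfaces import utility as util
from CPAC.pipeline import nipype_pipeline_engine as pe
from CPAC.pipeline.random_state.seed import increment_seed
from CPAC.registration.guardrails import (guardrail_selection,
                                          retry_hardcoded_reg)
from CPAC.registration.utils import hardcoded_reg

outputs = ['warp_list', 'warped_image']
first = pe.Node(util.Function(function=hardcoded_reg,
                              output_names=outputs),
                name='calc_ants_warp')
retry = increment_seed(pe.Node(
    util.Function(function=retry_hardcoded_reg, output_names=outputs),
    name='retry_calc_ants_warp'))
wf = pe.Workflow(name='ants_retry_sketch')
nodes, guardrails = wf.nodes_and_guardrails(first, retry,
                                            registered='warped_image',
                                            add_clones=False)
# the first guardrail's verdict gates the retry
wf.connect(guardrails[0], 'failed_qc', retry, 'previous_failure')
# downstream consumers see only the passing attempt
select = guardrail_selection(wf, *guardrails)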
@@ -1357,10 +1286,8 @@ def FSL_registration_connector(wf_name, cfg, orig="T1w", opt=None,
# Input registration parameters
wf.connect(inputNode, 'interpolation',
flirt_reg_anat_mni, 'inputspec.interp')
-
wf.connect(inputNode, 'input_brain',
flirt_reg_anat_mni, 'inputspec.input_brain')
-
wf.connect(inputNode, 'reference_brain', flirt_reg_anat_mni,
'inputspec.reference_brain')
@@ -1369,7 +1296,6 @@ def FSL_registration_connector(wf_name, cfg, orig="T1w", opt=None,
wf.connect(inputNode, 'reference_brain',
write_lin_composite_xfm, 'reference')
-
wf.connect(flirt_reg_anat_mni, 'outputspec.linear_xfm',
write_lin_composite_xfm, 'premat')
@@ -1379,7 +1305,6 @@ def FSL_registration_connector(wf_name, cfg, orig="T1w", opt=None,
wf.connect(inputNode, 'reference_brain',
write_invlin_composite_xfm, 'reference')
-
wf.connect(flirt_reg_anat_mni, 'outputspec.invlinear_xfm',
write_invlin_composite_xfm, 'premat')
@@ -1394,10 +1319,11 @@ def FSL_registration_connector(wf_name, cfg, orig="T1w", opt=None,
write_lin_composite_xfm, 'out_file')
}
-
if opt == 'FSL':
- if cfg.registration_workflows['anatomical_registration']['registration']['FSL-FNIRT']['ref_resolution'] == \
- cfg.registration_workflows['anatomical_registration']['resolution_for_anat']:
+ if cfg.registration_workflows['anatomical_registration'][
+ 'registration']['FSL-FNIRT']['ref_resolution'] == \
+ cfg.registration_workflows['anatomical_registration'][
+ 'resolution_for_anat']:
fnirt_reg_anat_mni = create_fsl_fnirt_nonlinear_reg(
f'anat_mni_fnirt_register{symm}'
)
@@ -1408,29 +1334,25 @@ def FSL_registration_connector(wf_name, cfg, orig="T1w", opt=None,
wf.connect(inputNode, 'input_brain',
fnirt_reg_anat_mni, 'inputspec.input_brain')
-
wf.connect(inputNode, 'reference_brain',
fnirt_reg_anat_mni, 'inputspec.reference_brain')
-
wf.connect(inputNode, 'input_head',
fnirt_reg_anat_mni, 'inputspec.input_skull')
-
# NOTE: crossover from above opt block
wf.connect(flirt_reg_anat_mni, 'outputspec.linear_xfm',
fnirt_reg_anat_mni, 'inputspec.linear_aff')
-
wf.connect(inputNode, 'reference_head',
fnirt_reg_anat_mni, 'inputspec.reference_skull')
-
wf.connect(inputNode, 'reference_mask',
fnirt_reg_anat_mni, 'inputspec.ref_mask')
-
# assign the FSL FNIRT config file specified in pipeline config.yml
wf.connect(inputNode, 'fnirt_config',
fnirt_reg_anat_mni, 'inputspec.fnirt_config')
- if cfg.registration_workflows['anatomical_registration']['registration']['FSL-FNIRT']['ref_resolution'] == \
- cfg.registration_workflows['anatomical_registration']['resolution_for_anat']:
+ if cfg.registration_workflows['anatomical_registration'][
+ 'registration']['FSL-FNIRT']['ref_resolution'] == \
+ cfg.registration_workflows['anatomical_registration'][
+ 'resolution_for_anat']:
# NOTE: this is an UPDATE because of the opt block above
added_outputs = {
f'space-{sym}{tmpl}template_desc-brain_{orig}': (
@@ -1457,7 +1379,7 @@ def FSL_registration_connector(wf_name, cfg, orig="T1w", opt=None,
}
outputs.update(added_outputs)
- return (wf, outputs)
+ return wf, outputs
def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
@@ -1513,19 +1435,14 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
wf.connect(inputNode, 'input_brain',
ants_reg_anat_mni, 'inputspec.moving_brain')
-
wf.connect(inputNode, 'reference_brain',
ants_reg_anat_mni, 'inputspec.reference_brain')
-
wf.connect(inputNode, 'input_head',
ants_reg_anat_mni, 'inputspec.moving_skull')
-
wf.connect(inputNode, 'reference_head',
ants_reg_anat_mni, 'inputspec.reference_skull')
-
wf.connect(inputNode, 'input_mask',
ants_reg_anat_mni, 'inputspec.moving_mask')
-
wf.connect(inputNode, 'reference_mask',
ants_reg_anat_mni, 'inputspec.reference_mask')
@@ -1533,7 +1450,7 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
if orig == 'T1w':
if cfg.registration_workflows['anatomical_registration'][
- 'registration']['ANTs']['use_lesion_mask']:
+ 'registration']['ANTs']['use_lesion_mask']:
# Create lesion preproc node to apply afni Refit and Resample
lesion_preproc = create_lesion_preproc(
wf_name=f'lesion_preproc{symm}'
@@ -1574,10 +1491,8 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
wf.connect(ants_reg_anat_mni, 'outputspec.ants_affine_xfm',
collect_transforms, 'in1')
-
wf.connect(ants_reg_anat_mni, 'outputspec.ants_rigid_xfm',
collect_transforms, 'in2')
-
wf.connect(ants_reg_anat_mni, 'outputspec.ants_initial_xfm',
collect_transforms, 'in3')
@@ -1587,11 +1502,10 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
output_names=['checked_transform_list',
'list_length'],
function=check_transforms),
- name=f'check_transforms',
+ name='check_transforms',
mem_gb=6)
wf.connect(collect_transforms, 'out', check_transform, 'transform_list')
-
wf.connect(check_transform, 'checked_transform_list',
write_composite_linear_xfm, 'transforms')
@@ -1607,10 +1521,8 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
wf.connect(inputNode, 'reference_brain',
write_composite_invlinear_xfm, 'input_image')
-
wf.connect(inputNode, 'input_brain',
write_composite_invlinear_xfm, 'reference_image')
-
wf.connect(inputNode, 'interpolation',
write_composite_invlinear_xfm, 'interpolation')
@@ -1623,10 +1535,8 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
wf.connect(ants_reg_anat_mni, 'outputspec.ants_initial_xfm',
collect_inv_transforms, 'in1')
-
wf.connect(ants_reg_anat_mni, 'outputspec.ants_rigid_xfm',
collect_inv_transforms, 'in2')
-
wf.connect(ants_reg_anat_mni, 'outputspec.ants_affine_xfm',
collect_inv_transforms, 'in3')
@@ -1640,7 +1550,6 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
wf.connect(collect_inv_transforms, 'out',
check_invlinear_transform, 'transform_list')
-
wf.connect(check_invlinear_transform, 'checked_transform_list',
write_composite_invlinear_xfm, 'transforms')
@@ -1654,7 +1563,6 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
wf.connect(check_invlinear_transform, 'checked_transform_list',
inverse_transform_flags, 'transform_list')
-
wf.connect(inverse_transform_flags, 'inverse_transform_flags',
write_composite_invlinear_xfm, 'invert_transform_flags')
@@ -1668,10 +1576,8 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
f"from-{orig}_to-{sym}{tmpl}template_mode-image_xfm.nii.gz"
wf.connect(inputNode, 'input_brain', write_composite_xfm, 'input_image')
-
wf.connect(inputNode, 'reference_brain',
write_composite_xfm, 'reference_image')
-
wf.connect(inputNode, 'interpolation',
write_composite_xfm, 'interpolation')
@@ -1684,13 +1590,10 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
wf.connect(ants_reg_anat_mni, 'outputspec.warp_field',
collect_all_transforms, 'in1')
-
wf.connect(ants_reg_anat_mni, 'outputspec.ants_affine_xfm',
collect_all_transforms, 'in2')
-
wf.connect(ants_reg_anat_mni, 'outputspec.ants_rigid_xfm',
collect_all_transforms, 'in3')
-
wf.connect(ants_reg_anat_mni, 'outputspec.ants_initial_xfm',
collect_all_transforms, 'in4')
@@ -1704,7 +1607,6 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
wf.connect(collect_all_transforms, 'out',
check_all_transform, 'transform_list')
-
wf.connect(check_all_transform, 'checked_transform_list',
write_composite_xfm, 'transforms')
@@ -1720,10 +1622,8 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
wf.connect(inputNode, 'reference_brain',
write_composite_inv_xfm, 'input_image')
-
wf.connect(inputNode, 'input_brain',
write_composite_inv_xfm, 'reference_image')
-
wf.connect(inputNode, 'interpolation',
write_composite_inv_xfm, 'interpolation')
@@ -1736,13 +1636,10 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
wf.connect(ants_reg_anat_mni, 'outputspec.ants_initial_xfm',
collect_all_inv_transforms, 'in1')
-
wf.connect(ants_reg_anat_mni, 'outputspec.ants_rigid_xfm',
collect_all_inv_transforms, 'in2')
-
wf.connect(ants_reg_anat_mni, 'outputspec.ants_affine_xfm',
collect_all_inv_transforms, 'in3')
-
wf.connect(ants_reg_anat_mni, 'outputspec.inverse_warp_field',
collect_all_inv_transforms, 'in4')
@@ -1756,7 +1653,6 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
wf.connect(collect_all_inv_transforms, 'out',
check_all_inv_transform, 'transform_list')
-
wf.connect(check_all_inv_transform, 'checked_transform_list',
write_composite_inv_xfm, 'transforms')
@@ -1770,7 +1666,6 @@ def ANTs_registration_connector(wf_name, cfg, params, orig="T1w",
wf.connect(check_all_inv_transform, 'checked_transform_list',
inverse_all_transform_flags, 'transform_list')
-
wf.connect(inverse_all_transform_flags, 'inverse_transform_flags',
write_composite_inv_xfm, 'invert_transform_flags')
@@ -1819,9 +1714,7 @@ def bold_to_T1template_xfm_connector(wf_name, cfg, reg_tool, symmetric=False):
# convert the .mat from linear Func->Anat to
# ANTS format
wf.connect(inputNode, 'coreg_xfm', fsl_reg_2_itk, 'transform_file')
-
wf.connect(inputNode, 'input_brain', fsl_reg_2_itk, 'reference_file')
-
wf.connect(inputNode, 'mean_bold', fsl_reg_2_itk, 'source_file')
itk_imports = ['import os']
@@ -1833,12 +1726,12 @@ def bold_to_T1template_xfm_connector(wf_name, cfg, reg_tool, symmetric=False):
name='change_transform_type')
wf.connect(fsl_reg_2_itk, 'itk_transform',
- change_transform, 'input_affine_file')
+ change_transform, 'input_affine_file')
# combine ALL xfm's into one - makes it easier downstream
write_composite_xfm = pe.Node(
interface=ants.ApplyTransforms(),
- name=f'write_composite_xfm',
+ name='write_composite_xfm',
mem_gb=1.5)
write_composite_xfm.inputs.print_out_composite_warp_file = True
write_composite_xfm.inputs.output_image = \
@@ -1846,7 +1739,6 @@ def bold_to_T1template_xfm_connector(wf_name, cfg, reg_tool, symmetric=False):
wf.connect(inputNode, 'mean_bold',
write_composite_xfm, 'input_image')
-
wf.connect(inputNode, 'T1w-brain-template_funcreg',
write_composite_xfm, 'reference_image')
@@ -1857,20 +1749,18 @@ def bold_to_T1template_xfm_connector(wf_name, cfg, reg_tool, symmetric=False):
'registration']['ANTs']['interpolation']
collect_all_transforms = pe.Node(util.Merge(2),
- name=f'collect_all_transforms')
+ name='collect_all_transforms')
wf.connect(inputNode, 'T1w_to_template_xfm',
collect_all_transforms, 'in1')
-
wf.connect(change_transform, 'updated_affine_file',
collect_all_transforms, 'in2')
-
wf.connect(collect_all_transforms, 'out',
write_composite_xfm, 'transforms')
write_composite_inv_xfm = pe.Node(
interface=ants.ApplyTransforms(),
- name=f'write_composite_inv_xfm',
+ name='write_composite_inv_xfm',
mem_gb=1.5)
write_composite_inv_xfm.inputs.print_out_composite_warp_file = True
write_composite_inv_xfm.inputs.invert_transform_flags = [True, False]
@@ -1879,7 +1769,6 @@ def bold_to_T1template_xfm_connector(wf_name, cfg, reg_tool, symmetric=False):
wf.connect(inputNode, 'T1w-brain-template_funcreg',
write_composite_inv_xfm, 'input_image')
-
wf.connect(inputNode, 'mean_bold',
write_composite_inv_xfm, 'reference_image')
@@ -1894,10 +1783,8 @@ def bold_to_T1template_xfm_connector(wf_name, cfg, reg_tool, symmetric=False):
wf.connect(change_transform, 'updated_affine_file',
collect_inv_transforms, 'in1')
-
wf.connect(inputNode, 'template_to_T1w_xfm',
collect_inv_transforms, 'in2')
-
wf.connect(collect_inv_transforms, 'out',
write_composite_inv_xfm, 'transforms')
@@ -1915,9 +1802,7 @@ def bold_to_T1template_xfm_connector(wf_name, cfg, reg_tool, symmetric=False):
wf.connect(inputNode, 'T1w-brain-template_funcreg',
write_composite_xfm, 'reference')
-
wf.connect(inputNode, 'coreg_xfm', write_composite_xfm, 'premat')
-
wf.connect(inputNode, 'T1w_to_template_xfm',
write_composite_xfm, 'warp1')
@@ -1956,7 +1841,6 @@ def register_FSL_anat_to_template(wf, cfg, strat_pool, pipe_num, opt=None):
"from-template_to-longitudinal_mode-image_desc-linear_xfm",
"from-longitudinal_to-template_mode-image_xfm"]}
'''
-
fsl, outputs = FSL_registration_connector(f'register_{opt}_anat_to_'
f'template_{pipe_num}', cfg,
orig='T1w', opt=opt)
@@ -1976,8 +1860,10 @@ def register_FSL_anat_to_template(wf, cfg, strat_pool, pipe_num, opt=None):
node, out = connect
wf.connect(node, out, fsl, 'inputspec.input_brain')
- if cfg.registration_workflows['anatomical_registration']['registration']['FSL-FNIRT']['ref_resolution'] == \
- cfg.registration_workflows['anatomical_registration']['resolution_for_anat']:
+ if cfg.registration_workflows['anatomical_registration']['registration'][
+ 'FSL-FNIRT']['ref_resolution'] == \
+ cfg.registration_workflows['anatomical_registration'][
+ 'resolution_for_anat']:
node, out = strat_pool.get_data('T1w-brain-template')
wf.connect(node, out, fsl, 'inputspec.reference_brain')
@@ -2231,7 +2117,6 @@ def register_ANTs_anat_to_template(wf, cfg, strat_pool, pipe_num, opt=None):
"space-longitudinal_desc-reorient_T1w"])
wf.connect(node, out, ants_rc, 'inputspec.input_head')
-
t1w_template = strat_pool.node_data('T1w-template')
wf.connect(t1w_template.node, t1w_template.out,
ants_rc, 'inputspec.reference_head')
@@ -2404,18 +2289,21 @@ def register_ANTs_EPI_to_template(wf, cfg, strat_pool, pipe_num, opt=None):
return (wf, outputs)
-def overwrite_transform_anat_to_template(wf, cfg, strat_pool, pipe_num, opt=None):
+def overwrite_transform_anat_to_template(wf, cfg, strat_pool, pipe_num,
+ opt=None):
'''
{"name": "overwrite_transform_anat_to_template",
"config": "None",
"switch": [["registration_workflows", "anatomical_registration", "run"],
- ["registration_workflows", "anatomical_registration", "overwrite_transform", "run"]],
+ ["registration_workflows", "anatomical_registration",
+ "overwrite_transform", "run"]],
"option_key": ["registration_workflows", "anatomical_registration",
"overwrite_transform", "using"],
"option_val": "FSL",
"inputs": [("desc-restore-brain_T1w",
["desc-brain_T1w", "space-longitudinal_desc-brain_T1w"],
- ["desc-restore_T1w", "desc-preproc_T1w", "desc-reorient_T1w", "T1w"],
+ ["desc-restore_T1w", "desc-preproc_T1w", "desc-reorient_T1w",
+ "T1w"],
["desc-preproc_T1w", "desc-reorient_T1w", "T1w"],
"space-T1w_desc-brain_mask",
"T1w-template",
@@ -2446,17 +2334,23 @@ def overwrite_transform_anat_to_template(wf, cfg, strat_pool, pipe_num, opt=None
# -t ${WD}/xfms/T1w_to_MNI_1Rigid.mat \
# -t ${WD}/xfms/T1w_to_MNI_0DerivedInitialMovingTranslation.mat \
# -o [${WD}/xfms/ANTs_CombinedWarp.nii.gz,1]
- ants_apply_warp_t1_to_template = pe.Node(interface=ants.ApplyTransforms(),
- name=f'ANTS-ABCD_T1_to_template_{pipe_num}')
+ ants_apply_warp_t1_to_template = pe.Node(
+ interface=ants.ApplyTransforms(),
+ name=f'ANTS-ABCD_T1_to_template_{pipe_num}')
ants_apply_warp_t1_to_template.inputs.dimension = 3
- ants_apply_warp_t1_to_template.inputs.print_out_composite_warp_file = True
- ants_apply_warp_t1_to_template.inputs.output_image = 'ANTs_CombinedWarp.nii.gz'
-
- node, out = strat_pool.get_data(['desc-restore_T1w', 'desc-preproc_T1w', 'desc-reorient_T1w', 'T1w'])
+ ants_apply_warp_t1_to_template.inputs.print_out_composite_warp_file = \
+ True
+ ants_apply_warp_t1_to_template.inputs.output_image = \
+ 'ANTs_CombinedWarp.nii.gz'
+
+ node, out = strat_pool.get_data(['desc-restore_T1w',
+ 'desc-preproc_T1w',
+ 'desc-reorient_T1w', 'T1w'])
wf.connect(node, out, ants_apply_warp_t1_to_template, 'input_image')
node, out = strat_pool.get_data('T1w-template')
- wf.connect(node, out, ants_apply_warp_t1_to_template, 'reference_image')
+ wf.connect(node, out,
+ ants_apply_warp_t1_to_template, 'reference_image')
node, out = strat_pool.get_data('from-T1w_to-template_mode-image_xfm')
wf.connect(node, out, ants_apply_warp_t1_to_template, 'transforms')
@@ -2469,118 +2363,119 @@ def overwrite_transform_anat_to_template(wf, cfg, strat_pool, pipe_num, opt=None
# -o [${WD}/xfms/ANTs_CombinedInvWarp.nii.gz,1]
# T1wImage is ACPC aligned head
- ants_apply_warp_template_to_t1 = pe.Node(interface=ants.ApplyTransforms(),
- name=f'ANTS-ABCD_template_to_T1_{pipe_num}')
+ ants_apply_warp_template_to_t1 = pe.Node(
+ interface=ants.ApplyTransforms(),
+ name=f'ANTS-ABCD_template_to_T1_{pipe_num}')
ants_apply_warp_template_to_t1.inputs.dimension = 3
ants_apply_warp_template_to_t1.inputs.print_out_composite_warp_file = True
ants_apply_warp_template_to_t1.inputs.output_image = 'ANTs_CombinedInvWarp.nii.gz'
- node, out = strat_pool.get_data(['desc-preproc_T1w', 'desc-reorient_T1w', 'T1w'])
+ node, out = strat_pool.get_data(['desc-preproc_T1w',
+ 'desc-reorient_T1w', 'T1w'])
wf.connect(node, out, ants_apply_warp_template_to_t1, 'input_image')
node, out = strat_pool.get_data('T1w-template')
- wf.connect(node, out, ants_apply_warp_template_to_t1, 'reference_image')
+ wf.connect(node, out,
+ ants_apply_warp_template_to_t1, 'reference_image')
node, out = strat_pool.get_data('from-template_to-T1w_mode-image_xfm')
wf.connect(node, out, ants_apply_warp_template_to_t1, 'transforms')
# c4d -mcs ${WD}/xfms/ANTs_CombinedWarp.nii.gz -oo ${WD}/xfms/e1.nii.gz ${WD}/xfms/e2.nii.gz ${WD}/xfms/e3.nii.gz
# -mcs: -multicomponent-split, -oo: -output-multiple
- split_combined_warp = pe.Node(util.Function(input_names=['input',
- 'output_name'],
- output_names=['output1',
- 'output2',
- 'output3'],
- function=run_c4d),
- name=f'split_combined_warp_{pipe_num}')
+ split_combined_warp = pe.Node(util.Function(
+ input_names=['input', 'output_name'],
+ output_names=['output1', 'output2', 'output3'],
+ function=run_c4d), name=f'split_combined_warp_{pipe_num}')
split_combined_warp.inputs.output_name = 'e'
wf.connect(ants_apply_warp_t1_to_template, 'output_image',
- split_combined_warp, 'input')
+ split_combined_warp, 'input')
# c4d -mcs ${WD}/xfms/ANTs_CombinedInvWarp.nii.gz -oo ${WD}/xfms/e1inv.nii.gz ${WD}/xfms/e2inv.nii.gz ${WD}/xfms/e3inv.nii.gz
- split_combined_inv_warp = pe.Node(util.Function(input_names=['input',
- 'output_name'],
- output_names=['output1',
- 'output2',
- 'output3'],
- function=run_c4d),
- name=f'split_combined_inv_warp_{pipe_num}')
+ split_combined_inv_warp = pe.Node(util.Function(
+ input_names=['input', 'output_name'],
+ output_names=['output1', 'output2', 'output3'],
+ function=run_c4d), name=f'split_combined_inv_warp_{pipe_num}')
split_combined_inv_warp.inputs.output_name = 'einv'
wf.connect(ants_apply_warp_template_to_t1, 'output_image',
- split_combined_inv_warp, 'input')
+ split_combined_inv_warp, 'input')
# fslmaths ${WD}/xfms/e2.nii.gz -mul -1 ${WD}/xfms/e-2.nii.gz
change_e2_sign = pe.Node(interface=fsl.maths.MathsCommand(),
- name=f'change_e2_sign_{pipe_num}')
+ name=f'change_e2_sign_{pipe_num}')
change_e2_sign.inputs.args = '-mul -1'
- wf.connect(split_combined_warp, 'output2',
- change_e2_sign, 'in_file')
+ wf.connect(split_combined_warp, 'output2', change_e2_sign, 'in_file')
# fslmaths ${WD}/xfms/e2inv.nii.gz -mul -1 ${WD}/xfms/e-2inv.nii.gz
change_e2inv_sign = pe.Node(interface=fsl.maths.MathsCommand(),
- name=f'change_e2inv_sign_{pipe_num}')
+ name=f'change_e2inv_sign_{pipe_num}')
change_e2inv_sign.inputs.args = '-mul -1'
wf.connect(split_combined_inv_warp, 'output2',
- change_e2inv_sign, 'in_file')
+ change_e2inv_sign, 'in_file')
# fslmerge -t ${OutputTransform} ${WD}/xfms/e1.nii.gz ${WD}/xfms/e-2.nii.gz ${WD}/xfms/e3.nii.gz
- merge_xfms_to_list = pe.Node(util.Merge(3),
- name=f'merge_t1_to_template_xfms_to_list_{pipe_num}')
+ merge_xfms_to_list = pe.Node(
+ util.Merge(3),
+ name=f'merge_t1_to_template_xfms_to_list_{pipe_num}')
wf.connect(split_combined_warp, 'output1',
- merge_xfms_to_list, 'in1')
+ merge_xfms_to_list, 'in1')
wf.connect(change_e2_sign, 'out_file',
- merge_xfms_to_list, 'in2')
+ merge_xfms_to_list, 'in2')
wf.connect(split_combined_warp, 'output3',
- merge_xfms_to_list, 'in3')
+ merge_xfms_to_list, 'in3')
merge_xfms = pe.Node(interface=fslMerge(),
name=f'merge_t1_to_template_xfms_{pipe_num}')
merge_xfms.inputs.dimension = 't'
wf.connect(merge_xfms_to_list, 'out',
- merge_xfms, 'in_files')
+ merge_xfms, 'in_files')
# fslmerge -t ${OutputInvTransform} ${WD}/xfms/e1inv.nii.gz ${WD}/xfms/e-2inv.nii.gz ${WD}/xfms/e3inv.nii.gz
- merge_inv_xfms_to_list = pe.Node(util.Merge(3),
- name=f'merge_template_to_t1_xfms_to_list_{pipe_num}')
+ merge_inv_xfms_to_list = pe.Node(
+ util.Merge(3),
+ name=f'merge_template_to_t1_xfms_to_list_{pipe_num}')
wf.connect(split_combined_inv_warp, 'output1',
- merge_inv_xfms_to_list, 'in1')
+ merge_inv_xfms_to_list, 'in1')
wf.connect(change_e2inv_sign, 'out_file',
- merge_inv_xfms_to_list, 'in2')
+ merge_inv_xfms_to_list, 'in2')
wf.connect(split_combined_inv_warp, 'output3',
- merge_inv_xfms_to_list, 'in3')
+ merge_inv_xfms_to_list, 'in3')
merge_inv_xfms = pe.Node(interface=fslMerge(),
name=f'merge_template_to_t1_xfms_{pipe_num}')
merge_inv_xfms.inputs.dimension = 't'
wf.connect(merge_inv_xfms_to_list, 'out',
- merge_inv_xfms, 'in_files')
+ merge_inv_xfms, 'in_files')
# applywarp --rel --interp=spline -i ${T1wRestore} -r ${Reference} -w ${OutputTransform} -o ${OutputT1wImageRestore}
fsl_apply_warp_t1_to_template = pe.Node(interface=fsl.ApplyWarp(),
- name=f'FSL-ABCD_T1_to_template_{pipe_num}')
+ name='FSL-ABCD_T1_to_'
+ f'template_{pipe_num}')
fsl_apply_warp_t1_to_template.inputs.relwarp = True
fsl_apply_warp_t1_to_template.inputs.interp = 'spline'
- node, out = strat_pool.get_data(['desc-restore_T1w', 'desc-preproc_T1w', 'desc-reorient_T1w', 'T1w'])
+ node, out = strat_pool.get_data(['desc-restore_T1w',
+ 'desc-preproc_T1w',
+ 'desc-reorient_T1w', 'T1w'])
wf.connect(node, out, fsl_apply_warp_t1_to_template, 'in_file')
node, out = strat_pool.get_data('T1w-template')
wf.connect(node, out, fsl_apply_warp_t1_to_template, 'ref_file')
-
wf.connect(merge_xfms, 'merged_file',
- fsl_apply_warp_t1_to_template, 'field_file')
+ fsl_apply_warp_t1_to_template, 'field_file')
# applywarp --rel --interp=nn -i ${T1wRestoreBrain} -r ${Reference} -w ${OutputTransform} -o ${OutputT1wImageRestoreBrain}
- fsl_apply_warp_t1_brain_to_template = pe.Node(interface=fsl.ApplyWarp(),
- name=f'FSL-ABCD_T1_brain_to_template_{pipe_num}')
+ fsl_apply_warp_t1_brain_to_template = pe.Node(
+ interface=fsl.ApplyWarp(),
+ name=f'FSL-ABCD_T1_brain_to_template_{pipe_num}')
fsl_apply_warp_t1_brain_to_template.inputs.relwarp = True
fsl_apply_warp_t1_brain_to_template.inputs.interp = 'nn'
@@ -2592,38 +2487,42 @@ def overwrite_transform_anat_to_template(wf, cfg, strat_pool, pipe_num, opt=None
wf.connect(node, out, fsl_apply_warp_t1_brain_to_template, 'ref_file')
wf.connect(merge_xfms, 'merged_file',
- fsl_apply_warp_t1_brain_to_template, 'field_file')
+ fsl_apply_warp_t1_brain_to_template, 'field_file')
- fsl_apply_warp_t1_brain_mask_to_template = pe.Node(interface=fsl.ApplyWarp(),
- name=f'FSL-ABCD_T1_brain_mask_to_template_{pipe_num}')
+ fsl_apply_warp_t1_brain_mask_to_template = pe.Node(
+ interface=fsl.ApplyWarp(),
+ name=f'FSL-ABCD_T1_brain_mask_to_template_{pipe_num}')
fsl_apply_warp_t1_brain_mask_to_template.inputs.relwarp = True
fsl_apply_warp_t1_brain_mask_to_template.inputs.interp = 'nn'
node, out = strat_pool.get_data('space-T1w_desc-brain_mask')
- wf.connect(node, out, fsl_apply_warp_t1_brain_mask_to_template, 'in_file')
+ wf.connect(node, out,
+ fsl_apply_warp_t1_brain_mask_to_template, 'in_file')
node, out = strat_pool.get_data('T1w-template')
- wf.connect(node, out, fsl_apply_warp_t1_brain_mask_to_template, 'ref_file')
-
+ wf.connect(node, out,
+ fsl_apply_warp_t1_brain_mask_to_template, 'ref_file')
wf.connect(merge_xfms, 'merged_file',
- fsl_apply_warp_t1_brain_mask_to_template, 'field_file')
+ fsl_apply_warp_t1_brain_mask_to_template, 'field_file')
# fslmaths ${OutputT1wImageRestore} -mas ${OutputT1wImageRestoreBrain} ${OutputT1wImageRestoreBrain}
apply_mask = pe.Node(interface=fsl.maths.ApplyMask(),
- name=f'get_t1_brain_{pipe_num}')
+ name=f'get_t1_brain_{pipe_num}')
wf.connect(fsl_apply_warp_t1_to_template, 'out_file',
- apply_mask, 'in_file')
-
+ apply_mask, 'in_file')
wf.connect(fsl_apply_warp_t1_brain_to_template, 'out_file',
- apply_mask, 'mask_file')
+ apply_mask, 'mask_file')
outputs = {
'space-template_desc-brain_T1w': (apply_mask, 'out_file'),
- 'space-template_desc-head_T1w': (fsl_apply_warp_t1_to_template, 'out_file'),
- 'space-template_desc-T1w_mask': (fsl_apply_warp_t1_brain_mask_to_template, 'out_file'),
+ 'space-template_desc-head_T1w': (fsl_apply_warp_t1_to_template,
+ 'out_file'),
+ 'space-template_desc-T1w_mask': (
+ fsl_apply_warp_t1_brain_mask_to_template, 'out_file'),
'from-T1w_to-template_mode-image_xfm': (merge_xfms, 'merged_file'),
- 'from-template_to-T1w_mode-image_xfm': (merge_inv_xfms, 'merged_file')
+ 'from-template_to-T1w_mode-image_xfm': (merge_inv_xfms,
+ 'merged_file')
}
return (wf, outputs)
@@ -2648,13 +2547,15 @@ def coregistration_prep_vol(wf, cfg, strat_pool, pipe_num, opt=None):
get_func_volume.inputs.set(
expr='a',
- single_idx=cfg.registration_workflows['functional_registration']['coregistration'][
- 'func_input_prep']['Selected Functional Volume']['func_reg_input_volume'],
+ single_idx=cfg.registration_workflows['functional_registration'][
+ 'coregistration'][
+ 'func_input_prep']['Selected Functional Volume'][
+ 'func_reg_input_volume'],
outputtype='NIFTI_GZ'
)
if not cfg.registration_workflows['functional_registration'][
- 'coregistration']['func_input_prep']['reg_with_skull']:
+ 'coregistration']['func_input_prep']['reg_with_skull']:
node, out = strat_pool.get_data("desc-brain_bold")
else:
# TODO check which file is functional_skull_leaf
@@ -2665,9 +2566,7 @@ def coregistration_prep_vol(wf, cfg, strat_pool, pipe_num, opt=None):
coreg_input = (get_func_volume, 'out_file')
- outputs = {
- 'desc-reginput_bold': coreg_input
- }
+ outputs = {'desc-reginput_bold': coreg_input}
return (wf, outputs)
@@ -2691,10 +2590,9 @@ def coregistration_prep_mean(wf, cfg, strat_pool, pipe_num, opt=None):
'coregistration']['func_input_prep']['Mean Functional'][
'n4_correct_func']:
n4_correct_func = pe.Node(
-            interface=
-            ants.N4BiasFieldCorrection(dimension=3,
-                                       copy_header=True,
-                                       bspline_fitting_distance=200),
-            shrink_factor=2,
+            # keep shrink_factor on the interface; passed to pe.Node it
+            # would not be forwarded to the interface
+            interface=ants.N4BiasFieldCorrection(dimension=3,
+                                                 copy_header=True,
+                                                 bspline_fitting_distance=200,
+                                                 shrink_factor=2),
name=f'func_mean_n4_corrected_{pipe_num}')
n4_correct_func.inputs.args = '-r True'
@@ -2704,9 +2602,7 @@ def coregistration_prep_mean(wf, cfg, strat_pool, pipe_num, opt=None):
coreg_input = (n4_correct_func, 'output_image')
- outputs = {
- 'desc-reginput_bold': coreg_input
- }
+ outputs = {'desc-reginput_bold': coreg_input}
return (wf, outputs)
@@ -2725,9 +2621,7 @@ def coregistration_prep_fmriprep(wf, cfg, strat_pool, pipe_num, opt=None):
coreg_input = strat_pool.get_data("desc-ref_bold")
- outputs = {
- 'desc-reginput_bold': coreg_input
- }
+ outputs = {'desc-reginput_bold': coreg_input}
return (wf, outputs)
@@ -2738,13 +2632,15 @@ def coregistration(wf, cfg, strat_pool, pipe_num, opt=None):
"config": ["registration_workflows", "functional_registration",
"coregistration"],
"switch": ["run"],
- "option_key": "None",
- "option_val": "None",
+ "option_key": ["boundary_based_registration", "run"],
+ "option_val": [True, False, "fallback"],
"inputs": [("desc-reginput_bold",
"desc-motion_bold",
"space-bold_label-WM_mask",
"despiked-fieldmap",
- "fieldmap-mask"),
+ "fieldmap-mask",
+ "effectiveEchoSpacing",
+ "diffphase-pedir"),
("desc-brain_T1w",
"desc-restore-brain_T1w",
"desc-preproc_T2w",
@@ -2752,29 +2648,24 @@ def coregistration(wf, cfg, strat_pool, pipe_num, opt=None):
"T2w",
["label-WM_probseg", "label-WM_mask"],
["label-WM_pveseg", "label-WM_mask"],
- "T1w"),
- "diffphase-dwell",
- "diffphase-pedir"],
+ "T1w")],
"outputs": ["space-T1w_desc-mean_bold",
"from-bold_to-T1w_mode-image_desc-linear_xfm",
"from-bold_to-T1w_mode-image_desc-linear_warp"]}
'''
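
For orientation: as the node-block convention reads here, `opt` arrives as one element of the configured `boundary_based_registration.run` list, so a single pipeline config can fork this block into BBR-on (`True`), BBR-off (`False`), and `'fallback'` variants. The `bbreg_status` expression below maps each mode to a sub-workflow label; an equivalent spelled out as a function (hypothetical name):

    def bbreg_label(opt):
        """True -> 'On'; False -> 'Off'; 'fallback' -> 'Fallback'."""
        if isinstance(opt, bool):
            return 'On' if opt else 'Off'
        return opt.title()
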
-
- diff_complete = False
- if strat_pool.check_rpool("despiked-fieldmap") and \
- strat_pool.check_rpool("fieldmap-mask"):
- diff_complete = True
-
+ diff_complete = (strat_pool.check_rpool("despiked-fieldmap") and
+ strat_pool.check_rpool("fieldmap-mask"))
+    bbreg_status = ("On" if opt is True else
+                    "Off" if isinstance(opt, bool) else opt.title())
+ subwfname = f'func_to_anat_FLIRT_bbreg{bbreg_status}_{pipe_num}'
if strat_pool.check_rpool('T2w') and cfg.anatomical_preproc['run_t2']:
# monkey data
- func_to_anat = create_register_func_to_anat_use_T2(cfg,
- f'func_to_anat_FLIRT_'
- f'{pipe_num}')
+ func_to_anat = create_register_func_to_anat_use_T2(subwfname)
# https://github.com/DCAN-Labs/dcan-macaque-pipeline/blob/master/fMRIVolume/GenericfMRIVolumeProcessingPipeline.sh#L177
# fslmaths "$fMRIFolder"/"$NameOffMRI"_mc -Tmean "$fMRIFolder"/"$ScoutName"_gdc
func_mc_mean = pe.Node(interface=afni_utils.TStat(),
- name=f'func_motion_corrected_mean_{pipe_num}')
+ name=f'func_motion_corrected_mean_{pipe_num}')
func_mc_mean.inputs.options = '-mean'
func_mc_mean.inputs.outputtype = 'NIFTI_GZ'
@@ -2797,29 +2688,27 @@ def coregistration(wf, cfg, strat_pool, pipe_num, opt=None):
# if field map-based distortion correction is on, but BBR is off,
# send in the distortion correction files here
func_to_anat = create_register_func_to_anat(cfg, diff_complete,
- f'func_to_anat_FLIRT_'
- f'{pipe_num}')
+ subwfname)
func_to_anat.inputs.inputspec.dof = cfg.registration_workflows[
- 'functional_registration']['coregistration']['dof']
+ 'functional_registration']['coregistration']['dof']
func_to_anat.inputs.inputspec.interp = cfg.registration_workflows[
- 'functional_registration']['coregistration']['interpolation']
+ 'functional_registration']['coregistration']['interpolation']
node, out = strat_pool.get_data('desc-reginput_bold')
wf.connect(node, out, func_to_anat, 'inputspec.func')
if cfg.registration_workflows['functional_registration'][
- 'coregistration']['reference'] == 'brain':
+ 'coregistration']['reference'] == 'brain':
node, out = strat_pool.get_data('desc-brain_T1w')
elif cfg.registration_workflows['functional_registration'][
- 'coregistration']['reference'] == 'restore-brain':
+ 'coregistration']['reference'] == 'restore-brain':
node, out = strat_pool.get_data('desc-restore-brain_T1w')
wf.connect(node, out, func_to_anat, 'inputspec.anat')
-
if diff_complete:
- node, out = strat_pool.get_data('diffphase-dwell')
+ node, out = strat_pool.get_data('effectiveEchoSpacing')
wf.connect(node, out, func_to_anat, 'echospacing_input.echospacing')
node, out = strat_pool.get_data('diffphase-pedir')
@@ -2848,22 +2737,22 @@ def coregistration(wf, cfg, strat_pool, pipe_num, opt=None):
(func_to_anat, 'outputspec.func_to_anat_linear_xfm_nobbreg')
}
- if True in cfg.registration_workflows['functional_registration'][
- 'coregistration']["boundary_based_registration"]["run"]:
-
- func_to_anat_bbreg = create_bbregister_func_to_anat(diff_complete,
- f'func_to_anat_'
- f'bbreg_'
- f'{pipe_num}')
+ if opt in [True, 'fallback']:
+ fallback = opt == 'fallback'
+ func_to_anat_bbreg = create_bbregister_func_to_anat(
+ diff_complete, f'func_to_anat_bbreg{bbreg_status}_{pipe_num}',
+ opt is True)
func_to_anat_bbreg.inputs.inputspec.bbr_schedule = \
cfg.registration_workflows['functional_registration'][
'coregistration']['boundary_based_registration'][
'bbr_schedule']
-
func_to_anat_bbreg.inputs.inputspec.bbr_wm_mask_args = \
cfg.registration_workflows['functional_registration'][
'coregistration']['boundary_based_registration'][
'bbr_wm_mask_args']
+ if fallback:
+ bbreg_guardrail = registration_guardrail_node(
+ f'bbreg{bbreg_status}_guardrail_{pipe_num}', 1)
node, out = strat_pool.get_data('desc-reginput_bold')
wf.connect(node, out, func_to_anat_bbreg, 'inputspec.func')
@@ -2873,34 +2762,38 @@ def coregistration(wf, cfg, strat_pool, pipe_num, opt=None):
'reference'] == 'whole-head':
node, out = strat_pool.get_data('T1w')
wf.connect(node, out, func_to_anat_bbreg, 'inputspec.anat')
+ if fallback:
+ wf.connect(node, out, bbreg_guardrail, 'reference')
elif cfg.registration_workflows['functional_registration'][
'coregistration']['boundary_based_registration'][
'reference'] == 'brain':
node, out = strat_pool.get_data('desc-brain_T1w')
wf.connect(node, out, func_to_anat_bbreg, 'inputspec.anat')
+ if fallback:
+ wf.connect(node, out, bbreg_guardrail, 'reference')
wf.connect(func_to_anat, 'outputspec.func_to_anat_linear_xfm_nobbreg',
func_to_anat_bbreg, 'inputspec.linear_reg_matrix')
if strat_pool.check_rpool('space-bold_label-WM_mask'):
node, out = strat_pool.get_data(["space-bold_label-WM_mask"])
- wf.connect(node, out,
- func_to_anat_bbreg, 'inputspec.anat_wm_segmentation')
else:
- if cfg.registration_workflows['functional_registration'][
- 'coregistration']['boundary_based_registration']['bbr_wm_map'] == 'probability_map':
+ if cfg['registration_workflows', 'functional_registration',
+ 'coregistration', 'boundary_based_registration',
+ 'bbr_wm_map'] == 'probability_map':
node, out = strat_pool.get_data(["label-WM_probseg",
"label-WM_mask"])
- elif cfg.registration_workflows['functional_registration'][
- 'coregistration']['boundary_based_registration']['bbr_wm_map'] == 'partial_volume_map':
+ elif cfg['registration_workflows', 'functional_registration',
+ 'coregistration', 'boundary_based_registration',
+ 'bbr_wm_map'] == 'partial_volume_map':
node, out = strat_pool.get_data(["label-WM_pveseg",
"label-WM_mask"])
- wf.connect(node, out,
- func_to_anat_bbreg, 'inputspec.anat_wm_segmentation')
+ wf.connect(node, out,
+ func_to_anat_bbreg, 'inputspec.anat_wm_segmentation')
if diff_complete:
- node, out = strat_pool.get_data('diffphase-dwell')
+ node, out = strat_pool.get_data('effectiveEchoSpacing')
wf.connect(node, out,
func_to_anat_bbreg, 'echospacing_input.echospacing')
@@ -2913,15 +2806,45 @@ def coregistration(wf, cfg, strat_pool, pipe_num, opt=None):
node, out = strat_pool.get_data("fieldmap-mask")
wf.connect(node, out,
func_to_anat_bbreg, 'inputspec.fieldmapmask')
-
- outputs = {
- 'space-T1w_desc-mean_bold':
- (func_to_anat_bbreg, 'outputspec.anat_func'),
- 'from-bold_to-T1w_mode-image_desc-linear_xfm':
- (func_to_anat_bbreg, 'outputspec.func_to_anat_linear_xfm')
- }
-
- return (wf, outputs)
+        if fallback:
+            # Fall back to no-BBReg
+            mean_bolds = pe.Node(util.Merge(2), run_without_submitting=True,
+                                 name=f'bbreg_mean_bold_choices_{pipe_num}')
+            xfms = pe.Node(util.Merge(2), run_without_submitting=True,
+                           name=f'bbreg_xfm_choices_{pipe_num}')
+            fallback_mean_bolds = pe.Node(util.Select(),
+                                          run_without_submitting=True,
+                                          name='bbreg_choose_mean_bold_'
+                                               f'{pipe_num}')
+            fallback_xfms = pe.Node(util.Select(),
+                                    run_without_submitting=True,
+                                    name=f'bbreg_choose_xfm_{pipe_num}')
+            wf.connect([
+                (func_to_anat_bbreg, bbreg_guardrail, [
+                    ('outputspec.anat_func', 'registered')]),
+                (bbreg_guardrail, mean_bolds, [('registered', 'in1')]),
+                (func_to_anat, mean_bolds, [('outputspec.anat_func_nobbreg',
+                                             'in2')]),
+                (func_to_anat_bbreg, xfms, [
+                    ('outputspec.func_to_anat_linear_xfm', 'in1')]),
+                (func_to_anat, xfms, [
+                    ('outputspec.func_to_anat_linear_xfm_nobbreg', 'in2')]),
+                (mean_bolds, fallback_mean_bolds, [('out', 'inlist')]),
+                (xfms, fallback_xfms, [('out', 'inlist')]),
+                (bbreg_guardrail, fallback_mean_bolds, [
+                    ('failed_qc', 'index')]),
+                (bbreg_guardrail, fallback_xfms, [('failed_qc', 'index')])])
+            outputs = {
+                'space-T1w_desc-mean_bold': (fallback_mean_bolds, 'out'),
+                'from-bold_to-T1w_mode-image_desc-linear_xfm': (
+                    fallback_xfms, 'out')}
+        else:
+            outputs = {
+                'space-T1w_desc-mean_bold': (func_to_anat_bbreg,
+                                             'outputspec.anat_func'),
+                'from-bold_to-T1w_mode-image_desc-linear_xfm': (
+                    func_to_anat_bbreg,
+                    'outputspec.func_to_anat_linear_xfm')}
+ return wf, outputs
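
The fallback wiring above is a small nipype pattern: merge the BBR and FLIRT-only candidates into a two-item list, then use the guardrail's `failed_qc` output as the index of a `Select` node, so index 0 keeps the BBR result and index 1 falls back. A self-contained sketch of just that pattern, with placeholder identity nodes standing in for the real sub-workflows:

    from nipype.interfaces import utility as util
    from nipype.pipeline import engine as pe

    wf = pe.Workflow(name='fallback_demo')
    bbr = pe.Node(util.IdentityInterface(fields=['out']), name='bbr')
    flirt = pe.Node(util.IdentityInterface(fields=['out']), name='flirt')
    guardrail = pe.Node(util.IdentityInterface(fields=['failed_qc']),
                        name='guardrail')
    choices = pe.Node(util.Merge(2), name='choices')
    choose = pe.Node(util.Select(), name='choose')
    wf.connect([
        (bbr, choices, [('out', 'in1')]),    # index 0: BBR result
        (flirt, choices, [('out', 'in2')]),  # index 1: FLIRT-only fallback
        (choices, choose, [('out', 'inlist')]),
        (guardrail, choose, [('failed_qc', 'index')]),  # 0 keeps BBR
    ])
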
def create_func_to_T1template_xfm(wf, cfg, strat_pool, pipe_num, opt=None):
@@ -3098,7 +3021,8 @@ def warp_timeseries_to_T1template(wf, cfg, strat_pool, pipe_num, opt=None):
return (wf, outputs)
-def warp_timeseries_to_T1template_abcd(wf, cfg, strat_pool, pipe_num, opt=None):
+def warp_timeseries_to_T1template_abcd(wf, cfg, strat_pool, pipe_num,
+                                       opt=None):
"""
{"name": "transform_timeseries_to_T1template_abcd",
"config": ["registration_workflows", "functional_registration",
@@ -3134,10 +3058,10 @@ def warp_timeseries_to_T1template_abcd(wf, cfg, strat_pool, pipe_num, opt=None):
convert_func_to_anat_linear_warp.inputs.out_relwarp = True
convert_func_to_anat_linear_warp.inputs.relwarp = True
-
+
node, out = strat_pool.get_data('desc-preproc_T1w')
wf.connect(node, out, convert_func_to_anat_linear_warp, 'reference')
-
+
if strat_pool.check_rpool('blip-warp'):
node, out = strat_pool.get_data('from-bold_to-T1w_mode-image_desc-linear_xfm')
wf.connect(node, out, convert_func_to_anat_linear_warp, 'postmat')
@@ -3408,8 +3332,7 @@ def warp_timeseries_to_T1template_dcan_nhp(wf, cfg, strat_pool, pipe_num, opt=No
# https://github.com/DCAN-Labs/dcan-macaque-pipeline/blob/master/fMRIVolume/scripts/OneStepResampling.sh#L131
# ${FSLDIR}/bin/flirt -interp spline -in ${T1wImage} -ref ${T1wImage} -applyisoxfm $FinalfMRIResolution -out ${WD}/${T1wImageFile}.${FinalfMRIResolution}
anat_resample = pe.Node(interface=fsl.FLIRT(),
- name=f'anat_resample_func_res_{pipe_num}'
- )
+ name=f'anat_resample_func_res_{pipe_num}')
anat_resample.inputs.apply_isoxfm = float(cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_preproc_outputs'].replace("mm", ""))
anat_resample.inputs.interp = 'spline'
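
The configured `output_resolution` string is converted to a float for FLIRT's `-applyisoxfm`, e.g.:

    float('2mm'.replace('mm', ''))  # -> 2.0
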
@@ -3419,7 +3342,7 @@ def warp_timeseries_to_T1template_dcan_nhp(wf, cfg, strat_pool, pipe_num, opt=No
# ${FSLDIR}/bin/applywarp --rel --interp=spline -i ${T1wImage} -r ${ResampRefIm} --premat=$FSLDIR/etc/flirtsch/ident.mat -o ${WD}/${T1wImageFile}.${FinalfMRIResolution}
applywarp_anat_res = pe.Node(interface=fsl.ApplyWarp(),
- name=f'anat_func_res_{pipe_num}')
+ name=f'anat_func_res_{pipe_num}')
applywarp_anat_res.inputs.relwarp = True
applywarp_anat_res.inputs.interp = 'spline'
@@ -3440,7 +3363,8 @@ def warp_timeseries_to_T1template_dcan_nhp(wf, cfg, strat_pool, pipe_num, opt=No
node, out = strat_pool.get_data('space-template_desc-T1w_mask')
wf.connect(node, out, applywarp_anat_mask_res, 'in_file')
- wf.connect(applywarp_anat_res, 'out_file', applywarp_anat_mask_res, 'ref_file')
+ wf.connect(applywarp_anat_res, 'out_file',
+ applywarp_anat_mask_res, 'ref_file')
# ${FSLDIR}/bin/fslmaths ${WD}/${T1wImageFile}.${FinalfMRIResolution} -mas ${WD}/${FreeSurferBrainMaskFile}.${FinalfMRIResolution}.nii.gz ${WD}/${FreeSurferBrainMaskFile}.${FinalfMRIResolution}.nii.gz
T1_brain_res = pe.Node(interface=fsl.MultiImageMaths(),
@@ -3482,8 +3406,8 @@ def warp_timeseries_to_T1template_dcan_nhp(wf, cfg, strat_pool, pipe_num, opt=No
node, out = strat_pool.get_data('from-T1w_to-template_mode-image_warp')
wf.connect(node, out, convert_func_to_standard_warp, 'warp2')
-
- wf.connect(applywarp_anat_res, 'out_file', convert_func_to_standard_warp, 'reference')
+ wf.connect(applywarp_anat_res, 'out_file',
+ convert_func_to_standard_warp, 'reference')
# https://github.com/DCAN-Labs/dcan-macaque-pipeline/blob/master/fMRIVolume/GenericfMRIVolumeProcessingPipeline.sh#L157-L158
# fslroi "$fMRIFolder"/"$NameOffMRI"_gdc "$fMRIFolder"/"$NameOffMRI"_gdc_warp 0 3
@@ -3503,7 +3427,7 @@ def warp_timeseries_to_T1template_dcan_nhp(wf, cfg, strat_pool, pipe_num, opt=No
multiply_func_roi_by_zero.inputs.args = '-mul 0'
wf.connect(extract_func_roi, 'roi_file',
- multiply_func_roi_by_zero, 'in_file')
+ multiply_func_roi_by_zero, 'in_file')
# https://github.com/DCAN-Labs/dcan-macaque-pipeline/blob/master/fMRIVolume/scripts/OneStepResampling.sh#L173
# fslsplit ${InputfMRI} ${WD}/prevols/vol -t
@@ -3525,10 +3449,9 @@ def warp_timeseries_to_T1template_dcan_nhp(wf, cfg, strat_pool, pipe_num, opt=No
convert_motion_distortion_warp.inputs.relwarp = True
wf.connect(multiply_func_roi_by_zero, 'out_file',
- convert_motion_distortion_warp, 'warp1')
-
+ convert_motion_distortion_warp, 'warp1')
wf.connect(split_func, 'out_files',
- convert_motion_distortion_warp, 'reference')
+ convert_motion_distortion_warp, 'reference')
node, out = strat_pool.get_data('coordinate-transformation')
wf.connect(node, out, convert_motion_distortion_warp, 'postmat')
@@ -3541,13 +3464,12 @@ def warp_timeseries_to_T1template_dcan_nhp(wf, cfg, strat_pool, pipe_num, opt=No
convert_registration_warp.inputs.out_relwarp = True
convert_registration_warp.inputs.relwarp = True
- wf.connect(applywarp_anat_res, 'out_file', convert_registration_warp, 'reference')
-
+ wf.connect(applywarp_anat_res, 'out_file',
+ convert_registration_warp, 'reference')
wf.connect(convert_motion_distortion_warp, 'out_file',
- convert_registration_warp, 'warp1')
-
+ convert_registration_warp, 'warp1')
wf.connect(convert_func_to_standard_warp, 'out_file',
- convert_registration_warp, 'warp2')
+ convert_registration_warp, 'warp2')
# fslmaths ${WD}/prevols/vol${vnum}.nii.gz -mul 0 -add 1 ${WD}/prevols/vol${vnum}_mask.nii.gz
generate_vol_mask = pe.MapNode(interface=fsl.maths.MathsCommand(),
@@ -3556,8 +3478,7 @@ def warp_timeseries_to_T1template_dcan_nhp(wf, cfg, strat_pool, pipe_num, opt=No
generate_vol_mask.inputs.args = '-mul 0 -add 1'
- wf.connect(split_func, 'out_files',
- generate_vol_mask, 'in_file')
+ wf.connect(split_func, 'out_files', generate_vol_mask, 'in_file')
# applywarp --rel --interp=spline --in=${WD}/prevols/vol${vnum}.nii.gz --warp=${MotionMatrixFolder}/${MotionMatrixPrefix}${vnum}_all_warp.nii.gz --ref=${WD}/${T1wImageFile}.${FinalfMRIResolution} --out=${WD}/postvols/vol${vnum}.nii.gz
applywarp_func_to_standard = pe.MapNode(interface=fsl.ApplyWarp(),
@@ -3567,14 +3488,11 @@ def warp_timeseries_to_T1template_dcan_nhp(wf, cfg, strat_pool, pipe_num, opt=No
applywarp_func_to_standard.inputs.relwarp = True
applywarp_func_to_standard.inputs.interp = 'spline'
- wf.connect(split_func, 'out_files',
- applywarp_func_to_standard, 'in_file')
-
+ wf.connect(split_func, 'out_files', applywarp_func_to_standard, 'in_file')
wf.connect(convert_registration_warp, 'out_file',
- applywarp_func_to_standard, 'field_file')
-
+ applywarp_func_to_standard, 'field_file')
wf.connect(applywarp_anat_res, 'out_file',
- applywarp_func_to_standard, 'ref_file')
+ applywarp_func_to_standard, 'ref_file')
# applywarp --rel --interp=nn --in=${WD}/prevols/vol${vnum}_mask.nii.gz --warp=${MotionMatrixFolder}/${MotionMatrixPrefix}${vnum}_all_warp.nii.gz --ref=${WD}/${T1wImageFile}.${FinalfMRIResolution} --out=${WD}/postvols/vol${vnum}_mask.nii.gz
applywarp_func_mask_to_standard = pe.MapNode(interface=fsl.ApplyWarp(),
@@ -3585,13 +3503,11 @@ def warp_timeseries_to_T1template_dcan_nhp(wf, cfg, strat_pool, pipe_num, opt=No
applywarp_func_mask_to_standard.inputs.interp = 'nn'
wf.connect(generate_vol_mask, 'out_file',
- applywarp_func_mask_to_standard, 'in_file')
-
+ applywarp_func_mask_to_standard, 'in_file')
wf.connect(convert_registration_warp, 'out_file',
- applywarp_func_mask_to_standard, 'field_file')
-
+ applywarp_func_mask_to_standard, 'field_file')
wf.connect(applywarp_anat_res, 'out_file',
- applywarp_func_mask_to_standard, 'ref_file')
+ applywarp_func_mask_to_standard, 'ref_file')
### Loop ends! ###
@@ -3602,7 +3518,7 @@ def warp_timeseries_to_T1template_dcan_nhp(wf, cfg, strat_pool, pipe_num, opt=No
merge_func_to_standard.inputs.dimension = 't'
wf.connect(applywarp_func_to_standard, 'out_file',
- merge_func_to_standard, 'in_files')
+ merge_func_to_standard, 'in_files')
# fslmerge -tr ${OutputfMRI}_mask $FrameMergeSTRINGII $TR_vol
merge_func_mask_to_standard = pe.Node(interface=fslMerge(),
@@ -3612,7 +3528,7 @@ def warp_timeseries_to_T1template_dcan_nhp(wf, cfg, strat_pool, pipe_num, opt=No
merge_func_mask_to_standard.inputs.dimension = 't'
wf.connect(applywarp_func_mask_to_standard, 'out_file',
- merge_func_mask_to_standard, 'in_files')
+ merge_func_mask_to_standard, 'in_files')
# fslmaths ${OutputfMRI}_mask -Tmin ${OutputfMRI}_mask
find_min_mask = pe.Node(interface=fsl.maths.MathsCommand(),
@@ -3621,39 +3537,34 @@ def warp_timeseries_to_T1template_dcan_nhp(wf, cfg, strat_pool, pipe_num, opt=No
find_min_mask.inputs.args = '-Tmin'
wf.connect(merge_func_mask_to_standard, 'merged_file',
- find_min_mask, 'in_file')
+ find_min_mask, 'in_file')
# https://github.com/DCAN-Labs/dcan-macaque-pipeline/blob/master/fMRIVolume/scripts/IntensityNormalization.sh#L113-L119
# fslmaths ${InputfMRI} -div ${BiasField} $jacobiancom -mas ${BrainMask} -mas ${InputfMRI}_mask -ing 10000 ${OutputfMRI} -odt float
merge_func_mask = pe.Node(util.Merge(3),
- name=f'merge_operand_files_{pipe_num}')
+ name=f'merge_operand_files_{pipe_num}')
wf.connect(biasfield_thr, 'out_file', merge_func_mask, 'in1')
-
wf.connect(applywarp_anat_mask_res, 'out_file', merge_func_mask, 'in2')
-
wf.connect(find_min_mask, 'out_file', merge_func_mask, 'in3')
-
extract_func_brain = pe.Node(interface=fsl.MultiImageMaths(),
- name=f'extract_func_brain_{pipe_num}')
+ name=f'extract_func_brain_{pipe_num}')
extract_func_brain.inputs.op_string = '-div %s -mas %s -mas %s -ing 10000'
extract_func_brain.inputs.output_datatype = 'float'
wf.connect(merge_func_to_standard, 'merged_file',
- extract_func_brain, 'in_file')
-
+ extract_func_brain, 'in_file')
wf.connect(merge_func_mask, 'out',
- extract_func_brain, 'operand_files')
+ extract_func_brain, 'operand_files')
func_mask_final = pe.Node(interface=fsl.MultiImageMaths(),
- name=f'func_mask_final_{pipe_num}')
+ name=f'func_mask_final_{pipe_num}')
func_mask_final.inputs.op_string = "-mas %s "
wf.connect(applywarp_anat_mask_res, 'out_file', func_mask_final, 'in_file')
-
wf.connect(find_min_mask, 'out_file', func_mask_final, 'operand_files')
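
A note on the two `MultiImageMaths` nodes above: each `%s` in `op_string` is filled, in order, by one entry of `operand_files`, so `extract_func_brain` expands to `fslmaths <in_file> -div <bias field> -mas <anat mask> -mas <min mask> -ing 10000 <out>`. A minimal sketch with placeholder filenames:

    from nipype.interfaces import fsl

    norm = fsl.MultiImageMaths()
    norm.inputs.in_file = 'func_merged.nii.gz'        # placeholder paths
    norm.inputs.op_string = '-div %s -mas %s -mas %s -ing 10000'
    norm.inputs.operand_files = ['biasfield.nii.gz',  # fills the first %s
                                 'anat_mask.nii.gz',  # second %s
                                 'min_mask.nii.gz']   # third %s
    norm.inputs.output_datatype = 'float'
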
outputs = {
@@ -3674,34 +3585,6 @@ def single_step_resample_timeseries_to_T1template(wf, cfg, strat_pool,
Copyright (c) 2015-2018, the CRN developers team.
All rights reserved.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- * Neither the name of fmriprep nor the names of its contributors
- may be used to endorse or promote products derived from this
- software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- OF THE POSSIBILITY OF SUCH DAMAGE.
-
Ref: https://github.com/nipreps/fmriprep/blob/84a6005b/fmriprep/workflows/bold/resampling.py#L159-L419
Node Block:
@@ -3726,6 +3609,33 @@ def single_step_resample_timeseries_to_T1template(wf, cfg, strat_pool,
"space-template_desc-brain_bold",
"space-template_desc-bold_mask"]}
""" # noqa: 501
+ # Redistribution and use in source and binary forms, with or without
+ # modification, are permitted provided that the following conditions
+ # are met:
+
+ # * Redistributions of source code must retain the above copyright
+ # notice, this list of conditions and the following disclaimer.
+
+ # * Redistributions in binary form must reproduce the above copyright
+ # notice, this list of conditions and the following disclaimer in the
+ # documentation and/or other materials provided with the distribution.
+
+ # * Neither the name of fmriprep nor the names of its contributors
+ # may be used to endorse or promote products derived from this
+ # software without specific prior written permission.
+
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ # OF THE POSSIBILITY OF SUCH DAMAGE.
bbr2itk = pe.Node(util.Function(input_names=['reference_file',
'source_file',
'transform_file'],
@@ -3748,11 +3658,12 @@ def single_step_resample_timeseries_to_T1template(wf, cfg, strat_pool,
node, out = strat_pool.get_data(['desc-reginput_bold', 'desc-mean_bold'])
wf.connect(node, out, bbr2itk, 'source_file')
- node, out = strat_pool.get_data('from-bold_to-T1w_mode-image_desc-linear_xfm')
+ node, out = strat_pool.get_data('from-bold_to-T1w_mode-image_desc-linear_'
+ 'xfm')
wf.connect(node, out, bbr2itk, 'transform_file')
split_func = pe.Node(interface=fsl.Split(),
- name=f'split_func_{pipe_num}')
+ name=f'split_func_{pipe_num}')
split_func.inputs.dimension = 't'
@@ -3795,9 +3706,7 @@ def single_step_resample_timeseries_to_T1template(wf, cfg, strat_pool,
node, out = strat_pool.get_data('from-T1w_to-template_mode-image_xfm')
wf.connect(node, out, collectxfm, 'in1')
-
- wf.connect(bbr2itk, 'itk_transform',
- collectxfm, 'in2')
+ wf.connect(bbr2itk, 'itk_transform', collectxfm, 'in2')
collectxfm.inputs.in3 = 'identity'
@@ -3828,10 +3737,11 @@ def single_step_resample_timeseries_to_T1template(wf, cfg, strat_pool,
merge_func_to_standard.inputs.dimension = 't'
wf.connect(applyxfm_func_to_standard, 'output_image',
- merge_func_to_standard, 'in_files')
+ merge_func_to_standard, 'in_files')
applyxfm_func_mask_to_standard = pe.Node(interface=ants.ApplyTransforms(),
- name=f'applyxfm_func_mask_to_standard_{pipe_num}')
+ name='applyxfm_func_mask_to_'
+ f'standard_{pipe_num}')
applyxfm_func_mask_to_standard.inputs.interpolation = 'MultiLabel'
@@ -3846,28 +3756,24 @@ def single_step_resample_timeseries_to_T1template(wf, cfg, strat_pool,
node, out = strat_pool.get_data('from-T1w_to-template_mode-image_xfm')
wf.connect(node, out, collectxfm_mask, 'in1')
-
- wf.connect(bbr2itk, 'itk_transform',
- collectxfm_mask, 'in2')
-
+ wf.connect(bbr2itk, 'itk_transform', collectxfm_mask, 'in2')
wf.connect(collectxfm_mask, 'out',
- applyxfm_func_mask_to_standard, 'transforms')
+ applyxfm_func_mask_to_standard, 'transforms')
apply_mask = pe.Node(interface=fsl.maths.ApplyMask(),
name=f'get_func_brain_to_standard_{pipe_num}')
wf.connect(merge_func_to_standard, 'merged_file',
- apply_mask, 'in_file')
-
+ apply_mask, 'in_file')
wf.connect(applyxfm_func_mask_to_standard, 'output_image',
- apply_mask, 'mask_file')
+ apply_mask, 'mask_file')
outputs = {
'space-template_desc-preproc_bold': (merge_func_to_standard,
'merged_file'),
'space-template_desc-brain_bold': (apply_mask, 'out_file'),
'space-template_desc-bold_mask': (applyxfm_func_mask_to_standard,
- 'output_image'),
+ 'output_image'),
}
return (wf, outputs)
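
One subtlety in the `collectxfm` lists above: `antsApplyTransforms` applies its transform list as a stack, last entry first, so `[T1w-to-template, BOLD-to-T1w, identity]` resamples each volume BOLD to T1w and then to template space in a single interpolation. A minimal, hypothetical example:

    from nipype.interfaces import ants

    at = ants.ApplyTransforms()
    at.inputs.input_image = 'vol0000.nii.gz'           # placeholder paths
    at.inputs.reference_image = 'T1w_template.nii.gz'
    at.inputs.transforms = ['from-T1w_to-template.nii.gz',
                            'from-bold_to-T1w_itk.txt',
                            'identity']                # applied last-to-first
    at.inputs.interpolation = 'LanczosWindowedSinc'
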
diff --git a/CPAC/registration/tests/mocks.py b/CPAC/registration/tests/mocks.py
index b460fb5675..f3d19bda14 100644
--- a/CPAC/registration/tests/mocks.py
+++ b/CPAC/registration/tests/mocks.py
@@ -1,9 +1,10 @@
import os
+from nipype.interfaces import utility as util
from CPAC.pipeline import nipype_pipeline_engine as pe
-import nipype.interfaces.utility as util
-from CPAC.utils import Configuration, Strategy
-from CPAC.utils.interfaces.function import Function
+from CPAC.utils.configuration import Configuration
from CPAC.utils.datasource import resolve_resolution
+from CPAC.utils.interfaces.function import Function
+from CPAC.utils.strategy import Strategy
def file_node(path, file_node_num=0):
input_node = pe.Node(
diff --git a/CPAC/registration/utils.py b/CPAC/registration/utils.py
index 1185f0190b..62c884989d 100644
--- a/CPAC/registration/utils.py
+++ b/CPAC/registration/utils.py
@@ -66,6 +66,8 @@ def hardcoded_reg(moving_brain, reference_brain, moving_skull,
reference_skull, ants_para, moving_mask=None,
reference_mask=None, fixed_image_mask=None, interp=None,
reg_with_skull=0):
+ import subprocess
+ from CPAC.registration.exceptions import BadRegistrationError
# TODO: expand transforms to cover all in ANTs para
regcmd = ["antsRegistration"]
@@ -444,10 +446,15 @@ def hardcoded_reg(moving_brain, reference_brain, moving_skull,
f.write(' '.join(regcmd))
try:
- retcode = subprocess.check_output(regcmd)
+ # pylint: disable=unused-variable
+ retcode = subprocess.check_output(regcmd) # noqa: F841
+    except BadRegistrationError:
+        raise
except Exception as e:
- raise Exception('[!] ANTS registration did not complete successfully.'
- '\n\nError details:\n{0}\n{1}\n'.format(e, e.output))
+        msg = '[!] ANTS registration did not complete successfully.'
+        if hasattr(e, 'output'):
+            msg += f'\n\nError details:\n{e}\n{e.output}\n'
+        raise Exception(msg)  # pylint: disable=raise-missing-from
warp_list = []
warped_image = None
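
Background for the `hasattr(e, 'output')` guard above: `subprocess.check_output` raises `CalledProcessError`, which carries the captured stdout in `.output`, while other failures (e.g. `FileNotFoundError` when the binary is missing) have no such attribute. For illustration:

    import subprocess
    import sys

    try:
        subprocess.check_output(
            [sys.executable, '-c', 'print("boom"); raise SystemExit(1)'])
    except subprocess.CalledProcessError as err:
        print(err.returncode)  # 1
        print(err.output)      # b'boom\n' (the captured stdout)
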
@@ -545,7 +552,8 @@ def run_ants_apply_warp(moving_image, reference, initial=None, rigid=None,
# to be converted to ITK format via c3d_affine_tool
cmd = ['c3d_affine_tool', '-ref', anatomical_brain, '-src',
moving_image, func_to_anat, '-fsl2ras', '-oitk', 'affine.txt']
- retcode = subprocess.check_output(cmd)
+ # pylint: disable=unused-variable
+ retcode = subprocess.check_output(cmd) # noqa: F841
func_to_anat = change_itk_transform_type(os.path.join(os.getcwd(),
'affine.txt'))
@@ -591,7 +599,8 @@ def run_ants_apply_warp(moving_image, reference, initial=None, rigid=None,
else:
cmd.append(os.path.abspath(func_to_anat))
- retcode = subprocess.check_output(cmd)
+ # pylint: disable=unused-variable
+ retcode = subprocess.check_output(cmd) # noqa: F841
return out_image
@@ -612,16 +621,19 @@ def cpac_ants_apply_nonlinear_inverse_warp(cpac_dir, moving_image, reference,
# run_ants_apply_warp()
-def run_c3d(reference_file, source_file, transform_file):
-
+def run_c3d(reference_file, source_file, transform_file,
+ retry=False, previous_failure=False):
+ # pylint: disable=redefined-outer-name,reimported
import os
import subprocess
itk_transform = os.path.join(os.getcwd(), 'affine.txt')
- cmd = ['c3d_affine_tool', '-ref', reference_file, '-src',
- source_file, transform_file, '-fsl2ras', '-oitk', itk_transform]
- retcode = subprocess.check_output(cmd)
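+    # on a retry, only re-run the conversion when the previous attempt
+    # failed; otherwise reuse the affine.txt left by the earlier run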
+ if (not retry) or previous_failure:
+ cmd = ['c3d_affine_tool', '-ref', reference_file, '-src',
+ source_file, transform_file, '-fsl2ras', '-oitk', itk_transform]
+ # pylint: disable=unused-variable
+ retcode = subprocess.check_output(cmd) # noqa: F841
return itk_transform
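
A usage sketch of the retry semantics (placeholder paths; the function always returns the path to `affine.txt` in the working directory):

    from CPAC.registration.utils import run_c3d

    ref, src, mat = 'anat.nii.gz', 'mean_bold.nii.gz', 'func2anat.mat'
    run_c3d(ref, src, mat)              # first attempt: runs c3d_affine_tool
    run_c3d(ref, src, mat, retry=True)  # retry after success: reuses affine.txt
    run_c3d(ref, src, mat, retry=True,
            previous_failure=True)      # retry after failure: re-runs the tool
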
diff --git a/CPAC/resources/configs/group_config_template.yml b/CPAC/resources/configs/group_config_template.yml
index 544b996dc5..ec5b223410 100644
--- a/CPAC/resources/configs/group_config_template.yml
+++ b/CPAC/resources/configs/group_config_template.yml
@@ -7,334 +7,328 @@
# General Group-Level Analysis Settings
-##############################################################################
-# The main input of group-level analysis- the output directory of your individual-level analysis pipeline run (pre-processing & derivatives for each participant). This should be a path to your C-PAC individual-level run's pipeline folder, which includes the sub-directories labeled with the participant IDs.
-pipeline_dir: /path/to/output_dir
+pipeline_setup:
+ # Name for this pipeline configuration - useful for identification.
+ pipeline_name: cpac-group-template
-# (Optional) Full path to a list of participants to be included in the model. You can use this to easily prune participants from your model. In group-level analyses involving phenotype files, this allows you to prune participants without removing them from the phenotype CSV/TSV file. This should be a text file with one subject per line. An easy way to manually create this file is to copy the participant ID column from your phenotype file.
-participant_list: None
+ output_directory:
+    # The main input of group-level analysis: the output directory of your individual-level analysis pipeline run (pre-processing & derivatives for each participant). This should be a path to your C-PAC individual-level run's pipeline folder, which includes the sub-directories labeled with the participant IDs.
+    source_outputs_path: /source_output
-# Full path to the directory where CPAC should place group-level analysis outputs and any applicable statistical model files.
-output_dir: /path/to/output/dir
+ # (Optional) Full path to a list of participants to be included in the model. You can use this to easily prune participants from your model. In group-level analyses involving phenotype files, this allows you to prune participants without removing them from the phenotype CSV/TSV file. This should be a text file with one subject per line. An easy way to manually create this file is to copy the participant ID column from your phenotype file.
+ participant_list: None
+ # Full path to the directory where CPAC should place group-level analysis outputs and any applicable statistical model files.
+ output_path: /output
-#Much like the working directory for individual-level analysis, this is where the intermediate and working files will be stored during your run. This directory can be deleted later on. However, saving this directory allows the group analysis run to skip steps that have been already completed, in the case of re-runs.
-work_dir: /path/to/work/dir
+ working_directory:
+    # Much like the working directory for individual-level analysis, this is where the intermediate and working files will be stored during your run. This directory can be deleted later on. However, saving this directory allows the group analysis run to skip steps that have been already completed, in the case of re-runs.
+ path: /tmp
-#Where to write out log information for your group analysis run.
-log_dir: /path/to/log/dir
+    # Deletes the contents of the Working Directory after running.
+    # This saves disk space, but any additional preprocessing or analysis will have to be completely re-run.
+ remove_working_dir: True
+ log_directory:
+
+ # Whether to write log details of the pipeline run to the logging files.
+ run_logging: True
-# The path to your FSL installation directory. This can be left as 'FSLDIR' to grab your system's default FSL installation. However, if you prefer to use a specific install of FSL, you can enter the path here.
-FSLDIR: FSLDIR
+    # Where to write out log information for your group analysis run.
+ path: /logs
+ crash_log_directory:
-# Number of CPUs to dedicate to the group-level analysis run. Parallelizes the pipeline where applicable.
-num_cpus: 1
+ # Directory where CPAC should write crash logs.
+ path: /crash
+ system_config:
-# Scan inclusion list. For most group-level analyses, a separate model is run for each scan/series in your individual-level analysis pipeline directory.
-# Use this list to prune your run to only specific scans.
-# Example:
-# scan_inclusion: ['rest_run-1', 'rest_run-2']
-scan_inclusion: []
+ # The path to your FSL installation directory. This can be left as 'FSLDIR' to grab your system's default FSL installation. However, if you prefer to use a specific install of FSL, you can enter the path here.
+ FSLDIR: /usr/share/fsl/5.0
+ # Number of CPUs to dedicate to the group-level analysis run. Parallelizes the pipeline where applicable.
+ num_cpus: 1
-# FSL-FEAT
-##############################################################################
-
-# Run FSL FEAT group-level analysis.
-run_fsl_feat : [1]
-
-
-# How many statistical models to run in parallel. This number depends on computing resources.
-num_models_at_once : 1
-
-
-# Specify a name for the new model.
-model_name: model_name_here
-
+ # The maximum amount of memory each participant's workflow can allocate.
+ # Use this to place an upper bound of memory usage.
+ # - Warning: 'Memory Per Participant' multiplied by 'Number of Participants to Run Simultaneously'
+ # must not be more than the total amount of RAM.
+ # - Conversely, using too little RAM can impede the speed of a pipeline run.
+  # - It is recommended that you set this to a value that, when multiplied by
+  #   'Number of Participants to Run Simultaneously', is as much RAM as you can safely allocate.
+ num_memory: 10
-# Phenotype file
-# Full path to a .csv or .tsv file containing EV/regressor information for each subject.
-pheno_file: /path/to/phenotypic/file.csv
+ # Scan inclusion list. For most group-level analyses, a separate model is run for each scan/series in your individual-level analysis pipeline directory.
+ # Use this list to prune your run to only specific scans.
+ # Example:
+ # scan_inclusion: ['rest_run-1', 'rest_run-2']
+ scan_inclusion: []
+ Amazon-AWS:
-# Name of the participants column in your phenotype file.
-participant_id_label: Participant
+ # If setting the 'Output Directory' to an S3 bucket, insert the path to your AWS credentials file here.
+ aws_output_bucket_credentials:
+ # Enable server-side 256-AES encryption on data to the S3 bucket
+ s3_encryption: False
-# Specify which EVs from your phenotype are categorical or numerical. Of those which are numerical, specify which are to be demeaned.
-# ev_selections: {'demean': ['Age'], 'categorical': ['Sex', 'Diagnosis']}
-ev_selections: {'demean': [], 'categorical': []}
+ Debugging:
-
-# Specify the formula to describe your model design. Essentially, including EVs in this formula inserts them into the model. The most basic format to include each EV you select would be 'EV + EV + EV + ..', etc. You can also select to include MeanFD, Measure_Mean, and Custom_ROI_Mean here. See the C-PAC User Guide for more detailed information regarding formatting your design formula.
-# design_formula: Sex + Diagnosis + Age + MeanFD_Jenkinson + Custom_ROI_Mean
-design_formula:
-
-
-# Choose the derivatives to run the group model on.
-#
-# These must be written out as a list, and must be one of the options listed below.
-#
-# For z-scored analyses:
-# 'alff_to_standard_zstd', 'alff_to_standard_smooth_zstd', 'falff_to_standard_zstd', 'falff_to_standard_smooth_zstd', 'reho_to_standard_zstd', 'reho_to_standard_smooth_zstd', 'sca_roi_files_to_standard_fisher_zstd', 'sca_roi_files_to_standard_smooth_fisher_zstd', 'vmhc_fisher_zstd_zstat_map', 'dr_tempreg_maps_zstat_files_to_standard', 'dr_tempreg_maps_zstat_files_to_standard_smooth', 'sca_tempreg_maps_zstat_files', 'sca_tempreg_maps_zstat_files_smooth', 'centrality_outputs_zstd', 'centrality_outputs_smoothed_zstd'
-#
-# Example input: derivative_list : ['alff_to_standard_smooth_zstd', 'sca_roi_files_to_standard_smooth_fisher_zstd']
-#
-derivative_list: []
+ # Verbose developer messages.
+ verbose: Off
-# Choose whether to use a group mask or individual-specific mask when calculating the output means to be used as a regressor.
-#
-# This only takes effect if you include the 'Measure_Mean' regressor in your Design Matrix Formula.
-mean_mask: ['Group Mask']
+# FSL-FEAT
+fsl_feat:
-# Full path to a NIFTI file containing one or more ROI masks. The means of the masked regions will then be computed for each subject's output and will be included in the model as regressors (one for each ROI in the mask file) if you include 'Custom_ROI_Mean' in the Design Matrix Formula.
-# custom_roi_mask: /path/to/mask.nii.gz
-custom_roi_mask: None
+ # Run FSL FEAT group-level analysis.
+ run: Off
+ # How many statistical models to run in parallel. This number depends on computing resources.
+ num_models_at_once: 1
-# Choose the coding scheme to use when generating your model. 'Treatment' encoding is generally considered the typical scheme. Consult the User Guide for more information.
-#
-# Available options:
-# 'Treatment', 'Sum'
-#
-coding_scheme: ['Treatment']
+ # Specify a name for the new model.
+ model_name: model_name_here
+ # Phenotype file
+ # Full path to a .csv or .tsv file containing EV/regressor information for each subject.
+ pheno_file: /path
-# Specify whether FSL should model the variance for each group separately.
-#
-# If this option is enabled, you must specify a grouping variable below.
-group_sep: Off
-
+ # Name of the participants column in your phenotype file.
+ participant_id_label: Participant
-# The name of the EV that should be used to group subjects when modeling variances.
-#
-# If you do not wish to model group variances separately, set this value to None.
-grouping_var: None
+ # Specify which EVs from your phenotype are categorical or numerical. Of those which are numerical, specify which are to be demeaned.
+ # ev_selections: {'demean': ['Age'], 'categorical': ['Sex', 'Diagnosis']}
+ ev_selections: {'demean': [], 'categorical': []}
+ # Specify the formula to describe your model design. Essentially, including EVs in this formula inserts them into the model. The most basic format to include each EV you select would be 'EV + EV + EV + ..', etc. You can also select to include MeanFD, Measure_Mean, and Custom_ROI_Mean here. See the C-PAC User Guide for more detailed information regarding formatting your design formula.
+ # design_formula: Sex + Diagnosis + Age + MeanFD_Jenkinson + Custom_ROI_Mean
+ design_formula:
-# Only voxels with a Z-score higher than this value will be considered significant.
-z_threshold: ['2.3']
+ # Choose the derivatives to run the group model on.
+ #
+ # These must be written out as a list, and must be one of the options listed below.
+ #
+ # For z-scored analyses:
+ # 'desc-zstd_alff', 'desc-sm-zstd_alff', 'desc-zstd_falff', 'desc-sm-zstd_falff', 'desc-zstd_reho', 'desc-sm-zstd_reho', 'desc-zstd_sca_roi', 'desc-sm-zstd_sca_roi', 'desc-zstd_vmhc', 'desc-zstd_dr_tempreg_maps', 'desc-sm-zstd_dr_tempreg_maps', 'desc-zstd_sca_tempreg_maps', 'desc-sm-zstd_sca_tempreg_maps', 'desc-zstd_centrality', 'desc-sm-zstd_centrality'
+ #
+ # Example input: derivative_list : ['desc-sm-zstd_alff', 'desc-sm-zstd_sca_roi']
+ #
+ derivative_list: []
+ # Choose whether to use a group mask or individual-specific mask when calculating the output means to be used as a regressor.
+ #
+ # This only takes effect if you include the 'Measure_Mean' regressor in your Design Matrix Formula.
+ mean_mask: ['Group Mask']
-# Significance threshold (P-value) to use when doing cluster correction for multiple comparisons.
-p_threshold: ['0.05']
+ # Full path to a NIFTI file containing one or more ROI masks. The means of the masked regions will then be computed for each subject's output and will be included in the model as regressors (one for each ROI in the mask file) if you include 'Custom_ROI_Mean' in the Design Matrix Formula.
+ # custom_roi_mask: /path/to/mask.nii.gz
+ custom_roi_mask: None
+ # Choose the coding scheme to use when generating your model. 'Treatment' encoding is generally considered the typical scheme. Consult the User Guide for more information.
+ #
+ # Available options:
+ # 'Treatment', 'Sum'
+ #
+ coding_scheme: ['Treatment']
-# For repeated measures only. Enter the session names in your dataset that you wish to include within the same model (this is for repeated measures / within-subject designs).\n\nTip: These will be the names listed as "unique_id" in the original individual-level participant list, or the labels in the original data directories you marked as {session} while creating the CPAC participant list.
-# sessions_list: ['ses-01', 'ses-02']
-sessions_list: []
+ # Specify whether FSL should model the variance for each group separately.
+ #
+ # If this option is enabled, you must specify a grouping variable below.
+ group_sep: Off
+ # The name of the EV that should be used to group subjects when modeling variances.
+ #
+ # If you do not wish to model group variances separately, set this value to None.
+ grouping_var: None
-# For repeated measures only. Enter the series names in your dataset that you wish to include within the same model (this is for repeated measures / within-subject designs).\n\nTip: These will be the labels listed under "func:" in the original individual-level participant list, or the labels in the original data directories you marked as {series} while creating the CPAC participant list.
-# series_list: ['task-rest_run-1', 'task-rest_run-2']
-series_list: []
+ # Only voxels with a Z-score higher than this value will be considered significant.
+ z_threshold: ['2.3']
+ # Significance threshold (P-value) to use when doing cluster correction for multiple comparisons.
+ p_threshold: ['0.05']
-# Specify your contrasts here. For example, if two of your available contrasts are EV1 and EV0, you can enter contrast descriptions such as 'EV1 - EV0 = 0' or 'EV1 = 0'. Consult the User Guide for more information about describing contrasts. Alternatively, you can provide your own custom-written contrasts matrix in a CSV file in the 'Custom Contrasts Matrix' field below.
-# contrasts: ['C(Diagnosis)[T.ADHD] - C(Diagnosis)[T.Typical] = 0', 'C(Diagnosis)[T.Typical] - C(Diagnosis)[T.ADHD] = 0']
-contrasts: []
+  # For repeated measures only. Enter the session names in your dataset that you wish to include within the same model (this is for repeated measures / within-subject designs). Tip: These will be the names listed as "unique_id" in the original individual-level participant list, or the labels in the original data directories you marked as {session} while creating the CPAC participant list.
+ # sessions_list: ['ses-01', 'ses-02']
+ sessions_list: []
+  # For repeated measures only. Enter the series names in your dataset that you wish to include within the same model (this is for repeated measures / within-subject designs). Tip: These will be the labels listed under "func:" in the original individual-level participant list, or the labels in the original data directories you marked as {series} while creating the CPAC participant list.
+ # series_list: ['task-rest_run-1', 'task-rest_run-2']
+ series_list: []
-# Optional: A list of f-test strings containing contrasts. If you do not wish to run f-tests, leave this blank.
-f_tests: []
+ # Specify your contrasts here. For example, if two of your available contrasts are EV1 and EV0, you can enter contrast descriptions such as 'EV1 - EV0 = 0' or 'EV1 = 0'. Consult the User Guide for more information about describing contrasts. Alternatively, you can provide your own custom-written contrasts matrix in a CSV file in the 'Custom Contrasts Matrix' field below.
+ # contrasts: ['C(Diagnosis)[T.ADHD] - C(Diagnosis)[T.Typical] = 0', 'C(Diagnosis)[T.Typical] - C(Diagnosis)[T.ADHD] = 0']
+ contrasts: []
+ # Optional: A list of f-test strings containing contrasts. If you do not wish to run f-tests, leave this blank.
+ f_tests: []
-# Optional: Full path to a CSV file which specifies the contrasts you wish to run in group analysis. Consult the User Guide for proper formatting.
-# If you wish to use the standard contrast builder, leave this field blank. If you provide a path for this option, CPAC will use your custom contrasts matrix instead, and will use the f-tests described in this custom file only (ignoring those you have input in the f-tests field above).
-# If you wish to include f-tests, create a new column in your CSV file for each f-test named 'f_test_1', 'f_test_2', .. etc. Then, mark the contrasts you would like to include in each f-test with a 1, and mark the rest 0. Note that you must select at least two contrasts per f-test.
-custom_contrasts: None
-
+ # Optional: Full path to a CSV file which specifies the contrasts you wish to run in group analysis. Consult the User Guide for proper formatting.
+ # If you wish to use the standard contrast builder, leave this field blank. If you provide a path for this option, CPAC will use your custom contrasts matrix instead, and will use the f-tests described in this custom file only (ignoring those you have input in the f-tests field above).
+ # If you wish to include f-tests, create a new column in your CSV file for each f-test named 'f_test_1', 'f_test_2', .. etc. Then, mark the contrasts you would like to include in each f-test with a 1, and mark the rest 0. Note that you must select at least two contrasts per f-test.
+ custom_contrasts: None
# FSL-Randomise
-##############################################################################
-
-# Run Randomise
-run_randomise : [0]
-
+fsl_randomise:
-# Number of permutations you would like to use when building up the null distribution to test against.
-randomise_permutation : 500
+ # Run Randomise
+ run: [0]
+ # Number of permutations you would like to use when building up the null distribution to test against.
+ permutation: 500
-# Cluster-based thresholding corrected for multiple comparisons by using the null distribution of the max (across the image) cluster mask.
-randomise_thresh : 5
+ # Cluster-based thresholding corrected for multiple comparisons by using the null distribution of the max (across the image) cluster mask.
+ thresh: 5
+ # Demean data temporally before model fitting.
+ demean: True
-# Demean data temporally before model fitting.
-randomise_demean : True
-
-
-# From the FMRIB FSL-Randomise user guide: TFCE (Threshold-Free Cluster Enhancement) is a new method for finding 'clusters' in your data without having to define clusters in a binary way. Cluster-like structures are enhanced but the image remains fundamentally voxelwise.
-randomise_tfce : True
+ # From the FMRIB FSL-Randomise user guide: TFCE (Threshold-Free Cluster Enhancement) is a new method for finding 'clusters' in your data without having to define clusters in a binary way. Cluster-like structures are enhanced but the image remains fundamentally voxelwise.
+ tfce: True
# Bootstrap Analysis of Stable Clusters (BASC) - via PyBASC
-##############################################################################
-
-# Run Bootstrap Analysis of Stable Clusters
-run_basc : [0]
+basc:
+ # Run Bootstrap Analysis of Stable Clusters
+ run: [0]
-# If there are multiple series or scans in any of the pipeline outputs for which PyBASC is being run, and you only want to run for some of them, you can list them here - scan labels separated by commas (ex. 'rest_run-1, rest_run-3').
-# If nothing is listed, all available pipelines will be run.
-basc_scan_inclusion : None
+ # If there are multiple series or scans in any of the pipeline outputs for which PyBASC is being run, and you only want to run for some of them, you can list them here - scan labels separated by commas (ex. 'rest_run-1, rest_run-3').
+ # If nothing is listed, all available pipelines will be run.
+ scan_inclusion: None
+ # The resolution to run PyBASC with.
+ resolution: 4mm
-# The resolution to run PyBASC with.
-basc_resolution : 4mm
+ # Maximum amount of processors to use while performing BASC.
+ proc: 2
+ # Maximum amount of RAM (in GB) to be used when running BASC.
+ memory: 4
-# Maximum amount of processors to use while performing BASC.
-basc_proc : 2
+ # Standard FSL Skull Stripped Template.
+ template_brain_only_for_func: $FSLDIR/data/standard/MNI152_T1_${basc_resolution}_brain.nii.gz
+ # Full path to a mask file to be used when running BASC. Voxels outside this mask will be excluded from analysis. This is the region that you’d like to parcellate.
+ # If you do not wish to use a mask, set this field to None.
+ # Note: BASC is very computationally intensive, we strongly recommend you limit your analysis to specific brain areas of interest.
+ roi_mask_file: None
-# Maximum amount of RAM (in GB) to be used when running BASC.
-basc_memory : 4
+ # If cross clustering is enabled, then clustering of the first region will be calculated based on pairwise similarity between the timeseries of the ROI Mask File, and this second ROI.
+ cross_cluster_mask_file: None
+ # The metric used to compare similarity between voxel timeseries.
+ # Options: ['correlation', 'euclidean', 'cityblock', 'cosine']
+ similarity_metric_list: ['correlation']
-# Standard FSL Skull Stripped Template.
-template_brain_only_for_func : $FSLDIR/data/standard/MNI152_T1_${basc_resolution}_brain.nii.gz
+ # How many times individual level circular block bootstrapping of the timeseries will be applied.
+ timeseries_bootstrap_list: 100
+ # Number of bootstraps to apply to the original dataset.
+ dataset_bootstrap_list: 30
-# Full path to a mask file to be used when running BASC. Voxels outside this mask will be excluded from analysis. This is the region that you’d like to parcellate.
-# If you do not wish to use a mask, set this field to None.
-# Note: BASC is very computationally intensive, we strongly recommend you limit your analysis to specific brain areas of interest.
-basc_roi_mask_file : None
+ # Number of clusters to create during clustering at both the individual and group levels.
+ n_clusters_list: 2
+ # The similarity threshold at which the similarity matrices will be set to 0.
+ affinity_thresh: [0.0]
-# If cross clustering is enabled, then clustering of the first region will be calculated based on pairwise similarity between the timeseries of the ROI Mask File, and this second ROI.
-basc_cross_cluster_mask_file : None
+ # This is the amount of feature agglomeration that will be applied. Smaller values mean more feature agglomeration.
+ output_sizes: 800
+  # If set to true, the ROI Mask file parcellation will be based on the similarity between ROI Mask file voxels, as measured by their connectivity to each voxel in the cross-clustering ROI mask.
+ cross_cluster: True
-# The metric used to compare similarity between voxel timeseries.
-# Options: ['correlation', 'euclidean', 'cityblock', 'cosine']
-basc_similarity_metric_list : ['correlation']
+ # This parameter determines the width of the time window used in the circular block bootstrap.
+ blocklength_list: 1
-
-# How many times individual level circular block bootstrapping of the timeseries will be applied.
-basc_timeseries_bootstrap_list : 100
-
-
-# Number of bootstraps to apply to the original dataset.
-basc_dataset_bootstrap_list : 30
-
-
-# Number of clusters to create during clustering at both the individual and group levels.
-basc_n_clusters_list : 2
-
-
-# The similarity threshold at which the similarity matrices will be set to 0.
-basc_affinity_thresh : [0.0]
-
-
-# This is the amount of feature agglomeration that will be applied. Smaller values mean more feature agglomeration.
-basc_output_sizes : 800
-
-
-# If set to true, then the ROI Mask file parcellation will be based on the similarity between ROI Mask file voxels based on their connectivity to each voxel in ROI mask file for cross-clustering.
-basc_cross_cluster : True
-
-
-# This parameter determines the width of the time window used in the circular block bootstrap.
-basc_blocklength_list : 1
-
-
-# If this is set to true, the all individuals will have feature agglomeration applied together, resulting in the same mapping across subjects. Use this only when memory demands limit ability to process ROIs with a high number of voxels.
-basc_group_dim_reduce : False
+  # If this is set to true, all individuals will have feature agglomeration applied together, resulting in the same mapping across subjects. Use this only when memory demands limit the ability to process ROIs with a high number of voxels.
+ group_dim_reduce: False
# Multivariate Distance Matrix Regression (MDMR)
-##############################################################################
-
-# Used to determine if Multivariate Distance Matrix Regression (MDMR) will be added to the pipeline or not.
-runMDMR : [0]
-
-
-# Inclusion list text file listing the participant IDs you wish to include in the MDMR analysis. If left as None, will include all subjects.
-mdmr_inclusion : None
-
-
-# Path to a mask file. Voxels outside of the mask will be excluded from MDMR.
-mdmr_roi_file :
-
+mdmr:
-# Path to a CSV file containing the phenotypic regressor.
-mdmr_regressor_file :
+ # Used to determine if Multivariate Distance Matrix Regression (MDMR) will be added to the pipeline or not.
+ run: [0]
+ # Inclusion list text file listing the participant IDs you wish to include in the MDMR analysis. If left as None, will include all subjects.
+  inclusion_list: None
-# Name of the participants column in your regressor file.
-mdmr_regressor_participant_column :
+ # Path to a mask file. Voxels outside of the mask will be excluded from MDMR.
+ roi_file: /path
+ # Path to a CSV file containing the phenotypic regressor.
+ regressor_file:
-# Columns from the CSV file indicating factor variables. Other columns will be handled as covariates. Separated by commas.
-mdmr_regressor_columns :
+ # Name of the participants column in your regressor file.
+ regressor_participant_column: ''
+ # Columns from the CSV file indicating factor variables. Other columns will be handled as covariates. Separated by commas.
+ regressor_columns: ''
-# Number of permutation tests to run on the Pseudo-F statistics.
-mdmr_permutations : 500
+ # Number of permutation tests to run on the Pseudo-F statistics.
+ permutations: 15000
+ # Number of Nipype nodes created while computing MDMR. Dependent upon computing resources.
+ parallel_nodes: 10
-# Number of Nipype nodes created while computing MDMR. Dependent upon computing resources.
-mdmr_parallel_nodes : 1
+  # Whether to create z-statistic (zstat) maps
+ zscore: [1]
# Inter-Subject Correlation (ISC) & Inter-Subject Functional Correlation (ISFC)
-###############################################################################
-
-# Used to determine if Inter-subject Correlation (ISC) will be added to the pipeline or not.
-runISC : [0]
-
-
-# Used to determine if Inter-subject Functional Correlation (ISFC) will be added to the pipeline or not.
-runISFC : [0]
-
-
-# Used to determine if the ISC and ISFC will run in the ROI level.
-isc_level_roi : [0]
-
+isc_isfc:
-# Used to determine if the ISC and ISFC will run in the voxel level. Depending on the image resolution, it may take several hours and consume a great amount of available memory.
-isc_level_voxel : [0]
+ # Used to determine if Inter-subject Correlation (ISC) will be added to the pipeline or not.
+ runISC: [0]
+ # Used to determine if Inter-subject Functional Correlation (ISFC) will be added to the pipeline or not.
+ runISFC: [0]
-# Filter out voxels that, in the correlation distribution, is greater then the informed standard deviation. Zero value will disable the filter.
-isc_level_voxel_std_filter : 0.0
+  # Used to determine if the ISC and ISFC will run at the ROI level.
+  level_roi: [0]
+  # Used to determine if the ISC and ISFC will run at the voxel level. Depending on the image resolution, it may take several hours and consume a great amount of available memory.
+  level_voxel: [0]
-# Number of permutation tests to compute the statistics.
-isc_permutations : 1000
+  # Filter out voxels that, in the correlation distribution, are greater than the specified standard deviation. A value of zero disables the filter.
+ level_voxel_std_filter: 0.0
+ # Number of permutation tests to compute the statistics.
+ permutations: 1000
-# ROI/atlases to include in the analysis. For ROI-level ISC/ISFC runs.
-# This should be a list of names/strings of the ROI names used in individual-level analysis, if ROI timeseries extraction was performed.
-isc_roi_inclusion: [""]
+ # ROI/atlases to include in the analysis. For ROI-level ISC/ISFC runs.
+ # This should be a list of names/strings of the ROI names used in individual-level analysis, if ROI timeseries extraction was performed.
+ roi_inclusion: [""]
#Quasi Periodic Patterns (QPP)
-###############################
+qpp:
-# Run Quasi Periodic Pattern Analysis
-runQPP : [1]
+ # Run Quasi Periodic Pattern Analysis
+ run: [1]
+ scan_inclusion:
-qpp_scan_inclusion :
-qpp_session_inclusion :
-qpp_stratification :
+ session_inclusion:
+
+ stratification:
-qpp_permutations: 100
-qpp_window: 30
+ permutations: 100
+
+ window: 30
-qpp_initial_threshold: 0.2
-qpp_final_threshold: 0.3
-qpp_initial_threshold_iterations : 20
+ initial_threshold: 0.2
+
+ final_threshold: 0.3
+
+  initial_threshold_iterations: 20
-qpp_iterations : 15
+  iterations: 15
diff --git a/CPAC/resources/configs/pipeline_config_blank.yml b/CPAC/resources/configs/pipeline_config_blank.yml
index 1b47e0681a..6a86f9a83f 100644
--- a/CPAC/resources/configs/pipeline_config_blank.yml
+++ b/CPAC/resources/configs/pipeline_config_blank.yml
@@ -11,7 +11,7 @@ pipeline_setup:
# Name for this pipeline configuration - useful for identification.
pipeline_name: cpac-blank-template
-
+
output_directory:
# Directory where C-PAC should write out processed data, logs, and crash reports.
@@ -80,6 +80,7 @@ pipeline_setup:
path: /output/crash
system_config:
+ fail_fast: Off
# Select Off if you intend to run CPAC on a single machine.
# If set to On, CPAC will attempt to submit jobs through the job scheduler / resource manager selected below.
diff --git a/CPAC/resources/configs/pipeline_config_default.yml b/CPAC/resources/configs/pipeline_config_default.yml
index d149e40919..dc1f8062e8 100644
--- a/CPAC/resources/configs/pipeline_config_default.yml
+++ b/CPAC/resources/configs/pipeline_config_default.yml
@@ -10,6 +10,7 @@
pipeline_setup:
# Name for this pipeline configuration - useful for identification.
+ # This string will be sanitized and used in filepaths
pipeline_name: cpac-default-pipeline
output_directory:
@@ -78,12 +79,27 @@ pipeline_setup:
path: /outputs/logs
+ # Configuration options for logging visualizations of the workflow graph
+ graphviz:
+ # Configuration for a graphviz visualization of the entire workflow. See https://fcp-indi.github.io/docs/developer/nodes#CPAC.pipeline.nipype_pipeline_engine.Workflow.write_graph for details about the various options
+ entire_workflow:
+ # Whether to generate the graph visualization
+ generate: Off
+ # Options: [orig, hierarchical, flat, exec, colored]
+ graph2use: []
+ # Options: [svg, png]
+ format: []
+ # The node name will be displayed in the form `nodename (package)` when On or `nodename.Class.package` when Off
+ simple_form: On
+
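The graphviz options above map onto parameters of nipype's `Workflow.write_graph`. A minimal runnable sketch of the call this configuration would drive, with a stand-in demo workflow (an assumption about the mapping, not C-PAC's exact invocation):

from nipype.interfaces.utility import IdentityInterface
from nipype.pipeline import engine as pe

wf = pe.Workflow(name='graphviz_demo', base_dir='/tmp')
wf.add_nodes([pe.Node(IdentityInterface(fields=['x']), name='identity')])
# generate: On, with graph2use: [orig], format: [png], simple_form: On
wf.write_graph(graph2use='orig', format='png', simple_form=True)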
crash_log_directory:
# Directory where CPAC should write crash logs.
path: /outputs/crash
system_config:
+    # Stop workflow execution on first crash?
+ fail_fast: Off
# Random seed used to fix the state of execution.
# If unset, each process uses its own default.
@@ -586,6 +602,16 @@ segmentation:
registration_workflows:
+ # Minimum QC values to allow a run to complete post-registration
+  # Leave any metric empty (like "Dice:") or set it to None to disable that guardrail
+ # Default thresholds adopted from XCP-Engine
+ # (https://github.com/PennLINC/xcpEngine/blob/397ab6cf/designs/cbf_all.dsn#L66)
+ quality_thresholds:
+ Dice: 0.8
+ Jaccard: 0.9
+ CrossCorr: 0.7
+ Coverage: 0.8
+
anatomical_registration:
run: On
@@ -783,7 +809,8 @@ registration_workflows:
boundary_based_registration:
# this is a fork point
- # run: [On, Off] - this will run both and fork the pipeline
+ # run: [On, Off, fallback] - this will run both and fork the pipeline
+      # if 'fallback' is one of the selected options, BBR will run and, if its output fails quality_thresholds, the pipeline will fall back to BBR's input image
run: [On]
# Standard FSL 5.0 Scheduler used for Boundary Based Registration.
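To make the guardrail semantics concrete, here is a hypothetical sketch of how such post-registration thresholds might be evaluated, including the 'fallback' behavior described above; all names are illustrative, not C-PAC's actual guardrail API:

# Default thresholds from the config above
QUALITY_THRESHOLDS = {'Dice': 0.8, 'Jaccard': 0.9,
                      'CrossCorr': 0.7, 'Coverage': 0.8}

def passes_quality_thresholds(metrics, thresholds=QUALITY_THRESHOLDS):
    """True if every configured metric meets its threshold; thresholds
    set to None (or metrics left unmeasured) are skipped, i.e. that
    guardrail is disabled."""
    return all(metrics[name] >= minimum
               for name, minimum in thresholds.items()
               if minimum is not None and name in metrics)

# 'fallback': keep BBR's output only if it passes QC
bbr_metrics = {'Dice': 0.83, 'Jaccard': 0.91,
               'CrossCorr': 0.75, 'Coverage': 0.86}
registered = ('bbr_output.nii.gz' if passes_quality_thresholds(bbr_metrics)
              else 'bbr_input.nii.gz')  # fall back to BBR's input image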
diff --git a/CPAC/resources/configs/pipeline_config_rbc-options.yml b/CPAC/resources/configs/pipeline_config_rbc-options.yml
index b8950b759c..b20f0c617a 100644
--- a/CPAC/resources/configs/pipeline_config_rbc-options.yml
+++ b/CPAC/resources/configs/pipeline_config_rbc-options.yml
@@ -7,7 +7,7 @@
#
# Tip: This file can be edited manually with a text editor for quick modifications.
-FROM: fx-options
+FROM: fmriprep-options
pipeline_setup:
# Name for this pipeline configuration - useful for identification.
@@ -15,6 +15,8 @@ pipeline_setup:
system_config:
+ fail_fast: On
+
# Random seed used to fix the state of execution.
# If unset, each process uses its own default.
# If set, a `random.log` file will be generated logging the random seed and each node to which that seed was applied.
@@ -33,24 +35,12 @@ pipeline_setup:
# This saves disk space, but any additional preprocessing or analysis will have to be completely re-run.
remove_working_dir: On
-anatomical_preproc:
- # N4 bias field correction via ANTs
- n4_bias_field_correction:
- # this is a fork option
- run:
- - On
-
registration_workflows:
anatomical_registration:
- T1w_brain_template: /usr/share/fsl/5.0/data/standard/MNI152_T1_${resolution_for_anat}_brain.nii.gz
- T1w_brain_template_mask: /usr/share/fsl/5.0/data/standard/MNI152_T1_${resolution_for_anat}_brain_mask.nii.gz
- T1w_template: /usr/share/fsl/5.0/data/standard/MNI152_T1_${resolution_for_anat}.nii.gz
-
+ T1w_brain_template: $FSLDIR/data/standard/MNI152_T1_${resolution_for_anat}_brain.nii.gz
+ T1w_brain_template_mask: $FSLDIR/data/standard/MNI152_T1_${resolution_for_anat}_brain_mask.nii.gz
+ T1w_template: $FSLDIR/data/standard/MNI152_T1_${resolution_for_anat}.nii.gz
functional_registration:
- coregistration:
- func_input_prep:
- input: [fmriprep_reference]
-
func_registration_to_template:
# these options modify the application (to the functional data), not the calculation, of the
# T1-to-template and EPI-to-template transforms calculated earlier during registration
@@ -73,11 +63,10 @@ registration_workflows:
target_template:
T1_template:
- T1w_brain_template_funcreg: /usr/share/fsl/5.0/data/standard/MNI152_T1_${func_resolution}_brain.nii.gz
- T1w_brain_template_mask_funcreg: /usr/share/fsl/5.0/data/standard/MNI152_T1_${func_resolution}_brain_mask.nii.gz
- T1w_template_for_resample:
- T1w_template_funcreg: /usr/share/fsl/5.0/data/standard/MNI152_T1_${func_resolution}.nii.gz
-
+ T1w_brain_template_funcreg: $FSLDIR/data/standard/MNI152_T1_${func_resolution}_brain.nii.gz
+ T1w_brain_template_mask_funcreg: $FSLDIR/data/standard/MNI152_T1_${func_resolution}_brain_mask.nii.gz
+ T1w_template_for_resample: $FSLDIR/data/standard/MNI152_T1_${func_resolution}_brain.nii.gz
+ T1w_template_funcreg: $FSLDIR/data/standard/MNI152_T1_${func_resolution}.nii.gz
functional_preproc:
truncation:
@@ -94,10 +83,6 @@ functional_preproc:
run:
- On
- motion_estimates_and_correction:
- motion_correction:
- using: [3dvolreg]
-
distortion_correction:
using:
- PhaseDiff
@@ -186,7 +171,6 @@ nuisance_corrections:
# CSF erosion default is using millimeter erosion method when use erosion for CSF.
csf_mask_erosion_mm:
-
timeseries_extraction:
run: On
connectivity_matrix:
diff --git a/CPAC/resources/cpac_outputs.tsv b/CPAC/resources/cpac_outputs.tsv
index ac6c1e6cb5..989eef7fbf 100644
--- a/CPAC/resources/cpac_outputs.tsv
+++ b/CPAC/resources/cpac_outputs.tsv
@@ -218,3 +218,5 @@ space-template_label-GM_mask mask template anat NIfTI
space-EPItemplate_label-CSF_mask mask template func NIfTI
space-EPItemplate_label-WM_mask mask template func NIfTI
space-EPItemplate_label-GM_mask mask template func NIfTI
+mdmr group functional group_analysis NIfTI
+desc-zstd-mdmr group functional group_analysis NIfTI Yes
\ No newline at end of file
diff --git a/CPAC/sca/sca.py b/CPAC/sca/sca.py
index 64f6fb37db..cafddf3539 100644
--- a/CPAC/sca/sca.py
+++ b/CPAC/sca/sca.py
@@ -6,6 +6,7 @@
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
+from CPAC.registration.guardrails import guardrail_selection
from CPAC.sca.utils import *
from CPAC.utils.utils import extract_one_d
from CPAC.utils.datasource import resample_func_roi, \
@@ -505,6 +506,9 @@ def dual_regression(wf, cfg, strat_pool, pipe_num, opt=None):
'func_registration_to_template']['FNIRT_pipelines'][
'identity_matrix']
)
+ (spatial_to_native_nodes,
+ spatial_to_native_guardrails) = wf.nodes_and_guardrails(
+ resample_spatial_map_to_native_space_for_dr, registered='out_file')
spatial_map_dataflow_for_dr = create_spatial_map_dataflow(
cfg.seed_based_correlation_analysis['sca_atlases']['DualReg'],
@@ -528,18 +532,19 @@ def dual_regression(wf, cfg, strat_pool, pipe_num, opt=None):
"space-template_desc-motion_bold",
"space-template_desc-preproc_bold",
"space-template_bold"])
- wf.connect(node, out,
- resample_spatial_map_to_native_space_for_dr, 'reference')
wf.connect(node, out,
spatial_map_timeseries_for_dr, 'inputspec.subject_rest')
-
- wf.connect(spatial_map_dataflow_for_dr, 'select_spatial_map.out_file',
- resample_spatial_map_to_native_space_for_dr, 'in_file')
+ wf.connect_retries(spatial_to_native_nodes, [
+ (node, out, 'reference'),
+ (spatial_map_dataflow_for_dr, 'select_spatial_map.out_file', 'in_file')
+ ])
+ wf.connect_retries(spatial_to_native_guardrails, [
+ (node, out, 'reference')])
+ spatial_to_native = guardrail_selection(wf, *spatial_to_native_guardrails)
# connect it to the spatial_map_timeseries
- wf.connect(resample_spatial_map_to_native_space_for_dr, 'out_file',
- spatial_map_timeseries_for_dr, 'inputspec.spatial_map'
- )
+ wf.connect(spatial_to_native, 'out',
+ spatial_map_timeseries_for_dr, 'inputspec.spatial_map')
dr_temp_reg = create_temporal_reg(f'temporal_regression_{pipe_num}')
dr_temp_reg.inputs.inputspec.normalize = \
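The pattern above (`nodes_and_guardrails`, `connect_retries`, `guardrail_selection`) wires both a registration node and a retry copy to the same inputs, then selects whichever output passes the QC guardrail. A plain-Python toy of the underlying idea, with the nipype plumbing omitted and all names illustrative:

from typing import Callable, Optional

def run_with_guardrail(register: Callable[[], dict],
                       qc_passes: Callable[[dict], bool],
                       retries: int = 1) -> Optional[dict]:
    """Return the first registration attempt that passes QC, else None."""
    for _attempt in range(1 + retries):
        result = register()
        if qc_passes(result):
            return result
    return None

attempts = iter([{'Dice': 0.6}, {'Dice': 0.85}])  # first attempt fails QC
selected = run_with_guardrail(lambda: next(attempts),
                              lambda r: r['Dice'] >= 0.8)
assert selected == {'Dice': 0.85}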
diff --git a/CPAC/seg_preproc/seg_preproc.py b/CPAC/seg_preproc/seg_preproc.py
index e157c52e23..10c4c23b3c 100644
--- a/CPAC/seg_preproc/seg_preproc.py
+++ b/CPAC/seg_preproc/seg_preproc.py
@@ -3,6 +3,7 @@
from CPAC.anat_preproc.utils import freesurfer_hemispheres, mri_convert
from CPAC.pipeline import nipype_pipeline_engine as pe
+from CPAC.registration.guardrails import guardrail_selection
from CPAC.registration.registration import apply_transform
from CPAC.registration.utils import (
check_transforms,
@@ -300,16 +301,19 @@ def tissue_mask_template_to_t1(wf_name, use_ants):
tissueprior_mni_to_t1.inputs.apply_xfm = True
tissueprior_mni_to_t1.inputs.interp = 'nearestneighbour'
- # mni to t1
- preproc.connect(inputNode, 'tissue_mask_template',
- tissueprior_mni_to_t1, 'in_file')
- preproc.connect(inputNode, 'brain', tissueprior_mni_to_t1,
- 'reference')
- preproc.connect(inputNode, 'standard2highres_mat',
- tissueprior_mni_to_t1, 'in_matrix_file')
+ mni_to_t1_nodes, mni_to_t1_guardrails = preproc.nodes_and_guardrails(
+ tissueprior_mni_to_t1, registered='out_file')
- preproc.connect(tissueprior_mni_to_t1, 'out_file',
- outputNode, 'segment_mask_temp2t1')
+ # mni to t1
+ preproc.connect_retries(mni_to_t1_nodes, [
+ (inputNode, 'tissue_mask_template', 'in_file'),
+ (inputNode, 'brain', 'reference'),
+ (inputNode, 'standard2highres_mat', 'in_matrix_file')])
+ preproc.connect_retries(mni_to_t1_guardrails, [
+ (inputNode, 'brain', 'reference')])
+ # pylint: disable=no-value-for-parameter
+ mni_to_t1 = guardrail_selection(preproc, *mni_to_t1_guardrails)
+ preproc.connect(mni_to_t1, 'out', outputNode, 'segment_mask_temp2t1')
return preproc
diff --git a/CPAC/timeseries/timeseries_analysis.py b/CPAC/timeseries/timeseries_analysis.py
index 415ea6fe09..187a0282aa 100644
--- a/CPAC/timeseries/timeseries_analysis.py
+++ b/CPAC/timeseries/timeseries_analysis.py
@@ -6,6 +6,7 @@
create_connectome_nilearn, \
get_connectome_method
from CPAC.pipeline import nipype_pipeline_engine as pe
+from CPAC.registration.guardrails import guardrail_selection
from CPAC.utils.datasource import create_roi_mask_dataflow, \
create_spatial_map_dataflow, \
resample_func_roi
@@ -1001,6 +1002,9 @@ def spatial_regression(wf, cfg, strat_pool, pipe_num, opt=None):
in_matrix_file=cfg.registration_workflows['functional_registration'][
'func_registration_to_template']['FNIRT_pipelines'][
'identity_matrix'])
+ (spatial_to_native_nodes,
+ spatial_to_native_guardrails) = wf.nodes_and_guardrails(
+ resample_spatial_map_to_native_space, registered='out_file')
spatial_map_dataflow = create_spatial_map_dataflow(
cfg.timeseries_extraction['tse_atlases']['SpatialReg'],
@@ -1022,14 +1026,18 @@ def spatial_regression(wf, cfg, strat_pool, pipe_num, opt=None):
# resample the input functional file and functional mask
# to spatial map
- wf.connect(node, out, resample_spatial_map_to_native_space, 'reference')
- wf.connect(spatial_map_dataflow, 'select_spatial_map.out_file',
- resample_spatial_map_to_native_space, 'in_file')
+ wf.connect_retries(spatial_to_native_nodes, [
+ (node, out, 'reference'),
+ (spatial_map_dataflow, 'select_spatial_map.out_file', 'in_file')])
+ wf.connect_retries(spatial_to_native_guardrails, [
+ (node, out, 'reference')])
+ # pylint: disable=no-value-for-parameter
+ spatial_to_native = guardrail_selection(wf, *spatial_to_native_guardrails)
wf.connect(node, out, spatial_map_timeseries, 'inputspec.subject_rest')
# connect it to the spatial_map_timeseries
- wf.connect(resample_spatial_map_to_native_space, 'out_file',
+ wf.connect(spatial_to_native, 'out',
spatial_map_timeseries, 'inputspec.spatial_map')
node, out = strat_pool.get_data('space-template_desc-bold_mask')
diff --git a/CPAC/utils/__init__.py b/CPAC/utils/__init__.py
index 9d4106cbb2..b13af927fe 100644
--- a/CPAC/utils/__init__.py
+++ b/CPAC/utils/__init__.py
@@ -4,16 +4,8 @@
from . import build_data_config
from .interfaces import function, masktool
from .extract_data import run
-from .datasource import create_anat_datasource
-from .datasource import create_func_datasource
-from .datasource import create_fmap_datasource
-from .datasource import create_roi_mask_dataflow
-from .datasource import create_grp_analysis_dataflow
-from .datasource import create_spatial_map_dataflow
from .datatypes import ListFromItem
-from .configuration import Configuration
-from .strategy import Strategy
-from .outputs import Outputs
+from .configuration import check_pname, Configuration, set_subject
from .utils import (
get_zscore,
@@ -43,6 +35,5 @@
repickle,
)
-__all__ = [
- 'function', 'ListFromItem'
-]
+__all__ = ['check_pname', 'Configuration', 'function', 'ListFromItem',
+ 'set_subject']
diff --git a/CPAC/utils/bids_utils.py b/CPAC/utils/bids_utils.py
index 5e91585228..ad7be177d9 100755
--- a/CPAC/utils/bids_utils.py
+++ b/CPAC/utils/bids_utils.py
@@ -702,7 +702,7 @@ def collect_bids_files_configs(bids_dir, aws_input_creds=''):
file_paths = []
config_dict = {}
- suffixes = ['T1w', 'T2w', 'bold', 'acq-fMRI_epi', 'phasediff', 'magnitude',
+ suffixes = ['T1w', 'T2w', 'bold', 'epi', 'phasediff', 'magnitude',
'magnitude1', 'magnitude2']
if bids_dir.lower().startswith("s3://"):
@@ -724,6 +724,8 @@ def collect_bids_files_configs(bids_dir, aws_input_creds=''):
for s3_obj in bucket.objects.filter(Prefix=prefix):
for suf in suffixes:
if suf in str(s3_obj.key):
+ if suf == 'epi' and 'acq-fMRI' not in s3_obj.key:
+ continue
if str(s3_obj.key).endswith("json"):
try:
config_dict[s3_obj.key.replace(prefix, "")
@@ -743,6 +745,8 @@ def collect_bids_files_configs(bids_dir, aws_input_creds=''):
if files:
for f in files:
for suf in suffixes:
+ if suf == 'epi' and 'acq-fMRI' not in f:
+ continue
if 'nii' in f and suf in f:
file_paths += [os.path.join(root, f)
.replace(bids_dir, '').lstrip('/')]
diff --git a/CPAC/utils/build_data_config.py b/CPAC/utils/build_data_config.py
index c21ae8ba26..684562c149 100644
--- a/CPAC/utils/build_data_config.py
+++ b/CPAC/utils/build_data_config.py
@@ -564,11 +564,11 @@ def get_BIDS_data_dct(bids_base_dir, file_list=None, anat_scan=None,
fmap_pedir_sess = os.path.join(bids_base_dir,
"sub-{participant}/ses-{session}/fmap/"
"sub-{participant}_ses-{session}/"
- "dir-*_acq-fMRI_epi.nii.gz")
+ "*acq-fMR*_epi.nii.gz")
fmap_pedir = os.path.join(bids_base_dir,
"sub-{participant}/fmap/sub-{participant}"
- "_dir-*_acq-fMRI_epi.nii.gz")
+ "*acq-fMR*_epi.nii.gz")
sess_glob = os.path.join(bids_base_dir, "sub-*/ses-*/*")
@@ -582,7 +582,7 @@ def get_BIDS_data_dct(bids_base_dir, file_list=None, anat_scan=None,
fmap_pedir_scan_glob = os.path.join(bids_base_dir,
"sub-*fmap/"
- "sub-*_dir-*_acq-fMRI_epi.nii.gz")
+ "sub-*_*acq-fMR*_epi.nii.gz")
part_tsv_glob = os.path.join(bids_base_dir, "*participants.tsv")
@@ -648,7 +648,7 @@ def get_BIDS_data_dct(bids_base_dir, file_list=None, anat_scan=None,
fmap_mag = os.path.join(bids_base_dir,
"sub-{participant}/fmap/sub-{participant}"
"*magnitud*.nii.gz")
-
+
'''
if fnmatch.fnmatch(filepath, fmap_pedir_scan_glob):
# check if there is a scan level for the fmap magnitude files
diff --git a/CPAC/utils/configuration/__init__.py b/CPAC/utils/configuration/__init__.py
index afed809515..58a524d89d 100644
--- a/CPAC/utils/configuration/__init__.py
+++ b/CPAC/utils/configuration/__init__.py
@@ -16,9 +16,9 @@
You should have received a copy of the GNU Lesser General Public
 License along with C-PAC. If not, see <https://www.gnu.org/licenses/>."""
-from .configuration import Configuration, DEFAULT_PIPELINE_FILE, \
- Preconfiguration
+from .configuration import check_pname, Configuration, DEFAULT_PIPELINE_FILE, \
+ Preconfiguration, set_subject
from . import configuration, diff
-__all__ = ['Configuration', 'configuration', 'DEFAULT_PIPELINE_FILE', 'diff',
- 'Preconfiguration']
+__all__ = ['check_pname', 'Configuration', 'configuration',
+ 'DEFAULT_PIPELINE_FILE', 'diff', 'Preconfiguration', 'set_subject']
diff --git a/CPAC/utils/configuration/configuration.py b/CPAC/utils/configuration/configuration.py
index 108638cb8d..c0312d4a45 100644
--- a/CPAC/utils/configuration/configuration.py
+++ b/CPAC/utils/configuration/configuration.py
@@ -19,12 +19,11 @@
import re
import os
import warnings
-
from itertools import repeat
+from typing import Optional, Tuple
from warnings import warn
-
import yaml
-
+from CPAC.qc import update_thresholds
from CPAC.utils.utils import load_preconfig
from .diff import dct_diff
@@ -152,6 +151,9 @@ def __init__(self, config_map=None):
# set attribute
setattr(self, key, set_from_ENV(config_map[key]))
+ # set global QC thresholds
+ update_thresholds(self['registration_workflows', 'quality_thresholds'])
+
self.__update_attr()
def __str__(self):
@@ -357,6 +359,43 @@ def key_type_error(self, key):
]))
+def check_pname(p_name: str, pipe_config: Configuration) -> str:
+ '''Function to check / set `p_name`, the string representation of a
+ pipeline for use in filetrees
+
+ Parameters
+ ----------
+ p_name : str or None
+
+ pipe_config : Configuration
+
+ Returns
+ -------
+ p_name
+
+ Examples
+ --------
+ >>> c = Configuration()
+ >>> check_pname(None, c)
+ 'pipeline_cpac-default-pipeline'
+ >>> check_pname('cpac-default-pipeline', c)
+ 'pipeline_cpac-default-pipeline'
+ >>> check_pname('pipeline_cpac-default-pipeline', c)
+ 'pipeline_cpac-default-pipeline'
+ >>> check_pname('different-name', Configuration())
+ 'pipeline_different-name'
+ >>> p_name = check_pname(None, Preconfiguration('blank'))
+ Loading the 'blank' pre-configured pipeline.
+ >>> p_name
+ 'pipeline_cpac-blank-template'
+ '''
+ if p_name is None:
+ p_name = f'pipeline_{pipe_config["pipeline_setup", "pipeline_name"]}'
+ elif not p_name.startswith('pipeline_'):
+ p_name = f'pipeline_{p_name}'
+ return p_name
+
+
def collect_key_list(config_dict):
'''Function to return a list of lists of keys for a nested dictionary
@@ -454,3 +493,55 @@ def set_from_ENV(conf): # pylint: disable=invalid-name
conf = re.sub(
_pattern, os.environ.get(_match, f'${_match}'), conf)
return conf
+
+
+def set_subject(sub_dict: dict, pipe_config: 'Configuration',
+ p_name: Optional[str] = None) -> Tuple[str, str, str]:
+ '''Function to set pipeline name and log directory path for a given
+ sub_dict
+
+ Parameters
+ ----------
+ sub_dict : dict
+
+ pipe_config : CPAC.utils.configuration.Configuration
+
+ p_name : str, optional
+ pipeline name string
+
+ Returns
+ -------
+ subject_id : str
+
+ p_name : str
+ pipeline name string
+
+ log_dir : str
+ path to subject log directory
+
+ Examples
+ --------
+ >>> from tempfile import TemporaryDirectory
+ >>> from CPAC.utils.configuration import Configuration
+ >>> sub_dict = {'site_id': 'site1', 'subject_id': 'sub1',
+ ... 'unique_id': 'uid1'}
+ >>> with TemporaryDirectory() as tmpdir:
+ ... subject_id, p_name, log_dir = set_subject(
+ ... sub_dict, Configuration({'pipeline_setup': {'log_directory':
+ ... {'path': tmpdir}}}))
+ >>> subject_id
+ 'sub1_uid1'
+ >>> p_name
+ 'pipeline_cpac-default-pipeline'
+ >>> log_dir.endswith(f'{p_name}/{subject_id}')
+ True
+ '''
+ subject_id = sub_dict['subject_id']
+ if sub_dict.get('unique_id'):
+ subject_id += f'_{sub_dict["unique_id"]}'
+ p_name = check_pname(p_name, pipe_config)
+ log_dir = os.path.join(pipe_config.pipeline_setup['log_directory']['path'],
+ p_name, subject_id)
+ if not os.path.exists(log_dir):
+        os.makedirs(log_dir)
+ return subject_id, p_name, log_dir
diff --git a/CPAC/utils/datasource.py b/CPAC/utils/datasource.py
index bb660d1ab7..5b9271922b 100644
--- a/CPAC/utils/datasource.py
+++ b/CPAC/utils/datasource.py
@@ -1,11 +1,26 @@
+# Copyright (C) 2012-2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
import csv
import json
-import nipype.interfaces.utility as util
+import re
+from typing import Optional, Tuple
from nipype import logging
-# pylint: disable=ungrouped-imports, wrong-import-order
+from nipype.interfaces import utility as util
from CPAC.pipeline import nipype_pipeline_engine as pe
-import nipype.interfaces.afni as afni
-
from CPAC.utils import function
from CPAC.utils.interfaces.function import Function
from CPAC.utils.utils import get_scan_params
@@ -327,20 +342,46 @@ def create_fmap_datasource(fmap_dct, wf_name='fmap_datasource'):
def get_fmap_phasediff_metadata(data_config_scan_params):
- if not isinstance(data_config_scan_params, dict) and \
- ".json" in data_config_scan_params:
- with open(data_config_scan_params, 'r') as f:
- data_config_scan_params = json.load(f)
+ if (not isinstance(data_config_scan_params, dict) and
+ ".json" in data_config_scan_params):
+ with open(data_config_scan_params, 'r', encoding='utf-8') as _f:
+ data_config_scan_params = json.load(_f)
echo_time = data_config_scan_params.get("EchoTime")
dwell_time = data_config_scan_params.get("DwellTime")
pe_direction = data_config_scan_params.get("PhaseEncodingDirection")
+ total_readout = data_config_scan_params.get("TotalReadoutTime")
+
+ return (echo_time, dwell_time, pe_direction, total_readout)
+
+
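For reference, a made-up but typical EPI field map sidecar illustrating the four fields `get_fmap_phasediff_metadata` now returns, `TotalReadoutTime` being the newly ingressed one:

sidecar = {"EchoTime": 0.05,                  # illustrative values
           "DwellTime": 0.00064,
           "PhaseEncodingDirection": "j-",
           "TotalReadoutTime": 0.0448}
echo_time = sidecar.get("EchoTime")
dwell_time = sidecar.get("DwellTime")
pe_direction = sidecar.get("PhaseEncodingDirection")
total_readout = sidecar.get("TotalReadoutTime")   # new in this changeset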
+def calc_delta_te_and_asym_ratio(effective_echo_spacing: float,
+ echo_time_one: float, echo_time_two: float,
+ echo_time_three: Optional[float] = None
+ ) -> Tuple[float, float]:
+    """Calculate ``deltaTE`` and ``ees_asym_ratio`` from given metadata
+
+ Parameters
+ ----------
+ effective_echo_spacing : float
+ EffectiveEchoSpacing from sidecar JSON
- return (echo_time, dwell_time, pe_direction)
+ echo_time_one : float
+ echo_time_two : float
-def calc_deltaTE_and_asym_ratio(dwell_time, echo_time_one, echo_time_two,
- echo_time_three=None):
+ echo_time_three : float, optional
+
+ Returns
+ -------
+ deltaTE : float
+
+ ees_asym_ratio : float
+ """
+ if not isinstance(effective_echo_spacing, float):
+ raise LookupError('C-PAC could not find `EffectiveEchoSpacing` in '
+ 'either fmap or func sidecar JSON, but that field '
+ 'is required for PhaseDiff distortion correction.')
echo_times = [echo_time_one, echo_time_two]
if echo_time_three:
# get only the two different ones
@@ -354,9 +395,8 @@ def calc_deltaTE_and_asym_ratio(dwell_time, echo_time_one, echo_time_two,
echo_times[1] = echo_times[1] * 1000
deltaTE = abs(echo_times[0] - echo_times[1])
- dwell_asym_ratio = (dwell_time / deltaTE)
-
- return (deltaTE, dwell_asym_ratio)
+ ees_asym_ratio = (effective_echo_spacing / deltaTE)
+ return deltaTE, ees_asym_ratio
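A worked example of the two return values, assuming echo times that already arrive in matching units (the unit-normalization branch is elided in this hunk) and an illustrative EffectiveEchoSpacing:

effective_echo_spacing = 0.00046            # seconds, from sidecar JSON
echo_time_one, echo_time_two = 4.92, 7.38   # typical phasediff TEs (ms)
deltaTE = abs(echo_time_one - echo_time_two)        # 2.46
ees_asym_ratio = effective_echo_spacing / deltaTE   # ~0.000187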
def match_epi_fmaps(bold_pedir, epi_fmap_one, epi_fmap_params_one,
@@ -405,24 +445,24 @@ def match_epi_fmaps(bold_pedir, epi_fmap_one, epi_fmap_params_one,
def ingress_func_metadata(wf, cfg, rpool, sub_dict, subject_id,
- input_creds_path, unique_id=None):
+ input_creds_path, unique_id=None, num_strat=None):
+ name_suffix = ''
+ for suffix_part in (unique_id, num_strat):
+ if suffix_part is not None:
+ name_suffix += f'_{suffix_part}'
# Grab field maps
diff = False
blip = False
fmap_rp_list = []
fmap_TE_list = []
-
if "fmap" in sub_dict:
second = False
for key in sub_dict["fmap"]:
- gather_fmap = create_fmap_datasource(sub_dict["fmap"],
- f"fmap_gather_{key}_"
- f"{subject_id}")
+ gather_fmap = create_fmap_datasource(
+ sub_dict["fmap"], f"fmap_gather_{key}_{subject_id}")
gather_fmap.inputs.inputnode.set(
- subject=subject_id,
- creds_path=input_creds_path,
- dl_dir=cfg.pipeline_setup['working_directory']['path']
- )
+ subject=subject_id, creds_path=input_creds_path,
+ dl_dir=cfg.pipeline_setup['working_directory']['path'])
gather_fmap.inputs.inputnode.scan = key
orig_key = key
@@ -445,10 +485,11 @@ def ingress_func_metadata(wf, cfg, rpool, sub_dict, subject_id,
input_names=['data_config_scan_params'],
output_names=['echo_time',
'dwell_time',
- 'pe_direction'],
+ 'pe_direction',
+ 'total_readout'],
function=get_fmap_phasediff_metadata,
imports=get_fmap_metadata_imports),
- name=f'{key}_get_metadata')
+ name=f'{key}_get_metadata{name_suffix}')
wf.connect(gather_fmap, 'outputspec.scan_params',
get_fmap_metadata, 'data_config_scan_params')
@@ -459,33 +500,32 @@ def ingress_func_metadata(wf, cfg, rpool, sub_dict, subject_id,
'dwell_time', {}, "", "fmap_dwell_ingress")
rpool.set_data(f'{key}-pedir', get_fmap_metadata,
'pe_direction', {}, "", "fmap_pedir_ingress")
+ rpool.set_data(f'{key}-total-readout', get_fmap_metadata,
+ 'total_readout', {}, "", "fmap_readout_ingress")
fmap_TE_list.append(f"{key}-TE")
- keywords = ['diffphase', 'diffmag']
- if key in keywords:
+ if re.search('diff.*(phase|mag)', key):
diff = True
- if orig_key == "epi_AP" or orig_key == "epi_PA":
+            if re.match('epi_(AP|PA)', orig_key):
blip = True
if diff:
calc_delta_ratio = pe.Node(Function(
- input_names=['dwell_time',
+ input_names=['effective_echo_spacing',
'echo_time_one',
'echo_time_two',
'echo_time_three'],
output_names=['deltaTE',
- 'dwell_asym_ratio'],
- function=calc_deltaTE_and_asym_ratio),
- name='diff_distcor_calc_delta')
-
- node, out_file = rpool.get('diffphase-dwell')[
- "['diffphase-dwell:fmap_dwell_ingress']"]['data'] # <--- there will only be one pipe_idx
- wf.connect(node, out_file, calc_delta_ratio, 'dwell_time')
+ 'ees_asym_ratio'],
+ function=calc_delta_te_and_asym_ratio,
+ imports=['from typing import Optional, Tuple']),
+ name=f'diff_distcor_calc_delta{name_suffix}')
node, out_file = rpool.get(f'{fmap_TE_list[0]}')[
- f"['{fmap_TE_list[0]}:fmap_TE_ingress']"]['data']
+ f"['{fmap_TE_list[0]}:fmap_TE_ingress']"
+ ]['data'] # <--- there will only be one pipe_idx
wf.connect(node, out_file, calc_delta_ratio, 'echo_time_one')
node, out_file = rpool.get(f'{fmap_TE_list[1]}')[
@@ -498,40 +538,31 @@ def ingress_func_metadata(wf, cfg, rpool, sub_dict, subject_id,
wf.connect(node, out_file,
calc_delta_ratio, 'echo_time_three')
- rpool.set_data('deltaTE', calc_delta_ratio, 'deltaTE', {}, "",
- "deltaTE_ingress")
- rpool.set_data('dwell-asym-ratio',
- calc_delta_ratio, 'dwell_asym_ratio', {}, "",
- "dwell_asym_ratio_ingress")
-
# Add in nodes to get parameters from configuration file
# a node which checks if scan_parameters are present for each scan
- scan_params_imports = ['from CPAC.utils.utils import check, '
- 'try_fetch_parameter']
- scan_params = \
- pe.Node(Function(
- input_names=['data_config_scan_params',
- 'subject_id',
- 'scan',
- 'pipeconfig_tr',
- 'pipeconfig_tpattern',
- 'pipeconfig_start_indx',
- 'pipeconfig_stop_indx'],
- output_names=['tr',
- 'tpattern',
- 'ref_slice',
- 'start_indx',
- 'stop_indx',
- 'pe_direction'],
- function=get_scan_params,
- imports=scan_params_imports
- ), name=f"bold_scan_params_{subject_id}_{unique_id}")
+ scan_params = pe.Node(Function(
+ input_names=['data_config_scan_params',
+ 'subject_id',
+ 'scan',
+ 'pipeconfig_tr',
+ 'pipeconfig_tpattern',
+ 'pipeconfig_start_indx',
+ 'pipeconfig_stop_indx'],
+ output_names=['tr',
+ 'tpattern',
+ 'ref_slice',
+ 'start_indx',
+ 'stop_indx',
+ 'pe_direction',
+ 'effective_echo_spacing'],
+ function=get_scan_params,
+ imports=['from CPAC.utils.utils import check, try_fetch_parameter']
+ ), name=f"bold_scan_params_{subject_id}{name_suffix}")
scan_params.inputs.subject_id = subject_id
scan_params.inputs.set(
pipeconfig_start_indx=cfg.functional_preproc['truncation'][
'start_tr'],
- pipeconfig_stop_indx=cfg.functional_preproc['truncation']['stop_tr']
- )
+ pipeconfig_stop_indx=cfg.functional_preproc['truncation']['stop_tr'])
# wire in the scan parameter workflow
node, out = rpool.get('scan-params')[
@@ -551,7 +582,21 @@ def ingress_func_metadata(wf, cfg, rpool, sub_dict, subject_id,
rpool.set_data('pe-direction', scan_params, 'pe_direction', {}, "",
"func_metadata_ingress")
- return (wf, rpool, diff, blip, fmap_rp_list)
+ if diff:
+ # Connect EffectiveEchoSpacing from functional metadata
+ rpool.set_data('effectiveEchoSpacing', scan_params,
+ 'effective_echo_spacing', {}, '',
+ 'func_metadata_ingress')
+ node, out_file = rpool.get('effectiveEchoSpacing')[
+ "['effectiveEchoSpacing:func_metadata_ingress']"]['data']
+ wf.connect(node, out_file, calc_delta_ratio, 'effective_echo_spacing')
+ rpool.set_data('deltaTE', calc_delta_ratio, 'deltaTE', {}, '',
+ 'deltaTE_ingress')
+ rpool.set_data('ees-asym-ratio', calc_delta_ratio,
+ 'ees_asym_ratio', {}, '',
+ 'ees_asym_ratio_ingress')
+
+ return wf, rpool, diff, blip, fmap_rp_list
def create_general_datasource(wf_name):
@@ -836,7 +881,7 @@ def res_string_to_tuple(resolution):
def resolve_resolution(resolution, template, template_name, tag=None):
- import nipype.interfaces.afni as afni
+ from nipype.interfaces import afni
from CPAC.pipeline import nipype_pipeline_engine as pe
from CPAC.utils.datasource import check_for_s3
diff --git a/CPAC/utils/docs.py b/CPAC/utils/docs.py
index b1ee23df0b..181df9aa98 100644
--- a/CPAC/utils/docs.py
+++ b/CPAC/utils/docs.py
@@ -71,4 +71,41 @@ def grab_docstring_dct(fn):
return dct
+def retry_docstring(orig):
+ """Decorator to autodocument retries.
+
+ Examples
+ --------
+ >>> @retry_docstring(grab_docstring_dct)
+ ... def do_nothing():
+ ... '''Does this do anything?'''
+ ... pass
+ >>> print(do_nothing.__doc__)
+ Does this do anything?
+ Retries the following after a failed QC check:
+ Function to grab a NodeBlock dictionary from a docstring.
+
+ Parameters
+ ----------
+ fn : function
+ The NodeBlock function with the docstring to be parsed.
+
+ Returns
+ -------
+ dct : dict
+ A NodeBlock configuration dictionary.
+
+ """
+ def retry(obj):
+ if obj.__doc__ is None:
+ obj.__doc__ = ''
+ origdoc = (f'{orig.__module__}.{orig.__name__}' if
+ orig.__doc__ is None else orig.__doc__)
+ obj.__doc__ = '\n'.join([
+ obj.__doc__, 'Retries the following after a failed QC check:',
+ origdoc])
+ return obj
+ return retry
+
+
DOCS_URL_PREFIX = _docs_url_prefix()
diff --git a/CPAC/utils/strategy.py b/CPAC/utils/strategy.py
index 8eab5caf19..b176b06073 100644
--- a/CPAC/utils/strategy.py
+++ b/CPAC/utils/strategy.py
@@ -1,15 +1,29 @@
-import os
-import six
-import warnings
-import logging
+# Copyright (C) 2018-2022 C-PAC Developers
+
+# This file is part of C-PAC.
+
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+import logging
+import six
+from CPAC.pipeline.engine import ResourcePool
logger = logging.getLogger('nipype.workflow')
-class Strategy(object):
+class Strategy:
def __init__(self):
- self.resource_pool = {}
+ self._resource_pool = ResourcePool({})
self.leaf_node = None
self.leaf_out_file = None
self.name = []
@@ -42,6 +56,16 @@ def get_node_from_resource_pool(self, resource_key):
logger.error('No node for output: %s', resource_key)
raise
+ @property
+ def resource_pool(self):
+ '''Strategy's ResourcePool dict'''
+ return self._resource_pool.get_entire_rpool()
+
+ @property
+ def rpool(self):
+ '''Strategy's ResourcePool'''
+ return self._resource_pool
+
def update_resource_pool(self, resources, override=False):
for key, value in resources.items():
if key in self.resource_pool and not override:
diff --git a/CPAC/utils/test_mocks.py b/CPAC/utils/test_mocks.py
index e76ea96750..2c88a10a2f 100644
--- a/CPAC/utils/test_mocks.py
+++ b/CPAC/utils/test_mocks.py
@@ -1,9 +1,10 @@
import os
+from nipype.interfaces import utility as util
from CPAC.pipeline import nipype_pipeline_engine as pe
-import nipype.interfaces.utility as util
-from CPAC.utils import Configuration, Strategy
-from CPAC.utils.interfaces.function import Function
+from CPAC.utils.configuration import Configuration
from CPAC.utils.datasource import resolve_resolution
+from CPAC.utils.interfaces.function import Function
+from CPAC.utils.strategy import Strategy
def file_node(path, file_node_num=0):
diff --git a/CPAC/utils/utils.py b/CPAC/utils/utils.py
index 5a8f65ac0e..fffc272a48 100644
--- a/CPAC/utils/utils.py
+++ b/CPAC/utils/utils.py
@@ -1,19 +1,19 @@
-"""Copyright (C) 2022 C-PAC Developers
+# Copyright (C) 2012-2022 C-PAC Developers
-This file is part of C-PAC.
+# This file is part of C-PAC.
-C-PAC is free software: you can redistribute it and/or modify it under
-the terms of the GNU Lesser General Public License as published by the
-Free Software Foundation, either version 3 of the License, or (at your
-option) any later version.
+# C-PAC is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
-C-PAC is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
-License for more details.
+# C-PAC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+# License for more details.
-You should have received a copy of the GNU Lesser General Public
-License along with C-PAC. If not, see <https://www.gnu.org/licenses/>."""
+# You should have received a copy of the GNU Lesser General Public
+# License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
import os
import collections.abc
import fnmatch
@@ -36,6 +36,8 @@
CONFIGS_DIR, '1.7-1.8-nesting-mappings.yml'), 'r'))
NESTED_CONFIG_DEPRECATIONS = yaml.safe_load(open(os.path.join(
CONFIGS_DIR, '1.7-1.8-deprecations.yml'), 'r'))
+YAML_BOOLS = {True: ('on', 't', 'true', 'y', 'yes'),
+ False: ('f', 'false', 'n', 'no', 'off')}
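A hypothetical coercion helper showing how `YAML_BOOLS` can back string-to-bool parsing such as the new `--fail_fast` flag (the real converter is `CPAC.pipeline.schema.str_to_bool1_1`; this sketch only illustrates the lookup):

YAML_BOOLS = {True: ('on', 't', 'true', 'y', 'yes'),
              False: ('f', 'false', 'n', 'no', 'off')}

def yaml_bool(value):
    """Interpret a YAML 1.1-style boolean string."""
    lowered = str(value).lower()
    for boolean, spellings in YAML_BOOLS.items():
        if lowered in spellings:
            return boolean
    raise ValueError(f'Not a YAML 1.1 boolean: {value!r}')

assert yaml_bool('On') is True and yaml_bool('no') is False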
def get_last_prov_entry(prov):
@@ -715,6 +717,8 @@ def get_scan_params(subject_id, scan, pipeconfig_start_indx,
starting TR or starting volume index
last_tr : an integer
ending TR or ending volume index
+ pe_direction : str
+ effective_echo_spacing : float
"""
import os
@@ -733,6 +737,7 @@ def get_scan_params(subject_id, scan, pipeconfig_start_indx,
last_tr = ''
unit = 's'
pe_direction = ''
+ effective_echo_spacing = None
if isinstance(pipeconfig_stop_indx, str):
if "End" in pipeconfig_stop_indx or "end" in pipeconfig_stop_indx:
@@ -755,16 +760,20 @@ def get_scan_params(subject_id, scan, pipeconfig_start_indx,
# TODO: better handling of errant key values!!!
if "RepetitionTime" in params_dct.keys():
TR = float(check(params_dct, subject_id, scan,
- 'RepetitionTime', False))
+ "RepetitionTime", False))
if "SliceTiming" in params_dct.keys():
pattern = str(check(params_dct, subject_id, scan,
- 'SliceTiming', False))
+ "SliceTiming", False))
elif "SliceAcquisitionOrder" in params_dct.keys():
pattern = str(check(params_dct, subject_id, scan,
- 'SliceAcquisitionOrder', False))
+ "SliceAcquisitionOrder", False))
if "PhaseEncodingDirection" in params_dct.keys():
pe_direction = str(check(params_dct, subject_id, scan,
- 'PhaseEncodingDirection', False))
+ "PhaseEncodingDirection", False))
+ if "EffectiveEchoSpacing" in params_dct.keys():
+ effective_echo_spacing = float(
+ check(params_dct, subject_id, scan,
+ "EffectiveEchoSpacing", False))
elif len(data_config_scan_params) > 0 and \
isinstance(data_config_scan_params, dict):
@@ -808,16 +817,20 @@ def get_scan_params(subject_id, scan, pipeconfig_start_indx,
pe_direction = check(params_dct, subject_id, scan,
'PhaseEncodingDirection', False)
+ effective_echo_spacing = float(
+ try_fetch_parameter(params_dct, subject_id, scan,
+ ["EffectiveEchoSpacing"]))
+
else:
err = "\n\n[!] Could not read the format of the scan parameters " \
"information included in the data configuration file for " \
- "the participant {0}.\n\n".format(subject_id)
+ f"the participant {subject_id}.\n\n"
raise Exception(err)
- if first_tr == '' or first_tr == None:
+ if first_tr == '' or first_tr is None:
first_tr = pipeconfig_start_indx
- if last_tr == '' or last_tr == None:
+ if last_tr == '' or last_tr is None:
last_tr = pipeconfig_stop_indx
unit = 's'
@@ -920,14 +933,13 @@ def get_scan_params(subject_id, scan, pipeconfig_start_indx,
start_indx = first_tr
stop_indx = last_tr
- return (
- tr if tr else None,
- tpattern if tpattern else None,
- ref_slice,
- start_indx,
- stop_indx,
- pe_direction
- )
+ return (tr if tr else None,
+ tpattern if tpattern else None,
+ ref_slice,
+ start_indx,
+ stop_indx,
+ pe_direction,
+ effective_echo_spacing)
def get_tr(tr):
@@ -1608,6 +1620,141 @@ def _pickle2(p, z=False):
return False
+def _changes_1_8_0_to_1_8_1(config_dict):
+ '''
+ Examples
+ --------
+ Starting with 1.8.0
+ >>> zero = {'anatomical_preproc': {
+ ... 'non_local_means_filtering': True,
+ ... 'n4_bias_field_correction': True
+ ... }, 'functional_preproc': {
+ ... 'motion_estimates_and_correction': {
+ ... 'calculate_motion_first': False
+ ... }
+ ... }, 'segmentation': {
+ ... 'tissue_segmentation': {
+ ... 'ANTs_Prior_Based': {
+ ... 'CSF_label': 0,
+ ... 'left_GM_label': 1,
+ ... 'right_GM_label': 2,
+ ... 'left_WM_label': 3,
+ ... 'right_WM_label': 4}}}}
+ >>> updated_apb = _changes_1_8_0_to_1_8_1(zero)[
+ ... 'segmentation']['tissue_segmentation']['ANTs_Prior_Based']
+ >>> updated_apb['CSF_label']
+ [0]
+ >>> updated_apb['GM_label']
+ [1, 2]
+ >>> updated_apb['WM_label']
+ [3, 4]
+
+ Starting with 1.8.1
+ >>> one = {'anatomical_preproc': {
+ ... 'non_local_means_filtering': True,
+ ... 'n4_bias_field_correction': True
+ ... }, 'functional_preproc': {
+ ... 'motion_estimates_and_correction': {
+ ... 'calculate_motion_first': False
+ ... }
+ ... }, 'segmentation': {
+ ... 'tissue_segmentation': {
+ ... 'ANTs_Prior_Based': {
+ ... 'CSF_label': [0],
+ ... 'GM_label': [1, 2],
+ ... 'WM_label': [3, 4]}}}}
+ >>> updated_apb = _changes_1_8_0_to_1_8_1(one)[
+ ... 'segmentation']['tissue_segmentation']['ANTs_Prior_Based']
+ >>> updated_apb['CSF_label']
+ [0]
+ >>> updated_apb['GM_label']
+ [1, 2]
+ >>> updated_apb['WM_label']
+ [3, 4]
+ '''
+ for key_sequence in {
+ ('anatomical_preproc', 'non_local_means_filtering'),
+ ('anatomical_preproc', 'n4_bias_field_correction')
+ }:
+ config_dict = _now_runswitch(config_dict, key_sequence)
+ for combiners in {
+ ((
+ ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
+ 'CSF_label'),
+ ), ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
+ 'CSF_label')),
+ ((
+ ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
+ 'left_GM_label'),
+ ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
+ 'right_GM_label')
+ ), ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
+ 'GM_label')),
+ ((
+ ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
+ 'left_WM_label'),
+ ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
+ 'right_WM_label')
+ ), ('segmentation', 'tissue_segmentation', 'ANTs_Prior_Based',
+ 'WM_label'))
+ }:
+ config_dict = _combine_labels(config_dict, *combiners)
+ try:
+ calculate_motion_first = lookup_nested_value(
+ config_dict,
+ ['functional_preproc', 'motion_estimates_and_correction',
+ 'calculate_motion_first']
+ )
+ except KeyError:
+ calculate_motion_first = None
+ if calculate_motion_first is not None:
+ del config_dict['functional_preproc'][
+ 'motion_estimates_and_correction']['calculate_motion_first']
+ config_dict = set_nested_value(config_dict, [
+ 'functional_preproc', 'motion_estimates_and_correction',
+ 'motion_estimates', 'calculate_motion_first'
+ ], calculate_motion_first)
+
+ return config_dict
+
+
+def _combine_labels(config_dict, list_to_combine, new_key):
+ '''
+ Helper function to combine formerly separate keys into a
+ combined key.
+
+ Parameters
+ ----------
+ config_dict: dict
+
+    list_to_combine: iterable of lists or tuples
+
+ new_key: list or tuple
+
+ Returns
+ -------
+ updated_config_dict: dict
+ '''
+ new_value = []
+ any_old_values = False
+ for _to_combine in list_to_combine:
+ try:
+ old_value = lookup_nested_value(config_dict, _to_combine)
+ except KeyError:
+ old_value = None
+ if old_value is not None:
+ any_old_values = True
+ if isinstance(old_value, (list, set, tuple)):
+ for value in old_value:
+ new_value.append(value)
+ else:
+ new_value.append(old_value)
+ config_dict = delete_nested_value(config_dict, _to_combine)
+ if any_old_values:
+ return set_nested_value(config_dict, new_key, new_value)
+ return config_dict
+
+
def concat_list(in_list1=None, in_list2=None):
"""
Parameters
@@ -1708,6 +1855,31 @@ def lookup_nested_value(d, keys):
raise
+def _now_runswitch(config_dict, key_sequence):
+ '''
+ Helper function to convert a formerly forkable value to a
+ runswitch.
+
+ Parameters
+ ----------
+ config_dict: dict
+
+ key_sequence: list or tuple
+
+ Returns
+ -------
+ updated_config_dict: dict
+ '''
+ try:
+ old_forkable = lookup_nested_value(config_dict, key_sequence)
+ except KeyError:
+ return config_dict
+ if isinstance(old_forkable, (bool, list)):
+ return set_nested_value(
+ config_dict, key_sequence, {'run': old_forkable})
+ return config_dict
+
+
def _remove_somethings(value, things_to_remove):
'''Helper function to remove instances of any in a given set of
values from a list.
diff --git a/CPAC/utils/yaml_template.py b/CPAC/utils/yaml_template.py
index 1800c03dfd..d3ac3ef6ab 100644
--- a/CPAC/utils/yaml_template.py
+++ b/CPAC/utils/yaml_template.py
@@ -21,7 +21,7 @@
import yaml
from click import BadParameter
from CPAC.utils.configuration import Configuration, DEFAULT_PIPELINE_FILE
-from CPAC.utils.configuration.diff import dct_diff, diff_dict
+from CPAC.utils.configuration.diff import dct_diff
from CPAC.utils.utils import load_preconfig, \
lookup_nested_value, update_config_dict, \
update_pipeline_values_1_8
@@ -346,6 +346,15 @@ def hash_data_config(sub_list):
Returns
-------
data_config_hash : str, len(8)
+
+ Examples
+ --------
+ >>> sub_list = [{'site_id': f'site{i}', 'subject_id': f'sub{i}',
+ ... 'unique_id': f'uid{i}'} for i in range(1, 4)]
+ >>> sub_list[0]
+ {'site_id': 'site1', 'subject_id': 'sub1', 'unique_id': 'uid1'}
+ >>> hash_data_config(sub_list)
+ '6f49a278'
'''
return sha1('_'.join([','.join([run.get(key, '') for run in sub_list]) for
key in ['site_id', 'subject_id',
diff --git a/README.md b/README.md
index e753f1625b..137bc57972 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,7 @@ Documentation pertaining to this latest release can be found here: https://fcp-i
Discussion Forum
---------------
-If you are stuck and need help or have any other questions or comments about C-PAC, there is a C-PAC discussion forum here: ttps://neurostars.org/tag/cpac
+If you are stuck and need help or have any other questions or comments about C-PAC, there is a C-PAC discussion forum here: https://neurostars.org/tag/cpac
Issue Tracker and Bugs
----------------------
diff --git a/dev/docker_data/default_pipeline.yml b/dev/docker_data/default_pipeline.yml
new file mode 100644
index 0000000000..afe3bd37f8
--- /dev/null
+++ b/dev/docker_data/default_pipeline.yml
@@ -0,0 +1,14 @@
+%YAML 1.1
+---
+# CPAC Pipeline Configuration YAML file
+# Version 1.8.5.dev
+#
+# http://fcp-indi.github.io for more info.
+#
+# Tip: This file can be edited manually with a text editor for quick modifications.
+
+# The default C-PAC pipeline was relocated from `dev/docker_data/default_pipeline.yml` to `CPAC/resources/configs/pipeline_config_default.yml`
+# This file (`dev/docker_data/default_pipeline.yml`) is included for backwards-compatibility and will be removed in a future version.
+
+# import full default pipeline
+FROM: default
diff --git a/dev/docker_data/run.py b/dev/docker_data/run.py
index 22ebc4222e..b7379e0bbf 100755
--- a/dev/docker_data/run.py
+++ b/dev/docker_data/run.py
@@ -28,11 +28,13 @@
from CPAC import license_notice, __version__
from CPAC.pipeline import AVAILABLE_PIPELINE_CONFIGS
from CPAC.pipeline.random_state import set_up_random_state
+from CPAC.pipeline.schema import str_to_bool1_1
from CPAC.utils.bids_utils import create_cpac_data_config, \
load_cpac_data_config, \
load_yaml_config, \
sub_list_filter_by_labels
-from CPAC.utils.configuration import Configuration, DEFAULT_PIPELINE_FILE
+from CPAC.utils.configuration import Configuration, DEFAULT_PIPELINE_FILE, \
+ set_subject
from CPAC.utils.docs import DOCS_URL_PREFIX
from CPAC.utils.monitoring import failed_to_start, log_nodes_cb
from CPAC.utils.yaml_template import create_yaml_from_template, \
@@ -251,6 +253,8 @@ def run_main():
help='Disable file logging, this is useful for '
'clusters that have disabled file locking.',
default=False)
+ parser.add_argument('--fail_fast', type=str.title,
+                        help='Stop workflow execution on first crash?')
parser.add_argument('--participant_label',
help='The label of the participant that should be '
@@ -466,9 +470,9 @@ def run_main():
_url = (f'{DOCS_URL_PREFIX}/user/pipelines/'
'1.7-1.8-nesting-mappings')
- warn('\nC-PAC changed its pipeline configuration format in '
- f'v1.8.0.\nSee {_url} for details.\n',
- category=DeprecationWarning)
+            logger.warning('\nC-PAC changed its pipeline configuration '
+                           'format in v1.8.0.\nSee %s for details.\n', _url)
updated_config = os.path.join(
output_dir,
@@ -611,6 +615,9 @@ def run_main():
'Either change the output directory to something '
'local or turn off the --save_working_dir flag',
category=UserWarning)
+ if args.fail_fast is not None:
+ c['pipeline_setup', 'system_config',
+ 'fail_fast'] = str_to_bool1_1(args.fail_fast)
if c['pipeline_setup']['output_directory']['quality_control'][
'generate_xcpqc_files']:
@@ -714,14 +721,11 @@ def run_main():
sys.exit(1)
else:
data_hash = hash_data_config(sub_list)
- # write out the data configuration file
data_config_file = (f"cpac_data_config_{data_hash}_{st}.yml")
- if not output_dir_is_s3:
- data_config_file = os.path.join(output_dir, data_config_file)
- else:
- data_config_file = os.path.join(DEFAULT_TMP_DIR, data_config_file)
-
+ sublogdirs = [set_subject(sub, c)[2] for sub in sub_list]
+ # write out the data configuration file
+ data_config_file = os.path.join(sublogdirs[0], data_config_file)
with open(data_config_file, 'w', encoding='utf-8') as _f:
noalias_dumper = yaml.dumper.SafeDumper
noalias_dumper.ignore_aliases = lambda self, data: True
@@ -729,18 +733,28 @@ def run_main():
Dumper=noalias_dumper)
# update and write out pipeline config file
- if not output_dir_is_s3:
- pipeline_config_file = os.path.join(
- output_dir, f"cpac_pipeline_config_{data_hash}_{st}.yml")
- else:
- pipeline_config_file = os.path.join(
- DEFAULT_TMP_DIR, f"cpac_pipeline_config_{data_hash}_{st}.yml")
-
+ pipeline_config_file = os.path.join(
+ sublogdirs[0], f"cpac_pipeline_config_{data_hash}_{st}.yml")
with open(pipeline_config_file, 'w', encoding='utf-8') as _f:
_f.write(create_yaml_from_template(c, DEFAULT_PIPELINE_FILE, True))
- with open(f'{pipeline_config_file[:-4]}_min.yml', 'w',
+ minimized_config = f'{pipeline_config_file[:-4]}_min.yml'
+ with open(minimized_config, 'w',
encoding='utf-8') as _f:
- _f.write(create_yaml_from_template(c, DEFAULT_PIPELINE_FILE, False))
+ _f.write(create_yaml_from_template(c, DEFAULT_PIPELINE_FILE,
+ False))
+ for config_file in (data_config_file, pipeline_config_file,
+ minimized_config):
+ os.chmod(config_file, 0o444) # Make config files readonly
+
+ if len(sublogdirs) > 1:
+ # If more than one run is included in the given data config
+ # file, an identical copy of the data and pipeline config
+ # will be included in the log directory for each run
+ for sublogdir in sublogdirs[1:]:
+ for config_file in (data_config_file, pipeline_config_file,
+ minimized_config):
+ os.link(config_file, config_file.replace(
+ sublogdirs[0], sublogdir))
if args.analysis_level in ["participant", "test_config"]:
# build pipeline easy way
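Because the per-run 'copies' above are created with `os.link`, they are hard links sharing one inode with the canonical read-only configs rather than byte copies. A tiny standalone demonstration of that standard-library behavior (paths are illustrative):

import os
import tempfile

tmpdir = tempfile.mkdtemp()
canonical = os.path.join(tmpdir, 'cpac_pipeline_config.yml')
with open(canonical, 'w', encoding='utf-8') as _f:
    _f.write('FROM: default\n')
os.chmod(canonical, 0o444)               # read-only, as in run.py above
linked = os.path.join(tmpdir, 'copy.yml')
os.link(canonical, linked)               # hard link, not a byte copy
assert os.path.samefile(canonical, linked)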
diff --git a/requirements.txt b/requirements.txt
index c37cfe918f..b4cc92d941 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,6 +12,7 @@ nipype==1.5.1
nose==1.3.7
numpy==1.21.0
pandas==0.23.4
+pathvalidate==2.5.2
patsy==0.5.0
prov==1.5.2
psutil==5.6.6