This repository was archived by the owner on Dec 27, 2022. It is now read-only.

Non topup #78

Open
wants to merge 4 commits into base: master
Changes from 2 commits
34 changes: 29 additions & 5 deletions dmriprep/cli.py
@@ -10,6 +10,7 @@
from . import io
from . import run
from .data import get_dataset
from .utils import is_bval_bvec_hemispherical

# Filter warnings that are visible whenever you import another package that
# was compiled against an older numpy than is installed.
@@ -69,11 +70,34 @@ def main(participant_label, bids_dir, output_dir,
    inputs = io.get_bids_files(participant_label, bids_dir)

    for subject_inputs in inputs:
        run.run_dmriprep_pe(**subject_inputs,
                            working_dir=os.path.join(output_dir, 'scratch'),
                            out_dir=output_dir,
                            eddy_niter=eddy_niter,
                            slice_outlier_threshold=slice_outlier_threshold)
        # decide which workflow to run depending on the inputs
        if (subject_inputs['dwi_file_AP'] and subject_inputs['dwi_file_PA']):
            # run the phase-encoded (topup) dmriprep workflow
            run.run_dmriprep_pe(**subject_inputs,
                                working_dir=os.path.join(output_dir, 'scratch'),
                                out_dir=output_dir,
                                eddy_niter=eddy_niter,
                                slice_outlier_threshold=slice_outlier_threshold)
        else:
            print('''
            WARNING: no AP/PA files found, so we won't use TOPUP. If this isn't right, check that
            your files follow the BIDS format.
            ''')
            # check whether the b-vectors are hemispherical to decide whether to run eddy;
            # if we don't run eddy, we use run.run_dmriprep()
            is_hemi = is_bval_bvec_hemispherical(subject_inputs['bval_file'], subject_inputs['bvec_file'])
            if is_hemi:
                # run the dmriprep-without-topup workflow (eddy only)
                run.run_dmriprep_no_pe_with_eddy(**subject_inputs,
                                                 working_dir=os.path.join(output_dir, 'scratch'),
                                                 out_dir=output_dir,
                                                 eddy_niter=eddy_niter,
                                                 slice_outlier_threshold=slice_outlier_threshold)
            else:
                run.run_dmriprep(**subject_inputs,
                                 working_dir=os.path.join(output_dir, 'scratch'),
                                 out_dir=output_dir)


    return 0
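As a quick summary of the dispatch added above, here is a hedged sketch (an editor's illustration, not code from the PR; the helper name choose_workflow is hypothetical) of which workflow each combination of inputs selects:

def choose_workflow(has_ap_file, has_pa_file, bvecs_are_hemispherical):
    # Mirrors the branch structure in main() above.
    if has_ap_file and has_pa_file:
        return 'run_dmriprep_pe'                # topup + eddy
    if bvecs_are_hemispherical:
        return 'run_dmriprep_no_pe_with_eddy'   # eddy without topup
    return 'run_dmriprep'                       # neither topup nor eddy
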

8 changes: 4 additions & 4 deletions dmriprep/io.py
@@ -26,14 +26,14 @@ def get_bids_subject_input_files(subject_id, bids_input_directory):
                         suffix='epi',
                         dir='AP',
                         extensions=['.nii', '.nii.gz'])
    assert len(ap_file) == 1, 'found {} ap fieldmap files and we need just 1'.format(len(ap_file))
    # assert len(ap_file) == 1, 'found {} ap fieldmap files and we need just 1'.format(len(ap_file))

    pa_file = layout.get(subject=subject_id,
                         datatype='fmap',
                         suffix='epi',
                         dir='PA',
                         extensions=['.nii', '.nii.gz'])
    assert len(pa_file) == 1, 'found {} pa fieldmap files and we need just 1'.format(len(pa_file))
    # assert len(pa_file) == 1, 'found {} pa fieldmap files and we need just 1'.format(len(pa_file))

    dwi_files = layout.get(subject=subject_id, datatype='dwi', suffix='dwi')
    valid_dwi_files = []
@@ -60,8 +60,8 @@ def get_bids_subject_input_files(subject_id, bids_input_directory):

    outputs = dict(subject_id="sub-"+subject_id,
                   dwi_file=dwi_file[0],
                   dwi_file_AP=ap_file[0].path,
                   dwi_file_PA=pa_file[0].path,
                   dwi_file_AP=ap_file[0].path if len(ap_file) and len(pa_file) else None,
                   dwi_file_PA=pa_file[0].path if len(ap_file) and len(pa_file) else None,
                   bvec_file=bvec_file[0],
                   bval_file=bval_file[0],
                   subjects_dir=op.abspath(subjects_dir))
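A hedged alternative to the inline conditionals above (an editor's sketch, not part of the PR; the helper name optional_fieldmap_paths is hypothetical): factor the AP/PA pairing check into one place so the outputs dict stays readable.

def optional_fieldmap_paths(ap_file, pa_file):
    # Return (ap_path, pa_path) only when exactly one AP and one PA fieldmap
    # were found; otherwise return (None, None) so the caller skips topup.
    if len(ap_file) == 1 and len(pa_file) == 1:
        return ap_file[0].path, pa_file[0].path
    return None, None
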
119 changes: 103 additions & 16 deletions dmriprep/run.py
@@ -4,7 +4,7 @@


def run_dmriprep(dwi_file, bvec_file, bval_file,
                 subjects_dir, working_dir, out_dir):
                 subjects_dir, working_dir, out_dir, **kwargs):

    """
    Runs dmriprep for acquisitions with just one PE direction.
@@ -284,7 +284,7 @@ def run_dmriprep_pe(subject_id, dwi_file, dwi_file_AP, dwi_file_PA,
    --------
    dmriprep.run.get_dmriprep_pe_workflow
    """
    wf = get_dmriprep_pe_workflow()
    wf = get_dmriprep_base_workflow()
    wf.base_dir = op.join(op.abspath(working_dir), subject_id)

    inputspec = wf.get_node('inputspec')
@@ -306,7 +306,76 @@ def get_dmriprep_pe_workflow():
    wf.run()


def get_dmriprep_pe_workflow():
def run_dmriprep_no_pe_with_eddy(subject_id, dwi_file,
                                 bvec_file, bval_file,
                                 subjects_dir, working_dir, out_dir,
                                 eddy_niter=5, slice_outlier_threshold=0.02, **kwargs):
"""Run the dmriprep (phase encoded) nipype workflow

Parameters
----------
subject_id : str
Subject identifier

dwi_file : str
Path to dwi nifti file

bvec_file : str
Path to bvec file

bval_file : str
Path to bval file

subjects_dir : str
Path to subject's freesurfer directory

working_dir : str
Path to workflow working directory

out_dir : str
Path to output directory

eddy_niter : int, default=5
Fixed number of eddy iterations. See
https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/UsersGuide#A--niter

slice_outlier_threshold: int or float
Number of allowed outlier slices per volume. If this is exceeded the
volume is dropped from analysis. If `slice_outlier_threshold` is an
int, it is treated as number of allowed outlier slices. If
`slice_outlier_threshold` is a float between 0 and 1 (exclusive), it is
treated the fraction of allowed outlier slices.

    Notes
    -----
    This workflow assumes there are no reverse phase-encode (AP/PA) scans, so
    topup is not run; eddy uses a single acquisition-parameters line instead.

    See Also
    --------
    dmriprep.run.get_dmriprep_base_workflow
    """
    wf = get_dmriprep_base_workflow(use_reverse_phase_encode=False)
    wf.base_dir = op.join(op.abspath(working_dir), subject_id)

    inputspec = wf.get_node('inputspec')
    inputspec.inputs.subject_id = subject_id
    inputspec.inputs.dwi_file = dwi_file
    inputspec.inputs.bvec_file = bvec_file
    inputspec.inputs.bval_file = bval_file
    inputspec.inputs.subjects_dir = subjects_dir
    inputspec.inputs.out_dir = op.abspath(out_dir)
    inputspec.inputs.eddy_niter = eddy_niter
    inputspec.inputs.slice_outlier_threshold = slice_outlier_threshold

    # write the graph (this is saved to the working dir)
    wf.write_graph()
    wf.config['execution']['remove_unnecessary_outputs'] = False
    wf.config['execution']['keep_inputs'] = True
    wf.run()
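As a hedged illustration of the int-versus-float semantics described in the docstring above (an assumption about the intent, not code taken from the workflow; max_allowed_outlier_slices is a hypothetical helper):

def max_allowed_outlier_slices(slice_outlier_threshold, n_slices_per_volume):
    if isinstance(slice_outlier_threshold, int):
        return slice_outlier_threshold                          # absolute count
    return int(slice_outlier_threshold * n_slices_per_volume)   # fraction of the slices
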


def get_dmriprep_base_workflow(use_reverse_phase_encode=True):
"""Return the dmriprep (phase encoded) nipype workflow

Parameters
@@ -349,6 +418,12 @@ def get_dmriprep_pe_workflow():
    epi_pa = {'echospacing': 66.5e-3, 'enc_dir': 'y'}
    prep = all_fsl_pipeline(epi_params=epi_ap, altepi_params=epi_pa)

    if not use_reverse_phase_encode:
        # Without reverse phase-encode scans there is nothing for topup to use,
        # so drop the susceptibility-correction (peb_correction) sub-workflow.
        peb_node = prep.get_node('peb_correction')
        prep.remove_nodes([peb_node])

    # initialize an overall workflow
    wf = pe.Workflow(name="dmriprep")

@@ -361,6 +436,13 @@ def get_dmriprep_pe_workflow():
    eddy.inputs.repol = True
    eddy.inputs.cnr_maps = True
    eddy.inputs.residuals = True

    if not use_reverse_phase_encode:
        # Without an AP/PA pair, eddy still needs an acquisition parameters file;
        # write a single hard-coded row (phase-encode vector plus total readout time).
        acqp_single_file = '/tmp/acqp.txt'
        with open(acqp_single_file, 'w') as f:
            f.write('0 1 0 .0665')
Review comment (Collaborator, PR author), on the hard-coded acqp line above:
this is shady

Review comment (Contributor):
If the BIDSLayout object is passed, we can read the PhaseEncodingDirection and TotalReadoutTime with layout.get_metadata()
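A hedged sketch of the contributor's suggestion (an editor's addition, not part of the PR): derive the acqp row from the BIDS sidecar via pybids instead of hard-coding it. It assumes a pybids BIDSLayout is available and that the DWI JSON defines PhaseEncodingDirection and TotalReadoutTime; acqp_row_from_sidecar is a hypothetical helper.

def acqp_row_from_sidecar(layout, dwi_file):
    meta = layout.get_metadata(dwi_file)
    # Map BIDS PhaseEncodingDirection codes onto the unit vectors eddy expects.
    vectors = {'i': '1 0 0', 'i-': '-1 0 0',
               'j': '0 1 0', 'j-': '0 -1 0',
               'k': '0 0 1', 'k-': '0 0 -1'}
    return '{} {}'.format(vectors[meta['PhaseEncodingDirection']],
                          meta['TotalReadoutTime'])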

        eddy.inputs.in_acqp = acqp_single_file

    import multiprocessing
    eddy.inputs.num_threads = multiprocessing.cpu_count()
    import numba.cuda
@@ -370,11 +452,12 @@ def get_dmriprep_pe_workflow():
    except:
        eddy.inputs.use_cuda = False

    topup = prep.get_node('peb_correction.topup')
    topup.inputs.checksize = True
    if use_reverse_phase_encode:
        topup = prep.get_node('peb_correction.topup')
        topup.inputs.checksize = True

    applytopup = prep.get_node('peb_correction.unwarp')
    applytopup.inputs.checksize = True
        applytopup = prep.get_node('peb_correction.unwarp')
        applytopup.inputs.checksize = True

    eddy.inputs.checksize = True

@@ -384,10 +467,12 @@ def get_dmriprep_pe_workflow():
    wf.connect(prep, ('fsl_eddy.out_corrected', get_path), eddy_quad, 'base_name')
    wf.connect(inputspec, 'bval_file', eddy_quad, 'bval_file')
    wf.connect(prep, 'Rotate_Bvec.out_file', eddy_quad, 'bvec_file')
    wf.connect(prep, 'peb_correction.topup.out_field', eddy_quad, 'field')
    wf.connect(prep, 'gen_index.out_file', eddy_quad, 'idx_file')
    wf.connect(prep, 'peb_correction.topup.out_enc_file', eddy_quad, 'param_file')
    wf.connect(prep, ('fsl_eddy.out_corrected', get_qc_path), eddy_quad, 'output_dir')
    wf.connect(prep, 'gen_index.out_file', eddy_quad, 'idx_file')

    if use_reverse_phase_encode:
        wf.connect(prep, 'peb_correction.topup.out_enc_file', eddy_quad, 'param_file')
        wf.connect(prep, 'peb_correction.topup.out_field', eddy_quad, 'field')

    # need a mask file for eddy_quad. let's get it from the B0.
    def get_b0_mask_fn(b0_file):
@@ -478,13 +563,15 @@ def num_outliers(scan, outliers):
wf.connect(prep, "fsl_eddy.out_outlier_report",
id_outliers_node, "outlier_report")

list_merge = pe.Node(niu.Merge(numinputs=2), name="list_merge")
wf.connect(inputspec, 'dwi_file_ap', list_merge, 'in1')
wf.connect(inputspec, 'dwi_file_pa', list_merge, 'in2')

merge = pe.Node(fsl.Merge(dimension='t'), name="mergeAPPA")
wf.connect(merge, 'merged_file', prep, 'inputnode.alt_file')
wf.connect(list_merge, 'out', merge, 'in_files')
if use_reverse_phase_encode:
list_merge = pe.Node(niu.Merge(numinputs=2), name="list_merge")
wf.connect(inputspec, 'dwi_file_ap', list_merge, 'in1')
wf.connect(inputspec, 'dwi_file_pa', list_merge, 'in2')

merge = pe.Node(fsl.Merge(dimension='t'), name="mergeAPPA")
wf.connect(merge, 'merged_file', prep, 'inputnode.alt_file')
wf.connect(list_merge, 'out', merge, 'in_files')

fslroi = pe.Node(fsl.ExtractROI(t_min=0, t_size=1), name="fslroi")
wf.connect(prep, "outputnode.out_file", fslroi, "in_file")
6 changes: 6 additions & 0 deletions dmriprep/utils.py
@@ -5,6 +5,8 @@
import itertools

import numpy as np
import logging
from dipy.io import read_bvals_bvecs


mod_logger = logging.getLogger(__name__)
@@ -64,3 +66,7 @@ def is_hemispherical(vecs):
    else:
        pole = np.array([0.0, 0.0, 0.0])
    return is_hemi, pole

def is_bval_bvec_hemispherical(bval_file, bvec_file):
    _, bvecs = read_bvals_bvecs(bval_file, bvec_file)
    # is_hemispherical returns (is_hemi, pole); return only the boolean so the
    # `if is_hemi:` check in cli.py works. bvecs[1:] assumes the first volume is b0.
    is_hemi, _ = is_hemispherical(bvecs[1:])
    return is_hemi
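
A hedged variant of the helper above (an editor's sketch, not part of the PR): rather than assuming the first volume is the only b=0, drop every b=0 volume by thresholding the bvals before the hemisphere test. The name is_bval_bvec_hemispherical_strict and the b0_threshold default are assumptions.

def is_bval_bvec_hemispherical_strict(bval_file, bvec_file, b0_threshold=50):
    bvals, bvecs = read_bvals_bvecs(bval_file, bvec_file)
    dwi_vecs = bvecs[bvals > b0_threshold]  # keep only diffusion-weighted directions
    is_hemi, _ = is_hemispherical(dwi_vecs)
    return is_hemi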