diff --git a/.dockerignore b/.dockerignore
index b5bfff59..a51f661d 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -8,8 +8,8 @@ build/**/*
build
dist/**/*
dist
-fmriprep.egg-info/**/*
-fmriprep.egg-info
+dmriprep.egg-info/**/*
+dmriprep.egg-info
.eggs/**/*
.eggs
@@ -39,4 +39,4 @@ out/
.zenodo.json
.travis.yml
.readthedocs.yml
-CONTRIBUTING.md
\ No newline at end of file
+CONTRIBUTING.rst
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..cd5a1171
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+dmriprep/_version.py export-subst
diff --git a/.github/config.yml b/.github/config.yml
index c65ed7d8..70c14c71 100644
--- a/.github/config.yml
+++ b/.github/config.yml
@@ -5,8 +5,8 @@ newPRWelcomeComment: >
Thanks for opening this pull request!
We have detected this is the first time for you to contribute
to *dMRIPrep*.
- Please check out our [contributing guidelines](https://github.com/nipreps/dmriprep/blob/master/CONTRIBUTING.md).
-
+ Please check out our [contributing guidelines](https://github.com/nipreps/dmriprep/blob/master/CONTRIBUTING.rst).
+
We invite you to list yourself as a *dMRIPrep* contributor, so if your name
is not already mentioned, please modify the
[``.zenodo.json``](https://github.com/nipreps/dmriprep/blob/master/.zenodo.json)
@@ -25,4 +25,4 @@ newPRWelcomeComment: >
Of course, if you want to opt-out this time there is no
problem at all with adding your name later.
You will be always welcome to add it in the future whenever
- you feel it should be listed.
\ No newline at end of file
+ you feel it should be listed.
diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml
new file mode 100644
index 00000000..075f95b6
--- /dev/null
+++ b/.github/release-drafter.yml
@@ -0,0 +1,5 @@
+template: |
+ ## Release Notes
+
+ ## CHANGES
+ $CHANGES
diff --git a/.github/stale.yml b/.github/stale.yml
new file mode 100644
index 00000000..28e0d5c1
--- /dev/null
+++ b/.github/stale.yml
@@ -0,0 +1,20 @@
+# Number of days of inactivity before an issue becomes stale
+daysUntilStale: 900
+# Number of days of inactivity before a stale issue is closed
+daysUntilClose: 200
+# Issues with these labels will never be considered stale
+exemptLabels:
+ - pinned
+ - security
+ - feature
+ - help wanted
+ - low priority
+# Label to use when marking an issue as stale
+staleLabel: stale
+# Comment to post when marking an issue as stale. Set to `false` to disable
+markComment: >
+ This issue has been automatically marked as stale because it has not had
+ recent activity. It will be closed if no further activity occurs. Thank you
+ for your contributions.
+# Comment to post when closing a stale issue. Set to `false` to disable
+closeComment: false
diff --git a/.gitignore b/.gitignore
index a8237ba3..3cae9a46 100644
--- a/.gitignore
+++ b/.gitignore
@@ -103,6 +103,3 @@ ENV/
# Mac OS nonsense:
.DS_Store
-
-#kubernetes stuff
-kubernetes/jobs/
diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 00000000..14d721c1
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,20 @@
+# .readthedocs.yml
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Build documentation in the docs/ directory with Sphinx
+sphinx:
+ configuration: docs/conf.py
+
+# Optionally build your docs in additional formats such as PDF and ePub
+formats: all
+
+python:
+ install:
+ - method: pip
+ path: .
+ extra_requirements:
+ - doc
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 00000000..293b1af1
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,35 @@
+# Config file for automatic testing at travis-ci.org
+
+language: python
+matrix:
+ include:
+ - python: 3.5
+ dist: trusty
+ sudo: false
+ - python: 3.6
+ dist: trusty
+ sudo: false
+ - python: 3.7
+ dist: xenial
+ sudo: true
+
+# Command to install dependencies, e.g. pip install -r requirements.txt --use-mirrors
+install: pip install -U tox-travis
+
+# Command to run tests, e.g. python setup.py test
+script: tox
+
+# Assuming you have installed the travis-ci CLI tool, after you
+# create the Github repo and add it to Travis, run the
+# following command to finish PyPI deployment setup:
+# $ travis encrypt --add deploy.password
+deploy:
+ provider: pypi
+ distributions: sdist bdist_wheel
+ user: nipy
+ password:
+ secure: PLEASE_REPLACE_ME
+ on:
+ tags: true
+ repo: tigrlab/dmriprep
+ python: 3.7
diff --git a/.zenodo.json b/.zenodo.json
index f823aa44..c6d5fc23 100644
--- a/.zenodo.json
+++ b/.zenodo.json
@@ -3,6 +3,11 @@
"title": "dMRIPrep: a robust preprocessing pipeline for diffusion MRI",
"description": "
dMRIPrep is a robust and easy-to-use pipeline for preprocessing of diverse dMRI data. The transparent workflow dispenses of manual intervention, thereby ensuring the reproducibility of the results.
",
"creators": [
+ {
+ "affiliation": "University of Texas at Austin",
+ "name": "Pisner, Derek",
+ "orcid": "0000-0002-1228-0201"
+ },
{
"affiliation": "Stanford University",
"name": "Lerma-Usabiaga, Garikoitz",
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 00000000..a936a315
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,181 @@
+.. highlight:: shell
+
+============
+Contributing
+============
+
+We love contributions! dmriprep is open source, built on open source,
+and we'd love to have you hang out in our community.
+
+**Imposter syndrome disclaimer**: We want your help. No, really.
+
+There may be a little voice inside your head that is telling you that
+you're not ready to be an open source contributor; that your skills
+aren't nearly good enough to contribute. What could you possibly offer a
+project like this one?
+
+We assure you - the little voice in your head is wrong. If you can
+write code at all, you can contribute code to open source. Contributing
+to open source projects is a fantastic way to advance one's coding
+skills. Writing perfect code isn't the measure of a good developer (that
+would disqualify all of us!); it's trying to create something, making
+mistakes, and learning from those mistakes. That's how we all improve,
+and we are happy to help others learn.
+
+Being an open source contributor doesn't just mean writing code, either.
+You can help out by writing documentation, tests, or even giving
+feedback about the project (and yes - that includes giving feedback
+about the contribution process). Some of these contributions may be the
+most valuable to the project as a whole, because you're coming to the
+project with fresh eyes, so you can see the errors and assumptions that
+seasoned contributors have glossed over.
+
+Installing a development version of dmriprep
+--------------------------------------------
+
+First, you can install a development version of dmriprep by cloning this repository
+and then typing::
+
+ $ pip install -e .[dev]
+
+Activate the pre-commit formatting hook by typing::
+
+ $ pre-commit install
+
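+To run those same checks on demand against all files in the repository
+(assuming ``pre-commit`` was installed as above), you can also type::
+
+    $ pre-commit run --all-files
+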
+Before committing your work, you can check for formatting issues or errors by typing::
+
+ $ make lint
+ $ make test
+
+Types of Contributions
+----------------------
+
+You can contribute in many ways:
+
+Report Bugs
+~~~~~~~~~~~
+
+Report bugs at https://github.com/nipy/dmriprep/issues.
+
+If you are reporting a bug, please include:
+
+* Your operating system name and version.
+* Any details about your local setup that might be helpful in troubleshooting.
+* Detailed steps to reproduce the bug.
+
+Fix Bugs
+~~~~~~~~
+
+Look through the GitHub issues for bugs. Anything tagged with "bug" and "help
+wanted" is open to whoever wants to implement it.
+
+Implement Features
+~~~~~~~~~~~~~~~~~~
+
+Look through the GitHub issues for features. Anything tagged with "enhancement"
+and "help wanted" is open to whoever wants to implement it.
+
+Write Documentation
+~~~~~~~~~~~~~~~~~~~
+
+dmriprep could always use more documentation, whether as part of the
+official dmriprep docs, in docstrings, or even on the web in blog posts,
+articles, and such.
+
+Submit Feedback
+~~~~~~~~~~~~~~~
+
+The best way to send feedback is to file an issue at https://github.com/nipy/dmriprep/issues.
+
+If you are proposing a feature:
+
+* Explain in detail how it would work.
+* Keep the scope as narrow as possible, to make it easier to implement.
+* Remember that this is a volunteer-driven project, and that contributions
+ are welcome :)
+
+Get Started!
+------------
+
+Ready to contribute? Here's how to set up `dmriprep` for local development.
+
+1. Fork the `dmriprep` repo on GitHub.
+2. Clone your fork locally::
+
+ $ git clone git@github.com:your_name_here/dmriprep.git
+
+3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
+
+ $ mkvirtualenv dmriprep
+ $ cd dmriprep/
+ $ python setup.py develop
+
+4. Create a branch for local development::
+
+ $ git checkout -b name-of-your-bugfix-or-feature
+
+ Now you can make your changes locally.
+
+5. When you're done making changes, check that your changes pass flake8 and the
+ tests, including testing other Python versions with tox::
+
+ $ flake8 dmriprep tests
+    $ python setup.py test    # or: py.test
+ $ tox
+
+ To get flake8 and tox, just pip install them into your virtualenv.
+
+6. Commit your changes and push your branch to GitHub::
+
+ $ git add .
+ $ git commit -m "Your detailed description of your changes."
+ $ git push origin name-of-your-bugfix-or-feature
+
+7. Submit a pull request through the GitHub website.
+
+Pull Request Guidelines
+-----------------------
+
+Before you submit a pull request, check that it meets these guidelines:
+
+1. The pull request should include tests.
+2. If the pull request adds functionality, the docs should be updated. Put
+ your new functionality into a function with a docstring, and add the
+ feature to the list in README.rst.
+3. The pull request should work for Python 3.5, 3.6 and 3.7, and for PyPy. Check
+ https://travis-ci.org/tigrlab/dmriprep/pull_requests
+ and make sure that the tests pass for all supported Python versions.
+
+When opening a pull request, please use one of the following prefixes:
+
+* **[ENH]** for enhancements
+* **[FIX]** for bug fixes
+* **[TST]** for new or updated tests
+* **[DOC]** for new or updated documentation
+* **[STY]** for stylistic changes
+* **[REF]** for refactoring existing code
+
+Tips
+----
+
+To run a subset of tests::
+
+    $ py.test tests.test_dmriprep
+
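+To run a single test by keyword expression, you can pass pytest's ``-k`` flag
+(a sketch; ``pytest`` is assumed to be installed in your environment)::
+
+    $ py.test -k "keyword_expression"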
+
+Deploying
+---------
+
+A reminder for the maintainers on how to deploy.
+Make sure all your changes are committed (including an entry in HISTORY.rst).
+Then run::
+
+    $ bumpversion patch # possible: major / minor / patch
+    $ git push
+    $ git push --tags
+
+Travis will then deploy to PyPI if tests pass.
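+
+For reference, a minimal ``.bumpversion.cfg`` could look like the following
+sketch (the version number and tracked file are illustrative only)::
+
+    [bumpversion]
+    current_version = 0.2.1
+    commit = True
+    tag = True
+
+    [bumpversion:file:dmriprep/__about__.py]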
+
+The imposter syndrome disclaimer was originally written by
+`Adrienne Lowe `_ for a `PyCon talk `_, and was
+adapted based on its use in the README file for the `MetPy project `_.
diff --git a/Dockerfile b/Dockerfile
index 9c707115..24137982 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,7 +2,7 @@
FROM ubuntu:xenial-20161213
# Pre-cache neurodebian key
-COPY .docker/neurodebian.gpg /usr/local/etc/neurodebian.gpg
+COPY docker/files/neurodebian.gpg /usr/local/etc/neurodebian.gpg
# Prepare environment
RUN apt-get update && \
@@ -16,6 +16,10 @@ RUN apt-get update && \
autoconf \
libtool \
pkg-config \
+ vim \
+ zip \
+ unzip \
+ wget \
git && \
curl -sL https://deb.nodesource.com/setup_10.x | bash - && \
apt-get install -y --no-install-recommends \
@@ -27,43 +31,13 @@ RUN curl -o pandoc-2.2.2.1-1-amd64.deb -sSL "https://github.com/jgm/pandoc/relea
dpkg -i pandoc-2.2.2.1-1-amd64.deb && \
rm pandoc-2.2.2.1-1-amd64.deb
-# Installing freesurfer
-RUN curl -sSL https://surfer.nmr.mgh.harvard.edu/pub/dist/freesurfer/6.0.1/freesurfer-Linux-centos6_x86_64-stable-pub-v6.0.1.tar.gz | tar zxv --no-same-owner -C /opt \
- --exclude='freesurfer/diffusion' \
- --exclude='freesurfer/docs' \
- --exclude='freesurfer/fsfast' \
- --exclude='freesurfer/lib/cuda' \
- --exclude='freesurfer/lib/qt' \
- --exclude='freesurfer/matlab' \
- --exclude='freesurfer/mni/share/man' \
- --exclude='freesurfer/subjects/fsaverage_sym' \
- --exclude='freesurfer/subjects/fsaverage3' \
- --exclude='freesurfer/subjects/fsaverage4' \
- --exclude='freesurfer/subjects/cvs_avg35' \
- --exclude='freesurfer/subjects/cvs_avg35_inMNI152' \
- --exclude='freesurfer/subjects/bert' \
- --exclude='freesurfer/subjects/lh.EC_average' \
- --exclude='freesurfer/subjects/rh.EC_average' \
- --exclude='freesurfer/subjects/sample-*.mgz' \
- --exclude='freesurfer/subjects/V1_average' \
- --exclude='freesurfer/trctrain'
-
ENV FSL_DIR="/usr/share/fsl/5.0" \
OS="Linux" \
FS_OVERRIDE=0 \
FIX_VERTEX_AREA="" \
- FSF_OUTPUT_FORMAT="nii.gz" \
- FREESURFER_HOME="/opt/freesurfer"
-ENV SUBJECTS_DIR="$FREESURFER_HOME/subjects" \
- FUNCTIONALS_DIR="$FREESURFER_HOME/sessions" \
- MNI_DIR="$FREESURFER_HOME/mni" \
- LOCAL_DIR="$FREESURFER_HOME/local" \
- MINC_BIN_DIR="$FREESURFER_HOME/mni/bin" \
- MINC_LIB_DIR="$FREESURFER_HOME/mni/lib" \
- MNI_DATAPATH="$FREESURFER_HOME/mni/data"
+ FSF_OUTPUT_FORMAT="nii.gz"
ENV PERL5LIB="$MINC_LIB_DIR/perl5/5.8.5" \
- MNI_PERL5LIB="$MINC_LIB_DIR/perl5/5.8.5" \
- PATH="$FREESURFER_HOME/bin:$FSFAST_HOME/bin:$FREESURFER_HOME/tktools:$MINC_BIN_DIR:$PATH"
+ MNI_PERL5LIB="$MINC_LIB_DIR/perl5/5.8.5"
# Installing Neurodebian packages (FSL, AFNI, git)
RUN curl -sSL "http://neuro.debian.net/lists/$( lsb_release -c | cut -f2 ).us-ca.full" >> /etc/apt/sources.list.d/neurodebian.sources.list && \
@@ -72,32 +46,63 @@ RUN curl -sSL "http://neuro.debian.net/lists/$( lsb_release -c | cut -f2 ).us-ca
RUN apt-get update && \
apt-get install -y --no-install-recommends \
- fsl-core=5.0.9-5~nd16.04+1 \
- fsl-mni152-templates=5.0.7-2 \
afni=16.2.07~dfsg.1-5~nd16.04+1 \
- convert3d \
git-annex-standalone && \
apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-ENV FSLDIR="/usr/share/fsl/5.0" \
- FSLOUTPUTTYPE="NIFTI_GZ" \
- FSLMULTIFILEQUIT="TRUE" \
- POSSUMDIR="/usr/share/fsl/5.0" \
- LD_LIBRARY_PATH="/usr/lib/fsl/5.0:$LD_LIBRARY_PATH" \
- FSLTCLSH="/usr/bin/tclsh" \
- FSLWISH="/usr/bin/wish" \
- AFNI_MODELPATH="/usr/lib/afni/models" \
- AFNI_IMSAVE_WARNINGS="NO" \
- AFNI_TTATLAS_DATASET="/usr/share/afni/atlases" \
- AFNI_PLUGINPATH="/usr/lib/afni/plugins"
-ENV PATH="/usr/lib/fsl/5.0:/usr/lib/afni/bin:$PATH"
+ENV FSLDIR="/opt/fsl-6.0.1" \
+ PATH="/opt/fsl-6.0.1/bin:$PATH" \
+ FSLOUTPUTTYPE="NIFTI_GZ"
+RUN apt-get update -qq \
+ && apt-get install -y -q --no-install-recommends \
+ bc \
+ dc \
+ file \
+ libfontconfig1 \
+ libfreetype6 \
+ libgl1-mesa-dev \
+ libglu1-mesa-dev \
+ libgomp1 \
+ libice6 \
+ libxcursor1 \
+ libxft2 \
+ libxinerama1 \
+ libxrandr2 \
+ libxrender1 \
+ libxt6 \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/* \
+ && echo "Downloading FSL ..." \
+ && wget -q http://fsl.fmrib.ox.ac.uk/fsldownloads/fslinstaller.py \
+ && chmod 775 fslinstaller.py
+RUN /fslinstaller.py -d /opt/fsl-6.0.1 -V 6.0.1 -q
+
+RUN rm -rf /opt/fsl-6.0.1/data \
+ && rm -rf /opt/fsl-6.0.1/bin/FSLeyes* \
+ && rm -rf /opt/fsl-6.0.1/src \
+ && rm -rf /opt/fsl-6.0.1/extras/src \
+ && rm -rf /opt/fsl-6.0.1/doc \
+ && rm -rf /opt/fsl-6.0.1/bin/fslview.app \
+ && rm -rf /opt/fsl-6.0.1/data/atlases \
+ && rm -rf /opt/fsl-6.0.1/data/first \
+ && rm -rf /opt/fsl-6.0.1/data/mist \
+ && rm -rf /opt/fsl-6.0.1/data/possum
# Installing ANTs 2.2.0 (NeuroDocker build)
ENV ANTSPATH=/usr/lib/ants
RUN mkdir -p $ANTSPATH && \
curl -sSL "https://dl.dropbox.com/s/2f4sui1z6lcgyek/ANTs-Linux-centos5_x86_64-v2.2.0-0740f91.tar.gz" \
| tar -xzC $ANTSPATH --strip-components 1
-ENV PATH=$ANTSPATH:$PATH
+
+ENV AFNI_INSTALLDIR=/usr/lib/afni \
+    PATH=$ANTSPATH:/usr/lib/afni/bin:$PATH \
+    AFNI_PLUGINPATH=/usr/lib/afni/plugins \
+    AFNI_MODELPATH=/usr/lib/afni/models \
+    AFNI_TTATLAS_DATASET=/usr/share/afni/atlases \
+    AFNI_IMSAVE_WARNINGS=NO \
+    FSLOUTPUTTYPE=NIFTI_GZ \
+ ANTS_VERSION=2.2.0
# Create a shared $HOME directory
RUN useradd -m -s /bin/bash -G users dmriprep
@@ -112,14 +117,6 @@ RUN npm install -g svgo
# Installing bids-validator
RUN npm install -g bids-validator@1.2.3
-# Installing and setting up ICA_AROMA
-RUN mkdir -p /opt/ICA-AROMA && \
- curl -sSL "https://github.com/maartenmennes/ICA-AROMA/archive/v0.4.4-beta.tar.gz" \
- | tar -xzC /opt/ICA-AROMA --strip-components 1 && \
- chmod +x /opt/ICA-AROMA/ICA_AROMA.py
-
-ENV PATH=/opt/ICA-AROMA:$PATH
-
# Installing and setting up miniconda
RUN curl -sSLO https://repo.continuum.io/miniconda/Miniconda3-4.5.11-Linux-x86_64.sh && \
bash Miniconda3-4.5.11-Linux-x86_64.sh -b -p /usr/local/miniconda && \
@@ -147,6 +144,7 @@ RUN conda install -y python=3.7.1 \
graphviz=2.40.1 \
traits=4.6.0 \
+                     cython \
                      zlib; sync && \
chmod -R a+rX /usr/local/miniconda; sync && \
chmod +x /usr/local/miniconda/bin/*; sync && \
conda build purge-all; sync && \
@@ -161,26 +159,33 @@ ENV MKL_NUM_THREADS=1 \
RUN python -c "from matplotlib import font_manager" && \
sed -i 's/\(backend *: \).*$/\1Agg/g' $( python -c "import matplotlib; print(matplotlib.matplotlib_fname())" )
-# Precaching atlases
-RUN pip install --no-cache-dir "templateflow>=0.4.0,<0.5.0a0" && \
- python -c "from templateflow import api as tfapi; \
- tfapi.get('MNI152NLin6Asym', atlas=None, extension=['.nii', '.nii.gz']); \
- tfapi.get('MNI152NLin2009cAsym', atlas=None, extension=['.nii', '.nii.gz']); \
- tfapi.get('OASIS30ANTs', extension=['.nii', '.nii.gz']);" && \
- find $HOME/.cache/templateflow -type d -exec chmod go=u {} + && \
- find $HOME/.cache/templateflow -type f -exec chmod go=u {} +
-
-# Installing FMRIPREP
-COPY . /src/dmriprep
-ARG VERSION
-# Force static versioning within container
-RUN echo "${VERSION}" > /src/dmriprep/dmriprep/VERSION && \
- echo "include dmriprep/VERSION" >> /src/dmriprep/MANIFEST.in && \
- pip install --no-cache-dir "/src/dmriprep[all]"
+RUN pip install --upgrade pip
+
+RUN apt-get update && apt-get install -y \
+ gfortran \
+ liblapack-dev \
+ libopenblas-dev
+
+RUN pip install ipython cython parse
+
+# Installing DMRIPREP
+RUN git clone -b nipreps https://github.com/dPys/dmriprep.git dmriprep && \
+ cd dmriprep && \
+ python setup.py install
+
+RUN pip install ipython cython parse
+
+RUN pip install --no-cache-dir https://github.com/samuelstjean/nlsam/archive/master.zip
RUN find $HOME -type d -exec chmod go=u {} + && \
find $HOME -type f -exec chmod go=u {} +
+RUN mkdir /inputs && \
+ chmod -R 777 /inputs
+
+RUN mkdir /outputs && \
+ chmod -R 777 /outputs
+
ENV IS_DOCKER_8395080871=1
RUN ldconfig
diff --git a/MANIFEST.in b/MANIFEST.in
index 71126fef..06cb4024 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -5,3 +5,20 @@ include LICENSE
# versioneer
include versioneer.py
include dmriprep/_version.py
+recursive-include * *.nii.gz
+recursive-include * *.html
+recursive-include * *.js
+recursive-include * *.json
+recursive-include * *.txt
+recursive-include * *.csv
+recursive-include dmriprep/viz *
+recursive-include dmriprep/utils *
+recursive-include dmriprep/interfaces *
+recursive-include dmriprep/data *
+recursive-include dmriprep/config *
+recursive-include dmriprep/cli *
+recursive-include tests *
+recursive-exclude * __pycache__
+recursive-exclude * *.py[co]
+recursive-include * *.yaml
+recursive-include docs *.rst conf.py Makefile make.bat *.jpg *.png *.gif
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..03b6b514
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,94 @@
+.PHONY: clean clean-test clean-pyc clean-build docs help
+.DEFAULT_GOAL := help
+
+define BROWSER_PYSCRIPT
+import os, webbrowser, sys
+
+try:
+ from urllib import pathname2url
+except:
+ from urllib.request import pathname2url
+
+webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1])))
+endef
+export BROWSER_PYSCRIPT
+
+define PRINT_HELP_PYSCRIPT
+import re, sys
+
+for line in sys.stdin:
+ match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line)
+ if match:
+ target, help = match.groups()
+ print("%-20s %s" % (target, help))
+endef
+export PRINT_HELP_PYSCRIPT
+
+BROWSER := python -c "$$BROWSER_PYSCRIPT"
+
+help:
+ @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST)
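+
+# Targets advertise themselves to the `help` recipe above by appending a
+# "## description" comment after the target name, e.g. (hypothetical target):
+#   mytarget: ## one-line summary printed by `make help`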
+
+clean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts
+
+clean-build: ## remove build artifacts
+ rm -fr build/
+ rm -fr dist/
+ rm -fr .eggs/
+ find . -name '*.egg-info' -exec rm -fr {} +
+ find . -name '*.egg' -exec rm -f {} +
+
+clean-pyc: ## remove Python file artifacts
+ find . -name '*.pyc' -exec rm -f {} +
+ find . -name '*.pyo' -exec rm -f {} +
+ find . -name '*~' -exec rm -f {} +
+ find . -name '__pycache__' -exec rm -fr {} +
+
+clean-test: ## remove test and coverage artifacts
+ rm -fr .tox/
+ rm -f .coverage
+ rm -fr htmlcov/
+ rm -fr .pytest_cache
+
+lint: ## check style with flake8
+ flake8 dmriprep tests
+
+test: ## run tests quickly with the default Python
+ py.test
+
+test-all: ## run tests on every Python version with tox
+ tox
+
+coverage: ## check code coverage quickly with the default Python
+ coverage run --source dmriprep -m pytest
+ coverage report -m
+ coverage html
+ $(BROWSER) htmlcov/index.html
+
+docs: ## generate Sphinx HTML documentation, including API docs
+ rm -f docs/dmriprep*.rst
+ rm -f docs/modules.rst
+ sphinx-apidoc -o docs/ dmriprep
+ $(MAKE) -C docs clean
+ $(MAKE) -C docs html
+ $(BROWSER) docs/_build/html/index.html
+
+servedocs: docs ## compile the docs watching for changes
+ watchmedo shell-command -p '*.rst' -c '$(MAKE) -C docs html' -R -D .
+
+release: dist ## package and upload a release
+ twine upload dist/*
+
+dist: clean ## builds source and wheel package
+ python setup.py sdist
+ python setup.py bdist_wheel
+ ls -l dist
+
+install: clean ## install the package to the active Python's site-packages
+ python setup.py install
+
+docker: ## build the dmriprep docker image
+ docker build --rm -t nipreps/dmriprep:latest \
+ --build-arg BUILD_DATE=`date -u +"%Y-%m-%dT%H:%M:%SZ"` \
+ --build-arg VCS_REF=`git rev-parse --short HEAD` \
+ --build-arg VERSION=$( python get_version.py ) .
diff --git a/README.rst b/README.rst
index f4add394..9f6310b4 100644
--- a/README.rst
+++ b/README.rst
@@ -4,7 +4,7 @@ dmriprep
.. image:: https://badgen.net/badge/chat/on%20mattermost/blue
:target: https://mattermost.brainhack.org/brainhack/channels/dmriprep
-
+
.. image:: https://img.shields.io/pypi/v/dmriprep.svg
:target: https://pypi.python.org/pypi/dmriprep
@@ -49,4 +49,4 @@ segmentation, skullstripping etc.) providing outputs that can be
easily submitted to a variety of tractography algorithms.
[Documentation `dmriprep.org `_]
-[Support `neurostars.org `_]
+[Support `neurostars.org `_]
diff --git a/dmriprep/.idea/dmriprep.iml b/dmriprep/.idea/dmriprep.iml
new file mode 100644
index 00000000..86abb4ca
--- /dev/null
+++ b/dmriprep/.idea/dmriprep.iml
@@ -0,0 +1,13 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dmriprep/.idea/libraries/R_User_Library.xml b/dmriprep/.idea/libraries/R_User_Library.xml
new file mode 100644
index 00000000..71f5ff74
--- /dev/null
+++ b/dmriprep/.idea/libraries/R_User_Library.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dmriprep/.idea/misc.xml b/dmriprep/.idea/misc.xml
new file mode 100644
index 00000000..1d8a5d64
--- /dev/null
+++ b/dmriprep/.idea/misc.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dmriprep/.idea/modules.xml b/dmriprep/.idea/modules.xml
new file mode 100644
index 00000000..3913f593
--- /dev/null
+++ b/dmriprep/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dmriprep/.idea/vcs.xml b/dmriprep/.idea/vcs.xml
new file mode 100644
index 00000000..6c0b8635
--- /dev/null
+++ b/dmriprep/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dmriprep/.idea/workspace.xml b/dmriprep/.idea/workspace.xml
new file mode 100644
index 00000000..f2bb0f0f
--- /dev/null
+++ b/dmriprep/.idea/workspace.xml
@@ -0,0 +1,274 @@
+          susceptibility_args
+          /work
+          work
+          slm
+          move
+          reverse
+          'none'
+          work_dir
+          rescale
+          split
+      1569954465602
+      1569954465602
\ No newline at end of file
diff --git a/dmriprep/__about__.py b/dmriprep/__about__.py
index 8f69a634..0b94bc0a 100644
--- a/dmriprep/__about__.py
+++ b/dmriprep/__about__.py
@@ -1,9 +1,10 @@
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Base module variables."""
-from ._version import get_versions
-__version__ = get_versions()['version']
-del get_versions
+#from ._version import get_versions
+#__version__ = get_versions()['version']
+#del get_versions
+__version__ = '0.2.1'
__packagename__ = 'dmriprep'
__copyright__ = 'Copyright 2019, The dMRIPrep developers'
diff --git a/dmriprep/__init__.py b/dmriprep/__init__.py
index 72e61e5a..35bac792 100644
--- a/dmriprep/__init__.py
+++ b/dmriprep/__init__.py
@@ -15,7 +15,7 @@
'__packagename__',
]
-# cmp is not used by fmriprep, so ignore nipype-generated warnings
+# cmp is not used by dmriprep, so ignore nipype-generated warnings
_warnings.filterwarnings('ignore', r'cmp not installed')
_warnings.filterwarnings('ignore', r'This has not been fully tested. Please report any failures.')
_warnings.filterwarnings('ignore', r"can't resolve package from __spec__ or __package__")
diff --git a/dmriprep/cli/run.py b/dmriprep/cli/run.py
index 05f74b0c..4837d96a 100755
--- a/dmriprep/cli/run.py
+++ b/dmriprep/cli/run.py
@@ -1,44 +1,22 @@
#!/usr/bin/env python
"""dMRI preprocessing workflow."""
-import os
from pathlib import Path
-import logging
import sys
-import gc
import uuid
import warnings
+warnings.filterwarnings("ignore")
from argparse import ArgumentParser
from argparse import ArgumentDefaultsHelpFormatter
from multiprocessing import cpu_count
from time import strftime
-logging.addLevelName(25, 'IMPORTANT') # Add a new level between INFO and WARNING
-logging.addLevelName(15, 'VERBOSE') # Add a new level between INFO and DEBUG
-logger = logging.getLogger('cli')
-
-
-def _warn_redirect(message, category, filename, lineno, file=None, line=None):
- logger.warning('Captured warning (%s): %s', category, message)
-
-
-def check_deps(workflow):
- from nipype.utils.filemanip import which
- return sorted(
- (node.interface.__class__.__name__, node.interface._cmd)
- for node in workflow._get_all_nodes()
- if (hasattr(node.interface, '_cmd') and
- which(node.interface._cmd.split()[0]) is None))
-
def get_parser():
"""Build parser object"""
- from smriprep.cli.utils import ParseTemplates, output_space as _output_space
- from templateflow.api import templates
from packaging.version import Version
- from ..__about__ import __version__
- from ..config import NONSTANDARD_REFERENCES
- from .version import check_latest, is_flagged
+ from dmriprep.__about__ import __version__
+ from dmriprep.cli.version import check_latest, is_flagged
verstr = 'dmriprep v{}'.format(__version__)
currentv = Version(__version__)
@@ -51,379 +29,86 @@ def get_parser():
# required, positional arguments
# IMPORTANT: they must go directly with the parser object
parser.add_argument('bids_dir', action='store', type=Path,
- help='the root folder of a BIDS valid dataset (sub-XXXXX folders should '
+ help='The root folder of a BIDS valid dataset (sub-XXXXX folders should '
'be found at the top level in this folder).')
parser.add_argument('output_dir', action='store', type=Path,
- help='the output path for the outcomes of preprocessing and visual '
+ help='The output path for the outcomes of preprocessing and visual '
'reports')
parser.add_argument('analysis_level', choices=['participant'],
- help='processing stage to be run, only "participant" in the case of '
+ help='Processing stage to be run, only "participant" in the case of '
'dMRIPrep (see BIDS-Apps specification).')
# optional arguments
parser.add_argument('--version', action='version', version=verstr)
g_bids = parser.add_argument_group('Options for filtering BIDS queries')
- g_bids.add_argument('--skip-bids-validation', action='store_true', default=False,
- help='assume the input dataset is BIDS compliant and skip the validation')
- g_bids.add_argument('--participant_label', '--participant-label', action='store', nargs='+',
- help='a space delimited list of participant identifiers or a single '
+ g_bids.add_argument('--skip_bids_validation', action='store_true', default=False,
+ help='Assume the input dataset is BIDS compliant and skip the validation')
+ g_bids.add_argument('--participant_label', action='store', nargs='+',
+ help='A space delimited list of participant identifiers or a single '
'identifier (the sub- prefix can be removed)')
- # Re-enable when option is actually implemented
- # g_bids.add_argument('-s', '--session-id', action='store', default='single_session',
- # help='select a specific session to be processed')
- # Re-enable when option is actually implemented
- # g_bids.add_argument('-r', '--run-id', action='store', default='single_run',
- # help='select a specific run to be processed')
-
+ g_bids.add_argument('-s', '--session_id', action='store', nargs='+',
+ help='A space delimited list of session identifiers or a single '
+ 'identifier (the ses- prefix can be removed)')
g_perfm = parser.add_argument_group('Options to handle performance')
- g_perfm.add_argument('--nprocs', '--n_cpus', '-n-cpus', action='store', type=int,
- help='maximum number of threads across all processes')
- g_perfm.add_argument('--omp-nthreads', action='store', type=int, default=0,
- help='maximum number of threads per-process')
- g_perfm.add_argument('--mem_mb', '--mem-mb', action='store', default=0, type=int,
- help='upper bound memory limit for dMRIPrep processes')
- g_perfm.add_argument('--low-mem', action='store_true',
- help='attempt to reduce memory usage (will increase disk usage '
- 'in working directory)')
- g_perfm.add_argument('--use-plugin', action='store', default=None,
- help='nipype plugin configuration file')
- g_perfm.add_argument('--anat-only', action='store_true',
- help='run anatomical workflows only')
- g_perfm.add_argument('--boilerplate', action='store_true',
- help='generate boilerplate only')
- g_perfm.add_argument("-v", "--verbose", dest="verbose_count", action="count", default=0,
- help="increases log verbosity for each occurence, debug level is -vvv")
+ g_perfm.add_argument('--plugin', action='store', type=str, default='MultiProc',
+ help='Plugin type. Options include MultiProc or Linear')
+ g_perfm.add_argument('--nprocs', '--n_cpus', action='store', type=int, default=8,
+ help='Maximum number of threads across all processes. Minimum required is 8.')
+ g_perfm.add_argument('--omp_nthreads', action='store', type=int, default=2,
+ help='Maximum number of threads per-process')
+ g_perfm.add_argument('--mem_gb', action='store', default=16, type=int,
+ help='Upper bound memory limit for dMRIPrep processes. Minimum required is 16 GB.')
+ g_perfm.add_argument("-v", "--verbose", action="store_true", default=False,
+ help="Perform debug and logging.")
g_conf = parser.add_argument_group('Workflow configuration')
- g_conf.add_argument(
- '--ignore', required=False, action='store', nargs="+", default=[], choices=['sdc'],
- help='ignore selected aspects of the input dataset to disable corresponding '
- 'parts of the workflow (a space delimited list)')
- g_conf.add_argument(
- '--longitudinal', action='store_true',
- help='treat dataset as longitudinal - may increase runtime')
- g_conf.add_argument(
- '--output-spaces', nargs='+', action=ParseTemplates,
- help="""\
-Standard and non-standard spaces to resample anatomical and functional images to. \
-Standard spaces may be specified by the form \
-``[:res-][:cohort-][...]``, where ```` is \
-a keyword (valid keywords: %s) or path pointing to a user-supplied template, and \
-may be followed by optional, colon-separated parameters. \
-Non-standard spaces (valid keywords: %s) imply specific orientations and sampling \
-grids. \
-Important to note, the ``res-*`` modifier does not define the resolution used for \
-the spatial normalization.
-For further details, please check out \
-https://dmriprep.readthedocs.io/en/%s/spaces.html""" % (
- ', '.join('"%s"' % s for s in templates()), ', '.join(NONSTANDARD_REFERENCES),
- currentv.base_version if is_release else 'latest'))
-
- # ANTs options
- g_ants = parser.add_argument_group('Specific options for ANTs registrations')
- g_ants.add_argument(
- '--skull-strip-template', action='store', default='OASIS30ANTs', type=_output_space,
- help='select a template for skull-stripping with antsBrainExtraction')
- g_ants.add_argument('--skull-strip-fixed-seed', action='store_true',
- help='do not use a random seed for skull-stripping - will ensure '
- 'run-to-run replicability when used with --omp-nthreads 1')
-
- # Fieldmap options
- g_fmap = parser.add_argument_group('Specific options for handling fieldmaps')
- g_fmap.add_argument('--fmap-bspline', action='store_true', default=False,
- help='fit a B-Spline field using least-squares (experimental)')
- g_fmap.add_argument('--fmap-no-demean', action='store_false', default=True,
- help='do not remove median (within mask) from fieldmap')
-
- # SyN-unwarp options
- g_syn = parser.add_argument_group('Specific options for SyN distortion correction')
- g_syn.add_argument('--use-syn-sdc', action='store_true', default=False,
- help='EXPERIMENTAL: Use fieldmap-free distortion correction')
- g_syn.add_argument('--force-syn', action='store_true', default=False,
- help='EXPERIMENTAL/TEMPORARY: Use SyN correction in addition to '
- 'fieldmap correction, if available')
-
- # FreeSurfer options
- g_fs = parser.add_argument_group('Specific options for FreeSurfer preprocessing')
- g_fs.add_argument(
- '--fs-license-file', metavar='PATH', type=Path,
- help='Path to FreeSurfer license key file. Get it (for free) by registering'
- ' at https://surfer.nmr.mgh.harvard.edu/registration.html')
-
- # Surface generation xor
- g_surfs = parser.add_argument_group('Surface preprocessing options')
- g_surfs_xor = g_surfs.add_mutually_exclusive_group()
- g_surfs_xor.add_argument('--no-submm-recon', action='store_false', dest='hires',
- help='disable sub-millimeter (hires) reconstruction')
- g_surfs_xor.add_argument('--fs-no-reconall', '--no-freesurfer',
- action='store_false', dest='run_reconall',
- help='disable FreeSurfer surface preprocessing.'
- ' Note : `--no-freesurfer` is deprecated and will be removed in 1.2.'
- ' Use `--fs-no-reconall` instead.')
+ g_conf.add_argument('--sdc_method', action='store', default='topup',
+ help='Susceptibility distortion correction type')
+ g_conf.add_argument('--denoise_strategy', action='store', default='mppca',
+ help='Denoising strategy. Choices include: mppca, nlmeans, localpca, and nlsam')
+ g_conf.add_argument('--outlier_threshold', action='store', default=0.02,
+ help='Percent of bad slices required to reject volume.')
+ g_conf.add_argument('--vox_size', action='store', default='1mm',
+ help='Voxel resolution in mm.')
g_other = parser.add_argument_group('Other options')
- g_other.add_argument('-w', '--work-dir', action='store', type=Path, default=Path('work'),
- help='path where intermediate results should be stored')
- g_other.add_argument(
- '--resource-monitor', action='store_true', default=False,
- help='enable Nipype\'s resource monitoring to keep track of memory and CPU usage')
- g_other.add_argument(
- '--reports-only', action='store_true', default=False,
- help='only generate reports, don\'t run workflows. This will only rerun report '
- 'aggregation, not reportlet generation for specific nodes.')
- g_other.add_argument(
- '--run-uuid', action='store', default=None,
- help='Specify UUID of previous run, to include error logs in report. '
- 'No effect without --reports-only.')
- g_other.add_argument('--write-graph', action='store_true', default=False,
+ g_other.add_argument('-w', '--work_dir', action='store', type=Path, default=Path('/tmp/work'),
+ help='Path where intermediate results should be stored. Default is /tmp/work')
+ g_other.add_argument('--write_graph', action='store_true', default=False,
help='Write workflow graph.')
- g_other.add_argument('--stop-on-first-crash', action='store_true', default=False,
- help='Force stopping on first crash, even if a work directory'
- ' was specified.')
- g_other.add_argument('--notrack', action='store_true', default=False,
- help='Opt-out of sending tracking information of this run to '
- 'the dMRIPrep developers. This information helps to '
- 'improve dMRIPrep and provides an indicator of real '
- 'world usage crucial for obtaining funding.')
- g_other.add_argument('--sloppy', action='store_true', default=False, dest='debug',
- help='Use low-quality tools for speed - TESTING ONLY')
latest = check_latest()
if latest is not None and currentv < latest:
- print("""\
-You are using dMRIPrep-%s, and a newer version of dMRIPrep is available: %s.
-Please check out our documentation about how and when to upgrade:
-https://dmriprep.readthedocs.io/en/latest/faq.html#upgrading""" % (
+ print("""You are using dMRIPrep-%s, and a newer version of dMRIPrep is available: %s.
+ Please check out our documentation about how and when to upgrade:
+ https://dmriprep.readthedocs.io/en/latest/faq.html#upgrading""" % (
__version__, latest), file=sys.stderr)
_blist = is_flagged()
if _blist[0]:
_reason = _blist[1] or 'unknown'
- print("""\
-WARNING: Version %s of dMRIPrep (current) has been FLAGGED
-(reason: %s).
-That means some severe flaw was found in it and we strongly
-discourage its usage.""" % (__version__, _reason), file=sys.stderr)
+ print("""WARNING: Version %s of dMRIPrep (current) has been FLAGGED (reason: %s).
+ That means some severe flaw was found in it and we strongly
+ discourage its usage.""" % (__version__, _reason), file=sys.stderr)
return parser
-def main():
- """Entry point"""
- from nipype import logging as nlogging
- from multiprocessing import set_start_method, Process, Manager
- from ..utils.bids import write_derivative_description, validate_input_dir
- set_start_method('forkserver')
- warnings.showwarning = _warn_redirect
- opts = get_parser().parse_args()
-
- exec_env = os.name
-
- # special variable set in the container
- if os.getenv('IS_DOCKER_8395080871'):
- exec_env = 'singularity'
- cgroup = Path('/proc/1/cgroup')
- if cgroup.exists() and 'docker' in cgroup.read_text():
- exec_env = 'docker'
- if os.getenv('DOCKER_VERSION_8395080871'):
- exec_env = 'dmriprep-docker'
-
- sentry_sdk = None
- if not opts.notrack:
- import sentry_sdk
- from ..utils.sentry import sentry_setup
- sentry_setup(opts, exec_env)
-
- if opts.debug:
- print('WARNING: Option --debug is deprecated and has no effect',
- file=sys.stderr)
-
- # Validate inputs
- if not opts.skip_bids_validation:
- print("Making sure the input data is BIDS compliant (warnings can be ignored in most "
- "cases).")
- validate_input_dir(exec_env, opts.bids_dir, opts.participant_label)
-
- # FreeSurfer license
- default_license = str(Path(os.getenv('FREESURFER_HOME')) / 'license.txt')
- # Precedence: --fs-license-file, $FS_LICENSE, default_license
- license_file = opts.fs_license_file or Path(os.getenv('FS_LICENSE', default_license))
- if not license_file.exists():
- raise RuntimeError("""\
-ERROR: a valid license file is required for FreeSurfer to run. dMRIPrep looked for an existing \
-license file at several paths, in this order: 1) command line argument ``--fs-license-file``; \
-2) ``$FS_LICENSE`` environment variable; and 3) the ``$FREESURFER_HOME/license.txt`` path. Get it \
-(for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html""")
- os.environ['FS_LICENSE'] = str(license_file.resolve())
-
- # Retrieve logging level
- log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
- # Set logging
- logger.setLevel(log_level)
- nlogging.getLogger('nipype.workflow').setLevel(log_level)
- nlogging.getLogger('nipype.interface').setLevel(log_level)
- nlogging.getLogger('nipype.utils').setLevel(log_level)
-
- # Call build_workflow(opts, retval)
- with Manager() as mgr:
- retval = mgr.dict()
- p = Process(target=build_workflow, args=(opts, retval))
- p.start()
- p.join()
-
- retcode = p.exitcode or retval.get('return_code', 0)
-
- bids_dir = Path(retval.get('bids_dir'))
- output_dir = Path(retval.get('output_dir'))
- work_dir = Path(retval.get('work_dir'))
- plugin_settings = retval.get('plugin_settings', None)
- subject_list = retval.get('subject_list', None)
- dmriprep_wf = retval.get('workflow', None)
- run_uuid = retval.get('run_uuid', None)
-
- if opts.reports_only:
- sys.exit(int(retcode > 0))
-
- if opts.boilerplate:
- sys.exit(int(retcode > 0))
-
- if dmriprep_wf and opts.write_graph:
- dmriprep_wf.write_graph(graph2use="colored", format='svg', simple_form=True)
-
- retcode = retcode or int(dmriprep_wf is None)
- if retcode != 0:
- sys.exit(retcode)
-
- # Check workflow for missing commands
- missing = check_deps(dmriprep_wf)
- if missing:
- print("Cannot run dMRIPrep. Missing dependencies:", file=sys.stderr)
- for iface, cmd in missing:
- print("\t{} (Interface: {})".format(cmd, iface))
- sys.exit(2)
- # Clean up master process before running workflow, which may create forks
- gc.collect()
-
- # Sentry tracking
- if not opts.notrack:
- from ..utils.sentry import start_ping
- start_ping(run_uuid, len(subject_list))
-
- errno = 1 # Default is error exit unless otherwise set
- try:
- dmriprep_wf.run(**plugin_settings)
- except Exception as e:
- if not opts.notrack:
- from ..utils.sentry import process_crashfile
- crashfolders = [output_dir / 'dmriprep' / 'sub-{}'.format(s) / 'log' / run_uuid
- for s in subject_list]
- for crashfolder in crashfolders:
- for crashfile in crashfolder.glob('crash*.*'):
- process_crashfile(crashfile)
-
- if "Workflow did not execute cleanly" not in str(e):
- sentry_sdk.capture_exception(e)
- logger.critical('dMRIPrep failed: %s', e)
- raise
- else:
- if opts.run_reconall:
- from templateflow import api
- from niworkflows.utils.misc import _copy_any
- dseg_tsv = str(api.get('fsaverage', suffix='dseg', extension=['.tsv']))
- _copy_any(dseg_tsv,
- str(output_dir / 'dmriprep' / 'desc-aseg_dseg.tsv'))
- _copy_any(dseg_tsv,
- str(output_dir / 'dmriprep' / 'desc-aparcaseg_dseg.tsv'))
- errno = 0
- logger.log(25, 'dMRIPrep finished without errors')
- if not opts.notrack:
- sentry_sdk.capture_message('dMRIPrep finished without errors',
- level='info')
- finally:
- from niworkflows.reports import generate_reports
- from subprocess import check_call, CalledProcessError, TimeoutExpired
- from pkg_resources import resource_filename as pkgrf
- from shutil import copyfile
-
- citation_files = {
- ext: output_dir / 'dmriprep' / 'logs' / ('CITATION.%s' % ext)
- for ext in ('bib', 'tex', 'md', 'html')
- }
-
- if citation_files['md'].exists():
- # Generate HTML file resolving citations
- cmd = ['pandoc', '-s', '--bibliography',
- pkgrf('dmriprep', 'data/boilerplate.bib'),
- '--filter', 'pandoc-citeproc',
- '--metadata', 'pagetitle="dMRIPrep citation boilerplate"',
- str(citation_files['md']),
- '-o', str(citation_files['html'])]
-
- logger.info('Generating an HTML version of the citation boilerplate...')
- try:
- check_call(cmd, timeout=10)
- except (FileNotFoundError, CalledProcessError, TimeoutExpired):
- logger.warning('Could not generate CITATION.html file:\n%s',
- ' '.join(cmd))
-
- # Generate LaTex file resolving citations
- cmd = ['pandoc', '-s', '--bibliography',
- pkgrf('dmriprep', 'data/boilerplate.bib'),
- '--natbib', str(citation_files['md']),
- '-o', str(citation_files['tex'])]
- logger.info('Generating a LaTeX version of the citation boilerplate...')
- try:
- check_call(cmd, timeout=10)
- except (FileNotFoundError, CalledProcessError, TimeoutExpired):
- logger.warning('Could not generate CITATION.tex file:\n%s',
- ' '.join(cmd))
- else:
- copyfile(pkgrf('dmriprep', 'data/boilerplate.bib'),
- citation_files['bib'])
- else:
- logger.warning('dMRIPrep could not find the markdown version of '
- 'the citation boilerplate (%s). HTML and LaTeX versions'
- ' of it will not be available', citation_files['md'])
-
- # Generate reports phase
- failed_reports = generate_reports(
- subject_list, output_dir, work_dir, run_uuid, packagename='dmriprep')
- write_derivative_description(bids_dir, output_dir / 'dmriprep')
-
- if failed_reports and not opts.notrack:
- sentry_sdk.capture_message(
- 'Report generation failed for %d subjects' % failed_reports,
- level='error')
- sys.exit(int((errno + failed_reports) > 0))
-
-
def build_workflow(opts, retval):
- """
- Create the Nipype Workflow that supports the whole execution
- graph, given the inputs.
-
- All the checks and the construction of the workflow are done
- inside this function that has pickleable inputs and output
- dictionary (``retval``) to allow isolation using a
- ``multiprocessing.Process`` that allows dmriprep to enforce
- a hard-limited memory-scope.
-
- """
+ import numpy as np
+ import shutil
+ import os
from bids import BIDSLayout
-
- from nipype import logging as nlogging, config as ncfg
- from niworkflows.utils.bids import collect_participants
- from niworkflows.reports import generate_reports
- from ..__about__ import __version__
- from ..workflows.base import init_dmriprep_wf
-
- build_log = nlogging.getLogger('nipype.workflow')
+ from dmriprep.__about__ import __version__
+ from dmriprep.workflows.dwi.base import init_base_wf
+ from dmriprep.utils.bids import collect_sessions, get_bids_dict
INIT_MSG = """
Running dMRIPrep version {version}:
* BIDS dataset path: {bids_dir}.
- * Participant list: {subject_list}.
+ * Participant list: {subject}.
+ * Session list: {session}.
* Run identifier: {uuid}.
""".format
@@ -438,175 +123,168 @@ def build_workflow(opts, retval):
retval['work_dir'] = str(work_dir)
if output_dir == bids_dir:
- build_log.error(
+        raise ValueError(
'The selected output folder is the same as the input BIDS folder. '
'Please modify the output path (suggestion: %s).',
bids_dir / 'derivatives' / ('dmriprep-%s' % __version__.split('+')[0]))
retval['return_code'] = 1
return retval
- output_spaces = parse_spaces(opts)
-
# Set up some instrumental utilities
run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid.uuid4())
retval['run_uuid'] = run_uuid
# First check that bids_dir looks like a BIDS folder
- layout = BIDSLayout(str(bids_dir), validate=False)
- subject_list = collect_participants(
- layout, participant_label=opts.participant_label)
- retval['subject_list'] = subject_list
-
- # Load base plugin_settings from file if --use-plugin
- if opts.use_plugin is not None:
- from yaml import load as loadyml
- with open(opts.use_plugin) as f:
- plugin_settings = loadyml(f)
- plugin_settings.setdefault('plugin_args', {})
+ if opts.skip_bids_validation is True:
+ validate = False
else:
- # Defaults
- plugin_settings = {
- 'plugin': 'MultiProc',
- 'plugin_args': {
- 'raise_insufficient': False,
- 'maxtasksperchild': 1,
- }
- }
+ validate = True
+ layout = BIDSLayout(str(bids_dir), validate=validate)
+ bids_dict = get_bids_dict(layout, opts.participant_label, opts.session_id)
+    subjects_list = [list(bids_dict.keys())[0]]
+ session_list = list(list(bids_dict.values())[0].keys())
+ retval['subject_list'] = subjects_list
+ retval['session_list'] = session_list
# Resource management options
- # Note that we're making strong assumptions about valid plugin args
- # This may need to be revisited if people try to use batch plugins
- nprocs = plugin_settings['plugin_args'].get('n_procs')
- # Permit overriding plugin config with specific CLI options
- if nprocs is None or opts.nprocs is not None:
- nprocs = opts.nprocs
- if nprocs is None or nprocs < 1:
- nprocs = cpu_count()
- plugin_settings['plugin_args']['n_procs'] = nprocs
-
- if opts.mem_mb:
- plugin_settings['plugin_args']['memory_gb'] = opts.mem_mb / 1024
-
omp_nthreads = opts.omp_nthreads
+ mem_gb = opts.mem_gb
+ nprocs = opts.nprocs
+ if nprocs is None:
+ nprocs = cpu_count()
+
if omp_nthreads == 0:
- omp_nthreads = min(nprocs - 1 if nprocs > 1 else cpu_count(), 8)
+        omp_nthreads = int(np.round(min(float(nprocs) - 1 if float(nprocs) > 1 else cpu_count(), 8), 0))
- if 1 < nprocs < omp_nthreads:
- build_log.warning(
- 'Per-process threads (--omp-nthreads=%d) exceed total '
- 'threads (--nprocs/--n_cpus=%d)', omp_nthreads, nprocs)
- retval['plugin_settings'] = plugin_settings
+ if 1 < float(nprocs) < float(omp_nthreads):
+        raise RuntimeWarning('Per-process threads (--omp_nthreads=%d) exceed total threads '
+                             '(--nprocs/--n_cpus=%d)' % (omp_nthreads, nprocs))
# Set up directories
log_dir = output_dir / 'dmriprep' / 'logs'
+
# Check and create output and working directories
output_dir.mkdir(exist_ok=True, parents=True)
log_dir.mkdir(exist_ok=True, parents=True)
work_dir.mkdir(exist_ok=True, parents=True)
- # Nipype config (logs and execution)
- ncfg.update_config({
- 'logging': {
- 'log_directory': str(log_dir),
- 'log_to_file': True
- },
- 'execution': {
- 'crashdump_dir': str(log_dir),
- 'crashfile_format': 'txt',
- 'get_linked_libs': False,
- 'stop_on_first_crash': opts.stop_on_first_crash,
- },
- 'monitoring': {
- 'enabled': opts.resource_monitor,
- 'sample_frequency': '0.5',
- 'summary_append': True,
+ uuid_dir = str(work_dir) + '/' + str(run_uuid)
+ if os.path.exists(uuid_dir):
+ shutil.rmtree(uuid_dir)
+ os.mkdir(uuid_dir)
+
+ # Single-subject pipeline
+ wf = init_base_wf(
+ bids_dict=bids_dict,
+ output_dir=str(output_dir),
+ sdc_method=opts.sdc_method,
+ denoise_strategy=opts.denoise_strategy,
+ vox_size=opts.vox_size,
+ outlier_thresh=opts.outlier_threshold,
+ omp_nthreads=omp_nthreads,
+ work_dir=uuid_dir
+ )
+
+ wf.base_dir = uuid_dir
+
+ if opts.verbose is True:
+ from nipype import config, logging
+ cfg_v = dict(logging={'workflow_level': 'DEBUG', 'utils_level': 'DEBUG', 'interface_level': 'DEBUG',
+ 'log_directory': str(log_dir), 'log_to_file': True},
+ monitoring={'enabled': True, 'sample_frequency': '0.1', 'summary_append': True,
+ 'summary_file': str(wf.base_dir)})
+ logging.update_logging(config)
+ config.update_config(cfg_v)
+ config.enable_debug_mode()
+ config.enable_resource_monitor()
+
+ import logging
+ callback_log_path = "%s%s" % (wf.base_dir, '/run_stats.log')
+ logger = logging.getLogger('callback')
+ logger.setLevel(logging.DEBUG)
+ handler = logging.FileHandler(callback_log_path)
+ logger.addHandler(handler)
+
+ # Set runtime/logging configurations
+ plugin_type = opts.plugin
+ cfg = dict(
+ execution={
+ "stop_on_first_crash": False,
+ "hash_method": "content",
+ "crashfile_format": "txt",
+ "display_variable": ":0",
+ "job_finished_timeout": 65,
+ "matplotlib_backend": "Agg",
+ "plugin": plugin_type,
+ "use_relative_paths": True,
+ "parameterize_dirs": True,
+ "remove_unnecessary_outputs": False,
+ "remove_node_directories": False,
+ "poll_sleep_duration": 0.1,
}
- })
-
- if opts.resource_monitor:
- ncfg.enable_resource_monitor()
-
- # Called with reports only
- if opts.reports_only:
- build_log.log(25, 'Running --reports-only on participants %s', ', '.join(subject_list))
- if opts.run_uuid is not None:
- run_uuid = opts.run_uuid
- retval['run_uuid'] = run_uuid
- retval['return_code'] = generate_reports(
- subject_list, output_dir, work_dir, run_uuid,
- packagename='dmriprep')
- return retval
-
- # Build main workflow
- build_log.log(25, INIT_MSG(
- version=__version__,
- bids_dir=bids_dir,
- subject_list=subject_list,
- uuid=run_uuid)
- )
-
- retval['workflow'] = init_dmriprep_wf(
- anat_only=opts.anat_only,
- debug=opts.debug,
- force_syn=opts.force_syn,
- freesurfer=opts.run_reconall,
- hires=opts.hires,
- ignore=opts.ignore,
- layout=layout,
- longitudinal=opts.longitudinal,
- low_mem=opts.low_mem,
- omp_nthreads=omp_nthreads,
- output_dir=str(output_dir),
- output_spaces=output_spaces,
- run_uuid=run_uuid,
- skull_strip_fixed_seed=opts.skull_strip_fixed_seed,
- skull_strip_template=opts.skull_strip_template,
- subject_list=subject_list,
- use_syn=opts.use_syn_sdc,
- work_dir=str(work_dir),
)
+ for key in cfg.keys():
+ for setting, value in cfg[key].items():
+ wf.config[key][setting] = value
+ try:
+ wf.write_graph(graph2use="colored", format='png')
+    except Exception:
+ pass
+
+ if opts.verbose is True:
+ from nipype.utils.profiler import log_nodes_cb
+ plugin_args = {'n_procs': int(nprocs),
+ 'memory_gb': int(mem_gb),
+ 'status_callback': log_nodes_cb}
+ else:
+ plugin_args = {'n_procs': int(nprocs),
+ 'memory_gb': int(mem_gb)}
+ print("%s%s%s" % ('\nRunning with ', str(plugin_args), '\n'))
+ wf.run(plugin=str(plugin_type), plugin_args=plugin_args)
retval['return_code'] = 0
- logs_path = Path(output_dir) / 'dmriprep' / 'logs'
- boilerplate = retval['workflow'].visit_desc()
+ if opts.verbose is True:
+ from nipype.utils.draw_gantt_chart import generate_gantt_chart
+ print('Plotting resource profile from run...')
+ generate_gantt_chart("%s%s" % (wf.base_dir, '/run_stats.log'),
+ cores=int(nprocs))
+ handler.close()
+ logger.removeHandler(handler)
- if boilerplate:
- citation_files = {
- ext: logs_path / ('CITATION.%s' % ext)
- for ext in ('bib', 'tex', 'md', 'html')
- }
- # To please git-annex users and also to guarantee consistency
- # among different renderings of the same file, first remove any
- # existing one
- for citation_file in citation_files.values():
- try:
- citation_file.unlink()
- except FileNotFoundError:
- pass
-
- citation_files['md'].write_text(boilerplate)
- build_log.log(25, 'Works derived from this dMRIPrep execution should '
- 'include the following boilerplate:\n\n%s', boilerplate)
- return retval
-
-
-def parse_spaces(opts):
- """Ensure the spaces are correctly parsed."""
- from sys import stderr
- from collections import OrderedDict
- # Set the default template to 'MNI152NLin2009cAsym'
- output_spaces = opts.output_spaces or OrderedDict([('MNI152NLin2009cAsym', {})])
-
- FS_SPACES = set(['fsnative', 'fsaverage', 'fsaverage6', 'fsaverage5'])
- if opts.run_reconall and not list(FS_SPACES.intersection(output_spaces.keys())):
- print("""\
-Although ``--fs-no-reconall`` was not set (i.e., FreeSurfer is to be run), no FreeSurfer \
-output space (valid values are: %s) was selected. Adding default "fsaverage5" to the \
-list of output spaces.""" % ', '.join(FS_SPACES), file=stderr)
- output_spaces['fsaverage5'] = {}
- return output_spaces
+ return
+
+
+def main():
+ """Initializes main script from command-line call to generate single-subject or multi-subject workflow(s)"""
+ import sys
+ try:
+ import dmriprep
+ except ImportError:
+        print('dmriprep not installed! Ensure that you are referencing the correct site-packages and using Python 3.5+')
+
+    if len(sys.argv) < 2:
+ print("\nMissing command-line inputs! See help options with the -h flag.\n")
+ sys.exit()
+
+ opts = get_parser().parse_args()
+
+ try:
+ from multiprocessing import set_start_method, Process, Manager
+ set_start_method('forkserver')
+ with Manager() as mgr:
+ retval = mgr.dict()
+ p = Process(target=build_workflow, args=(opts, retval))
+ p.start()
+ p.join()
+
+ if p.exitcode != 0:
+ sys.exit(p.exitcode)
+    except Exception:
+        print('\nWARNING: Forkserver failed to initialize. Are you using Python 3?')
+ retval = dict()
+ build_workflow(opts, retval)
if __name__ == '__main__':
- raise RuntimeError("dmriprep/cli/run.py should not be run directly;\n"
- "Please `pip install` dmriprep and use the `dmriprep` command")
+ __spec__ = "ModuleSpec(name='builtins', loader=)"
+ main()
diff --git a/dmriprep/cli/version.py b/dmriprep/cli/version.py
index 88e3a651..3338d5ae 100644
--- a/dmriprep/cli/version.py
+++ b/dmriprep/cli/version.py
@@ -63,8 +63,8 @@ def is_flagged():
# https://raw.githubusercontent.com/poldracklab/dmriprep/master/.versions.json
flagged = tuple()
try:
- response = requests.get(url="""\
-https://raw.githubusercontent.com/poldracklab/dmriprep/master/.versions.json""", timeout=1.0)
+ response = requests.get(url="""https://raw.githubusercontent.com/poldracklab/dmriprep/master/.versions.json""",
+ timeout=1.0)
except Exception:
response = None
diff --git a/dmriprep/config/__init__.py b/dmriprep/config/__init__.py
index 8f7e8bdc..e69de29b 100644
--- a/dmriprep/config/__init__.py
+++ b/dmriprep/config/__init__.py
@@ -1,5 +0,0 @@
-# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
-# vi: set ft=python sts=4 ts=4 sw=4 et:
-"""Settings."""
-
-NONSTANDARD_REFERENCES = ['anat', 'T1w', 'dwi', 'fsnative']
diff --git a/dmriprep/config/b02b0.cnf b/dmriprep/config/b02b0.cnf
new file mode 100644
index 00000000..0f45b6ab
--- /dev/null
+++ b/dmriprep/config/b02b0.cnf
@@ -0,0 +1,26 @@
+# Resolution (knot-spacing) of warps in mm
+--warpres=20,16,10,6
+# Subsampling level (a value of 2 indicates that a 2x2x2 neighbourhood is collapsed to 1 voxel)
+--subsamp=2,2,1,1
+# FWHM of gaussian smoothing
+--fwhm=6,4,2,0
+# Maximum number of iterations
+--miter=5,5,10,20
+# Relative weight of regularisation
+--lambda=0.0005,0.00001,0.0000005,0.0000000005
+# If set to 1 lambda is multiplied by the current average squared difference
+--ssqlambda=1
+# Regularisation model
+--regmod=bending_energy
+# If set to 1 movements are estimated along with the field
+--estmov=1,1,0,0
+# 0=Levenberg-Marquardt, 1=Scaled Conjugate Gradient
+--minmet=0,0,1,1
+# Quadratic or cubic splines
+--splineorder=3
+# Precision for calculation and storage of Hessian
+--numprec=double
+# Linear or spline interpolation
+--interp=spline
+# If set to 1 the images are individually scaled to a common mean intensity
+--scale=1
\ No newline at end of file
diff --git a/dmriprep/config/b02b0_1.cnf b/dmriprep/config/b02b0_1.cnf
new file mode 100644
index 00000000..a1dd571d
--- /dev/null
+++ b/dmriprep/config/b02b0_1.cnf
@@ -0,0 +1,26 @@
+# Resolution (knot-spacing) of warps in mm
+--warpres=20,16,10,6
+# Subsampling level (a value of 2 indicates that a 2x2x2 neighbourhood is collapsed to 1 voxel)
+--subsamp=1,1,1,1
+# FWHM of gaussian smoothing
+--fwhm=6,4,2,0
+# Maximum number of iterations
+--miter=5,5,10,20
+# Relative weight of regularisation
+--lambda=0.0005,0.00001,0.0000005,0.0000000005
+# If set to 1 lambda is multiplied by the current average squared difference
+--ssqlambda=1
+# Regularisation model
+--regmod=bending_energy
+# If set to 1 movements are estimated along with the field
+--estmov=1,1,0,0
+# 0=Levenberg-Marquardt, 1=Scaled Conjugate Gradient
+--minmet=0,0,1,1
+# Quadratic or cubic splines
+--splineorder=3
+# Precision for calculation and storage of Hessian
+--numprec=double
+# Linear or spline interpolation
+--interp=spline
+# If set to 1 the images are individually scaled to a common mean intensity
+--scale=1
\ No newline at end of file
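Both configuration files stage FSL topup's multi-resolution optimisation; the second variant disables subsampling (e.g., for volumes with odd matrix sizes). A minimal, hypothetical sketch of pointing nipype's TOPUP wrapper at one of them (input file names are placeholders, not part of this changeset):

# Illustrative only.
from pkg_resources import resource_filename
from nipype.interfaces import fsl

topup = fsl.TOPUP()
topup.inputs.in_file = 'b0s_merged.nii.gz'        # stacked b0 volumes with opposed PE directions
topup.inputs.encoding_file = 'acqparams.txt'      # PE directions and total readout times
topup.inputs.config = resource_filename('dmriprep', 'config/b02b0.cnf')
# results = topup.run()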
diff --git a/dmriprep/config/eddy_params.json b/dmriprep/config/eddy_params.json
new file mode 100644
index 00000000..9b7c5961
--- /dev/null
+++ b/dmriprep/config/eddy_params.json
@@ -0,0 +1,18 @@
+{
+ "flm": "quadratic",
+ "fep": false,
+ "interp": "spline",
+ "nvoxhp": 1000,
+ "fudge_factor": 10,
+ "dont_sep_offs_move": false,
+ "dont_peas": false,
+ "niter": 5,
+ "method": "jac",
+ "repol": true,
+ "num_threads": 1,
+ "is_shelled": false,
+ "use_cuda": false,
+ "cnr_maps": true,
+ "residuals": true,
+ "output_type": "NIFTI_GZ"
+}
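The keys above appear to mirror input names of nipype's fsl.Eddy interface; a hedged sketch of loading the file and applying it to the interface (the loading code is illustrative, not necessarily how the pipeline consumes it):

# Illustrative only.
import json
from nipype.interfaces import fsl

with open('eddy_params.json') as f:        # hypothetical local copy of the file above
    params = json.load(f)

eddy = fsl.Eddy()
for name, value in params.items():
    setattr(eddy.inputs, name, value)      # e.g. eddy.inputs.repol = True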
diff --git a/dmriprep/data/__init__.py b/dmriprep/data/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/dmriprep/data/boilerplate.bib b/dmriprep/data/boilerplate.bib
deleted file mode 100644
index 9946a723..00000000
--- a/dmriprep/data/boilerplate.bib
+++ /dev/null
@@ -1,349 +0,0 @@
-@article{dmriprep,
- author = {Lerma-Usabiaga, Garikoitz and Keshavan, Anisha and Richie-Halford, Adam and Rokem, Ariel and Esteban, Oscar},
- title = {{dMRIPrep: a robust preprocessing pipeline for diffusion MRI}},
- year = 2019,
- doi = {10.5281/zenodo.3392201},
- publisher = {Zenodo},
- journal = {Software}
-}
-
-@article{fmriprep1,
- author = {Esteban, Oscar and Markiewicz, Christopher and Blair, Ross W and Moodie, Craig and Isik, Ayse Ilkay and Erramuzpe Aliaga, Asier and Kent, James and Goncalves, Mathias and DuPre, Elizabeth and Snyder, Madeleine and Oya, Hiroyuki and Ghosh, Satrajit and Wright, Jessey and Durnez, Joke and Poldrack, Russell and Gorgolewski, Krzysztof Jacek},
- title = {{fMRIPrep}: a robust preprocessing pipeline for functional {MRI}},
- year = {2018},
- doi = {10.1038/s41592-018-0235-4},
- journal = {Nature Methods}
-}
-
-@article{fmriprep2,
- author = {Esteban, Oscar and Blair, Ross and Markiewicz, Christopher J. and Berleant, Shoshana L. and Moodie, Craig and Ma, Feilong and Isik, Ayse Ilkay and Erramuzpe, Asier and Kent, James D. andGoncalves, Mathias and DuPre, Elizabeth and Sitek, Kevin R. and Gomez, Daniel E. P. and Lurie, Daniel J. and Ye, Zhifang and Poldrack, Russell A. and Gorgolewski, Krzysztof J.},
- title = {fMRIPrep},
- year = 2018,
- doi = {10.5281/zenodo.852659},
- publisher = {Zenodo},
- journal = {Software}
-}
-
-@article{nipype1,
- author = {Gorgolewski, K. and Burns, C. D. and Madison, C. and Clark, D. and Halchenko, Y. O. and Waskom, M. L. and Ghosh, S.},
- doi = {10.3389/fninf.2011.00013},
- journal = {Frontiers in Neuroinformatics},
- pages = 13,
- shorttitle = {Nipype},
- title = {Nipype: a flexible, lightweight and extensible neuroimaging data processing framework in Python},
- volume = 5,
- year = 2011
-}
-
-@article{nipype2,
- author = {Gorgolewski, Krzysztof J. and Esteban, Oscar and Markiewicz, Christopher J. and Ziegler, Erik and Ellis, David Gage and Notter, Michael Philipp and Jarecka, Dorota and Johnson, Hans and Burns, Christopher and Manhães-Savio, Alexandre and Hamalainen, Carlo and Yvernault, Benjamin and Salo, Taylor and Jordan, Kesshi and Goncalves, Mathias and Waskom, Michael and Clark, Daniel and Wong, Jason and Loney, Fred and Modat, Marc and Dewey, Blake E and Madison, Cindee and Visconti di Oleggio Castello, Matteo and Clark, Michael G. and Dayan, Michael and Clark, Dav and Keshavan, Anisha and Pinsard, Basile and Gramfort, Alexandre and Berleant, Shoshana and Nielson, Dylan M. and Bougacha, Salma and Varoquaux, Gael and Cipollini, Ben and Markello, Ross and Rokem, Ariel and Moloney, Brendan and Halchenko, Yaroslav O. and Wassermann , Demian and Hanke, Michael and Horea, Christian and Kaczmarzyk, Jakub and Gilles de Hollander and DuPre, Elizabeth and Gillman, Ashley and Mordom, David and Buchanan, Colin and Tungaraza, Rosalia and Pauli, Wolfgang M. and Iqbal, Shariq and Sikka, Sharad and Mancini, Matteo and Schwartz, Yannick and Malone, Ian B. and Dubois, Mathieu and Frohlich, Caroline and Welch, David and Forbes, Jessica and Kent, James and Watanabe, Aimi and Cumba, Chad and Huntenburg, Julia M. and Kastman, Erik and Nichols, B. Nolan and Eshaghi, Arman and Ginsburg, Daniel and Schaefer, Alexander and Acland, Benjamin and Giavasis, Steven and Kleesiek, Jens and Erickson, Drew and Küttner, René and Haselgrove, Christian and Correa, Carlos and Ghayoor, Ali and Liem, Franz and Millman, Jarrod and Haehn, Daniel and Lai, Jeff and Zhou, Dale and Blair, Ross and Glatard, Tristan and Renfro, Mandy and Liu, Siqi and Kahn, Ari E. and Pérez-García, Fernando and Triplett, William and Lampe, Leonie and Stadler, Jörg and Kong, Xiang-Zhen and Hallquist, Michael and Chetverikov, Andrey and Salvatore, John and Park, Anne and Poldrack, Russell and Craddock, R. Cameron and Inati, Souheil and Hinds, Oliver and Cooper, Gavin and Perkins, L. Nathan and Marina, Ana and Mattfeld, Aaron and Noel, Maxime and Lukas Snoek and Matsubara, K and Cheung, Brian and Rothmei, Simon and Urchs, Sebastian and Durnez, Joke and Mertz, Fred and Geisler, Daniel and Floren, Andrew and Gerhard, Stephan and Sharp, Paul and Molina-Romero, Miguel and Weinstein, Alejandro and Broderick, William and Saase, Victor and Andberg, Sami Kristian and Harms, Robbert and Schlamp, Kai and Arias, Jaime and Papadopoulos Orfanos, Dimitri and Tarbert, Claire and Tambini, Arielle and De La Vega, Alejandro and Nickson, Thomas and Brett, Matthew and Falkiewicz, Marcel and Podranski, Kornelius and Linkersdörfer, Janosch and Flandin, Guillaume and Ort, Eduard and Shachnev, Dmitry and McNamee, Daniel and Davison, Andrew and Varada, Jan and Schwabacher, Isaac and Pellman, John and Perez-Guevara, Martin and Khanuja, Ranjeet and Pannetier, Nicolas and McDermottroe, Conor and Ghosh, Satrajit},
- title = {Nipype},
- year = 2018,
- doi = {10.5281/zenodo.596855},
- publisher = {Zenodo},
- journal = {Software}
-}
-
-@article{n4,
- author = {Tustison, N. J. and Avants, B. B. and Cook, P. A. and Zheng, Y. and Egan, A. and Yushkevich, P. A. and Gee, J. C.},
- doi = {10.1109/TMI.2010.2046908},
- issn = {0278-0062},
- journal = {IEEE Transactions on Medical Imaging},
- number = 6,
- pages = {1310-1320},
- shorttitle = {N4ITK},
- title = {N4ITK: Improved N3 Bias Correction},
- volume = 29,
- year = 2010
-}
-
-@article{fs_reconall,
- author = {Dale, Anders M. and Fischl, Bruce and Sereno, Martin I.},
- doi = {10.1006/nimg.1998.0395},
- issn = {1053-8119},
- journal = {NeuroImage},
- number = 2,
- pages = {179-194},
- shorttitle = {Cortical Surface-Based Analysis},
- title = {Cortical Surface-Based Analysis: I. Segmentation and Surface Reconstruction},
- url = {http://www.sciencedirect.com/science/article/pii/S1053811998903950},
- volume = 9,
- year = 1999
-}
-
-
-
-@article{mindboggle,
- author = {Klein, Arno and Ghosh, Satrajit S. and Bao, Forrest S. and Giard, Joachim and Häme, Yrjö and Stavsky, Eliezer and Lee, Noah and Rossa, Brian and Reuter, Martin and Neto, Elias Chaibub and Keshavan, Anisha},
- doi = {10.1371/journal.pcbi.1005350},
- issn = {1553-7358},
- journal = {PLOS Computational Biology},
- number = 2,
- pages = {e1005350},
- title = {Mindboggling morphometry of human brains},
- url = {http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005350},
- volume = 13,
- year = 2017
-}
-
-@article{mni152lin,
- title = {A {Probabilistic} {Atlas} of the {Human} {Brain}: {Theory} and {Rationale} for {Its} {Development}: {The} {International} {Consortium} for {Brain} {Mapping} ({ICBM})},
- author = {Mazziotta, John C. and Toga, Arthur W. and Evans, Alan and Fox, Peter and Lancaster, Jack},
- volume = {2},
- issn = {1053-8119},
- shorttitle = {A {Probabilistic} {Atlas} of the {Human} {Brain}},
- doi = {10.1006/nimg.1995.1012},
- number = {2, Part A},
- journal = {NeuroImage},
- year = {1995},
- pages = {89--101}
-}
-
-@article{mni152nlin2009casym,
- title = {Unbiased nonlinear average age-appropriate brain templates from birth to adulthood},
- author = {Fonov, VS and Evans, AC and McKinstry, RC and Almli, CR and Collins, DL},
- doi = {10.1016/S1053-8119(09)70884-5},
- journal = {NeuroImage},
- pages = {S102},
- volume = {47, Supplement 1},
- year = 2009
-}
-
-@article{mni152nlin6asym,
- author = {Evans, AC and Janke, AL and Collins, DL and Baillet, S},
- title = {Brain templates and atlases},
- doi = {10.1016/j.neuroimage.2012.01.024},
- journal = {NeuroImage},
- volume = {62},
- number = {2},
- pages = {911--922},
- year = 2012
-}
-
-@article{ants,
- author = {Avants, B.B. and Epstein, C.L. and Grossman, M. and Gee, J.C.},
- doi = {10.1016/j.media.2007.06.004},
- issn = {1361-8415},
- journal = {Medical Image Analysis},
- number = 1,
- pages = {26-41},
- shorttitle = {Symmetric diffeomorphic image registration with cross-correlation},
- title = {Symmetric diffeomorphic image registration with cross-correlation: Evaluating automated labeling of elderly and neurodegenerative brain},
- url = {http://www.sciencedirect.com/science/article/pii/S1361841507000606},
- volume = 12,
- year = 2008
-}
-
-@article{fsl_fast,
- author = {Zhang, Y. and Brady, M. and Smith, S.},
- doi = {10.1109/42.906424},
- issn = {0278-0062},
- journal = {IEEE Transactions on Medical Imaging},
- number = 1,
- pages = {45-57},
- title = {Segmentation of brain {MR} images through a hidden Markov random field model and the expectation-maximization algorithm},
- volume = 20,
- year = 2001
-}
-
-
-@article{fieldmapless1,
- author = {Wang, Sijia and Peterson, Daniel J. and Gatenby, J. C. and Li, Wenbin and Grabowski, Thomas J. and Madhyastha, Tara M.},
- doi = {10.3389/fninf.2017.00017},
- issn = {1662-5196},
- journal = {Frontiers in Neuroinformatics},
- language = {English},
- title = {Evaluation of Field Map and Nonlinear Registration Methods for Correction of Susceptibility Artifacts in Diffusion {MRI}},
- url = {http://journal.frontiersin.org/article/10.3389/fninf.2017.00017/full},
- volume = 11,
- year = 2017
-}
-
-@phdthesis{fieldmapless2,
- address = {Berlin},
- author = {Huntenburg, Julia M.},
- language = {eng},
- school = {Freie Universität},
- title = {Evaluating nonlinear coregistration of {BOLD} {EPI} and T1w images},
- type = {Master's Thesis},
- url = {http://hdl.handle.net/11858/00-001M-0000-002B-1CB5-A},
- year = 2014
-}
-
-@article{fieldmapless3,
- author = {Treiber, Jeffrey Mark and White, Nathan S. and Steed, Tyler Christian and Bartsch, Hauke and Holland, Dominic and Farid, Nikdokht and McDonald, Carrie R. and Carter, Bob S. and Dale, Anders Martin and Chen, Clark C.},
- doi = {10.1371/journal.pone.0152472},
- issn = {1932-6203},
- journal = {PLOS ONE},
- number = 3,
- pages = {e0152472},
- title = {Characterization and Correction of Geometric Distortions in 814 Diffusion Weighted Images},
- url = {http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0152472},
- volume = 11,
- year = 2016
-}
-
-@article{flirt,
- title = {A global optimisation method for robust affine registration of brain images},
- volume = {5},
- issn = {1361-8415},
- url = {http://www.sciencedirect.com/science/article/pii/S1361841501000366},
- doi = {10.1016/S1361-8415(01)00036-6},
- number = {2},
- urldate = {2018-07-27},
- journal = {Medical Image Analysis},
- author = {Jenkinson, Mark and Smith, Stephen},
- year = {2001},
- keywords = {Affine transformation, flirt, fsl, Global optimisation, Multi-resolution search, Multimodal registration, Robustness},
- pages = {143--156}
-}
-
-@article{mcflirt,
- author = {Jenkinson, Mark and Bannister, Peter and Brady, Michael and Smith, Stephen},
- doi = {10.1006/nimg.2002.1132},
- issn = {1053-8119},
- journal = {NeuroImage},
- number = 2,
- pages = {825-841},
- title = {Improved Optimization for the Robust and Accurate Linear Registration and Motion Correction of Brain Images},
- url = {http://www.sciencedirect.com/science/article/pii/S1053811902911328},
- volume = 17,
- year = 2002
-}
-
-@article{bbr,
- author = {Greve, Douglas N and Fischl, Bruce},
- doi = {10.1016/j.neuroimage.2009.06.060},
- issn = {1095-9572},
- journal = {NeuroImage},
- number = 1,
- pages = {63-72},
- title = {Accurate and robust brain image alignment using boundary-based registration},
- volume = 48,
- year = 2009
-}
-
-@article{aroma,
- author = {Pruim, Raimon H. R. and Mennes, Maarten and van Rooij, Daan and Llera, Alberto and Buitelaar, Jan K. and Beckmann, Christian F.},
- doi = {10.1016/j.neuroimage.2015.02.064},
- issn = {1053-8119},
- journal = {NeuroImage},
- number = {Supplement C},
- pages = {267-277},
- shorttitle = {ICA-AROMA},
- title = {ICA-{AROMA}: A robust {ICA}-based strategy for removing motion artifacts from fMRI data},
- url = {http://www.sciencedirect.com/science/article/pii/S1053811915001822},
- volume = 112,
- year = 2015
-}
-
-@article{power_fd_dvars,
- author = {Power, Jonathan D. and Mitra, Anish and Laumann, Timothy O. and Snyder, Abraham Z. and Schlaggar, Bradley L. and Petersen, Steven E.},
- doi = {10.1016/j.neuroimage.2013.08.048},
- issn = {1053-8119},
- journal = {NeuroImage},
- number = {Supplement C},
- pages = {320-341},
- title = {Methods to detect, characterize, and remove motion artifact in resting state fMRI},
- url = {http://www.sciencedirect.com/science/article/pii/S1053811913009117},
- volume = 84,
- year = 2014
-}
-
-@article{confounds_satterthwaite_2013,
- author = {Satterthwaite, Theodore D. and Elliott, Mark A. and Gerraty, Raphael T. and Ruparel, Kosha and Loughead, James and Calkins, Monica E. and Eickhoff, Simon B. and Hakonarson, Hakon and Gur, Ruben C. and Gur, Raquel E. and Wolf, Daniel H.},
- doi = {10.1016/j.neuroimage.2012.08.052},
- issn = {10538119},
- journal = {NeuroImage},
- number = 1,
- pages = {240--256},
- title = {{An improved framework for confound regression and filtering for control of motion artifact in the preprocessing of resting-state functional connectivity data}},
- url = {http://linkinghub.elsevier.com/retrieve/pii/S1053811912008609},
- volume = 64,
- year = 2013
-}
-
-
-@article{nilearn,
- author = {Abraham, Alexandre and Pedregosa, Fabian and Eickenberg, Michael and Gervais, Philippe and Mueller, Andreas and Kossaifi, Jean and Gramfort, Alexandre and Thirion, Bertrand and Varoquaux, Gael},
- doi = {10.3389/fninf.2014.00014},
- issn = {1662-5196},
- journal = {Frontiers in Neuroinformatics},
- language = {English},
- title = {Machine learning for neuroimaging with scikit-learn},
- url = {https://www.frontiersin.org/articles/10.3389/fninf.2014.00014/full},
- volume = 8,
- year = 2014
-}
-
-@article{lanczos,
- author = {Lanczos, C.},
- doi = {10.1137/0701007},
- issn = {0887-459X},
- journal = {Journal of the Society for Industrial and Applied Mathematics Series B Numerical Analysis},
- number = 1,
- pages = {76-85},
- title = {Evaluation of Noisy Data},
- url = {http://epubs.siam.org/doi/10.1137/0701007},
- volume = 1,
- year = 1964
-}
-
-@article{compcor,
- author = {Behzadi, Yashar and Restom, Khaled and Liau, Joy and Liu, Thomas T.},
- doi = {10.1016/j.neuroimage.2007.04.042},
- issn = {1053-8119},
- journal = {NeuroImage},
- number = 1,
- pages = {90-101},
- title = {A component based noise correction method ({CompCor}) for {BOLD} and perfusion based fMRI},
- url = {http://www.sciencedirect.com/science/article/pii/S1053811907003837},
- volume = 37,
- year = 2007
-}
-
-@article{hcppipelines,
- author = {Glasser, Matthew F. and Sotiropoulos, Stamatios N. and Wilson, J. Anthony and Coalson, Timothy S. and Fischl, Bruce and Andersson, Jesper L. and Xu, Junqian and Jbabdi, Saad and Webster, Matthew and Polimeni, Jonathan R. and Van Essen, David C. and Jenkinson, Mark},
- doi = {10.1016/j.neuroimage.2013.04.127},
- issn = {1053-8119},
- journal = {NeuroImage},
- pages = {105-124},
- series = {Mapping the Connectome},
- title = {The minimal preprocessing pipelines for the Human Connectome Project},
- url = {http://www.sciencedirect.com/science/article/pii/S1053811913005053},
- volume = 80,
- year = 2013
-}
-
-@article{fs_template,
- author = {Reuter, Martin and Rosas, Herminia Diana and Fischl, Bruce},
- doi = {10.1016/j.neuroimage.2010.07.020},
- journal = {NeuroImage},
- number = 4,
- pages = {1181-1196},
- title = {Highly accurate inverse consistent registration: A robust approach},
- volume = 53,
- year = 2010
-}
-
-@article{afni,
- author = {Cox, Robert W. and Hyde, James S.},
- doi = {10.1002/(SICI)1099-1492(199706/08)10:4/5<171::AID-NBM453>3.0.CO;2-L},
- journal = {NMR in Biomedicine},
- number = {4-5},
- pages = {171-178},
- title = {Software tools for analysis and visualization of fMRI data},
- volume = 10,
- year = 1997
-}
-
-@article{posse_t2s,
- author = {Posse, Stefan and Wiese, Stefan and Gembris, Daniel and Mathiak, Klaus and Kessler, Christoph and Grosse-Ruyken, Maria-Liisa and Elghahwagi, Barbara and Richards, Todd and Dager, Stephen R. and Kiselev, Valerij G.},
- doi = {10.1002/(SICI)1522-2594(199907)42:1<87::AID-MRM13>3.0.CO;2-O},
- journal = {Magnetic Resonance in Medicine},
- number = 1,
- pages = {87-97},
- title = {Enhancement of {BOLD}-contrast sensitivity by single-shot multi-echo functional {MR} imaging},
- volume = 42,
- year = 1999
-}
diff --git a/dmriprep/due.py b/dmriprep/due.py
new file mode 100644
index 00000000..9a1c4dd0
--- /dev/null
+++ b/dmriprep/due.py
@@ -0,0 +1,74 @@
+# emacs: at the end of the file
+# ex: set sts=4 ts=4 sw=4 et:
+# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### #
+"""
+
+Stub file for a guaranteed safe import of duecredit constructs when duecredit
+is not available.
+
+To use it, place it into your project codebase to be imported, e.g. copy as
+
+ cp stub.py /path/tomodule/module/due.py
+
+Note that it might be better to avoid naming it duecredit.py to avoid shadowing
+installed duecredit.
+
+Then use in your code as
+
+ from .due import due, Doi, BibTeX, Text
+
+See https://github.com/duecredit/duecredit/blob/master/README.md for examples.
+
+Origin: Originally a part of the duecredit
+Copyright: 2015-2019 DueCredit developers
+License: BSD-2
+"""
+
+__version__ = '0.0.8'
+
+
+class InactiveDueCreditCollector(object):
+ """Just a stub at the Collector which would not do anything"""
+ def _donothing(self, *args, **kwargs):
+ """Perform no good and no bad"""
+ pass
+
+ def dcite(self, *args, **kwargs):
+ """If I could cite I would"""
+ def nondecorating_decorator(func):
+ return func
+ return nondecorating_decorator
+
+ active = False
+ activate = add = cite = dump = load = _donothing
+
+ def __repr__(self):
+ return self.__class__.__name__ + '()'
+
+
+def _donothing_func(*args, **kwargs):
+ """Perform no good and no bad"""
+ pass
+
+
+try:
+ from duecredit import due, BibTeX, Doi, Url, Text
+ if 'due' in locals() and not hasattr(due, 'cite'):
+ raise RuntimeError(
+ "Imported due lacks .cite. DueCredit is now disabled")
+except Exception as e:
+ if not isinstance(e, ImportError):
+ import logging
+ logging.getLogger("duecredit").error(
+ "Failed to import duecredit due to %s" % str(e))
+ # Initiate due stub
+ due = InactiveDueCreditCollector()
+ BibTeX = Doi = Url = Text = _donothing_func
+
+# Emacs mode definitions
+# Local Variables:
+# mode: python
+# py-indent-offset: 4
+# tab-width: 4
+# indent-tabs-mode: nil
+# End:
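For orientation, a minimal usage sketch of this stub (the decorated function is hypothetical); with the real duecredit package installed the same call registers the citation, otherwise it is a silent no-op:

from dmriprep.due import due, Doi

@due.dcite(Doi('10.5281/zenodo.3392201'),
           description='dMRIPrep preprocessing', path='dmriprep')
def run_preprocessing():
    """Hypothetical pipeline entry point."""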
diff --git a/dmriprep/interfaces/__init__.py b/dmriprep/interfaces/__init__.py
index 55e90fe4..e69de29b 100644
--- a/dmriprep/interfaces/__init__.py
+++ b/dmriprep/interfaces/__init__.py
@@ -1,53 +0,0 @@
-# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
-# vi: set ft=python sts=4 ts=4 sw=4 et:
-"""Custom Nipype interfaces for dMRIPrep."""
-from nipype.interfaces.base import OutputMultiObject, SimpleInterface
-from niworkflows.interfaces.bids import (
- DerivativesDataSink as _DDS,
- BIDSDataGrabberOutputSpec as _BIDSDataGrabberOutputSpec,
- BIDSDataGrabberInputSpec,
- LOGGER as _LOGGER,
-)
-
-
-class DerivativesDataSink(_DDS):
- """A patched DataSink."""
-
- out_path_base = 'dmriprep'
-
-
-class BIDSDataGrabberOutputSpec(_BIDSDataGrabberOutputSpec):
- dwi = OutputMultiObject(desc='output DWI images')
-
-
-class BIDSDataGrabber(SimpleInterface):
- input_spec = BIDSDataGrabberInputSpec
- output_spec = BIDSDataGrabberOutputSpec
- _require_dwis = True
-
- def __init__(self, *args, **kwargs):
- anat_only = kwargs.pop('anat_only', False)
- super(BIDSDataGrabber, self).__init__(*args, **kwargs)
- if anat_only is not None:
- self._require_dwis = not anat_only
-
- def _run_interface(self, runtime):
- bids_dict = self.inputs.subject_data
-
- self._results['out_dict'] = bids_dict
- self._results.update(bids_dict)
-
- if not bids_dict['t1w']:
- raise FileNotFoundError('No T1w images found for subject sub-{}'.format(
- self.inputs.subject_id))
-
- if self._require_dwis and not bids_dict['dwi']:
- raise FileNotFoundError('No diffusion weighted images found for subject sub-{}'.format(
- self.inputs.subject_id))
-
- for imtype in ['dwi', 't2w', 'flair', 'fmap', 'roi']:
- if not bids_dict[imtype]:
- _LOGGER.warning('No "%s" images found for sub-%s',
- imtype, self.inputs.subject_id)
-
- return runtime
diff --git a/dmriprep/interfaces/fmap.py b/dmriprep/interfaces/fmap.py
new file mode 100644
index 00000000..b1a6e9cd
--- /dev/null
+++ b/dmriprep/interfaces/fmap.py
@@ -0,0 +1,612 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""
+Interfaces to deal with the various types of fieldmap sources
+"""
+
+import numpy as np
+import nibabel as nb
+from nipype import logging
+from nipype.utils.filemanip import fname_presuffix
+from nipype.interfaces.base import (
+ BaseInterfaceInputSpec, TraitedSpec, File, isdefined, traits,
+ SimpleInterface, InputMultiObject)
+
+LOGGER = logging.getLogger('nipype.interface')
+
+
+class FieldEnhanceInputSpec(BaseInterfaceInputSpec):
+ in_file = File(exists=True, mandatory=True, desc='input fieldmap')
+ in_mask = File(exists=True, desc='brain mask')
+ in_magnitude = File(exists=True, desc='input magnitude')
+ unwrap = traits.Bool(False, usedefault=True, desc='run phase unwrap')
+ despike = traits.Bool(True, usedefault=True, desc='run despike filter')
+ bspline_smooth = traits.Bool(True, usedefault=True, desc='run 3D bspline smoother')
+ mask_erode = traits.Int(1, usedefault=True, desc='mask erosion iterations')
+    despike_threshold = traits.Float(0.2, usedefault=True, desc='despike filter threshold')
+ num_threads = traits.Int(1, usedefault=True, nohash=True, desc='number of jobs')
+
+
+class FieldEnhanceOutputSpec(TraitedSpec):
+ out_file = File(desc='the output fieldmap')
+ out_unwrapped = File(desc='unwrapped fieldmap')
+
+
+class FieldEnhance(SimpleInterface):
+ """
+ The FieldEnhance interface wraps a workflow to massage the input fieldmap
+ and return it masked, despiked, etc.
+ """
+ input_spec = FieldEnhanceInputSpec
+ output_spec = FieldEnhanceOutputSpec
+
+ def _run_interface(self, runtime):
+ from scipy import ndimage as sim
+
+ fmap_nii = nb.load(self.inputs.in_file)
+ data = np.squeeze(fmap_nii.get_data().astype(np.float32))
+
+ # Despike / denoise (no-mask)
+ if self.inputs.despike:
+ data = _despike2d(data, self.inputs.despike_threshold)
+
+ mask = None
+ if isdefined(self.inputs.in_mask):
+ masknii = nb.load(self.inputs.in_mask)
+ mask = masknii.get_data().astype(np.uint8)
+
+            # Erode mask
+ if self.inputs.mask_erode > 0:
+ struc = sim.iterate_structure(sim.generate_binary_structure(3, 2), 1)
+ mask = sim.binary_erosion(
+ mask, struc,
+ iterations=self.inputs.mask_erode
+ ).astype(np.uint8) # pylint: disable=no-member
+
+ self._results['out_file'] = fname_presuffix(
+ self.inputs.in_file, suffix='_enh', newpath=runtime.cwd)
+ datanii = nb.Nifti1Image(data, fmap_nii.affine, fmap_nii.header)
+
+ if self.inputs.unwrap:
+ data = _unwrap(data, self.inputs.in_magnitude, mask)
+ self._results['out_unwrapped'] = fname_presuffix(
+ self.inputs.in_file, suffix='_unwrap', newpath=runtime.cwd)
+ nb.Nifti1Image(data, fmap_nii.affine, fmap_nii.header).to_filename(
+ self._results['out_unwrapped'])
+
+ if not self.inputs.bspline_smooth:
+ datanii.to_filename(self._results['out_file'])
+ return runtime
+ else:
+ from ..utils import bspline as fbsp
+ from statsmodels.robust.scale import mad
+
+ # Fit BSplines (coarse)
+ bspobj = fbsp.BSplineFieldmap(datanii, weights=mask,
+ njobs=self.inputs.num_threads)
+ bspobj.fit()
+ smoothed1 = bspobj.get_smoothed()
+
+ # Manipulate the difference map
+ diffmap = data - smoothed1.get_data()
+ sderror = mad(diffmap[mask > 0])
+ LOGGER.info('SD of error after B-Spline fitting is %f', sderror)
+ errormask = np.zeros_like(diffmap)
+ errormask[np.abs(diffmap) > (10 * sderror)] = 1
+ errormask *= mask
+
+ nslices = 0
+ try:
+ errorslice = np.squeeze(np.argwhere(errormask.sum(0).sum(0) > 0))
+ nslices = errorslice[-1] - errorslice[0]
+ except IndexError: # mask is empty, do not refine
+ pass
+
+ if nslices > 1:
+ diffmapmsk = mask[..., errorslice[0]:errorslice[-1]]
+ diffmapnii = nb.Nifti1Image(
+ diffmap[..., errorslice[0]:errorslice[-1]] * diffmapmsk,
+ datanii.affine, datanii.header)
+
+ bspobj2 = fbsp.BSplineFieldmap(diffmapnii, knots_zooms=[24., 24., 4.],
+ njobs=self.inputs.num_threads)
+ bspobj2.fit()
+ smoothed2 = bspobj2.get_smoothed().get_data()
+
+ final = smoothed1.get_data().copy()
+ final[..., errorslice[0]:errorslice[-1]] += smoothed2
+ else:
+ final = smoothed1.get_data()
+
+ nb.Nifti1Image(final, datanii.affine, datanii.header).to_filename(
+ self._results['out_file'])
+
+ return runtime
+
+
+class FieldToRadSInputSpec(BaseInterfaceInputSpec):
+ in_file = File(exists=True, mandatory=True, desc='input fieldmap')
+ fmap_range = traits.Float(desc='range of input field map')
+
+
+class FieldToRadSOutputSpec(TraitedSpec):
+ out_file = File(desc='the output fieldmap')
+ fmap_range = traits.Float(desc='range of input field map')
+
+
+class FieldToRadS(SimpleInterface):
+ """
+ The FieldToRadS converts from arbitrary units to rad/s
+ """
+ input_spec = FieldToRadSInputSpec
+ output_spec = FieldToRadSOutputSpec
+
+ def _run_interface(self, runtime):
+ fmap_range = None
+ if isdefined(self.inputs.fmap_range):
+ fmap_range = self.inputs.fmap_range
+ self._results['out_file'], self._results['fmap_range'] = _torads(
+ self.inputs.in_file, fmap_range, newpath=runtime.cwd)
+ return runtime
+
+
+class FieldToHzInputSpec(BaseInterfaceInputSpec):
+ in_file = File(exists=True, mandatory=True, desc='input fieldmap')
+ range_hz = traits.Float(mandatory=True, desc='range of input field map')
+
+
+class FieldToHzOutputSpec(TraitedSpec):
+ out_file = File(desc='the output fieldmap')
+
+
+class FieldToHz(SimpleInterface):
+ """
+ The FieldToHz converts from arbitrary units to Hz
+ """
+ input_spec = FieldToHzInputSpec
+ output_spec = FieldToHzOutputSpec
+
+ def _run_interface(self, runtime):
+ self._results['out_file'] = _tohz(
+ self.inputs.in_file, self.inputs.range_hz, newpath=runtime.cwd)
+ return runtime
+
+
+class Phasediff2FieldmapInputSpec(BaseInterfaceInputSpec):
+ in_file = File(exists=True, mandatory=True, desc='input fieldmap')
+ metadata = traits.Dict(mandatory=True, desc='BIDS metadata dictionary')
+
+
+class Phasediff2FieldmapOutputSpec(TraitedSpec):
+ out_file = File(desc='the output fieldmap')
+
+
+class Phasediff2Fieldmap(SimpleInterface):
+ """
+ Convert a phase difference map into a fieldmap in Hz
+ """
+ input_spec = Phasediff2FieldmapInputSpec
+ output_spec = Phasediff2FieldmapOutputSpec
+
+ def _run_interface(self, runtime):
+ self._results['out_file'] = phdiff2fmap(
+ self.inputs.in_file,
+ _delta_te(self.inputs.metadata),
+ newpath=runtime.cwd)
+ return runtime
+
+
+class Phases2FieldmapInputSpec(BaseInterfaceInputSpec):
+ phase_files = InputMultiObject(
+ File(exists=True), mandatory=True, desc='list of phase1, phase2 files')
+ metadatas = traits.List(
+ traits.Dict, mandatory=True, desc='list of phase1, phase2 metadata dicts')
+
+
+class Phases2FieldmapOutputSpec(TraitedSpec):
+ out_file = File(desc='the output fieldmap')
+ phasediff_metadata = traits.Dict(desc='the phasediff metadata')
+
+
+class Phases2Fieldmap(SimpleInterface):
+ """
+ Convert a phase1, phase2 into a difference map
+ """
+ input_spec = Phases2FieldmapInputSpec
+ output_spec = Phases2FieldmapOutputSpec
+
+ def _run_interface(self, runtime):
+ # Get the echo times
+ fmap_file, merged_metadata = phases2fmap(self.inputs.phase_files, self.inputs.metadatas,
+ newpath=runtime.cwd)
+ self._results['phasediff_metadata'] = merged_metadata
+ self._results['out_file'] = fmap_file
+ return runtime
+
+
+def _despike2d(data, thres, neigh=None):
+ """
+ despiking as done in FSL fugue
+ """
+
+ if neigh is None:
+ neigh = [-1, 0, 1]
+ nslices = data.shape[-1]
+
+ for k in range(nslices):
+ data2d = data[..., k]
+
+ for i in range(data2d.shape[0]):
+ for j in range(data2d.shape[1]):
+ vals = []
+ thisval = data2d[i, j]
+ for ii in neigh:
+ for jj in neigh:
+ try:
+ vals.append(data2d[i + ii, j + jj])
+ except IndexError:
+ pass
+ vals = np.array(vals)
+ patch_range = vals.max() - vals.min()
+ patch_med = np.median(vals)
+
+ if (patch_range > 1e-6 and
+ (abs(thisval - patch_med) / patch_range) > thres):
+ data[i, j, k] = patch_med
+ return data
+
+
+def _unwrap(fmap_data, mag_file, mask=None):
+ from math import pi
+ from nipype.interfaces.fsl import PRELUDE
+ magnii = nb.load(mag_file)
+
+ if mask is None:
+ mask = np.ones_like(fmap_data, dtype=np.uint8)
+
+ fmapmax = max(abs(fmap_data[mask > 0].min()), fmap_data[mask > 0].max())
+ fmap_data *= pi / fmapmax
+
+ nb.Nifti1Image(fmap_data, magnii.affine).to_filename('fmap_rad.nii.gz')
+ nb.Nifti1Image(mask, magnii.affine).to_filename('fmap_mask.nii.gz')
+ nb.Nifti1Image(magnii.get_data(), magnii.affine).to_filename('fmap_mag.nii.gz')
+
+ # Run prelude
+ res = PRELUDE(phase_file='fmap_rad.nii.gz',
+ magnitude_file='fmap_mag.nii.gz',
+ mask_file='fmap_mask.nii.gz').run()
+
+ unwrapped = nb.load(res.outputs.unwrapped_phase_file).get_data() * (fmapmax / pi)
+ return unwrapped
+
+
+def get_ees(in_meta, in_file=None):
+ """
+ Calculate the *effective echo spacing* :math:`t_\\text{ees}`
+ for an input :abbr:`EPI (echo-planar imaging)` scan.
+
+
+ There are several procedures to calculate the effective
+ echo spacing. The basic one is that an ``EffectiveEchoSpacing``
+ field is set in the JSON sidecar. The following examples
+ use an ``'epi.nii.gz'`` file-stub which has 90 pixels in the
+ j-axis encoding direction.
+
+ >>> meta = {'EffectiveEchoSpacing': 0.00059,
+ ... 'PhaseEncodingDirection': 'j-'}
+ >>> get_ees(meta)
+ 0.00059
+
+ If the *total readout time* :math:`T_\\text{ro}` (``TotalReadoutTime``
+ BIDS field) is provided, then the effective echo spacing can be
+ calculated reading the number of voxels :math:`N_\\text{PE}` along the
+ readout direction and the parallel acceleration
+ factor of the EPI
+
+    .. math ::
+
+        t_\\text{ees} = T_\\text{ro} \\, (N_\\text{PE} / f_\\text{acc} - 1)^{-1}
+
+    where :math:`N_\\text{PE}` is the number of pixels along the phase-encoding
+    direction, and :math:`f_\\text{acc}` is the parallel imaging acceleration factor
+ (:abbr:`GRAPPA (GeneRalized Autocalibrating Partial Parallel Acquisition)`,
+ :abbr:`ARC (Autocalibrating Reconstruction for Cartesian imaging)`, etc.).
+
+ >>> meta = {'TotalReadoutTime': 0.02596,
+ ... 'PhaseEncodingDirection': 'j-',
+ ... 'ParallelReductionFactorInPlane': 2}
+ >>> get_ees(meta, in_file='epi.nii.gz')
+ 0.00059
+
+ Some vendors, like Philips, store different parameter names
+ (see http://dbic.dartmouth.edu/pipermail/mrusers/attachments/\
+20141112/eb1d20e6/attachment.pdf):
+
+ >>> meta = {'WaterFatShift': 8.129,
+ ... 'MagneticFieldStrength': 3,
+ ... 'PhaseEncodingDirection': 'j-',
+ ... 'ParallelReductionFactorInPlane': 2}
+ >>> get_ees(meta, in_file='epi.nii.gz')
+ 0.00041602630141921826
+
+ """
+
+ import nibabel as nb
+ from dmriprep.interfaces.fmap import _get_pe_index
+
+ # Use case 1: EES is defined
+ ees = in_meta.get('EffectiveEchoSpacing', None)
+ if ees is not None:
+ return ees
+
+ # All other cases require the parallel acc and npe (N vox in PE dir)
+ acc = float(in_meta.get('ParallelReductionFactorInPlane', 1.0))
+ npe = nb.load(in_file).shape[_get_pe_index(in_meta)]
+ etl = npe // acc
+
+ # Use case 2: TRT is defined
+ trt = in_meta.get('TotalReadoutTime', None)
+ if trt is not None:
+ return trt / (etl - 1)
+
+ # Use case 3 (philips scans)
+ wfs = in_meta.get('WaterFatShift', None)
+ if wfs is not None:
+ fstrength = in_meta['MagneticFieldStrength']
+ wfd_ppm = 3.4 # water-fat diff in ppm
+ g_ratio_mhz_t = 42.57 # gyromagnetic ratio for proton (1H) in MHz/T
+ wfs_hz = fstrength * wfd_ppm * g_ratio_mhz_t
+ return wfs / (wfs_hz * etl)
+
+ raise ValueError('Unknown effective echo-spacing specification')
+
+
+def get_trt(in_meta, in_file=None):
+ """
+ Calculate the *total readout time* for an input
+ :abbr:`EPI (echo-planar imaging)` scan.
+
+
+ There are several procedures to calculate the total
+ readout time. The basic one is that a ``TotalReadoutTime``
+ field is set in the JSON sidecar. The following examples
+ use an ``'epi.nii.gz'`` file-stub which has 90 pixels in the
+ j-axis encoding direction.
+
+ >>> meta = {'TotalReadoutTime': 0.02596}
+ >>> get_trt(meta)
+ 0.02596
+
+ If the *effective echo spacing* :math:`t_\\text{ees}`
+ (``EffectiveEchoSpacing`` BIDS field) is provided, then the
+ total readout time can be calculated reading the number
+ of voxels along the readout direction :math:`T_\\text{ro}`
+ and the parallel acceleration factor of the EPI :math:`f_\\text{acc}`.
+
+ .. math ::
+
+ T_\\text{ro} = t_\\text{ees} \\, (N_\\text{PE} / f_\\text{acc} - 1)
+
+ >>> meta = {'EffectiveEchoSpacing': 0.00059,
+ ... 'PhaseEncodingDirection': 'j-',
+ ... 'ParallelReductionFactorInPlane': 2}
+ >>> get_trt(meta, in_file='epi.nii.gz')
+ 0.02596
+
+ Some vendors, like Philips, store different parameter names:
+
+ >>> meta = {'WaterFatShift': 8.129,
+ ... 'MagneticFieldStrength': 3,
+ ... 'PhaseEncodingDirection': 'j-',
+ ... 'ParallelReductionFactorInPlane': 2}
+ >>> get_trt(meta, in_file='epi.nii.gz')
+ 0.018721183563864822
+
+ """
+
+ # Use case 1: TRT is defined
+ trt = in_meta.get('TotalReadoutTime', None)
+ if trt is not None:
+ return trt
+
+ # All other cases require the parallel acc and npe (N vox in PE dir)
+ acc = float(in_meta.get('ParallelReductionFactorInPlane', 1.0))
+ npe = nb.load(in_file).shape[_get_pe_index(in_meta)]
+ etl = npe // acc
+
+    # Use case 2: EES is defined
+ ees = in_meta.get('EffectiveEchoSpacing', None)
+ if ees is not None:
+ return ees * (etl - 1)
+
+ # Use case 3 (philips scans)
+ wfs = in_meta.get('WaterFatShift', None)
+ if wfs is not None:
+ fstrength = in_meta['MagneticFieldStrength']
+ wfd_ppm = 3.4 # water-fat diff in ppm
+ g_ratio_mhz_t = 42.57 # gyromagnetic ratio for proton (1H) in MHz/T
+ wfs_hz = fstrength * wfd_ppm * g_ratio_mhz_t
+ return wfs / wfs_hz
+
+ raise ValueError('Unknown total-readout time specification')
+
+
+def _get_pe_index(meta):
+ pe = meta['PhaseEncodingDirection']
+ try:
+ return {'i': 0, 'j': 1, 'k': 2}[pe[0]]
+ except KeyError:
+ raise RuntimeError('"%s" is an invalid PE string' % pe)
+
+
+def _torads(in_file, fmap_range=None, newpath=None):
+ """
+ Convert a field map to rad/s units
+
+ If fmap_range is None, the range of the fieldmap
+ will be automatically calculated.
+
+ Use fmap_range=0.5 to convert from Hz to rad/s
+ """
+ from math import pi
+ import nibabel as nb
+ from nipype.utils.filemanip import fname_presuffix
+
+ out_file = fname_presuffix(in_file, suffix='_rad', newpath=newpath)
+ fmapnii = nb.load(in_file)
+ fmapdata = fmapnii.get_data()
+
+ if fmap_range is None:
+ fmap_range = max(abs(fmapdata.min()), fmapdata.max())
+ fmapdata = fmapdata * (pi / fmap_range)
+ out_img = nb.Nifti1Image(fmapdata, fmapnii.affine, fmapnii.header)
+ out_img.set_data_dtype('float32')
+ out_img.to_filename(out_file)
+ return out_file, fmap_range
+
+
+def _tohz(in_file, range_hz, newpath=None):
+ """Convert a field map to Hz units"""
+ from math import pi
+ import nibabel as nb
+ from nipype.utils.filemanip import fname_presuffix
+
+ out_file = fname_presuffix(in_file, suffix='_hz', newpath=newpath)
+ fmapnii = nb.load(in_file)
+ fmapdata = fmapnii.get_data()
+ fmapdata = fmapdata * (range_hz / pi)
+ out_img = nb.Nifti1Image(fmapdata, fmapnii.affine, fmapnii.header)
+ out_img.set_data_dtype('float32')
+ out_img.to_filename(out_file)
+ return out_file
+
+
+def phdiff2fmap(in_file, delta_te, newpath=None):
+ r"""
+ Converts the input phase-difference map into a fieldmap in Hz,
+ using the eq. (1) of [Hutton2002]_:
+
+ .. math::
+
+ \Delta B_0 (\text{T}^{-1}) = \frac{\Delta \Theta}{2\pi\gamma \Delta\text{TE}}
+
+
+ In this case, we do not take into account the gyromagnetic ratio of the
+ proton (:math:`\gamma`), since it will be applied inside TOPUP:
+
+ .. math::
+
+ \Delta B_0 (\text{Hz}) = \frac{\Delta \Theta}{2\pi \Delta\text{TE}}
+
+ """
+ import math
+ import numpy as np
+ import nibabel as nb
+ from nipype.utils.filemanip import fname_presuffix
+ # GYROMAG_RATIO_H_PROTON_MHZ = 42.576
+
+ out_file = fname_presuffix(in_file, suffix='_fmap', newpath=newpath)
+ image = nb.load(in_file)
+ data = (image.get_data().astype(np.float32) / (2. * math.pi * delta_te))
+ nii = nb.Nifti1Image(data, image.affine, image.header)
+ nii.set_data_dtype(np.float32)
+ nii.to_filename(out_file)
+ return out_file
+
+
+def phases2fmap(phase_files, metadatas, newpath=None):
+ """Calculates a phasediff from two phase images. Assumes monopolar
+ readout. """
+ import numpy as np
+ import nibabel as nb
+ from nipype.utils.filemanip import fname_presuffix
+ from copy import deepcopy
+
+ phasediff_file = fname_presuffix(phase_files[0], suffix='_phasediff', newpath=newpath)
+ echo_times = [meta.get("EchoTime") for meta in metadatas]
+ if None in echo_times or echo_times[0] == echo_times[1]:
+        raise RuntimeError('Two phase images with distinct, defined EchoTime values are required.')
+ # Determine the order of subtraction
+ short_echo_index = echo_times.index(min(echo_times))
+ long_echo_index = echo_times.index(max(echo_times))
+
+ short_phase_image = phase_files[short_echo_index]
+ long_phase_image = phase_files[long_echo_index]
+
+ image0 = nb.load(short_phase_image)
+ phase0 = image0.get_fdata()
+ image1 = nb.load(long_phase_image)
+ phase1 = image1.get_fdata()
+
+ def rescale_image(img):
+ if np.any(img < -128):
+ # This happens sometimes on 7T fieldmaps
+ LOGGER.info("Found negative values in phase image: rescaling")
+ imax = img.max()
+ imin = img.min()
+ scaled = 2 * ((img - imin) / (imax - imin) - 0.5)
+ return np.pi * scaled
+ mask = img > 0
+ imax = img.max()
+ imin = img.min()
+ max_check = imax - 4096
+ if np.abs(max_check) > 10 or np.abs(imin) > 10:
+ LOGGER.warning("Phase image may be scaled incorrectly: check results")
+ return mask * (img / 2048 * np.pi - np.pi)
+
+    # Convert phases to radians and compute the wrapped phase difference
+ rad0 = rescale_image(phase0)
+ rad1 = rescale_image(phase1)
+ a = np.cos(rad0)
+ b = np.sin(rad0)
+ c = np.cos(rad1)
+ d = np.sin(rad1)
+ fmap = -np.arctan2(b * c - a * d, a * c + b * d)
+
+ phasediff_nii = nb.Nifti1Image(fmap, image0.affine)
+ phasediff_nii.set_data_dtype(np.float32)
+ phasediff_nii.to_filename(phasediff_file)
+
+ merged_metadata = deepcopy(metadatas[0])
+ del merged_metadata['EchoTime']
+ merged_metadata['EchoTime1'] = float(echo_times[short_echo_index])
+ merged_metadata['EchoTime2'] = float(echo_times[long_echo_index])
+
+ return phasediff_file, merged_metadata
+
+
+def _delta_te(in_values, te1=None, te2=None):
+ r"""Read :math:`\Delta_\text{TE}` from BIDS metadata dict"""
+ if isinstance(in_values, float):
+ te2 = in_values
+ te1 = 0.
+
+ if isinstance(in_values, dict):
+ te1 = in_values.get('EchoTime1')
+ te2 = in_values.get('EchoTime2')
+
+ if not all((te1, te2)):
+ te2 = in_values.get('EchoTimeDifference')
+ te1 = 0
+
+ if isinstance(in_values, list):
+ te2, te1 = in_values
+ if isinstance(te1, list):
+ te1 = te1[1]
+ if isinstance(te2, list):
+ te2 = te2[1]
+
+    # For convenience, if both are missing we raise a single error about them
+ if te1 is None and te2 is None:
+ raise RuntimeError('EchoTime1 and EchoTime2 metadata fields not found. '
+ 'Please consult the BIDS specification.')
+ if te1 is None:
+ raise RuntimeError(
+ 'EchoTime1 metadata field not found. Please consult the BIDS specification.')
+ if te2 is None:
+ raise RuntimeError(
+ 'EchoTime2 metadata field not found. Please consult the BIDS specification.')
+
+ return abs(float(te2) - float(te1))
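The phase-difference pathway can be exercised with the helpers defined above; a minimal sketch with hypothetical echo times and file names:

from dmriprep.interfaces.fmap import _delta_te, phdiff2fmap

meta = {'EchoTime1': 0.00492, 'EchoTime2': 0.00738}           # assumed values, in seconds
delta_te = _delta_te(meta)                                    # 0.00246 s
fmap_file = phdiff2fmap('sub-01_phasediff.nii.gz', delta_te)  # writes a *_fmap image in Hz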
diff --git a/dmriprep/interfaces/fsl_extensions.py b/dmriprep/interfaces/fsl_extensions.py
new file mode 100644
index 00000000..9f524b75
--- /dev/null
+++ b/dmriprep/interfaces/fsl_extensions.py
@@ -0,0 +1,124 @@
+import os
+import os.path as op
+
+from nipype.interfaces import fsl
+from nipype.interfaces.base import isdefined
+
+
+class ExtendedEddyOutputSpec(fsl.epi.EddyOutputSpec):
+ from nipype.interfaces.base import File
+
+ shell_PE_translation_parameters = File(
+ exists=True,
+ desc="the translation along the PE-direction between the different shells",
+ )
+ outlier_map = File(
+ exists=True,
+ desc="All numbers are either 0, meaning that scan-slice "
+ "is not an outliers, or 1 meaning that it is.",
+ )
+ outlier_n_stdev_map = File(
+ exists=True,
+ desc="how many standard deviations off the mean difference "
+ "between observation and prediction is.",
+ )
+ outlier_n_sqr_stdev_map = File(
+ exists=True,
+ desc="how many standard deviations off the square root of the "
+ "mean squared difference between observation and prediction is.",
+ )
+ outlier_free_data = File(
+ exists=True,
+ desc=" the original data given by --imain not corrected for "
+ "susceptibility or EC-induced distortions or subject movement, but with "
+ "outlier slices replaced by the Gaussian Process predictions.",
+ )
+
+
+class ExtendedEddy(fsl.Eddy):
+ output_spec = ExtendedEddyOutputSpec
+ _num_threads = 1
+
+ def __init__(self, **inputs):
+ super(fsl.Eddy, self).__init__(**inputs)
+ self.inputs.on_trait_change(self._num_threads_update, "num_threads")
+ if not isdefined(self.inputs.num_threads):
+ self.inputs.num_threads = self._num_threads
+ else:
+ self._num_threads_update()
+ self.inputs.on_trait_change(self._use_cuda, "use_cuda")
+ if isdefined(self.inputs.use_cuda):
+ self._use_cuda()
+
+ def _list_outputs(self):
+ outputs = self.output_spec().get()
+ outputs["out_corrected"] = os.path.abspath("%s.nii.gz" % self.inputs.out_base)
+ outputs["out_parameter"] = os.path.abspath(
+ "%s.eddy_parameters" % self.inputs.out_base
+ )
+
+ # File generation might depend on the version of EDDY
+ out_rotated_bvecs = os.path.abspath(
+ "%s.eddy_rotated_bvecs" % self.inputs.out_base
+ )
+ out_movement_rms = os.path.abspath(
+ "%s.eddy_movement_rms" % self.inputs.out_base
+ )
+ out_restricted_movement_rms = os.path.abspath(
+ "%s.eddy_restricted_movement_rms" % self.inputs.out_base
+ )
+ out_shell_alignment_parameters = os.path.abspath(
+ "%s.eddy_post_eddy_shell_alignment_parameters" % self.inputs.out_base
+ )
+ shell_PE_translation_parameters = op.abspath(
+ "%s.eddy_post_eddy_shell_PE_translation_parameters" % self.inputs.out_base
+ )
+ out_outlier_report = os.path.abspath(
+ "%s.eddy_outlier_report" % self.inputs.out_base
+ )
+ outlier_map = op.abspath("%s.eddy_outlier_map" % self.inputs.out_base)
+ outlier_n_stdev_map = op.abspath(
+ "%s.eddy_outlier_n_stdev_map" % self.inputs.out_base
+ )
+ outlier_n_sqr_stdev_map = op.abspath(
+ "%s.eddy_outlier_n_sqr_stdev_map" % self.inputs.out_base
+ )
+ outlier_free_data = op.abspath(
+ "%s.eddy_outlier_free_data.nii.gz" % self.inputs.out_base
+ )
+
+ if isdefined(self.inputs.cnr_maps) and self.inputs.cnr_maps:
+ out_cnr_maps = os.path.abspath(
+ "%s.eddy_cnr_maps.nii.gz" % self.inputs.out_base
+ )
+ if os.path.exists(out_cnr_maps):
+ outputs["out_cnr_maps"] = out_cnr_maps
+ if isdefined(self.inputs.residuals) and self.inputs.residuals:
+ out_residuals = os.path.abspath(
+ "%s.eddy_residuals.nii.gz" % self.inputs.out_base
+ )
+ if os.path.exists(out_residuals):
+ outputs["out_residuals"] = out_residuals
+
+ if os.path.exists(out_rotated_bvecs):
+ outputs["out_rotated_bvecs"] = out_rotated_bvecs
+ if os.path.exists(out_movement_rms):
+ outputs["out_movement_rms"] = out_movement_rms
+ if os.path.exists(out_restricted_movement_rms):
+ outputs["out_restricted_movement_rms"] = out_restricted_movement_rms
+ if os.path.exists(out_shell_alignment_parameters):
+ outputs["out_shell_alignment_parameters"] = out_shell_alignment_parameters
+ if os.path.exists(out_outlier_report):
+ outputs["out_outlier_report"] = out_outlier_report
+ if os.path.exists(outlier_free_data):
+ outputs["outlier_free_data"] = outlier_free_data
+ if op.exists(shell_PE_translation_parameters):
+ outputs["shell_PE_translation_parameters"] = shell_PE_translation_parameters
+ if op.exists(outlier_map):
+ outputs["outlier_map"] = outlier_map
+ if op.exists(outlier_n_stdev_map):
+ outputs["outlier_n_stdev_map"] = outlier_n_stdev_map
+ if op.exists(outlier_n_sqr_stdev_map):
+ outputs["outlier_n_sqr_stdev_map"] = outlier_n_sqr_stdev_map
+
+ return outputs
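A hypothetical usage sketch: the extended interface is driven exactly like nipype's stock Eddy, but exposes the additional outlier and shell-alignment outputs declared above (file names are placeholders):

from dmriprep.interfaces.fsl_extensions import ExtendedEddy

eddy = ExtendedEddy(in_file='dwi.nii.gz', in_mask='brain_mask.nii.gz',
                    in_index='index.txt', in_acqp='acqparams.txt',
                    in_bvec='dwi.bvec', in_bval='dwi.bval', repol=True)
# results = eddy.run(); results.outputs.outlier_map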
diff --git a/dmriprep/interfaces/reportlets.py b/dmriprep/interfaces/reportlets.py
new file mode 100644
index 00000000..610fdcc1
--- /dev/null
+++ b/dmriprep/interfaces/reportlets.py
@@ -0,0 +1,75 @@
+"""Interfaces to generate speciality reportlets."""
+from nilearn.image import threshold_img, load_img
+from niworkflows import NIWORKFLOWS_LOG
+from niworkflows.viz.utils import cuts_from_bbox, compose_view
+from nipype.interfaces.base import File, isdefined
+from nipype.interfaces.mixins import reporting
+
+from ..viz.utils import plot_registration, coolwarm_transparent
+
+
+class FieldmapReportletInputSpec(reporting.ReportCapableInputSpec):
+ reference = File(exists=True, mandatory=True, desc="input reference")
+ fieldmap = File(exists=True, mandatory=True, desc="input fieldmap")
+ mask = File(exists=True, desc="brain mask")
+ out_report = File(
+ "report.svg", usedefault=True, desc="filename for the visual report"
+ )
+
+
+class FieldmapReportlet(reporting.ReportCapableInterface):
+ """An abstract mixin to registration nipype interfaces."""
+
+ _n_cuts = 7
+ input_spec = FieldmapReportletInputSpec
+ output_spec = reporting.ReportCapableOutputSpec
+
+ def __init__(self, **kwargs):
+ """Instantiate FieldmapReportlet."""
+ self._n_cuts = kwargs.pop("n_cuts", self._n_cuts)
+ super(FieldmapReportlet, self).__init__(generate_report=True, **kwargs)
+
+ def _run_interface(self, runtime):
+ return runtime
+
+ def _generate_report(self):
+ """Generate a reportlet."""
+ NIWORKFLOWS_LOG.info("Generating visual report")
+
+ refnii = load_img(self.inputs.reference)
+ fmapnii = load_img(self.inputs.fieldmap)
+ contour_nii = (
+ load_img(self.inputs.mask) if isdefined(self.inputs.mask) else None
+ )
+ mask_nii = threshold_img(refnii, 1e-3)
+ cuts = cuts_from_bbox(contour_nii or mask_nii, cuts=self._n_cuts)
+ fmapdata = fmapnii.get_fdata()
+ vmax = max(fmapdata.max(), abs(fmapdata.min()))
+
+ # Call composer
+ compose_view(
+ plot_registration(
+ refnii,
+ "fixed-image",
+ estimate_brightness=True,
+ cuts=cuts,
+ label="reference",
+ contour=contour_nii,
+ compress=False,
+ ),
+ plot_registration(
+ fmapnii,
+ "moving-image",
+ estimate_brightness=True,
+ cuts=cuts,
+ label="fieldmap (Hz)",
+ contour=contour_nii,
+ compress=False,
+ plot_params={
+ "cmap": coolwarm_transparent(),
+ "vmax": vmax,
+ "vmin": -vmax,
+ },
+ ),
+ out_file=self._out_report,
+ )
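A hypothetical usage sketch of the new reportlet (file names are placeholders):

from dmriprep.interfaces.reportlets import FieldmapReportlet

reportlet = FieldmapReportlet(reference='ref.nii.gz', fieldmap='fieldmap.nii.gz',
                              mask='brain_mask.nii.gz', out_report='fieldmap_report.svg')
# reportlet.run()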
diff --git a/dmriprep/utils/bids.py b/dmriprep/utils/bids.py
index 8a3f4269..7295cb86 100644
--- a/dmriprep/utils/bids.py
+++ b/dmriprep/utils/bids.py
@@ -1,178 +1,146 @@
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Utilities to handle BIDS inputs."""
-import os
-import sys
-import json
-from pathlib import Path
-from bids import BIDSLayout
-
-
-def collect_data(bids_dir, participant_label, task=None, echo=None,
- bids_validate=True):
- """Replacement for niworkflows' version."""
+import warnings
+
+
+def get_bids_dict(layout, participant_label=None, session=None):
+ def merge_dicts(x, y):
+ """
+ A function to merge two dictionaries, making it easier for us to make
+ modality specific queries for dwi images (since they have variable
+ extensions due to having an nii.gz, bval, and bvec file).
+ """
+ z = x.copy()
+ z.update(y)
+ return z
+
+ # get all files matching the specific modality we are using
+ if not participant_label:
+ # list of all the subjects
+ subjs = layout.get_subjects()
+ else:
+ # make it a list so we can iterate
+        if not isinstance(participant_label, list):
+            subjs = [participant_label]
+        else:
+            subjs = participant_label
+        missing = [sub for sub in subjs if sub not in layout.get_subjects()]
+        assert not missing, "subject(s) {} not in the bids folder".format(missing)
+
+ print('\n')
+ print("%s%s" % ('Subject:', subjs))
+    bids_dict = dict()
+    for sub in subjs:
+        avail_seshs = layout.get_sessions(subject=sub, derivatives=False)
+        if not session:
+            # in case there are non-session level inputs
+            seshs = avail_seshs + [None]
+        else:
+            # make a list so we can iterate
+            seshs = session if isinstance(session, list) else [session]
+            missing = [ses for ses in seshs if ses not in avail_seshs]
+            assert not missing, "session(s) {} not in the bids folder".format(missing)
+
+ print("%s%s" % ('Sessions:', seshs))
+ print('\n')
+        # all the combinations of sessions and acquisitions that are possible
+        bids_dict[sub] = {}
+ for ses in seshs:
+ # the attributes for our modality img
+ mod_attributes = [sub, ses]
+ # the keys for our modality img
+ mod_keys = ['subject', 'session']
+ # our query we will use for each modality img
+ mod_query = {'datatype': 'dwi'}
+ for attr, key in zip(mod_attributes, mod_keys):
+ if attr:
+ mod_query[key] = attr
+
+ dwi = layout.get(**merge_dicts(mod_query, {'extension': ['nii', 'nii.gz']}))
+ bval = layout.get(**merge_dicts(mod_query, {'extension': 'bval'}))
+ bvec = layout.get(**merge_dicts(mod_query, {'extension': 'bvec'}))
+ jso = layout.get(**merge_dicts(mod_query, {'extension': 'json'}))
+
+ bids_dict[sub][ses] = {}
+ for acq_ix in range(1, len(dwi) + 1):
+ bids_dict[sub][ses][acq_ix] = {}
+ bids_dict[sub][ses][acq_ix]['dwi_file'] = dwi[acq_ix - 1].path
+ bids_dict[sub][ses][acq_ix]['fbval'] = bval[acq_ix - 1].path
+ bids_dict[sub][ses][acq_ix]['fbvec'] = bvec[acq_ix - 1].path
+ bids_dict[sub][ses][acq_ix]['metadata'] = jso[acq_ix - 1].path
+ return bids_dict
+
+
+class BIDSError(ValueError):
+ def __init__(self, message, bids_root):
+ indent = 10
+ header = '{sep} BIDS root folder: "{bids_root}" {sep}'.format(
+ bids_root=bids_root, sep=''.join(['-'] * indent))
+ self.msg = '\n{header}\n{indent}{message}\n{footer}'.format(
+ header=header, indent=''.join([' '] * (indent + 1)),
+ message=message, footer=''.join(['-'] * len(header))
+ )
+ super(BIDSError, self).__init__(self.msg)
+ self.bids_root = bids_root
+
+
+class BIDSWarning(RuntimeWarning):
+ pass
+
+
+def collect_sessions(bids_dir, session=None, strict=False, bids_validate=True):
+ """
+    List the sessions under the BIDS root and check that the sessions
+    requested through the ``session`` argument exist in that folder.
+    Return the list of sessions to be processed.
+ Requesting all sessions in a BIDS directory root:
+ ...
+ """
+ from bids import BIDSLayout
if isinstance(bids_dir, BIDSLayout):
layout = bids_dir
else:
layout = BIDSLayout(str(bids_dir), validate=bids_validate)
- queries = {
- 'fmap': {'datatype': 'fmap'},
- 'dwi': {'datatype': 'dwi', 'suffix': 'dwi'},
- 'bold': {'datatype': 'func', 'suffix': 'bold'},
- 'sbref': {'datatype': 'func', 'suffix': 'sbref'},
- 'flair': {'datatype': 'anat', 'suffix': 'FLAIR'},
- 't2w': {'datatype': 'anat', 'suffix': 'T2w'},
- 't1w': {'datatype': 'anat', 'suffix': 'T1w'},
- 'roi': {'datatype': 'anat', 'suffix': 'roi'},
- }
-
- if task:
- queries['bold']['task'] = task
-
- if echo:
- queries['bold']['echo'] = echo
-
- subj_data = {
- dtype: sorted(layout.get(return_type='file', subject=participant_label,
- extension=['nii', 'nii.gz'], **query))
- for dtype, query in queries.items()}
-
- return subj_data, layout
-
-
-def write_derivative_description(bids_dir, deriv_dir):
- from ..__about__ import __version__, __url__, DOWNLOAD_URL
-
- bids_dir = Path(bids_dir)
- deriv_dir = Path(deriv_dir)
- desc = {
- 'Name': 'dMRIPrep - dMRI PREProcessing workflow',
- 'BIDSVersion': '1.1.1',
- 'PipelineDescription': {
- 'Name': 'dMRIPrep',
- 'Version': __version__,
- 'CodeURL': DOWNLOAD_URL,
- },
- 'CodeURL': __url__,
- 'HowToAcknowledge':
- 'Please cite https://doi.org/10.5281/zenodo.3392201.',
- }
-
- # Keys that can only be set by environment
- if 'FMRIPREP_DOCKER_TAG' in os.environ:
- desc['DockerHubContainerTag'] = os.environ['FMRIPREP_DOCKER_TAG']
- if 'FMRIPREP_SINGULARITY_URL' in os.environ:
- singularity_url = os.environ['FMRIPREP_SINGULARITY_URL']
- desc['SingularityContainerURL'] = singularity_url
-
- singularity_md5 = _get_shub_version(singularity_url)
- if singularity_md5 and singularity_md5 is not NotImplemented:
- desc['SingularityContainerMD5'] = _get_shub_version(singularity_url)
-
- # Keys deriving from source dataset
- orig_desc = {}
- fname = bids_dir / 'dataset_description.json'
- if fname.exists():
- with fname.open() as fobj:
- orig_desc = json.load(fobj)
-
- if 'DatasetDOI' in orig_desc:
- desc['SourceDatasetsURLs'] = ['https://doi.org/{}'.format(
- orig_desc['DatasetDOI'])]
- if 'License' in orig_desc:
- desc['License'] = orig_desc['License']
-
- with (deriv_dir / 'dataset_description.json').open('w') as fobj:
- json.dump(desc, fobj, indent=4)
-
-
-def validate_input_dir(exec_env, bids_dir, participant_label):
- # Ignore issues and warnings that should not influence FMRIPREP
- import tempfile
- import subprocess
- validator_config_dict = {
- "ignore": [
- "EVENTS_COLUMN_ONSET",
- "EVENTS_COLUMN_DURATION",
- "TSV_EQUAL_ROWS",
- "TSV_EMPTY_CELL",
- "TSV_IMPROPER_NA",
- "VOLUME_COUNT_MISMATCH",
- "BVAL_MULTIPLE_ROWS",
- "BVEC_NUMBER_ROWS",
- "DWI_MISSING_BVAL",
- "INCONSISTENT_SUBJECTS",
- "INCONSISTENT_PARAMETERS",
- "BVEC_ROW_LENGTH",
- "B_FILE",
- "PARTICIPANT_ID_COLUMN",
- "PARTICIPANT_ID_MISMATCH",
- "TASK_NAME_MUST_DEFINE",
- "PHENOTYPE_SUBJECTS_MISSING",
- "STIMULUS_FILE_MISSING",
- "DWI_MISSING_BVEC",
- "EVENTS_TSV_MISSING",
- "TSV_IMPROPER_NA",
- "ACQTIME_FMT",
- "Participants age 89 or higher",
- "DATASET_DESCRIPTION_JSON_MISSING",
- "FILENAME_COLUMN",
- "WRONG_NEW_LINE",
- "MISSING_TSV_COLUMN_CHANNELS",
- "MISSING_TSV_COLUMN_IEEG_CHANNELS",
- "MISSING_TSV_COLUMN_IEEG_ELECTRODES",
- "UNUSED_STIMULUS",
- "CHANNELS_COLUMN_SFREQ",
- "CHANNELS_COLUMN_LOWCUT",
- "CHANNELS_COLUMN_HIGHCUT",
- "CHANNELS_COLUMN_NOTCH",
- "CUSTOM_COLUMN_WITHOUT_DESCRIPTION",
- "ACQTIME_FMT",
- "SUSPICIOUSLY_LONG_EVENT_DESIGN",
- "SUSPICIOUSLY_SHORT_EVENT_DESIGN",
- "MALFORMED_BVEC",
- "MALFORMED_BVAL",
- "MISSING_TSV_COLUMN_EEG_ELECTRODES",
- "MISSING_SESSION"
- ],
- "error": ["NO_T1W"],
- "ignoredFiles": ['/dataset_description.json', '/participants.tsv']
- }
- # Limit validation only to data from requested participants
- if participant_label:
- all_subs = set([s.name[4:] for s in bids_dir.glob('sub-*')])
- selected_subs = set([s[4:] if s.startswith('sub-') else s
- for s in participant_label])
- bad_labels = selected_subs.difference(all_subs)
- if bad_labels:
- error_msg = 'Data for requested participant(s) label(s) not found. Could ' \
- 'not find data for participant(s): %s. Please verify the requested ' \
- 'participant labels.'
- if exec_env == 'docker':
- error_msg += ' This error can be caused by the input data not being ' \
- 'accessible inside the docker container. Please make sure all ' \
- 'volumes are mounted properly (see https://docs.docker.com/' \
- 'engine/reference/commandline/run/#mount-volume--v---read-only)'
- if exec_env == 'singularity':
- error_msg += ' This error can be caused by the input data not being ' \
- 'accessible inside the singularity container. Please make sure ' \
- 'all paths are mapped properly (see https://www.sylabs.io/' \
- 'guides/3.0/user-guide/bind_paths_and_mounts.html)'
- raise RuntimeError(error_msg % ','.join(bad_labels))
-
- ignored_subs = all_subs.difference(selected_subs)
- if ignored_subs:
- for sub in ignored_subs:
- validator_config_dict["ignoredFiles"].append("/sub-%s/**" % sub)
- with tempfile.NamedTemporaryFile('w+') as temp:
- temp.write(json.dumps(validator_config_dict))
- temp.flush()
- try:
- subprocess.check_call(['bids-validator', bids_dir, '-c', temp.name])
- except FileNotFoundError:
- print("bids-validator does not appear to be installed", file=sys.stderr)
-
-
-def _get_shub_version(singularity_url):
- return NotImplemented
+ all_sessions = set(layout.get_sessions())
+
+    # Error: bids_dir does not contain sessions
+ if not all_sessions:
+ raise BIDSError(
+            'Could not find any sessions. Please make sure the BIDS data '
+ 'structure is present and correct. Datasets can be validated online '
+ 'using the BIDS Validator (http://bids-standard.github.io/bids-validator/).\n'
+ 'If you are using Docker for Mac or Docker for Windows, you '
+ 'may need to adjust your "File sharing" preferences.', bids_dir)
+
+    # No specific session was requested, return all
+ if not session:
+ return sorted(all_sessions)
+
+ if isinstance(session, int):
+ session = [session]
+
+ # Drop ses- prefixes
+ session = [ses[4:] if ses.startswith('ses-') else ses for ses in session]
+ # Remove duplicates
+ session = sorted(set(session))
+ # Remove labels not found
+ found_label = sorted(set(session) & all_sessions)
+ if not found_label:
+ raise BIDSError('Could not find session [{}]'.format(
+ ', '.join(session)), bids_dir)
+
+ # Warn if some IDs were not found
+ notfound_label = sorted(set(session) - all_sessions)
+ if notfound_label:
+ exc = BIDSError('Some sessions were not found: {}'.format(
+ ', '.join(notfound_label)), bids_dir)
+ if strict:
+ raise exc
+ warnings.warn(exc.msg, BIDSWarning)
+
+ return found_label
\ No newline at end of file
diff --git a/dmriprep/utils/core.py b/dmriprep/utils/core.py
new file mode 100644
index 00000000..8dcb5a28
--- /dev/null
+++ b/dmriprep/utils/core.py
@@ -0,0 +1,808 @@
+from __future__ import division, print_function
+import os
+import nibabel as nib
+import numpy as np
+
+
+def make_gtab(fbval, fbvec, sesdir, final, b0_thr=100):
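+    """Build a dipy ``GradientTable`` from bval/bvec files and pickle it under ``<sesdir>/dmri_tmp``.
+
+    Illustrative call (file paths are placeholders)::
+
+        gtab_file, gtab, fbval_out, fbvec_out = make_gtab(fbval, fbvec, sesdir, final=False)
+
+    When ``final`` is True, the integer-cast bvals and rescaled bvecs are also written
+    to ``<sesdir>/final_bval.bval`` and ``<sesdir>/final_bvec.bvec``.
+    """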
+ from dipy.io import save_pickle
+ from dipy.core.gradients import gradient_table
+ from dipy.io import read_bvals_bvecs
+
+ if fbval and fbvec:
+ bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
+ else:
+ raise ValueError("Either bvals or bvecs files not found (or rescaling failed)!")
+
+ namer_dir = sesdir + "/dmri_tmp"
+ if not os.path.isdir(namer_dir):
+ os.mkdir(namer_dir)
+
+ gtab_file = "%s%s" % (namer_dir, "/gtab.pkl")
+
+ # Creating the gradient table
+ gtab = gradient_table(bvals, bvecs, atol=1)
+
+ # Correct b0 threshold
+ gtab.b0_threshold = b0_thr
+
+ # Correct b0 mask
+ gtab_bvals = gtab.bvals.copy()
+ b0_thr_ixs = np.where(gtab_bvals < gtab.b0_threshold)[0]
+ gtab_bvals[b0_thr_ixs] = 0
+ gtab.b0s_mask = gtab_bvals == 0
+
+ # Show info
+ print(gtab.info)
+
+ # Save gradient table to pickle
+ save_pickle(gtab_file, gtab)
+
+ if final is True:
+ final_bval_path = sesdir + '/final_bval.bval'
+ final_bvec_path = sesdir + '/final_bvec.bvec'
+ np.savetxt(final_bval_path, bvals.astype('int'), fmt='%i')
+ np.savetxt(final_bvec_path, bvecs.astype('float'), fmt='%10f')
+ else:
+ final_bval_path = None
+ final_bvec_path = None
+
+ return gtab_file, gtab, final_bval_path, final_bvec_path
+
+
+def rename_final_preprocessed_file(in_file, sesdir):
+ import shutil
+ out_file = sesdir + '/final_preprocessed_dwi.nii.gz'
+ shutil.copy(in_file, out_file)
+ return out_file
+
+
+def rescale_bvec(bvec, bvec_rescaled):
+ """
+ Normalizes b-vectors to be of unit length for the non-zero b-values. If the
+ b-value is 0, the vector is untouched.
+
+ Parameters
+ ----------
+ bvec : str
+ File name of the original b-vectors file.
+ bvec_rescaled : str
+ File name of the new (normalized) b-vectors file. Must have extension `.bvec`.
+
+ Returns
+ -------
+ bvec_rescaled : str
+ File name of the new (normalized) b-vectors file. Must have extension `.bvec`.
+ """
+ bv1 = np.array(np.loadtxt(bvec))
+ # Enforce proper dimensions
+ bv1 = bv1.T if bv1.shape[0] == 3 else bv1
+
+ # Normalize values not close to norm 1
+ bv2 = [
+ b / np.linalg.norm(b) if not np.isclose(np.linalg.norm(b), 0) else b
+ for b in bv1
+ ]
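+    # e.g. a row [3, 4, 0] is rescaled to [0.6, 0.8, 0]; an all-zero b0 row is left as-is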
+ np.savetxt(bvec_rescaled, bv2)
+ return bvec_rescaled
+
+
+def is_hemispherical(vecs):
+ """
+ Test whether all points on a unit sphere lie in the same hemisphere.
+
+ **Inputs**
+
+ vecs : numpy.ndarray
+ 2D numpy array with shape (N, 3) where N is the number of points.
+ All points must lie on the unit sphere.
+
+ **Outputs**
+
+ is_hemi : bool
+ If True, one can find a hemisphere that contains all the points.
+ If False, then the points do not lie in any hemisphere
+ pole : numpy.ndarray
+ If `is_hemi == True`, then pole is the "central" pole of the
+ input vectors. Otherwise, pole is the zero vector.
+
+ **References**
+
+ https://rstudio-pubs-static.s3.amazonaws.com/27121_a22e51b47c544980bad594d5e0bb2d04.html # noqa
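+
+    **Examples**
+
+    A quick sanity check: the three positive unit axes lie in one hemisphere
+    whose pole is their normalized mean.
+
+    >>> is_hemi, pole = is_hemispherical(np.eye(3))
+    >>> bool(is_hemi)
+    True
+    >>> np.allclose(pole, np.ones(3) / np.sqrt(3))
+    True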
+ """
+ import itertools
+ if vecs.shape[1] != 3:
+ raise ValueError("Input vectors must be 3D vectors")
+ if not np.allclose(1, np.linalg.norm(vecs, axis=1)):
+ raise ValueError("Input vectors must be unit vectors")
+
+ # Generate all pairwise cross products
+ v0, v1 = zip(*[p for p in itertools.permutations(vecs, 2)])
+ cross_prods = np.cross(v0, v1)
+
+ # Normalize them
+ cross_prods /= np.linalg.norm(cross_prods, axis=1)[:, np.newaxis]
+
+ # `cross_prods` now contains all candidate vertex points for "the polygon"
+ # in the reference. "The polygon" is a subset. Find which points belong to
+ # the polygon using a dot product test with each of the original vectors
+ angles = np.arccos(np.dot(cross_prods, vecs.transpose()))
+
+ # And test whether it is orthogonal or less
+ dot_prod_test = angles <= np.pi / 2.0
+
+ # If there is at least one point that is orthogonal or less to each
+ # input vector, then the points lie on some hemisphere
+ is_hemi = len(vecs) in np.sum(dot_prod_test.astype(int), axis=1)
+
+ if is_hemi:
+ vertices = cross_prods[
+ np.sum(dot_prod_test.astype(int), axis=1) == len(vecs)
+ ]
+ pole = np.mean(vertices, axis=0)
+ pole /= np.linalg.norm(pole)
+ else:
+ pole = np.array([0.0, 0.0, 0.0])
+ return is_hemi, pole
+
+
+def correct_vecs_and_make_b0s(fbval, fbvec, dwi_file, sesdir):
+ from dipy.io import read_bvals_bvecs
+ from dmriprep.utils.core import make_gtab, rescale_bvec, is_hemispherical, make_mean_b0
+
+ namer_dir = sesdir + "/dmri_tmp"
+ if not os.path.isdir(namer_dir):
+ os.mkdir(namer_dir)
+
+ bvec_rescaled = "%s%s" % (namer_dir, "/bvec_scaled.bvec")
+ all_b0s_file = "%s%s" % (namer_dir, "/all_b0s.nii.gz")
+
+ # loading bvecs/bvals
+ bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
+    bvecs[np.where(np.any(abs(bvecs) >= 10, axis=1))] = [1, 0, 0]
+ bvecs[np.where(bvals == 0)] = 0
+ if (
+ len(
+ bvecs[
+ np.where(
+ np.logical_and(
+ bvals > 50, np.all(abs(bvecs) == np.array([0, 0, 0]), axis=1)
+ )
+ )
+ ]
+ )
+ > 0
+ ):
+ raise ValueError(
+ "WARNING: Encountered potentially corrupted bval/bvecs. Check to ensure volumes with a "
+ "diffusion weighting are not being treated as B0's along the bvecs"
+ )
+
+ if (len(bvals) < 10) and (max(bvals) < 1500):
+ raise ValueError('Too few directions in this data. Use of eddy is not recommended.')
+
+ if (len(bvals) < 30) and (max(bvals) < 5000):
+ raise ValueError('Too few directions in this data. Use of eddy is not recommended.')
+
+ np.savetxt(fbval, bvals.astype('int'), fmt='%i')
+ np.savetxt(fbvec, bvecs.astype('float'), fmt='%10f')
+ bvec_rescaled = rescale_bvec(fbvec, bvec_rescaled)
+ vecs_rescaled = np.genfromtxt(bvec_rescaled)
+ vecs = np.round(vecs_rescaled, 8)[~(np.round(vecs_rescaled, 8) == 0).all(1)]
+ [is_hemi, pole] = is_hemispherical(vecs)
+ if is_hemi is True:
+ slm = 'linear'
+ print("Warning: B-vectors for this data are hemispherical at polar vertex: " + str(pole) +
+ " To ensure adequate eddy current correction, eddy will be run using the --slm=linear option.")
+ else:
+ slm = 'none'
+
+ [gtab_file, gtab, _, _] = make_gtab(fbval, bvec_rescaled, sesdir, final=False)
+
+ # Get b0 indices
+ b0s = np.where(gtab.bvals <= gtab.b0_threshold)[0].tolist()
+ print("%s%s" % ("b0's found at: ", b0s))
+
+ # Extract and Combine all b0s collected
+ print("Extracting b0's...")
+ b0_vols = []
+ dwi_img = nib.load(dwi_file)
+ dwi_data = dwi_img.get_data()
+ for b0 in b0s:
+ print(b0)
+ b0_vols.append(dwi_data[:, :, :, b0])
+
+ all_b0s = np.stack(b0_vols, axis=3)
+ all_b0s_aff = dwi_img.affine.copy()
+ all_b0s_aff[3][3] = len(b0_vols)
+ nib.save(nib.Nifti1Image(all_b0s, affine=all_b0s_aff), all_b0s_file)
+ initial_mean_b0 = make_mean_b0(all_b0s_file)
+
+ return initial_mean_b0, gtab_file, b0_vols, b0s, slm
+
+
+def topup_inputs_from_dwi_files(dwi_file, sesdir, spec_acqp, b0_vols, b0s, vol_legend):
+    """Create a TOPUP acqparams (datain) spec and a 4D imain series from the extracted b0 volumes."""
+    from collections import defaultdict
+    import pkg_resources
+    from shutil import copyfile
+    import random
+
+    # Write the datain.txt file
+ datain_lines = []
+ spec_counts = defaultdict(int)
+
+ if isinstance(spec_acqp, list):
+ unique_encodings = []
+ for k in range(len(spec_acqp)):
+ unique_encodings.extend([j for j in list(set([float(i) for i in spec_acqp[k].split(' ')])) if (j == 1.0) or (j == -1.0)])
+ else:
+ unique_encodings = [j for j in list(set([float(i) for i in spec_acqp.split(' ')])) if (j == 1.0) or (j == -1.0)]
+
+ if len(unique_encodings) > 1:
+        # Reverse phase encodings are present, so susceptibility can be estimated via topup
+ susceptibility_args = '--estimate_move_by_susceptibility'
+ else:
+ susceptibility_args = ''
+
+ dwi_img = nib.load(dwi_file)
+ imain_data = []
+ ix = 0
+ for b0_vol in b0_vols:
+ num_trs = 1 if len(b0_vol.shape) < 4 else b0_vol.shape[3]
+ if vol_legend:
+ vol_legend_consec = [sum(vol_legend[:i+1]) for i in range(len(vol_legend))]
+ b0_level = next(x[0] for x in enumerate(vol_legend_consec) if x[1] > b0s[ix])
+ datain_lines.extend([spec_acqp[b0_level]] * num_trs)
+ spec_counts[spec_acqp[b0_level]] += num_trs
+ else:
+ datain_lines.extend([spec_acqp] * num_trs)
+ spec_counts[spec_acqp] += num_trs
+ imain_data.append(b0_vol)
+ ix = ix + 1
+
+ # Make a 4d series
+ imain_output = sesdir + "/topup_imain.nii.gz"
+ imain_data_4d = [imain_vol[..., np.newaxis] for imain_vol in imain_data]
+ imain_img = nib.Nifti1Image(
+ np.concatenate(imain_data_4d, 3), dwi_img.affine, dwi_img.header
+ )
+ assert imain_img.shape[3] == len(datain_lines)
+ imain_img.to_filename(imain_output)
+
+ # Write the datain text file
+ datain_file = sesdir + "/acqparams.txt"
+ with open(datain_file, "w") as f:
+ f.write("\n".join(datain_lines))
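+    # One line per b0 volume in topup_imain, in acquisition order, so that TOPUP can
+    # pair each extracted b0 with its phase-encoding direction and readout time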
+
+ example_b0 = b0_vols[0]
+ topup_config = pkg_resources.resource_filename('dmriprep.config', "b02b0.cnf")
+ topup_config_odd = pkg_resources.resource_filename('dmriprep.config', "b02b0_1.cnf")
+
+ if 1 in (example_b0.shape[0] % 2, example_b0.shape[1] % 2, example_b0.shape[2] % 2):
+ print("Using slower b02b0_1.cnf because an axis has an odd number of slices")
+ topup_config_odd_tmp = sesdir + '/b02b0_1_' + str(random.randint(1, 1000)) + '.cnf'
+ copyfile(topup_config_odd, topup_config_odd_tmp)
+ topup_config = topup_config_odd_tmp
+ else:
+ topup_config_tmp = sesdir + '/b02b0_' + str(random.randint(1, 1000)) + '.cnf'
+ copyfile(topup_config, topup_config_tmp)
+ topup_config = topup_config_tmp
+
+ return datain_file, imain_output, example_b0, datain_lines, topup_config, susceptibility_args
+
+
+def eddy_inputs_from_dwi_files(sesdir, gtab_file):
+ from dipy.io import load_pickle
+
+ b0s_mask_all = []
+ gtab = load_pickle(gtab_file)
+ b0s_mask = gtab.b0s_mask
+ b0s_mask_all.append(b0s_mask)
+
+ # Create the index file
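+    # (one entry per volume, pointing at a line of acqparams.txt; the counter is
+    # incremented at every b0 volume seen after the first diffusion-weighted one,
+    # so e.g. bvals [0, 0, 1000, 1000, 0, 1000] yield "1 1 1 1 2 2")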
+ index_file = sesdir + "/index.txt"
+ ix_vec = []
+ i = 1
+ pastfirst0s = False
+ for val in gtab.bvals:
+ if val > gtab.b0_threshold:
+ pastfirst0s = True
+ elif val <= gtab.b0_threshold and pastfirst0s is True:
+ i = i + 1
+ ix_vec.append(i)
+ with open(index_file, "w") as f:
+ f.write(" ".join(map(str, ix_vec)))
+
+ return index_file
+
+
+def id_outliers_fn(outlier_report, threshold, dwi_file):
+ """Get list of scans that exceed threshold for number of outliers
+ Parameters
+ ----------
+ outlier_report: string
+ Path to the fsl_eddy outlier report
+ threshold: int or float
+ If threshold is an int, it is treated as number of allowed outlier
+ slices. If threshold is a float between 0 and 1 (exclusive), it is
+ treated the fraction of allowed outlier slices before we drop the
+ whole volume.
+ dwi_file: string
+ Path to nii dwi file to determine total number of slices
+ Returns
+ -------
+ drop_scans: numpy.ndarray
+ List of scan indices to drop
+ """
+ import nibabel as nib
+ import numpy as np
+ import os.path as op
+ import parse
+
+ with open(op.abspath(outlier_report), "r") as fp:
+ lines = fp.readlines()
+
+ p = parse.compile(
+ "Slice {slice:d} in scan {scan:d} is an outlier with "
+ "mean {mean_sd:f} standard deviations off, and mean "
+ "squared {mean_sq_sd:f} standard deviations off."
+ )
+
+ outliers = [p.parse(l).named for l in lines]
+ scans = {d["scan"] for d in outliers}
+
+ def num_outliers(scan, outliers):
+ return len([d for d in outliers if d["scan"] == scan])
+
+ if 0 < threshold < 1:
+ img = nib.load(dwi_file)
+ try:
+ threshold *= img.header.get_n_slices()
+ except nib.spatialimages.HeaderDataError:
+ print(
+ "WARNING. We are not sure which dimension has the "
+ "slices in this image. So we are using the 3rd dim.",
+ img.shape,
+ )
+ threshold *= img.shape[2]
+
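+    # e.g. threshold=0.02 on a 60-slice acquisition allows at most one outlier
+    # slice per volume (0.02 * 60 = 1.2), so any volume with two or more is dropped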
+ drop_scans = np.array([s for s in scans if num_outliers(s, outliers) > threshold])
+
+ outpath = op.abspath("dropped_scans.txt")
+ np.savetxt(outpath, drop_scans, fmt="%d")
+
+ return drop_scans, outpath
+
+
+def drop_outliers_fn(in_file, in_bval, in_bvec, drop_scans, in_sigma=None, perc_missing=0.15):
+ """Drop outlier volumes from dwi file
+ Parameters
+ ----------
+ in_file: string
+ Path to nii dwi file to drop outlier volumes from
+ in_bval: string
+ Path to bval file to drop outlier volumes from
+ in_bvec: string
+ Path to bvec file to drop outlier volumes from
+ drop_scans: numpy.ndarray or str
+ List of scan indices to drop. If str, then assume path to text file
+ listing outlier volumes.
+
+ Returns
+ -------
+ out_file: string
+ Path to "thinned" output dwi file
+ out_bval: string
+ Path to "thinned" output bval file
+ out_bvec: string
+ Path to "thinned" output bvec file
+ """
+ import nibabel as nib
+ import numpy as np
+ import os.path as op
+ from nipype.utils.filemanip import fname_presuffix
+
+ if isinstance(drop_scans, str):
+ try:
+ drop_scans = np.genfromtxt(drop_scans).tolist()
+ if not isinstance(drop_scans, list):
+ drop_scans = [drop_scans]
+
+ except TypeError:
+ print(
+ "Unrecognized file format. Unable to load vector from drop_scans file."
+ )
+
+ print("%s%s" % ('Dropping outlier volumes:\n', drop_scans))
+
+ img = nib.load(op.abspath(in_file))
+ drop_perc = (len(drop_scans))/float(img.shape[-1])
+ if drop_perc > perc_missing:
+ raise ValueError('Missing > ' + str(np.round(100*drop_perc, 2)) + '% of volumes after outlier removal. '
+                         'This dataset is unusable based on the '
+ 'given rejection threshold.')
+ # drop 4d outliers from dwi
+ img_data = img.get_data()
+ img_data_thinned = np.delete(img_data, drop_scans, axis=3)
+ if isinstance(img, nib.nifti1.Nifti1Image):
+ img_thinned = nib.Nifti1Image(
+ img_data_thinned.astype(np.float64), img.affine, header=img.header
+ )
+ elif isinstance(img, nib.nifti2.Nifti2Image):
+ img_thinned = nib.Nifti2Image(
+ img_data_thinned.astype(np.float64), img.affine, header=img.header
+ )
+ else:
+ raise TypeError("in_file does not contain Nifti image datatype.")
+
+ out_file = fname_presuffix(in_file, suffix="_thinned", newpath=op.abspath("."))
+ nib.save(img_thinned, op.abspath(out_file))
+
+ # drop outliers from sigma (if 4d)
+ if in_sigma is not None:
+ sigma = np.load(op.abspath(in_sigma))
+ if len(sigma.shape) == 4:
+ sigma_thinned = np.delete(sigma, drop_scans, axis=3)
+ out_sigma = fname_presuffix(in_sigma, suffix="_thinned", newpath=op.abspath("."))
+            np.save(op.abspath(out_sigma), sigma_thinned)
+ else:
+ out_sigma = in_sigma
+ else:
+ out_sigma = None
+
+ bval = np.loadtxt(in_bval)
+ bval_thinned = np.delete(bval, drop_scans, axis=0)
+ out_bval = fname_presuffix(in_bval, suffix="_thinned", newpath=op.abspath("."))
+ np.savetxt(out_bval, bval_thinned.astype('int'), fmt='%i')
+
+ bvec = np.loadtxt(in_bvec)
+ if bvec.shape[0] == 3:
+ bvec_thinned = np.delete(bvec, drop_scans, axis=1)
+ else:
+ bvec_thinned = np.delete(bvec, drop_scans, axis=0)
+ out_bvec = fname_presuffix(in_bvec, suffix="_thinned", newpath=op.abspath("."))
+ np.savetxt(out_bvec, bvec_thinned.astype('float'), fmt='%10f')
+
+ return out_file, out_bval, out_bvec, out_sigma
+
+
+def get_params(A):
+ """This is a copy of spm's spm_imatrix where
+ we already know the rotations and translations matrix,
+ shears and zooms (as outputs from fsl FLIRT/avscale)
+ Let A = the 4x4 rotation and translation matrix
+ R = [ c5*c6, c5*s6, s5]
+ [-s4*s5*c6-c4*s6, -s4*s5*s6+c4*c6, s4*c5]
+ [-c4*s5*c6+s4*s6, -c4*s5*s6-s4*c6, c4*c5]
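+
+    For a pure translation (identity rotation block), the rotations come back as
+    zeros and the translations as the last column, e.g. an affine with unit
+    rotation and offset (2, -1, 3) yields rotations [0, 0, 0] and
+    translations [2, -1, 3].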
+ """
+
+ def rang(b):
+ a = min(max(b, -1), 1)
+ return a
+
+ Ry = np.arcsin(A[0, 2])
+ # Rx = np.arcsin(A[1, 2] / np.cos(Ry))
+ # Rz = np.arccos(A[0, 1] / np.sin(Ry))
+
+ if (abs(Ry) - np.pi / 2) ** 2 < 1e-9:
+ Rx = 0
+ Rz = np.arctan2(-rang(A[1, 0]), rang(-A[2, 0] / A[0, 2]))
+ else:
+ c = np.cos(Ry)
+ Rx = np.arctan2(rang(A[1, 2] / c), rang(A[2, 2] / c))
+ Rz = np.arctan2(rang(A[0, 1] / c), rang(A[0, 0] / c))
+
+ rotations = [Rx, Ry, Rz]
+ translations = [A[0, 3], A[1, 3], A[2, 3]]
+
+ return rotations, translations
+
+
+def get_flirt_motion_parameters(flirt_mats):
+ import os.path as op
+ from nipype.interfaces.fsl.utils import AvScale
+ from dmriprep.utils.core import get_params
+
+ motion_params = open(op.abspath("motion_parameters.par"), "w")
+ for mat in flirt_mats:
+ res = AvScale(mat_file=mat).run()
+ A = np.asarray(res.outputs.rotation_translation_matrix)
+ rotations, translations = get_params(A)
+ for i in rotations + translations:
+ motion_params.write("%f " % i)
+ motion_params.write("\n")
+ motion_params.close()
+ motion_params = op.abspath("motion_parameters.par")
+ return motion_params
+
+
+def read_nifti_sidecar(json_file):
+ import json
+
+ with open(json_file, "r") as f:
+ metadata = json.load(f)
+
+ if 'vol_legend' not in metadata:
+ pe_dir = metadata["PhaseEncodingDirection"]
+ trt = metadata.get("TotalReadoutTime")
+ if trt is None:
+ pass
+
+ return {
+ "PhaseEncodingDirection": pe_dir,
+ "TotalReadoutTime": trt,
+ }
+ else:
+        idxs = list(range(1, len(metadata['vol_legend']) + 1))
+ pe_dirs = []
+ trts = []
+ for idx in idxs:
+ pe_dirs.append(metadata["PhaseEncodingDirection_" + str(idx)])
+ trts.append(metadata.get("TotalReadoutTime_" + str(idx)))
+ if len(list(set(trts))) == 1 and list(set(trts))[0] is None:
+ pass
+ vol_legend = metadata['vol_legend']
+
+ return {
+ "PhaseEncodingDirection": pe_dirs,
+ "TotalReadoutTime": trts,
+ "vol_legend": vol_legend,
+ }
+
+
+def extract_metadata(metadata):
+ from dmriprep.utils.core import read_nifti_sidecar, compute_readout
+
+ acqp_lines = {
+ "i": "1 0 0 %.6f",
+ "j": "0 1 0 %.6f",
+ "k": "0 0 1 %.6f",
+ "i-": "-1 0 0 %.6f",
+ "j-": "0 -1 0 %.6f",
+ "k-": "0 0 -1 %.6f",
+ }
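+    # Each template is an FSL acqparams line "<PE vector> <total readout time>";
+    # e.g. a "j-" phase encoding with a 0.05 s readout yields "0 -1 0 0.050000"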
+ spec = read_nifti_sidecar(metadata)
+
+    try:
+        total_rdout = spec["TotalReadoutTime"]
+    except KeyError:
+        try:
+            total_rdout = compute_readout(spec)
+        except KeyError as e:
+            raise ValueError('Readout time not found in .json metadata and could not be computed '
+                             'using %s. Cannot proceed with TOPUP/Eddy' % str(e))
+
+ if isinstance(list(spec.values())[0], list):
+ total_readout = list(set(total_rdout))
+ if len(total_readout) != 1:
+ raise ValueError('Multiple readout times detected!')
+ spec_lines = []
+ spec_acqps = []
+ for line in spec["PhaseEncodingDirection"]:
+ spec_line = acqp_lines[line]
+ spec_lines.append(acqp_lines[line])
+ spec_acqps.append(spec_line % total_readout[0])
+ vol_legend = spec['vol_legend']
+ else:
+ spec_line = acqp_lines[spec["PhaseEncodingDirection"]]
+ spec_acqps = spec_line % total_rdout
+ vol_legend = None
+ return spec_acqps, vol_legend
+
+
+def check_shelled(gtab_file):
+ from dipy.io import load_pickle
+
+ # Check whether data is shelled
+ gtab = load_pickle(gtab_file)
+ if len(np.unique(gtab.bvals)) > 2:
+ is_shelled = True
+ else:
+ is_shelled = False
+ return is_shelled
+
+
+def make_mean_b0(in_file):
+ import time
+
+ b0_img = nib.load(in_file)
+ b0_img_data = b0_img.get_data()
+ mean_b0 = np.mean(b0_img_data, axis=3, dtype=b0_img_data.dtype)
+ mean_file_out = in_file.split(".nii.gz")[0] + "_mean_b0.nii.gz"
+ nib.save(
+ nib.Nifti1Image(mean_b0, affine=b0_img.affine, header=b0_img.header),
+ mean_file_out,
+ )
+ while os.path.isfile(mean_file_out) is False:
+ time.sleep(1)
+ return mean_file_out
+
+
+def suppress_gibbs(in_file, sesdir):
+ from time import time
+ from dipy.denoise.gibbs import gibbs_removal
+
+ t = time()
+ img = nib.load(in_file)
+ img_data = img.get_data()
+ gibbs_corr_data = gibbs_removal(img_data)
+ print("Time taken for gibbs suppression: ", -t + time())
+ gibbs_free_file = sesdir + "/gibbs_free_data.nii.gz"
+ nib.save(
+ nib.Nifti1Image(gibbs_corr_data.astype(np.float32), img.affine, img.header),
+ gibbs_free_file,
+ )
+ return gibbs_free_file
+
+
+def estimate_sigma(in_file, gtab_file, mask, denoise_strategy, N=1, smooth_factor=3):
+ import os
+ from dipy.io import load_pickle
+ from dipy.denoise.noise_estimate import estimate_sigma
+ from dipy.denoise.pca_noise_estimate import pca_noise_estimate
+
+ gtab = load_pickle(gtab_file)
+ sigma_path = os.path.dirname(gtab_file) + '/sigma.npy'
+ img = nib.load(in_file)
+ img_data = np.asarray(img.get_data(caching='unchanged'), dtype=np.float32)
+ mask_data = np.asarray(nib.load(mask).get_data(caching='unchanged'), dtype=np.bool)
+
+ if denoise_strategy == "mppca" or denoise_strategy == "localpca":
+ sigma = pca_noise_estimate(img_data, gtab, correct_bias=True, smooth=smooth_factor)
+ elif denoise_strategy == "nlmeans":
+ sigma = estimate_sigma(img_data, N=N)
+ elif denoise_strategy == 'nlsam':
+        try:
+            import nlsam  # noqa: F401
+        except ImportError:
+            raise ImportError('NLSAM not installed. See https://github.com/samuelstjean/nlsam.git')
+ from nlsam.smoothing import local_standard_deviation
+ from nlsam.bias_correction import root_finder_sigma
+
+ # Fix the implausible signals
+ img_data[..., gtab.b0s_mask] = np.max(img_data, axis=-1, keepdims=True)
+
+ # Noise estimation part
+ sigma = root_finder_sigma(img_data, local_standard_deviation(img_data), N, mask=mask_data)
+    else:
+        raise ValueError("Denoising strategy not recognized.")
+
+ np.save(sigma_path, sigma)
+ return sigma_path
+
+
+def denoise(
+ in_file,
+ sesdir,
+ gtab_file,
+ mask,
+ denoise_strategy,
+ sigma_path,
+ omp_nthreads,
+ N=1,
+ patch_radius=2,
+ tau_factor=2.3,
+ block_radius=1,
+ n_iter=10,
+ sh_order=8
+):
+ from time import time
+ from dipy.denoise.localpca import genpca
+ from dipy.denoise.nlmeans import nlmeans
+ from dipy.io import load_pickle
+
+ gtab = load_pickle(gtab_file)
+ t = time()
+ img = nib.load(in_file)
+ img_data = np.asarray(img.get_data(caching='unchanged'), dtype=np.float32)
+ mask_data = np.asarray(nib.load(mask).get_data(caching='unchanged'), dtype=np.bool)
+
+    # A sigma estimate is required for every denoising strategy
+    if not sigma_path:
+        raise ValueError('Cannot run denoising without sigma estimate!')
+    sigma = np.load(sigma_path)
+ if denoise_strategy == "mppca":
+ print('Running Marchenko-Pastur(MP) PCA denoising...')
+ img_data_den = genpca(img_data, sigma=sigma, mask=mask_data,
+ patch_radius=patch_radius,
+ pca_method='eig', tau_factor=None,
+ return_sigma=False, out_dtype=None)
+ elif denoise_strategy == "localpca":
+ print('Running Local PCA denoising...')
+ img_data_den = genpca(img_data, sigma=sigma, mask=mask_data,
+ patch_radius=patch_radius,
+ pca_method='eig', tau_factor=tau_factor,
+ return_sigma=False, out_dtype=None)
+ elif denoise_strategy == "nlmeans":
+ print('Running Non-Local Means denoising...')
+ img_data_den = nlmeans(
+ img_data,
+ sigma=sigma,
+ mask=mask_data,
+ patch_radius=patch_radius,
+ block_radius=block_radius,
+ rician=True,
+ )
+ elif denoise_strategy == 'nlsam':
+ print('Running Non Local Spatial and Angular Matching denoising...')
+        try:
+            import nlsam  # noqa: F401
+        except ImportError:
+            raise ImportError('NLSAM not installed. See https://github.com/samuelstjean/nlsam.git')
+ from nlsam.denoiser import nlsam_denoise
+ from nlsam.smoothing import sh_smooth
+ from nlsam.bias_correction import stabilization
+
+ # Fix the implausible signals
+ img_data[..., gtab.b0s_mask] = np.max(img_data, axis=-1, keepdims=True)
+
+ # Stabilizer part
+ data_stabilized = stabilization(img_data, sh_smooth(img_data, gtab.bvals, gtab.bvecs.T, sh_order=sh_order),
+ mask=mask_data, sigma=sigma, N=N)
+
+ # Denoising
+ sigma_3d = np.median(sigma, axis=-1)
+ block_size = np.array([3, 3, 3, 5])
+ img_data_den = nlsam_denoise(data_stabilized, sigma_3d, gtab.bvals, gtab.bvecs, block_size,
+ mask=mask_data, is_symmetric=False, subsample=True,
+ n_iter=n_iter, b0_threshold=gtab.b0_threshold, split_b0s=True,
+ verbose=True, use_threading=True, mp_method=None, n_cores=int(omp_nthreads))
+
+ else:
+ raise ValueError("Denoising strategy not recognized.")
+ print("Time taken for denoising: ", -t + time())
+ denoised_file = sesdir + "/preprocessed_data_denoised_" + denoise_strategy + ".nii.gz"
+ nib.save(
+ nib.Nifti1Image(img_data_den.astype(np.float32), img.affine, img.header),
+ denoised_file,
+ )
+
+ return denoised_file
+
+
+def compute_readout(params):
+ """
+ Computes readout time from epi params (see `eddy documentation
+ `_).
+
+ .. warning:: ``params['echospacing']`` should be in *sec* units.
+
+
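+    For instance, with ``echospacing = 0.00072`` s, ``epi_factor = 128`` and
+    ``acc_factor = 2``, the effective readout time is
+    ``0.5 * 127 * 0.00072 = 0.04572`` s.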
+ """
+ epi_factor = 1.0
+ acc_factor = 1.0
+ try:
+ if params['epi_factor'] > 1:
+ epi_factor = float(params['epi_factor'] - 1)
+    except (KeyError, TypeError):
+ pass
+ try:
+ if params['acc_factor'] > 1:
+ acc_factor = 1.0 / params['acc_factor']
+    except (KeyError, TypeError):
+ pass
+ return acc_factor * epi_factor * params['echospacing']
+
+
+def make_basename(out_corrected):
+ base_name = out_corrected.split('.nii.gz')[0]
+ return base_name
+
+
+def bytesto(bytes, to, bsize=1024):
+ """convert bytes to megabytes, etc.
+ sample code:
+ print('mb= ' + str(bytesto(314575262000000, 'm')))
+ sample output:
+ mb= 300002347.946
+ """
+
+    a = {'k': 1, 'm': 2, 'g': 3, 't': 4, 'p': 5, 'e': 6}
+    r = float(bytes)
+    for i in range(a[to]):
+        r = r / bsize
+
+    return r
\ No newline at end of file
diff --git a/dmriprep/utils/qc.py b/dmriprep/utils/qc.py
new file mode 100644
index 00000000..fa3783cb
--- /dev/null
+++ b/dmriprep/utils/qc.py
@@ -0,0 +1,480 @@
+import base64
+import os.path as op
+from io import BytesIO
+import matplotlib
+matplotlib.use('agg')
+import matplotlib.pyplot as plt
+import nibabel as nib
+import numpy as np
+from dipy.segment.mask import median_otsu
+from nipype.utils.filemanip import save_json, load_json
+
+
+def normalize_xform(img):
+ """ Set identical, valid qform and sform matrices in an image
+ Selects the best available affine (sform > qform > shape-based), and
+ coerces it to be qform-compatible (no shears).
+ The resulting image represents this same affine as both qform and sform,
+ and is marked as NIFTI_XFORM_ALIGNED_ANAT, indicating that it is valid,
+ not aligned to template, and not necessarily preserving the original
+ coordinates.
+ If header would be unchanged, returns input image.
+ """
+ # Let nibabel convert from affine to quaternions, and recover xform
+ tmp_header = img.header.copy()
+ tmp_header.set_qform(img.affine)
+ xform = tmp_header.get_qform()
+ xform_code = 2
+
+ # Check desired codes
+ qform, qform_code = img.get_qform(coded=True)
+ sform, sform_code = img.get_sform(coded=True)
+ if all(
+ (
+ qform is not None and np.allclose(qform, xform),
+ sform is not None and np.allclose(sform, xform),
+ int(qform_code) == xform_code,
+ int(sform_code) == xform_code,
+ )
+ ):
+ return img
+
+ new_img = img.__class__(img.get_data(), xform, img.header)
+ # Unconditionally set sform/qform
+ new_img.set_sform(xform, xform_code)
+ new_img.set_qform(xform, xform_code)
+
+ return new_img
+
+
+def reorient_dwi(dwi_prep, bvecs, out_dir):
+ """
+ A function to reorient any dwi image and associated bvecs to RAS+.
+
+ Parameters
+ ----------
+ dwi_prep : str
+ File path to a dwi Nifti1Image.
+ bvecs : str
+ File path to corresponding bvecs file.
+ out_dir : str
+ Path to output directory.
+
+ Returns
+ -------
+ out_fname : str
+ File path to the reoriented dwi Nifti1Image.
+ out_bvec_fname : str
+ File path to corresponding reoriented bvecs file.
+ """
+ from dmriprep.utils.qc import normalize_xform
+
+ fname = dwi_prep
+ bvec_fname = bvecs
+ out_bvec_fname = "%s%s" % (out_dir, "/bvecs_reor.bvec")
+
+ input_img = nib.load(fname)
+ input_axcodes = nib.aff2axcodes(input_img.affine)
+ reoriented = nib.as_closest_canonical(input_img)
+ normalized = normalize_xform(reoriented)
+ # Is the input image oriented how we want?
+ new_axcodes = ("R", "A", "S")
+ if normalized is not input_img:
+ out_fname = "%s%s%s%s" % (
+ out_dir,
+ "/",
+ dwi_prep.split("/")[-1].split(".nii.gz")[0],
+ "_reor_RAS.nii.gz",
+ )
+ print("%s%s%s" % ("Reorienting ", dwi_prep, " to RAS+..."))
+
+ # Flip the bvecs
+ input_orientation = nib.orientations.axcodes2ornt(input_axcodes)
+ desired_orientation = nib.orientations.axcodes2ornt(new_axcodes)
+ transform_orientation = nib.orientations.ornt_transform(
+ input_orientation, desired_orientation
+ )
+ bvec_array = np.loadtxt(bvec_fname)
+ if bvec_array.shape[0] != 3:
+ bvec_array = bvec_array.T
+ if not bvec_array.shape[0] == transform_orientation.shape[0]:
+ raise ValueError("Unrecognized bvec format")
+ output_array = np.zeros_like(bvec_array)
+ for this_axnum, (axnum, flip) in enumerate(transform_orientation):
+ output_array[this_axnum] = bvec_array[int(axnum)] * float(flip)
+ np.savetxt(out_bvec_fname, output_array, fmt="%.8f ")
+ else:
+ out_fname = "%s%s%s%s" % (
+ out_dir,
+ "/",
+ dwi_prep.split("/")[-1].split(".nii.gz")[0],
+ "_noreor_RAS.nii.gz",
+ )
+ out_bvec_fname = bvec_fname
+
+ normalized.to_filename(out_fname)
+
+ return out_fname, out_bvec_fname
+
+
+def reorient_img(img, out_dir):
+ """
+ A function to reorient any non-dwi image to RAS+.
+
+ Parameters
+ ----------
+ img : str
+ File path to a Nifti1Image.
+ out_dir : str
+ Path to output directory.
+
+ Returns
+ -------
+ out_name : str
+ File path to reoriented Nifti1Image.
+ """
+ from dmriprep.utils.qc import normalize_xform
+
+ # Load image, orient as RAS
+ orig_img = nib.load(img)
+ reoriented = nib.as_closest_canonical(orig_img)
+ normalized = normalize_xform(reoriented)
+
+ # Image may be reoriented
+ if normalized is not orig_img:
+ print("%s%s%s" % ("Reorienting ", img, " to RAS+..."))
+ out_name = "%s%s%s%s" % (
+ out_dir,
+ "/",
+ img.split("/")[-1].split(".nii.gz")[0],
+ "_reor_RAS.nii.gz",
+ )
+ else:
+ out_name = "%s%s%s%s" % (
+ out_dir,
+ "/",
+ img.split("/")[-1].split(".nii.gz")[0],
+ "_noreor_RAS.nii.gz",
+ )
+
+ normalized.to_filename(out_name)
+
+ return out_name
+
+
+def match_target_vox_res(img_file, vox_size, out_dir):
+ """
+ A function to resample an image to a given isotropic voxel resolution.
+
+ Parameters
+ ----------
+ img_file : str
+ File path to a Nifti1Image.
+ vox_size : str
+ Voxel size in mm. (e.g. 2mm).
+ out_dir : str
+ Path to output directory.
+
+ Returns
+ -------
+ img_file : str
+ File path to resampled Nifti1Image.
+ """
+ import os
+ from dipy.align.reslice import reslice
+
+ # Check dimensions
+ img = nib.load(img_file)
+ data = img.get_fdata()
+ affine = img.affine
+ hdr = img.header
+ zooms = hdr.get_zooms()[:3]
+ if vox_size == "1mm":
+ new_zooms = (1.0, 1.0, 1.0)
+ elif vox_size == "2mm":
+ new_zooms = (2.0, 2.0, 2.0)
+
+ if (abs(zooms[0]), abs(zooms[1]), abs(zooms[2])) != new_zooms:
+ print("Reslicing image " + img_file + " to " + vox_size + "...")
+ img_file_res = "%s%s%s%s%s%s" % (
+ out_dir,
+ "/",
+ os.path.basename(img_file).split(".nii.gz")[0],
+ "_res",
+ vox_size,
+ ".nii.gz",
+ )
+ data2, affine2 = reslice(data, affine, zooms, new_zooms)
+ img2 = nib.Nifti1Image(data2, affine=affine2)
+ nib.save(img2, img_file_res)
+ img_file = img_file_res
+ else:
+ img_file_nores = "%s%s%s%s%s%s" % (
+ out_dir,
+ "/",
+ os.path.basename(img_file).split(".nii.gz")[0],
+ "_nores",
+ vox_size,
+ ".nii.gz",
+ )
+ nib.save(img, img_file_nores)
+ img_file = img_file_nores
+
+ return img_file
+
+
+def check_orient_and_dims(infile, vox_size, bvecs, outdir, overwrite=True):
+ """
+ An API to reorient any image to RAS+ and resample any image to a given voxel resolution.
+
+ Parameters
+ ----------
+ infile : str
+ File path to a dwi Nifti1Image.
+ vox_size : str
+ Voxel size in mm. (e.g. 2mm).
+ bvecs : str
+ File path to corresponding bvecs file if infile is a dwi.
+ outdir : str
+ Path to output directory.
+ overwrite : bool
+ Boolean indicating whether to overwrite existing outputs. Default is True.
+
+ Returns
+ -------
+ outfile : str
+ File path to the reoriented and/or resample Nifti1Image.
+ bvecs : str
+ File path to corresponding reoriented bvecs file if outfile is a dwi.
+ """
+ import warnings
+
+ warnings.filterwarnings("ignore")
+ from dmriprep.utils.qc import reorient_dwi, match_target_vox_res
+
+    # Check orientation (dwi case)
+ if ("RAS" not in infile) or (overwrite is True):
+ [infile, bvecs] = reorient_dwi(infile, bvecs, outdir)
+ # Check dimensions
+ if ("reor" not in infile) or (overwrite is True):
+ outfile = match_target_vox_res(infile, vox_size, outdir)
+ print(outfile)
+
+ return outfile, bvecs
+
+
+def mplfig(data, outfile=None, as_bytes=False):
+ fig = plt.figure(frameon=False, dpi=data.shape[0])
+ fig.set_size_inches(float(data.shape[1])/data.shape[0], 1)
+ ax = plt.Axes(fig, [0., 0., 1., 1.])
+ ax.set_axis_off()
+ fig.add_axes(ax)
+ ax.imshow(data, aspect=1, cmap=plt.cm.Greys_r) # previous aspect="normal"
+ if outfile:
+ fig.savefig(outfile, dpi=data.shape[0], transparent=True)
+ plt.close()
+ return outfile
+ if as_bytes:
+ IObytes = BytesIO()
+ plt.savefig(IObytes, format='png', dpi=data.shape[0], transparent=True)
+ IObytes.seek(0)
+ base64_jpgData = base64.b64encode(IObytes.read())
+ return base64_jpgData.decode("ascii")
+
+
+def mplfigcontour(data, outfile=None, as_bytes=False):
+ fig = plt.figure(frameon=False)
+ fig.set_size_inches(float(data.shape[1])/data.shape[0], 1)
+ ax = plt.Axes(fig, [0., 0., 1., 1.])
+ ax.set_axis_off()
+ fig.add_axes(ax)
+
+ bg = np.zeros(data.shape)
+ bg[:] = np.nan
+ ax.imshow(bg, aspect=1, cmap=plt.cm.Greys_r) # used to be aspect="normal"
+ ax.contour(data, colors="red", linewidths=0.1)
+ if outfile:
+ fig.savefig(outfile, dpi=data.shape[0], transparent=True)
+ plt.close()
+ return outfile
+ if as_bytes:
+ IObytes = BytesIO()
+ plt.savefig(IObytes, format='png', dpi=data.shape[0], transparent=True)
+ IObytes.seek(0)
+ base64_jpgData = base64.b64encode(IObytes.read())
+ return base64_jpgData.decode("ascii")
+
+
+def load_and_reorient(filename):
+ img = nib.load(filename)
+ data, aff = img.get_data(), img.affine
+ data = reorient_array(data, aff)
+ return data
+
+
+def reshape3D(data, n=256):
+ return np.pad(data, (
+ (
+ (n-data.shape[0]) // 2,
+ ((n-data.shape[0]) + (data.shape[0] % 2 > 0)) // 2
+ ),
+ (
+ (n-data.shape[1]) // 2,
+ ((n-data.shape[1]) + (data.shape[1] % 2 > 0)) // 2
+ ),
+ (0, 0)
+ ), "constant", constant_values=(0, 0))
+
+
+def reshape4D(data, n=256):
+ return np.pad(data, (
+ (
+ (n-data.shape[0]) // 2,
+ ((n-data.shape[0]) + (data.shape[0] % 2 > 0)) // 2
+ ),
+ (
+ (n-data.shape[1]) // 2,
+ ((n-data.shape[1]) + (data.shape[1] % 2 > 0)) // 2
+ ),
+ (0, 0), (0, 0)
+ ), "constant", constant_values=(0, 0))
+
+
+def get_middle_slices(data, slice_direction):
+ slicer = {"ax": 0, "cor": 1, "sag": 2}
+ all_data_slicer = [slice(None), slice(None), slice(None)]
+ num_slices = data.shape[slicer[slice_direction]]
+ slice_num = int(num_slices / 2)
+ all_data_slicer[slicer[slice_direction]] = slice_num
+ tile = data[tuple(all_data_slicer)]
+
+ # make it a square
+ N = max(tile.shape[:2])
+ tile = reshape3D(tile, N)
+
+ return tile
+
+
+def nearest_square(limit):
+ answer = 0
+ while (answer+1)**2 < limit:
+ answer += 1
+ if (answer ** 2) == limit:
+ return answer
+ else:
+ return answer + 1
+
+
+def create_sprite_from_tiles(tile, out_file=None, as_bytes=False):
+ num_slices = tile.shape[-1]
+ N = nearest_square(num_slices)
+ M = int(np.ceil(num_slices/N))
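+    # e.g. 60 slices give N = nearest_square(60) = 8 and M = ceil(60 / 8) = 8,
+    # i.e. an 8 x 8 grid of square tiles (the trailing cells stay NaN)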
+ # tile is square, so just make a big arr
+ pix = tile.shape[0]
+
+ if len(tile.shape) == 3:
+ mosaic = np.zeros((N*tile.shape[0], M*tile.shape[0]))
+ else:
+ mosaic = np.zeros((N*tile.shape[0], M*tile.shape[0], tile.shape[-2]))
+
+ mosaic[:] = np.nan
+ helper = np.arange(N*M).reshape((N, M))
+
+ for t in range(num_slices):
+ x, y = np.nonzero(helper == t)
+ xmin = x[0] * pix
+ xmax = (x[0] + 1) * pix
+ ymin = y[0] * pix
+ ymax = (y[0] + 1) * pix
+
+ if len(tile.shape) == 3:
+ mosaic[xmin:xmax, ymin:ymax] = tile[:, :, t]
+ else:
+ mosaic[xmin:xmax, ymin:ymax, :] = tile[:, :, :, t]
+
+ if as_bytes:
+ img = mplfig(mosaic, out_file, as_bytes=as_bytes)
+ return dict(img=img, N=N, M=M, pix=pix, num_slices=num_slices)
+
+ if out_file:
+ img = mplfig(mosaic, out_file), N, M, pix, num_slices
+
+ return dict(mosaic=mosaic, N=N, M=M, pix=pix, num_slices=num_slices)
+
+
+def createSprite4D(dwi_file):
+
+ # initialize output dict
+ output = []
+
+ # load the file
+ dwi = load_and_reorient(dwi_file)[:, :, :, 1:]
+
+ # create tiles from center slice on each orientation
+ for orient in ['sag', 'ax', 'cor']:
+ tile = get_middle_slices(dwi, orient)
+
+ # create sprite images for each
+ results = create_sprite_from_tiles(tile, as_bytes=True)
+ results['img_type'] = '4dsprite'
+ results['orientation'] = orient
+ output.append(results)
+
+ return output
+
+
+def createB0_ColorFA_Mask_Sprites(b0_file, colorFA_file, mask_file):
+ colorfa = load_and_reorient(colorFA_file)
+ b0 = load_and_reorient(b0_file)[:, :, :, 0]
+ anat_mask = load_and_reorient(mask_file)
+
+ N = max(*b0.shape[:2])
+
+ # make a b0 sprite
+ b0 = reshape3D(b0, N)
+ _, mask = median_otsu(b0)
+ outb0 = create_sprite_from_tiles(b0, as_bytes=True)
+ outb0['img_type'] = 'brainsprite'
+
+ # make a colorFA sprite, masked by b0
+ Q = reshape4D(colorfa, N)
+ Q[np.logical_not(mask)] = np.nan
+ Q = np.moveaxis(Q, -2, -1)
+ outcolorFA = create_sprite_from_tiles(Q, as_bytes=True)
+ outcolorFA['img_type'] = 'brainsprite'
+
+ # make an anat mask contour sprite
+ outmask = create_sprite_from_tiles(reshape3D(anat_mask, N))
+ img = mplfigcontour(outmask.pop("mosaic"), as_bytes=True)
+ outmask['img'] = img
+
+ return outb0, outcolorFA, outmask
+
+
+def create_report_json(dwi_corrected_file, eddy_rms, eddy_report,
+ color_fa_file, anat_mask_file,
+ outlier_indices,
+ eddy_qc_file,
+ outpath=op.abspath('./report.json')):
+
+ report = {}
+ report['dwi_corrected'] = createSprite4D(dwi_corrected_file)
+
+ b0, colorFA, mask = createB0_ColorFA_Mask_Sprites(dwi_corrected_file,
+ color_fa_file,
+ anat_mask_file)
+ report['b0'] = b0
+ report['colorFA'] = colorFA
+ report['anat_mask'] = mask
+ report['outlier_volumes'] = outlier_indices.tolist()
+
+ with open(eddy_report, 'r') as f:
+ report['eddy_report'] = f.readlines()
+
+ report['eddy_params'] = np.genfromtxt(eddy_rms).tolist()
+ eddy_qc = load_json(eddy_qc_file)
+ report['eddy_quad'] = eddy_qc
+ save_json(outpath, report)
+ return outpath
diff --git a/dmriprep/workflows/_afq/cli.py b/dmriprep/workflows/_afq/cli.py
deleted file mode 100644
index 8b45d7eb..00000000
--- a/dmriprep/workflows/_afq/cli.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""Console script for dmriprep."""
-import os
-import sys
-import warnings
-
-import click
-
-from . import io
-from . import run
-from .data import get_dataset
-
-# Filter warnings that are visible whenever you import another package that
-# was compiled against an older numpy than is installed.
-warnings.filterwarnings("ignore", message="numpy.dtype size changed")
-warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
-
-
-@click.command()
-@click.option('--participant-label',
- help="The label(s) of the participant(s) that should be"
- "analyzed. The label corresponds to"
- "sub- from the BIDS spec (so it does"
- "not include 'sub-'). If this parameter is not provided"
- "all subjects will be analyzed. Multiple participants"
- "can be specified with a space separated list.",
- default=None)
-@click.option('--eddy-niter',
- help="Fixed number of eddy iterations. See "
- "https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/UsersGuide"
- "#A--niter",
- default=5, type=(int))
-@click.option('--slice-outlier-threshold',
- help="Number of allowed outlier slices per volume. "
- "If this is exceeded the volume is dropped from analysis. "
- "If an int is provided, it is treated as number of allowed "
- "outlier slices. If a float between 0 and 1 "
- "(exclusive) is provided, it is treated the fraction of "
- "allowed outlier slices.",
- default=0.02)
-@click.argument('bids_dir',
- )
-@click.argument('output_dir',
- )
-@click.argument('analysis_level',
- type=click.Choice(['participant', 'group']),
- default='participant')
-def main(participant_label, bids_dir, output_dir,
- eddy_niter=5, slice_outlier_threshold=0.02,
- analysis_level="participant"):
- """
- BIDS_DIR: The directory with the input dataset formatted according to
- the BIDS standard.
-
- OUTPUT_DIR: The directory where the output files should be stored.
- If you are running a group level analysis, this folder
- should be prepopulated with the results of
- the participant level analysis.
-
- ANALYSIS_LEVEL: Level of the analysis that will be performed. Multiple
- participant level analyses can be run independently
- (in parallel).
- """
- if analysis_level is not 'participant':
- raise NotImplementedError('The only valid analysis level for dmriprep '
- 'is participant at the moment.')
-
- inputs = io.get_bids_files(participant_label, bids_dir)
-
- for subject_inputs in inputs:
- run.run_dmriprep_pe(**subject_inputs,
- working_dir=os.path.join(output_dir, 'scratch'),
- out_dir=output_dir,
- eddy_niter=eddy_niter,
- slice_outlier_threshold=slice_outlier_threshold)
-
- return 0
-
-@click.command()
-@click.argument('output_dir',
- )
-@click.option('--subject', help="subject id to download (will choose 1 subject if not specified",
- default="sub-NDARBA507GCT")
-@click.option('--study', help="which study to download. Right now we only support the HBN dataset",
- default="HBN")
-def data(output_dir, study="HBN", subject="sub-NDARBA507GCT"):
- """
- Download dwi raw data in BIDS format from public datasets
-
- :param output_dir: A directory to write files to
- :param study: A study name, right now we only support 'HBN'
- :param subject: A subject from the study, starting with 'sub-'
- :return: None
- """
- if not os.path.exists(output_dir):
- os.makedirs(output_dir)
-
- if study.upper() != 'HBN':
- raise NotImplementedError('We only support data downloads from the HBN dataset right now.')
-
- get_dataset(os.path.abspath(output_dir), source=study.upper(), subject_id=subject)
- print('done')
-
-
-@click.command()
-@click.argument('output_dir')
-@click.argument('bucket')
-@click.option('--access_key', help="your AWS access key")
-@click.option('--secret_key', help="your AWS access secret")
-@click.option('--provider', default='s3', help="Cloud storage provider. Only S3 is supported right now.")
-@click.option('--subject', default=None, help="Subject id to upload (optional)")
-def upload(output_dir, bucket, access_key, secret_key, provider='s3', subject=None):
- """
- OUTPUT_DIR: The directory where the output files were stored.
-
- BUCKET: The cloud bucket name to upload data to.
- """
- import boto3
- from dask import compute, delayed
- from glob import glob
- from tqdm.auto import tqdm
-
- output_dir = os.path.abspath(output_dir)
- if not output_dir.endswith('/'):
- output_dir += '/'
-
- if provider == 's3' or provider == 'S3':
- client = boto3.client('s3', aws_access_key_id=access_key, aws_secret_access_key=secret_key)
-
- if subject is not None:
- assert os.path.exists(os.path.join(output_dir, subject)), 'this subject id does not exist!'
- subjects = [subject]
- else:
- subjects = [os.path.split(s)[1] for s in glob(os.path.join(output_dir, 'sub-*'))]
-
- def upload_subject(sub, sub_idx):
- base_dir = os.path.join(output_dir, sub, 'dmriprep')
- for root, dirs, files in os.walk(base_dir):
- if len(files):
- for f in tqdm(files, desc=f"Uploading {sub} {root.split('/')[-1]}", position=sub_idx):
- filepath = os.path.join(root, f)
- key = root.replace(output_dir, '')
- client.upload_file(filepath, bucket, os.path.join(key, f))
-
- uploads = [delayed(upload_subject)(s, idx) for idx, s in enumerate(subjects)]
- _ = list(compute(*uploads, scheduler="threads"))
- else:
- raise NotImplementedError('Only S3 is the only supported provider for data uploads at the moment')
-
-
-if __name__ == "__main__":
- sys.exit(main()) # pragma: no cover
diff --git a/dmriprep/workflows/_afq/data.py b/dmriprep/workflows/_afq/data.py
deleted file mode 100644
index d06767e5..00000000
--- a/dmriprep/workflows/_afq/data.py
+++ /dev/null
@@ -1,1133 +0,0 @@
-"""
-Functions to download example data from public repositories.
-
-"""
-import copy
-import json
-import logging
-import os
-import os.path as op
-import re
-import subprocess
-from pathlib import Path
-
-import pandas as pd
-from dask import compute, delayed
-from dask.diagnostics import ProgressBar
-from tqdm.auto import tqdm
-
-
-mod_logger = logging.getLogger(__name__)
-
-
-def get_dataset(output_dir, source='HBN', subject_id='sub-NDARBA507GCT'):
- if source in ['HBN']:
- get_hbn_data(output_dir, subject_id)
- else:
- raise ValueError('Invalid dataset source')
-
-
-def get_hbn_data(output_dir, subject_id):
- hbn_study = HBN(subjects=subject_id)
- subject = hbn_study.subjects[0]
- subject.download(directory=output_dir)
- # TODO: return a dict of subject ids and folder locations.
- return os.path.join(output_dir, subject_id)
-
-
-def get_s3_client():
- """Return a boto3 s3 client
-
- Returns
- -------
- s3_client : boto3.client('s3')
- """
- import boto3
- from botocore import UNSIGNED
- from botocore.client import Config
-
- # Global s3 client to preserve anonymous config
- s3_client = boto3.client('s3', config=Config(signature_version=UNSIGNED))
- return s3_client
-
-
-def _get_matching_s3_keys(bucket, prefix='', suffix=''):
- """Generate all the matching keys in an S3 bucket.
-
- Parameters
- ----------
- bucket : str
- Name of the S3 bucket
-
- prefix : str, optional
- Only fetch keys that start with this prefix
-
- suffix : str, optional
- Only fetch keys that end with this suffix
-
- Yields
- ------
- key : str
- S3 keys that match the prefix and suffix
- """
- s3 = get_s3_client()
- kwargs = {'Bucket': bucket, "MaxKeys": 1000}
-
- # If the prefix is a single string (not a tuple of strings), we can
- # do the filtering directly in the S3 API.
- if isinstance(prefix, str):
- kwargs['Prefix'] = prefix
-
- while True:
- # The S3 API response is a large blob of metadata.
- # 'Contents' contains information about the listed objects.
- resp = s3.list_objects_v2(**kwargs)
-
- try:
- contents = resp['Contents']
- except KeyError:
- return
-
- for obj in contents:
- key = obj['Key']
- if key.startswith(prefix) and key.endswith(suffix):
- yield key
-
- # The S3 API is paginated, returning up to 1000 keys at a time.
- # Pass the continuation token into the next response, until we
- # reach the final page (when this field is missing).
- try:
- kwargs['ContinuationToken'] = resp['NextContinuationToken']
- except KeyError:
- break
-
-
-def _download_from_s3(fname, bucket, key, overwrite=False):
- """Download object from S3 to local file
-
- Parameters
- ----------
- fname : str
- File path to which to download the object
-
- bucket : str
- S3 bucket name
-
- key : str
- S3 key for the object to download
-
- overwrite : bool, default=False
- If True, overwrite file if it already exists.
- If False, skip download and return
- """
- # Create the directory and file if necessary
- s3 = get_s3_client()
- Path(op.dirname(fname)).mkdir(parents=True, exist_ok=True)
- try:
- Path(fname).touch(exist_ok=overwrite)
-
- # Download the file
- s3.download_file(Bucket=bucket, Key=key, Filename=fname)
- except FileExistsError:
- mod_logger.info(f'File {fname} already exists. Continuing...')
- pass
-
-
-def _recursive_split_ext(path):
- """Recursively split pathname `path` into components.
-
- Parameters
- ----------
- path : path-like object
-
- Returns
- -------
- tuple :
- tuple of path parts. All parts except the first have a beginning
- period.
-
- """
- p0, p1 = op.splitext(path)
- if p1:
- return _recursive_split_ext(p0) + (p1,)
- else:
- return (p0,)
-
-
-def _cumulative_paths(path_parts, add_ext=""):
- """Return all possible cumulative path names from a list of path parts
-
- For example, if `path_parts` = ["a", ".b", ".c", ".d"],
- this function will return ["a", "a.b", "a.b.c", "a.b.c.d"]
-
- Parameters
- ----------
- path_parts: sequence of strings
- The sequence of path substrings. All elements except the first must
- begin with a period.
-
- add_ext: str
- If provided, add this extension to each element of the result
-
- Returns
- -------
- list :
- List of cumulative path names
- """
- try:
- add_ext = add_ext if add_ext[0] == "." else "." + add_ext
- except IndexError:
- pass
-
- return [''.join(path_parts[:i+1]) + add_ext
- for i in range(len(path_parts))]
-
-
-class Study:
- """A dMRI based study with a BIDS compliant directory structure"""
- def __init__(self, study_id, bucket, s3_prefix, subjects=None):
- """Initialize a Study instance
-
- Parameters
- ----------
- study_id : str
- An identifier string for the study
-
- bucket : str
- The S3 bucket that contains the study data
-
- s3_prefix : str
- The S3 prefix common to all of the study objects on S3
-
- subjects : str, sequence(str), int, or None
- If int, retrieve S3 keys for the first `subjects` subjects.
- If "all", retrieve all subjects.
- If str or sequence of strings, retrieve S3 keys for the specified
- subjects. If None, retrieve S3 keys for the first subject.
- """
- if not isinstance(study_id, str):
- raise TypeError("subject_id must be a string.")
-
- if not isinstance(bucket, str):
- raise TypeError("bucket must be a string.")
-
- if not isinstance(s3_prefix, str):
- raise TypeError("s3_prefix must be a string.")
-
- if not (subjects is None or
- isinstance(subjects, int) or
- isinstance(subjects, str) or
- all(isinstance(s, str) for s in subjects)):
- raise TypeError("subjects must be an int, string or a "
- "sequence of strings.")
-
- if isinstance(subjects, int) and subjects < 1:
- raise ValueError("If subjects is an int, it must be "
- "greater than 0.")
-
- self._study_id = study_id
- self._bucket = bucket
- self._s3_prefix = s3_prefix
-
- self._all_subjects = self.list_all_subjects()
- if subjects is None or subjects == 1:
- enter_validation_loop = True
- subjects = [sorted(self._all_subjects.keys())[0]]
- self._n_requested = 1
- elif isinstance(subjects, int) and subjects > 1:
- enter_validation_loop = True
- self._n_requested = subjects
- subjects = sorted(self._all_subjects.keys())[:subjects]
- elif subjects == "all":
- enter_validation_loop = False
- self._n_requested = len(self._all_subjects)
- subjects = list(self._all_subjects.keys())
- elif isinstance(subjects, str):
- enter_validation_loop = False
- self._n_requested = 1
- subjects = [subjects]
- else:
- enter_validation_loop = False
- self._n_requested = len(subjects)
-
- if not set(subjects) <= set(self._all_subjects.keys()):
- raise ValueError(
- f"The following subjects could not be found in the study: "
- f"{set(subjects) - set(self._all_subjects.keys())}"
- )
-
- subs = [
- delayed(self._get_subject)(s) for s in set(subjects)
- ]
-
- print("Retrieving subject S3 keys")
- with ProgressBar():
- subjects = list(compute(*subs, scheduler="threads"))
-
- self._subjects = [s for s in subjects if s.valid]
-
- if enter_validation_loop:
- # index of first uninspected subject
- idx_lo = self._n_requested
- while len(self._subjects) < self._n_requested:
- n_needed = self._n_requested - len(self._subjects)
- if n_needed == 1:
- subjects = [sorted(self._all_subjects.keys())[idx_lo]]
- else:
- subjects = sorted(
- self._all_subjects.keys()
- )[idx_lo:idx_lo + n_needed]
-
- idx_lo += n_needed
-
- subs = [
- delayed(self._get_subject)(s) for s in set(subjects)
- ]
-
- with ProgressBar():
- subjects = list(compute(*subs, scheduler="threads"))
-
- self._subjects += [s for s in subjects if s.valid]
-
- self._n_discarded = 0
- else:
- self._n_discarded = len([s for s in subjects if not s.valid])
-
-
- @property
- def study_id(self):
- """An identifier string for the study"""
- return self._study_id
-
- @property
- def bucket(self):
- """The S3 bucket that contains the study data"""
- return self._bucket
-
- @property
- def s3_prefix(self):
- """The S3 prefix common to all of the study objects on S3"""
- return self._s3_prefix
-
- @property
- def subjects(self):
- """A list of Subject instances for each requested subject"""
- return self._subjects
-
- def __repr__(self):
- return (f"{type(self).__name__}(study_id={self.study_id}, "
- f"bucket={self.bucket}, s3_prefix={self.s3_prefix})")
-
- def _get_subject(self, subject_id):
- """Return a Subject instance from a subject-ID"""
- return Subject(subject_id=subject_id,
- site=self._all_subjects[subject_id],
- study=self)
-
- def list_all_subjects(self):
- """Return a study-specific list of subjects.
-
- Returns
- -------
- dict
- dict with participant_id as keys and site_id as values
- """
- raise NotImplementedError
-
- def filter_keys(self, subject):
- """Study-specific S3 key filtering
-
- Parameters
- ----------
- subject : dmriprep.data.Subject
- subject instance
- """
- pass
-
- def postprocess(self, subject):
- """Study-specific postprocessing steps
-
- Parameters
- ----------
- subject : dmriprep.data.Subject
- subject instance
- """
- pass
-
- def download(self, directory, include_site=False, overwrite=False,
- pbar=True):
- """Download files for each subject in the study
-
- Parameters
- ----------
- directory : str
- Directory to which to download subject files
-
- include_site : bool, default=False
- If True, include the site-ID in the download path
-
- overwrite : bool, default=False
- If True, overwrite files for each subject
-
- pbar : bool, default=True
- If True, include progress bar
-
- See Also
- --------
- dmriprep.data.Subject.download()
- """
- results = [delayed(sub.download)(
- directory=directory,
- include_site=include_site,
- overwrite=overwrite,
- pbar=pbar,
- pbar_idx=idx,
- ) for idx, sub in enumerate(self.subjects)]
-
- compute(*results, scheduler="threads")
-
-
-class S3BidsStudy(Study):
- """
-
- """
- def __init__(self, study_id, bucket, s3_prefix=None,
- subjects=None):
- """
- Initialize a study which is organized as BIDS compliant S3 bucket, or a
- sub-path of this bucket.
-
- Parameters
- ----------
- study_id : str An identifier string for the study
-
- bucket : str The S3 bucket that contains the study data
-
- s3_prefix : str, optional The S3 prefix common to all of the study
- objects on S3. Defaults to the root of the bucket.
-
- subjects : str, sequence(str), int, or None
- If int, retrieve S3 keys for the first `subjects` subjects.
- If str or sequence of strings, retrieve S3 keys for the specified
- subjects. If None, retrieve S3 keys for the first subject. If 'all',
- use all of the subjects in the study.
- """
- if s3_prefix is None:
- s3_prefix = ""
- super().__init__(
- study_id=study_id,
- bucket=bucket,
- s3_prefix=s3_prefix,
- subjects=subjects)
-
- def list_all_subjects(self):
- """
- Find the identifiers of all subjects
- """
- # XXX Ariel will figure this out.
-
-
-class HBN(Study):
- """The HBN study
-
- See Also
- --------
- dmriprep.data.Study
- """
- def __init__(self, subjects=None):
- """Initialize the HBN instance
-
- Parameters
- ----------
- subjects : str, sequence(str), int, or None
- If int, retrieve S3 keys for the first `subjects` subjects.
- If str or sequence of strings, retrieve S3 keys for the specified
- subjects. If None, retrieve S3 keys for the first subject.
- """
- super().__init__(
- study_id="HBN",
- bucket="fcp-indi",
- s3_prefix="data/Projects/HBN/MRI",
- subjects=subjects
- )
-
- def list_all_subjects(self):
- """Return dict of HBN subjects
-
- Retrieve subjects from the participants.tsv files at each site
-
- Returns
- -------
- dict
- dict with participant_id as keys and site_id as values
- """
- def get_site_tsv_keys(site_id):
- pre = 'data/Projects/HBN/MRI/'
- raw = pre + f'{site_id}/participants.tsv'
- deriv = pre + f'{site_id}/derivatives/participants.tsv'
- return {'raw': raw, 'deriv': deriv}
-
- sites = ['Site-CBIC', 'Site-RU', 'Site-SI']
- tsv_keys = {site: get_site_tsv_keys(site) for site in sites}
-
- s3 = get_s3_client()
-
- def get_subs_from_tsv_key(s3_key):
- response = s3.get_object(
- Bucket=self.bucket,
- Key=s3_key
- )
-
- return set(pd.read_csv(
- response.get('Body')
- ).participant_id.values)
-
- subjects = {}
- for site, s3_keys in tsv_keys.items():
- site_subs = {k: get_subs_from_tsv_key(v)
- for k, v in s3_keys.items()}
- subjects[site] = site_subs['raw'] & site_subs['deriv']
-
- all_subjects = {}
- for site, subs in subjects.items():
- for s in subs:
- all_subjects[s] = site
-
- return all_subjects
-
- def filter_keys(self, subject):
- """Filter S3 keys based on HBN specific vagaries
-
- HBN Site-CBIC has multiple anatomy folders due to
- motion correction software at the scanner level.
-        If subject.site == "Site-CBIC", choose only the
-        anatomy files in the T1w_VNavNorm directories.
-
- Parameters
- ----------
- subject : dmriprep.data.Subject
- subject instance
- """
- if subject.site == "Site-CBIC":
- t1w_keys = subject.s3_keys['t1w']
- freesurfer_keys = subject.s3_keys['freesurfer']
- correct_dir = "T1w_VNavNorm"
-
- subject._s3_keys['t1w'] = list(filter(
- lambda x: correct_dir in x,
- t1w_keys
- ))
-
- subject._s3_keys['freesurfer'] = list(filter(
- lambda x: correct_dir in x,
- freesurfer_keys
- ))
-
- def postprocess(self, subject):
- """Move the T1 file back into the freesurfer directory.
-
- This step is specific to the HBN dataset where the T1 files
- are outside of the derivatives/sub-XXX/freesurfer directory,
- due to defacing protocols.
-
- Parameters
- ----------
- subject : dmriprep.data.Subject
- subject instance
- """
- for sess in subject.files.keys():
- t1_file = subject.files[sess]['t1w'][0]
- freesurfer_path = op.join(op.dirname(t1_file), 'freesurfer')
-
- convert_cmd = 'mri_convert {in_:s} {out_:s}'.format(
- in_=t1_file, out_=op.join(freesurfer_path, 'mri', 'orig.mgz')
- )
-
- fnull = open(os.devnull, 'w')
- subprocess.call(convert_cmd.split(),
- stdout=fnull,
- stderr=subprocess.STDOUT)
-
- # if the site is CBIC, then the freesurfer directory has an additional level.
-        # move that level up by 1 (i.e. remove the T1w_VNavNorm folder)
- if subject.site == 'Site-CBIC':
- newpath = freesurfer_path.replace('T1w_VNavNorm/', '')
- move_cmd = 'mv {oldpath} {newpath}'.format(oldpath=freesurfer_path, newpath=newpath)
- fnull1 = open(os.devnull, 'w')
- subprocess.call(move_cmd.split(),
- stdout=fnull1,
- stderr=subprocess.STDOUT)
-
- # now check that the AP/PA files are named correctly
-        # e.g. it should look like "sub-{id}_dir-{dir}_acq-dwi_epi.nii.gz"
-        # but sometimes it looks like "sub-{id}_acq-dwi_run-01_epi.nii.gz",
-        # which omits the direction; the direction should be in the filename.
-
-
-class Subject:
- """A single dMRI study subject"""
- def __init__(self, subject_id, study, site=None):
- """Initialize a Subject instance
-
- Parameters
- ----------
- subject_id : str
- Subject-ID for this subject
-
- study : dmriprep.data.Study
- The Study for which this subject was a participant
-
- site : str, optional
- Site-ID for the site from which this subject's data was collected
- """
- if not isinstance(subject_id, str):
- raise TypeError("subject_id must be a string.")
-
- if not isinstance(study, Study):
- raise TypeError("study must be an instance of Study.")
-
- self._subject_id = subject_id
- self._study = study
- self._site = site
- self._valid = False
- self._organize_s3_keys()
- if self.valid:
- self.study.filter_keys(self)
- self._s3_keys = self._determine_directions(self._s3_keys)
- self._files = None
-
- @property
- def subject_id(self):
- """An identifier string for the subject"""
- return self._subject_id
-
- @property
- def study(self):
- """The study in which this subject participated"""
- return self._study
-
- @property
- def site(self):
- """The site at which this subject was a participant"""
- return self._site
-
- @property
- def valid(self):
- """If True, this subject has all the required MRI files"""
- return self._valid
-
- @property
- def s3_keys(self):
- """S3 keys for this subject's dMRI data
-
- The S3 keys are stored in a dict with following keys:
- - "dwi": Nifti file for DWI image
- - "bval": Bval file
- - "bvec": Bvec file
- - "epi_ap": Nifti EPI image (anterior to posterior)
- - "epi_pa": Nifti EPI image (posterior to anterior)
- - "freesurfer": Freesurfer structural files
- - "t1w": T1W Nifti file
- """
- return self._s3_keys
-
- @property
- def files(self):
- """Local files for this subject's dMRI data
-
- Before the call to subject.download(), this is None.
- Afterward, the files are stored in a dict with keys
- for each imaging session. The value for each session
- is itself a dict with the following keys:
- - "dwi": Nifti file for DWI image
- - "bval": Bval file
- - "bvec": Bvec file
- - "epi_ap": Nifti EPI image (anterior to posterior)
- - "epi_pa": Nifti EPI image (posterior to anterior)
- - "freesurfer": Freesurfer structural files
- - "t1w": T1W Nifti file
- """
- return self._files
-
- def __repr__(self):
- return (f"{type(self).__name__}(subject_id={self.subject_id}, "
- f"study_id={self.study.study_id}, site={self.site}, "
- f"valid={self.valid})")
-
- def _list_s3_keys(self):
- """Get all required S3 keys for this subject
-
- Returns
- -------
- s3_keys : dict
- S3 keys organized into "raw" and "deriv" lists
- """
- prefixes = {
- 'raw': '/'.join([self.study.s3_prefix,
- self.site,
- self.subject_id]),
- 'deriv': '/'.join([self.study.s3_prefix,
- self.site,
- 'derivatives',
- self.subject_id]),
- }
-
- s3_keys = {
- rd: list(_get_matching_s3_keys(
- bucket=self.study.bucket,
- prefix=prefix,
- )) for rd, prefix in prefixes.items()
- }
-
- return s3_keys
-
- def _organize_s3_keys(self):
- """Organize S3 keys into a dict
-
- The dict keys are "dwi," "bvec," "bval," "epi_nii," "epi_json,"
- "freesurfer," "t1w" and the values are the associated S3 object keys
- """
- # Retrieve and unpack the s3_keys
- s3_keys = self._list_s3_keys()
- dwi_keys = [k for k in s3_keys['raw'] if '/dwi/' in k]
- fmap_keys = [k for k in s3_keys['raw'] if '/fmap/' in k]
- deriv_keys = s3_keys['deriv']
- all_json_keys = [k for k in s3_keys['raw'] if k.endswith('.json')]
-
- # Get the dwi files, bvec files, and bval files
- dwi = [f for f in dwi_keys
- if f.endswith('.nii.gz') and 'TRACEW' not in f]
- bvec = [f for f in dwi_keys if f.endswith('.bvec')]
- bval = [f for f in dwi_keys if f.endswith('.bval')]
- epi_nii = [f for f in fmap_keys if f.endswith('epi.nii.gz')
- and 'fMRI' not in f]
- epi_json = [f for f in fmap_keys if f.endswith('epi.json')
- and 'fMRI' not in f]
- t1w = [f for f in deriv_keys if f.endswith('/T1w.nii.gz')]
- freesurfer = [f for f in deriv_keys
- if '/freesurfer/' in f]
-
- json_keys = []
- for file_list in [dwi, bvec, bval, epi_nii, t1w]:
- for f in file_list:
- potential_keys = _cumulative_paths(_recursive_split_ext(f),
- add_ext="json")
- json_keys += [k for k in potential_keys if k in all_json_keys]
-
- # Use truthiness of non-empty lists to verify that all
- # of the required prereq files exist in `s3_keys`
- # TODO: If some of the files are missing, look farther up in the directory
- # TODO: structure to see if there are files we should inherit
- if all([dwi, bval, bvec, epi_nii, epi_json, t1w, freesurfer]):
- self._valid = True
- self._s3_keys = dict(
- dwi=dwi,
- bvec=bvec,
- bval=bval,
- json=json_keys,
- epi_nii=epi_nii,
- epi_json=epi_json,
- freesurfer=freesurfer,
- t1w=t1w,
- )
- else:
- self._valid = False
- self._s3_keys = None
-
- def download(self, directory, include_site=False,
- overwrite=False, pbar=True, pbar_idx=0):
- """Download files from S3
-
- Parameters
- ----------
- directory : str
- Directory to which to download subject files
-
- include_site : bool, default=False
- If True, include the site-ID in the download path
-
- overwrite : bool, default=False
- If True, overwrite files for each subject
-
- pbar : bool, default=True
- If True, include download progress bar
-
- pbar_idx : int, default=0
- Progress bar index for multithreaded progress bars
- """
- if not self.valid:
- mod_logger.warning(
- f"Subject {self.subject_id} is not a valid subject. "
- f"Skipping download."
- )
- return
-
- if include_site:
- directory = op.join(directory, self.site)
-
- files = {
- k: [op.abspath(op.join(
- directory, p.split('/' + self.site + '/')[-1]
- )) for p in v] for k, v in self.s3_keys.items()
- }
-
- # Generate list of (key, file) tuples
- key_file_pairs = []
-
- for ftype in self.s3_keys.keys():
- s3_keys = self.s3_keys[ftype]
- if isinstance(s3_keys, str):
- key_file_pairs.append((s3_keys, files[ftype]))
- elif all(isinstance(x, str) for x in s3_keys):
- for key, fname in zip(s3_keys, files[ftype]):
- key_file_pairs.append((key, fname))
- else:
- raise TypeError(
- f"This subject {self.subject_id} has {ftype} S3 keys that "
- f"are neither strings nor a sequence of strings. The "
- f"offending S3 keys are {s3_keys!s}"
- )
-
- try:
- files_by_session = self._separate_sessions(files)
- self._files = files_by_session
- except NotImplementedError:
- self._valid = False
- mod_logger.warning(
- f"Subject {self.subject_id} has inconsistent session numbers."
- f"Skipping download."
- )
- return
-
- if not files_by_session.keys():
- # There were no valid sessions
- self._valid = False
- mod_logger.warning(
- f"Subject {self.subject_id} is not a valid subject. "
- f"Skipping download."
- )
- return
-
- # Now iterate through the list and download each item
- if pbar:
- progress = tqdm(desc=f"Download {self.subject_id}",
- position=pbar_idx,
- total=len(key_file_pairs) + 1)
-
- for (key, fname) in key_file_pairs:
- _download_from_s3(fname=fname,
- bucket=self.study.bucket,
- key=key,
- overwrite=overwrite)
-
- if pbar:
- progress.update()
-
- if pbar:
- progress.set_description(f"Postproc {self.subject_id}")
-
- self.study.postprocess(subject=self)
-
- if pbar:
- progress.update()
- progress.close()
-
- def _determine_directions(self,
- input_files,
- input_type='s3',
- metadata_source='json',
- json_key='PhaseEncodingDirection',
- ap_value='j-', pa_value='j'):
- """Determine direction ['AP', 'PA'] of single subject's EPI nifty files
-
- Use either metadata in associated json file or filename
-
- Parameters
- ----------
- input_files : dict
- The local input files for the subject
-
- input_type : "s3" or "local", default="s3"
- The location of the input files, local or on S3
-
- metadata_source : "json" or "filename", default="json"
- If "filename," look for the direction in the filename,
- otherwise, use the json file and the other parameters
-
- json_key : string, default="PhaseEncodingDirection"
- The key that stores the direction information
-
- ap_value : string, default="j-"
- Metadata value to associate with dir-AP
-
- pa_value : string, default="j"
- Metadata value to associate with dir-PA
-
- Returns
- -------
- dict
- Filenames or S3 keys where all fields match self.files or
-            self.s3_keys, except that the "epi_nii" and "epi_json" keys
-            have been replaced with "epi_ap" and "epi_pa."
- """
- if metadata_source not in ['filename', 'json']:
- raise ValueError('metadata_source must be "filename" or "json".')
-
- if input_type not in ['s3', 'local']:
- raise ValueError('input_type must be "local" or "s3".')
-
- epi_files = input_files['epi_nii']
- json_files = input_files['epi_json']
- if metadata_source == 'filename':
- ap_files = [f for f in epi_files if 'dir-AP' in f]
- pa_files = [f for f in epi_files if 'dir-PA' in f]
- else:
-            # Confirm that each NIfTI file has a corresponding json file.
- required_json = set([f.replace('.nii.gz', '.json') for f in epi_files])
- if set(json_files) != required_json:
- self._valid = False
- mod_logger.warning(
- f'Subject {self.subject_id} does not have json files '
- f'corresponding to its fmap NIFTI files. Failed to '
- f'find the following expected files: '
- f'{required_json - set(json_files)}. Subject deemed '
- f'invalid.'
- )
- return input_files
-
- def get_json(json_file):
- if input_type == 'local':
- with open(json_file, 'r') as fp:
- meta = json.load(fp)
- else:
- s3 = get_s3_client()
- response = s3.get_object(
- Bucket=self.study.bucket,
- Key=json_file,
- )
- meta = json.loads(response.get('Body').read())
-
- return meta
-
- ap_files = []
- pa_files = []
- for jfile in json_files:
- metadata = get_json(jfile)
-
- direction = metadata.get(json_key)
- if direction == ap_value:
- if 'dir-PA' in jfile:
- mod_logger.warning(
- 'The key {key:s}={val:s} does not match the direction '
- 'suggested by the filename {fn:s}'.format(
- key=json_key, val=direction, fn=jfile
- )
- )
- ap_files.append(jfile.replace('.json', '.nii.gz'))
- elif direction == pa_value:
- if 'dir-AP' in jfile:
- mod_logger.warning(
- 'The key {key:s}={val:s} does not match the direction '
- 'suggested by the filename {fn:s}'.format(
- key=json_key, val=direction, fn=jfile
- )
- )
- pa_files.append(jfile.replace('.json', '.nii.gz'))
- elif direction is None:
- mod_logger.warning(
- 'The key {key:s} does not exist in file {jfile:s}. '
- 'Falling back on filename to determine directionality.'
- '\n\n'.format(key=json_key, jfile=jfile)
- )
- if 'dir-AP' in jfile:
- ap_files.append(jfile.replace('.json', '.nii.gz'))
- elif 'dir-PA' in jfile:
- pa_files.append(jfile.replace('.json', '.nii.gz'))
- else:
- self._valid = False
- mod_logger.warning(
- f'Subject {self.subject_id} lacks the expected '
- f'{json_key} key in file {jfile} and the '
- f'directionality could not be inferred from the '
- f'file name. Setting subject validity to False.'
- )
- return input_files
- else:
- mod_logger.warning(
- 'The metadata in file {jfile:s} does not match the dir-PA '
- 'or dir-AP values that you provided. {key:s} = {val:s}. '
- 'Falling back on filename to determine directionality.\n\n'
- ''.format(jfile=jfile, key=json_key, val=direction)
- )
- if 'dir-AP' in jfile:
- ap_files.append(jfile.replace('.json', '.nii.gz'))
- elif 'dir-PA' in jfile:
- pa_files.append(jfile.replace('.json', '.nii.gz'))
- else:
- self._valid = False
- mod_logger.warning(
- 'The metadata for key {key:s} in file {jfile:s} does '
- 'not match the dir-PA or dir-AP values that you '
- 'provided. {key:s} = {val:s}. And the directionality '
- 'could not be inferred from the file name.'.format(
- key=json_key,
- jfile=jfile,
- val=direction,
- ))
- return input_files
-
- files = copy.deepcopy(input_files)
- del files['epi_nii']
- del files['epi_json']
- files['epi_ap'] = ap_files
- files['epi_pa'] = pa_files
-
- return files
-
- def _separate_sessions(self, input_files, multiples_policy='sessions',
- assign_empty_sessions=True):
- """Separate input file register into different sessions
-
- Parameters
- ----------
- input_files : dict
- Dictionary of subject files
-
- multiples_policy : "sessions" or "concatenate"
- Flag that dictates how to handle multiple files in a session.
- If "sessions," treat multiples as different sessions and assign
- to new session IDs. If "concatenate," concatenate multiples into
- a single session
-
- assign_empty_sessions : bool
- If True, assign session IDs to files without a session ID in
- their path
-
- Returns
- -------
- dict of dicts
- Dict of Dicts of file names
- """
- if multiples_policy not in ['sessions', 'concatenate']:
- raise ValueError('`multiples_policy` must be either "sessions" or '
- '"concatenate"')
-
-        # Take only the first of the T1w NIfTI files
- if len(input_files['t1w']) > 1:
- mod_logger.warning(
- f"Found more than one T1W file for subject {self.subject_id} "
- f"at site {self.site}. Discarding the others.\n\n"
- )
-
-        t1w = input_files['t1w'][:1]
-
- # Take only the first freesurfer directory
- freesurfer_dirs = {
- f.split('/freesurfer/')[0] for f in input_files['freesurfer']
- }
-
- if len(freesurfer_dirs) > 1:
- mod_logger.warning(
- f"Found more than one freesurfer directory for subject "
- f"{self.subject_id} at site {self.site}. Discarding the "
- f"others.\n\n"
- )
-
- freesurfer_dir = freesurfer_dirs.pop()
- freesurfer = [f for f in input_files['freesurfer']
- if f.startswith(freesurfer_dir)]
-
- # Organize the files by session ID
- def get_sess_id(filename, fallback='null'):
- # Retrieve the session ID from a filename
- match = re.search('/ses-[0-9a-zA-Z]*/', filename)
- if match is not None:
- return match.group().strip('/')
- else:
- return fallback
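-        # For example (illustrative path), get_sess_id(
-        #     '.../sub-X/ses-01/dwi/sub-X_ses-01_dwi.nii.gz') returns 'ses-01';
-        # files without a '/ses-*/' level in their path return the fallback.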
-
- ftypes = ['dwi', 'bvec', 'bval', 'epi_ap', 'epi_pa']
-
- sess_ids = {ft: {get_sess_id(fn) for fn in input_files[ft]}
- for ft in ftypes}
-
- if not all([s == list(sess_ids.values())[0]
- for s in sess_ids.values()]):
- mod_logger.warning(
- "Session numbers are inconsistent for subject {sub:s} at site "
- "{site:s}. Ses-IDs: {sess_ids!s}.\n"
- "Files: {files!s}\n\n".format(
- sub=self.subject_id,
- site=self.site,
- sess_ids=sess_ids,
- files={
- k: (v) for k, v in input_files.items()
- if k in ['dwi', 'bvec', 'bval', 'epi_ap', 'epi_pa']
- },
- )
- )
- return
-
- # We just confirmed that all of the session ID sets are equal so we can
- # pop one set of session IDs off of `sess_ids` and use it from now on
- sess_ids = sess_ids[ftypes[0]]
-
- # Collect files by session ID and then file type
- files_by_session = {
- sess: {
- ft: [
- f for f in input_files[ft] if get_sess_id(f) == sess
- ]
- for ft in ftypes
- }
- for sess in sess_ids
- }
-
- output_files = {}
-
- # Loop over each session ID
- for session, files in files_by_session.items():
- # Confirm that the subject has an equal number of each type of file
- n_files = {k: len(v) for k, v in files.items()
- if k in ['dwi', 'bvec', 'bval', 'epi_ap', 'epi_pa']}
-
- if len(set(n_files.values())) != 1:
- mod_logger.warning(
- f"The number of files is inconsistent for subject "
- f"{self.subject_id} at site {self.site}. The file numbers "
- f"are {n_files!s}\n\n"
- )
-            elif set(n_files.values()) == {1}:
- # There is only one set of files in this session.
- # Append to output.
- if session == 'null':
- output_session = 'ses-01' if assign_empty_sessions else None
- else:
- output_session = session
-
- output_files[output_session] = dict(
-                    dwi=files['dwi'],
-                    bvec=files['bvec'],
-                    bval=files['bval'],
-                    epi_ap=files['epi_ap'],
-                    epi_pa=files['epi_pa'],
- json=input_files['json'],
- t1w=t1w,
- freesurfer=freesurfer,
- )
- else:
- # There are multiple copies of files for this one session ID.
- if multiples_policy == 'concatenate':
- # The multiple copies represent one session and should be
- # concatenated
- raise NotImplementedError("Concatenation of multiples not "
- "yet implemented.")
- else:
- # The multiple copies represent multiple sessions and
- # should be further subdivided into sessions
- raise NotImplementedError("Session subdivision not yet "
- "implemented.")
-
- return output_files
diff --git a/dmriprep/workflows/_afq/dmriprep.py b/dmriprep/workflows/_afq/dmriprep.py
deleted file mode 100644
index 47911d9e..00000000
--- a/dmriprep/workflows/_afq/dmriprep.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""Main module."""
-
-import logging
-import os
-import os.path as op
-import subprocess
-
-from .run import run_dmriprep
-
-
-mod_logger = logging.getLogger(__name__)
diff --git a/dmriprep/workflows/_afq/io.py b/dmriprep/workflows/_afq/io.py
deleted file mode 100644
index a1543873..00000000
--- a/dmriprep/workflows/_afq/io.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""
-BIDS-functions to return inputs for the run.py functions.
-
-"""
-import os
-import os.path as op
-from glob import glob
-
-import bids
-
-
-def get_bids_subject_input_files(subject_id, bids_input_directory):
- """
- Function to return the needed files for dmriprep based on subject id and a bids directory.
-
- :param subject_id: string
-    :param bids_input_directory: string, path to the BIDS directory
- :return: dict of inputs
- """
- layout = bids.layout.BIDSLayout(bids_input_directory, validate=False)
- subjects = layout.get_subjects()
- assert subject_id in subjects, "subject {} is not in the bids folder".format(subject_id)
-
- ap_file = layout.get(subject=subject_id,
- datatype='fmap',
- suffix='epi',
- dir='AP',
- extensions=['.nii', '.nii.gz'])
- assert len(ap_file) == 1, 'found {} ap fieldmap files and we need just 1'.format(len(ap_file))
-
- pa_file = layout.get(subject=subject_id,
- datatype='fmap',
- suffix='epi',
- dir='PA',
- extensions=['.nii', '.nii.gz'])
- assert len(pa_file) == 1, 'found {} pa fieldmap files and we need just 1'.format(len(pa_file))
-
- dwi_files = layout.get(subject=subject_id, datatype='dwi', suffix='dwi')
- valid_dwi_files = []
-
- for d in dwi_files:
- if d.path.startswith(op.abspath(op.join(bids_input_directory, 'sub-' + subject_id))):
- valid_dwi_files.append(d.path)
-
-    dwi_file = [d for d in valid_dwi_files if d.endswith('.nii.gz') and "TRACE" not in d]
- assert len(dwi_file) == 1, 'found {} dwi files and we need just 1'.format(len(dwi_file))
-
- bval_file = [d.path for d in dwi_files if d.filename.endswith('.bval')]
- assert len(bval_file) == 1, 'found {} bval files and we need just 1'.format(len(bval_file))
-
- bvec_file = [d.path for d in dwi_files if d.filename.endswith('.bvec')]
- assert len(bvec_file) == 1, 'found {} bvec files and we need just 1'.format(len(bvec_file))
-
- subjects_dir = op.join(bids_input_directory, 'derivatives', 'sub-'+subject_id)
-
- if not op.exists(op.join(subjects_dir, 'freesurfer')):
-        raise NotImplementedError('We have not yet implemented a version of dmriprep that runs freesurfer for you. '
-                                  'Please run freesurfer and try again.'
-                                  )
-
- outputs = dict(subject_id="sub-"+subject_id,
- dwi_file=dwi_file[0],
- dwi_file_AP=ap_file[0].path,
- dwi_file_PA=pa_file[0].path,
- bvec_file=bvec_file[0],
- bval_file=bval_file[0],
- subjects_dir=op.abspath(subjects_dir))
- return outputs
-
-
-def get_bids_files(subject_id, bids_input_directory):
- """
-    Function to get all BIDS input files for an optional subject id and BIDS directory. If subject_id is empty,
-    all subjects are used.
-    :param subject_id: string, subject id (may be empty to select all subjects)
-    :param bids_input_directory: string, path to the BIDS directory
-    :return: list of input-file dicts, one per subject
- """
- if not subject_id:
- subjects = [s.split("/")[-1].replace("sub-", "") for s in glob(os.path.join(bids_input_directory, "sub-*"))]
- assert len(subjects), "No subject files found in bids directory"
- return [get_bids_subject_input_files(sub, bids_input_directory) for sub in subjects]
- else:
- return [get_bids_subject_input_files(subject_id, bids_input_directory)]
diff --git a/dmriprep/workflows/_afq/qc.py b/dmriprep/workflows/_afq/qc.py
deleted file mode 100644
index b6663f22..00000000
--- a/dmriprep/workflows/_afq/qc.py
+++ /dev/null
@@ -1,238 +0,0 @@
-import base64
-import os.path as op
-from io import BytesIO
-
-import matplotlib
-matplotlib.use('agg')
-import matplotlib.pyplot as plt
-import nibabel as nib
-import numpy as np
-
-from dipy.segment.mask import median_otsu
-from nipype.utils.filemanip import save_json, load_json
-
-
-def reorient_array(data, aff):
- # rearrange the matrix to RAS orientation
- orientation = nib.orientations.io_orientation(aff)
- data_RAS = nib.orientations.apply_orientation(data, orientation)
- # In RAS
- return nib.orientations.apply_orientation(
- data_RAS,
- nib.orientations.axcodes2ornt("IPL")
- )
-
-
-def mplfig(data, outfile=None, as_bytes=False):
- fig = plt.figure(frameon=False, dpi=data.shape[0])
- fig.set_size_inches(float(data.shape[1])/data.shape[0], 1)
- ax = plt.Axes(fig, [0., 0., 1., 1.])
- ax.set_axis_off()
- fig.add_axes(ax)
- ax.imshow(data, aspect=1, cmap=plt.cm.Greys_r) # previous aspect="normal"
- if outfile:
- fig.savefig(outfile, dpi=data.shape[0], transparent=True)
- plt.close()
- return outfile
- if as_bytes:
- IObytes = BytesIO()
- plt.savefig(IObytes, format='png', dpi=data.shape[0], transparent=True)
- IObytes.seek(0)
- base64_jpgData = base64.b64encode(IObytes.read())
- return base64_jpgData.decode("ascii")
-
-
-def mplfigcontour(data, outfile=None, as_bytes=False):
- fig = plt.figure(frameon=False)
- fig.set_size_inches(float(data.shape[1])/data.shape[0], 1)
- ax = plt.Axes(fig, [0., 0., 1., 1.])
- ax.set_axis_off()
- fig.add_axes(ax)
-
- bg = np.zeros(data.shape)
- bg[:] = np.nan
- ax.imshow(bg, aspect=1, cmap=plt.cm.Greys_r) # used to be aspect="normal"
- ax.contour(data, colors="red", linewidths=0.1)
- if outfile:
- fig.savefig(outfile, dpi=data.shape[0], transparent=True)
- plt.close()
- return outfile
- if as_bytes:
- IObytes = BytesIO()
- plt.savefig(IObytes, format='png', dpi=data.shape[0], transparent=True)
- IObytes.seek(0)
- base64_jpgData = base64.b64encode(IObytes.read())
- return base64_jpgData.decode("ascii")
-
-
-def load_and_reorient(filename):
- img = nib.load(filename)
- data, aff = img.get_data(), img.affine
- data = reorient_array(data, aff)
- return data
-
-
-def reshape3D(data, n=256):
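-    # Zero-pad the first two axes out to n x n, keeping the data roughly centred.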
- return np.pad(data, (
- (
- (n-data.shape[0]) // 2,
- ((n-data.shape[0]) + (data.shape[0] % 2 > 0)) // 2
- ),
- (
- (n-data.shape[1]) // 2,
- ((n-data.shape[1]) + (data.shape[1] % 2 > 0)) // 2
- ),
- (0, 0)
- ), "constant", constant_values=(0, 0))
-
-
-def reshape4D(data, n=256):
- return np.pad(data, (
- (
- (n-data.shape[0]) // 2,
- ((n-data.shape[0]) + (data.shape[0] % 2 > 0)) // 2
- ),
- (
- (n-data.shape[1]) // 2,
- ((n-data.shape[1]) + (data.shape[1] % 2 > 0)) // 2
- ),
- (0, 0), (0, 0)
- ), "constant", constant_values=(0, 0))
-
-
-def get_middle_slices(data, slice_direction):
- slicer = {"ax": 0, "cor": 1, "sag": 2}
- all_data_slicer = [slice(None), slice(None), slice(None)]
- num_slices = data.shape[slicer[slice_direction]]
- slice_num = int(num_slices / 2)
- all_data_slicer[slicer[slice_direction]] = slice_num
- tile = data[tuple(all_data_slicer)]
-
- # make it a square
- N = max(tile.shape[:2])
- tile = reshape3D(tile, N)
-
- return tile
-
-
-def nearest_square(limit):
- answer = 0
- while (answer+1)**2 < limit:
- answer += 1
- if (answer ** 2) == limit:
- return answer
- else:
- return answer + 1
-
-
-def create_sprite_from_tiles(tile, out_file=None, as_bytes=False):
- num_slices = tile.shape[-1]
- N = nearest_square(num_slices)
- M = int(np.ceil(num_slices/N))
- # tile is square, so just make a big arr
- pix = tile.shape[0]
-
- if len(tile.shape) == 3:
- mosaic = np.zeros((N*tile.shape[0], M*tile.shape[0]))
- else:
- mosaic = np.zeros((N*tile.shape[0], M*tile.shape[0], tile.shape[-2]))
-
- mosaic[:] = np.nan
- helper = np.arange(N*M).reshape((N, M))
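-    # helper maps each slice index t to its (row, col) cell in the mosaic,
-    # filling row by row; leftover cells (when N*M > num_slices) stay NaN.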
-
- for t in range(num_slices):
- x, y = np.nonzero(helper == t)
- xmin = x[0] * pix
- xmax = (x[0] + 1) * pix
- ymin = y[0] * pix
- ymax = (y[0] + 1) * pix
-
- if len(tile.shape) == 3:
- mosaic[xmin:xmax, ymin:ymax] = tile[:, :, t]
- else:
- mosaic[xmin:xmax, ymin:ymax, :] = tile[:, :, :, t]
-
- if as_bytes:
- img = mplfig(mosaic, out_file, as_bytes=as_bytes)
- return dict(img=img, N=N, M=M, pix=pix, num_slices=num_slices)
-
- if out_file:
-        img = mplfig(mosaic, out_file)
-
- return dict(mosaic=mosaic, N=N, M=M, pix=pix, num_slices=num_slices)
-
-
-def createSprite4D(dwi_file):
-
- # initialize output dict
- output = []
-
- # load the file
- dwi = load_and_reorient(dwi_file)[:, :, :, 1:]
-
- # create tiles from center slice on each orientation
- for orient in ['sag', 'ax', 'cor']:
- tile = get_middle_slices(dwi, orient)
-
- # create sprite images for each
- results = create_sprite_from_tiles(tile, as_bytes=True)
- results['img_type'] = '4dsprite'
- results['orientation'] = orient
- output.append(results)
-
- return output
-
-
-def createB0_ColorFA_Mask_Sprites(b0_file, colorFA_file, mask_file):
- colorfa = load_and_reorient(colorFA_file)
- b0 = load_and_reorient(b0_file)[:, :, :, 0]
- anat_mask = load_and_reorient(mask_file)
-
- N = max(*b0.shape[:2])
-
- # make a b0 sprite
- b0 = reshape3D(b0, N)
- _, mask = median_otsu(b0)
- outb0 = create_sprite_from_tiles(b0, as_bytes=True)
- outb0['img_type'] = 'brainsprite'
-
- # make a colorFA sprite, masked by b0
- Q = reshape4D(colorfa, N)
- Q[np.logical_not(mask)] = np.nan
- Q = np.moveaxis(Q, -2, -1)
- outcolorFA = create_sprite_from_tiles(Q, as_bytes=True)
- outcolorFA['img_type'] = 'brainsprite'
-
- # make an anat mask contour sprite
- outmask = create_sprite_from_tiles(reshape3D(anat_mask, N))
- img = mplfigcontour(outmask.pop("mosaic"), as_bytes=True)
- outmask['img'] = img
-
- return outb0, outcolorFA, outmask
-
-
-def create_report_json(dwi_corrected_file, eddy_rms, eddy_report,
- color_fa_file, anat_mask_file,
- outlier_indices,
- eddy_qc_file,
- outpath=op.abspath('./report.json')):
-
- report = {}
- report['dwi_corrected'] = createSprite4D(dwi_corrected_file)
-
- b0, colorFA, mask = createB0_ColorFA_Mask_Sprites(dwi_corrected_file,
- color_fa_file,
- anat_mask_file)
- report['b0'] = b0
- report['colorFA'] = colorFA
- report['anat_mask'] = mask
- report['outlier_volumes'] = outlier_indices.tolist()
-
- with open(eddy_report, 'r') as f:
- report['eddy_report'] = f.readlines()
-
- report['eddy_params'] = np.genfromtxt(eddy_rms).tolist()
- eddy_qc = load_json(eddy_qc_file)
- report['eddy_quad'] = eddy_qc
- save_json(outpath, report)
- return outpath
diff --git a/dmriprep/workflows/_afq/run.py b/dmriprep/workflows/_afq/run.py
deleted file mode 100644
index 9ac70d8b..00000000
--- a/dmriprep/workflows/_afq/run.py
+++ /dev/null
@@ -1,855 +0,0 @@
-import os
-import os.path as op
-from shutil import copyfile
-
-
-def run_dmriprep(dwi_file, bvec_file, bval_file,
- subjects_dir, working_dir, out_dir):
-
- """
- Runs dmriprep for acquisitions with just one PE direction.
-
- """
- from glob import glob
- import nibabel as nib
- import nipype.interfaces.freesurfer as fs
- import nipype.interfaces.fsl as fsl
- import nipype.interfaces.io as nio
- import nipype.interfaces.utility as niu
- import nipype.pipeline.engine as pe
- import numpy as np
- from nipype.algorithms.rapidart import ArtifactDetect
- from nipype.interfaces.dipy import DTI
- from nipype.interfaces.fsl.utils import AvScale
- from nipype.utils.filemanip import fname_presuffix
- from nipype.workflows.dmri.fsl.epi import create_dmri_preprocessing
-
- wf = create_dmri_preprocessing(name='dmriprep',
- use_fieldmap=False,
- fieldmap_registration=False)
- wf.inputs.inputnode.ref_num = 0
- wf.inputs.inputnode.in_file = dwi_file
- wf.inputs.inputnode.in_bvec = bvec_file
-
- dwi_fname = op.split(dwi_file)[1].split(".nii.gz")[0]
- bids_sub_name = dwi_fname.split("_")[0]
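-    # e.g. (illustrative) '/path/sub-01_ses-01_dwi.nii.gz' gives
-    # dwi_fname 'sub-01_ses-01_dwi' and bids_sub_name 'sub-01'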
- assert bids_sub_name.startswith("sub-")
-
- # inputnode = wf.get_node("inputnode")
- outputspec = wf.get_node("outputnode")
-
- # QC: FLIRT translation and rotation parameters
- flirt = wf.get_node("motion_correct.coregistration")
- # flirt.inputs.save_mats = True
-
- get_tensor = pe.Node(DTI(), name="dipy_tensor")
- wf.connect(outputspec, "dmri_corrected", get_tensor, "in_file")
- # wf.connect(inputspec2,"bvals", get_tensor, "in_bval")
- get_tensor.inputs.in_bval = bval_file
- wf.connect(outputspec, "bvec_rotated", get_tensor, "in_bvec")
-
- scale_tensor = pe.Node(name='scale_tensor', interface=fsl.BinaryMaths())
- scale_tensor.inputs.operation = 'mul'
- scale_tensor.inputs.operand_value = 1000
- wf.connect(get_tensor, 'out_file', scale_tensor, 'in_file')
-
- fslroi = pe.Node(fsl.ExtractROI(t_min=0, t_size=1), name="fslroi")
- wf.connect(outputspec, "dmri_corrected", fslroi, "in_file")
-
- bbreg = pe.Node(fs.BBRegister(contrast_type="t2", init="fsl",
- out_fsl_file=True, subjects_dir=subjects_dir,
- epi_mask=True), name="bbreg")
- # wf.connect(inputspec2,"fsid", bbreg,"subject_id")
- bbreg.inputs.subject_id = 'freesurfer' # bids_sub_name
- wf.connect(fslroi, "roi_file", bbreg, "source_file")
-
- voltransform = pe.Node(fs.ApplyVolTransform(inverse=True),
- iterfield=['source_file', 'reg_file'],
- name='transform')
- voltransform.inputs.subjects_dir = subjects_dir
-
- vt2 = voltransform.clone("transform_aparcaseg")
- vt2.inputs.interp = "nearest"
-
- def binarize_aparc(aparc_aseg):
- img = nib.load(aparc_aseg)
- data, aff = img.get_data(), img.affine
- outfile = fname_presuffix(
- aparc_aseg, suffix="bin.nii.gz",
- newpath=op.abspath("."), use_ext=False
- )
- nib.Nifti1Image((data > 0).astype(float), aff).to_filename(outfile)
- return outfile
-
- # wf.connect(inputspec2, "mask_nii", voltransform, "target_file")
- create_mask = pe.Node(niu.Function(input_names=["aparc_aseg"],
- output_names=["outfile"],
- function=binarize_aparc),
- name="bin_aparc")
-
- def get_aparc_aseg(subjects_dir, sub):
- return op.join(subjects_dir, sub, "mri", "aparc+aseg.mgz")
-
- create_mask.inputs.aparc_aseg = get_aparc_aseg(subjects_dir, 'freesurfer')
- wf.connect(create_mask, "outfile", voltransform, "target_file")
-
- wf.connect(fslroi, "roi_file", voltransform, "source_file")
- wf.connect(bbreg, "out_reg_file", voltransform, "reg_file")
-
- vt2.inputs.target_file = get_aparc_aseg(subjects_dir, 'freesurfer')
- # wf.connect(inputspec2, "aparc_aseg", vt2, "target_file")
- wf.connect(fslroi, "roi_file", vt2, "source_file")
- wf.connect(bbreg, "out_reg_file", vt2, "reg_file")
-
- # AK (2017): THIS NODE MIGHT NOT BE NECESSARY
- # AK (2018) doesn't know why she said that above..
- threshold2 = pe.Node(fs.Binarize(min=0.5, out_type='nii.gz', dilate=1),
- iterfield=['in_file'],
- name='threshold2')
- wf.connect(voltransform, "transformed_file", threshold2, "in_file")
-
- # wf.connect(getmotion, "motion_params", datasink, "dti.@motparams")
-
- def get_flirt_motion_parameters(flirt_out_mats):
- def get_params(A):
- """This is a copy of spm's spm_imatrix where
- we already know the rotations and translations matrix,
- shears and zooms (as outputs from fsl FLIRT/avscale)
- Let A = the 4x4 rotation and translation matrix
- R = [ c5*c6, c5*s6, s5]
- [-s4*s5*c6-c4*s6, -s4*s5*s6+c4*c6, s4*c5]
- [-c4*s5*c6+s4*s6, -c4*s5*s6-s4*c6, c4*c5]
- """
- def rang(b):
- a = min(max(b, -1), 1)
- return a
- Ry = np.arcsin(A[0, 2])
- # Rx = np.arcsin(A[1, 2] / np.cos(Ry))
- # Rz = np.arccos(A[0, 1] / np.sin(Ry))
-
- if (abs(Ry)-np.pi/2)**2 < 1e-9:
- Rx = 0
- Rz = np.arctan2(-rang(A[1, 0]), rang(-A[2, 0]/A[0, 2]))
- else:
- c = np.cos(Ry)
- Rx = np.arctan2(rang(A[1, 2]/c), rang(A[2, 2]/c))
- Rz = np.arctan2(rang(A[0, 1]/c), rang(A[0, 0]/c))
-
- rotations = [Rx, Ry, Rz]
- translations = [A[0, 3], A[1, 3], A[2, 3]]
-
- return rotations, translations
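-        # rotations come out in radians and translations in mm (FLIRT
-        # matrices are in mm); one row per volume is written to the .par file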
-
- motion_params = open(op.abspath('motion_parameters.par'), 'w')
- for mat in flirt_out_mats:
- res = AvScale(mat_file=mat).run()
- A = np.asarray(res.outputs.rotation_translation_matrix)
- rotations, translations = get_params(A)
- for i in rotations+translations:
- motion_params.write('%f ' % i)
- motion_params.write('\n')
- motion_params.close()
- motion_params = op.abspath('motion_parameters.par')
- return motion_params
-
- getmotion = pe.Node(
- niu.Function(input_names=["flirt_out_mats"],
- output_names=["motion_params"],
- function=get_flirt_motion_parameters),
- name="get_motion_parameters",
- iterfield="flirt_out_mats"
- )
-
- wf.connect(flirt, "out_matrix_file", getmotion, "flirt_out_mats")
-
- art = pe.Node(interface=ArtifactDetect(), name="art")
- art.inputs.use_differences = [True, True]
- art.inputs.save_plot = False
- art.inputs.use_norm = True
- art.inputs.norm_threshold = 3
- art.inputs.zintensity_threshold = 9
- art.inputs.mask_type = 'spm_global'
- art.inputs.parameter_source = 'FSL'
-
- wf.connect(getmotion, "motion_params", art, "realignment_parameters")
- wf.connect(outputspec, "dmri_corrected", art, "realigned_files")
-
- datasink = pe.Node(nio.DataSink(), name="sinker")
- datasink.inputs.base_directory = out_dir
- datasink.inputs.substitutions = [
- ("vol0000_flirt_merged.nii.gz", dwi_fname + '.nii.gz'),
- ("stats.vol0000_flirt_merged.txt", dwi_fname + ".art.json"),
- ("motion_parameters.par", dwi_fname + ".motion.txt"),
- ("_rotated.bvec", ".bvec"),
- ("aparc+aseg_warped_out", dwi_fname.replace("_dwi", "_aparc+aseg")),
- ("art.vol0000_flirt_merged_outliers.txt", dwi_fname + ".outliers.txt"),
- ("vol0000_flirt_merged", dwi_fname),
- ("_roi_bbreg_freesurfer", "_register"),
- ("aparc+asegbin_warped_thresh", dwi_fname.replace("_dwi", "_mask")),
- ("derivatives/dmriprep", "derivatives/{}/dmriprep".format(bids_sub_name))
- ]
-
- wf.connect(art, "statistic_files", datasink, "dmriprep.art.@artstat")
- wf.connect(art, "outlier_files", datasink, "dmriprep.art.@artoutlier")
- wf.connect(outputspec, "dmri_corrected", datasink, "dmriprep.dwi.@corrected")
- wf.connect(outputspec, "bvec_rotated", datasink, "dmriprep.dwi.@rotated")
- wf.connect(getmotion, "motion_params", datasink, "dmriprep.art.@motion")
-
- wf.connect(get_tensor, "out_file", datasink, "dmriprep.dti.@tensor")
- wf.connect(get_tensor, "fa_file", datasink, "dmriprep.dti.@fa")
- wf.connect(get_tensor, "md_file", datasink, "dmriprep.dti.@md")
- wf.connect(get_tensor, "ad_file", datasink, "dmriprep.dti.@ad")
- wf.connect(get_tensor, "rd_file", datasink, "dmriprep.dti.@rd")
- wf.connect(get_tensor, "out_file", datasink, "dmriprep.dti.@scaled_tensor")
-
- wf.connect(bbreg, "min_cost_file", datasink, "dmriprep.reg.@mincost")
- wf.connect(bbreg, "out_fsl_file", datasink, "dmriprep.reg.@fslfile")
- wf.connect(bbreg, "out_reg_file", datasink, "dmriprep.reg.@reg")
- wf.connect(threshold2, "binary_file", datasink, "dmriprep.anat.@mask")
- # wf.connect(vt2, "transformed_file", datasink, "dwi.@aparc_aseg")
-
- convert = pe.Node(fs.MRIConvert(out_type="niigz"), name="convert2nii")
- wf.connect(vt2, "transformed_file", convert, "in_file")
- wf.connect(convert, "out_file", datasink, "dmriprep.anat.@aparc_aseg")
-
- wf.base_dir = working_dir
- wf.run()
-
- copyfile(bval_file, op.join(
- out_dir, bids_sub_name, "dmriprep", "dwi",
- op.split(bval_file)[1]
- ))
-
- dmri_corrected = glob(op.join(out_dir, '*/dmriprep/dwi', '*.nii.gz'))[0]
- bvec_rotated = glob(op.join(out_dir, '*/dmriprep/dwi', '*.bvec'))[0]
- bval_file = glob(op.join(out_dir, '*/dmriprep/dwi', '*.bval'))[0]
- art_file = glob(op.join(out_dir, '*/dmriprep/art', '*.art.json'))[0]
- motion_file = glob(op.join(out_dir, '*/dmriprep/art', '*.motion.txt'))[0]
- outlier_file = glob(op.join(out_dir, '*/dmriprep/art', '*.outliers.txt'))[0]
- return dmri_corrected, bvec_rotated, art_file, motion_file, outlier_file
-
-
-def run_dmriprep_pe(subject_id, dwi_file, dwi_file_AP, dwi_file_PA,
- bvec_file, bval_file,
- subjects_dir, working_dir, out_dir,
- eddy_niter=5, slice_outlier_threshold=0.02):
- """Run the dmriprep (phase encoded) nipype workflow
-
- Parameters
- ----------
- subject_id : str
- Subject identifier
-
- dwi_file : str
- Path to dwi nifti file
-
- dwi_file_AP : str
- Path to EPI nifti file (anterior-posterior)
-
- dwi_file_PA : str
- Path to EPI nifti file (posterior-anterior)
-
- bvec_file : str
- Path to bvec file
-
- bval_file : str
- Path to bval file
-
- subjects_dir : str
- Path to subject's freesurfer directory
-
- working_dir : str
- Path to workflow working directory
-
- out_dir : str
- Path to output directory
-
- eddy_niter : int, default=5
- Fixed number of eddy iterations. See
- https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/UsersGuide#A--niter
-
-    slice_outlier_threshold : int or float
-        Number of allowed outlier slices per volume. If this is exceeded, the
-        volume is dropped from the analysis. If `slice_outlier_threshold` is an
-        int, it is treated as the number of allowed outlier slices. If
-        `slice_outlier_threshold` is a float between 0 and 1 (exclusive), it is
-        treated as the fraction of allowed outlier slices.
-
- Notes
- -----
- This assumes that there are scans with phase-encode directions AP/PA for
- topup.
-
- See Also
- --------
- dmriprep.run.get_dmriprep_pe_workflow
- """
- wf = get_dmriprep_pe_workflow()
- wf.base_dir = op.join(op.abspath(working_dir), subject_id)
-
- inputspec = wf.get_node('inputspec')
- inputspec.inputs.subject_id = subject_id
- inputspec.inputs.dwi_file = dwi_file
- inputspec.inputs.dwi_file_ap = dwi_file_AP
- inputspec.inputs.dwi_file_pa = dwi_file_PA
- inputspec.inputs.bvec_file = bvec_file
- inputspec.inputs.bval_file = bval_file
- inputspec.inputs.subjects_dir = subjects_dir
- inputspec.inputs.out_dir = op.abspath(out_dir)
- inputspec.inputs.eddy_niter = eddy_niter
- inputspec.inputs.slice_outlier_threshold = slice_outlier_threshold
-
- # write the graph (this is saved to the working dir)
- wf.write_graph()
- wf.config['execution']['remove_unnecessary_outputs'] = False
- wf.config['execution']['keep_inputs'] = True
- wf.run()
-
-
-def get_dmriprep_pe_workflow():
- """Return the dmriprep (phase encoded) nipype workflow
-
- Returns
- -------
- wf : nipype.pipeline.engine.Workflow
- Nipype dmriprep workflow
-
- Notes
- -----
- This assumes that there are scans with phase-encode directions AP/PA for
- topup.
- """
- import nipype.interfaces.freesurfer as fs
- import nipype.interfaces.fsl as fsl
- import nipype.interfaces.io as nio
- import nipype.interfaces.utility as niu
- import nipype.pipeline.engine as pe
- from nipype.interfaces.dipy import DTI
- from nipype.workflows.dmri.fsl.artifacts import all_fsl_pipeline
-
- inputspec = pe.Node(niu.IdentityInterface(fields=[
- 'subject_id',
- 'dwi_file',
- 'dwi_file_ap',
- 'dwi_file_pa',
- 'bvec_file',
- 'bval_file',
- 'subjects_dir',
- 'out_dir',
- 'eddy_niter',
- 'slice_outlier_threshold'
- ]), name="inputspec")
-
- # AK: watch out, other datasets might be encoded LR
- epi_ap = {'echospacing': 66.5e-3, 'enc_dir': 'y-'}
- epi_pa = {'echospacing': 66.5e-3, 'enc_dir': 'y'}
- prep = all_fsl_pipeline(epi_params=epi_ap, altepi_params=epi_pa)
-
- # initialize an overall workflow
- wf = pe.Workflow(name="dmriprep")
-
- wf.connect(inputspec, 'dwi_file', prep, 'inputnode.in_file')
- wf.connect(inputspec, 'bvec_file', prep, 'inputnode.in_bvec')
- wf.connect(inputspec, 'bval_file', prep, 'inputnode.in_bval')
- wf.connect(inputspec, 'eddy_niter', prep, 'fsl_eddy.niter')
-
- eddy = prep.get_node('fsl_eddy')
- eddy.inputs.repol = True
- eddy.inputs.cnr_maps = True
- eddy.inputs.residuals = True
- import multiprocessing
- eddy.inputs.num_threads = multiprocessing.cpu_count()
- import numba.cuda
- try:
- if numba.cuda.gpus:
- eddy.inputs.use_cuda = True
-    except Exception:
- eddy.inputs.use_cuda = False
-
- topup = prep.get_node('peb_correction.topup')
- topup.inputs.checksize = True
-
- applytopup = prep.get_node('peb_correction.unwarp')
- applytopup.inputs.checksize = True
-
- eddy.inputs.checksize = True
-
- eddy_quad = pe.Node(fsl.EddyQuad(verbose=True, checksize=True), name="eddy_quad")
- get_path = lambda x: x.split('.nii.gz')[0].split('_fix')[0]
- get_qc_path = lambda x: x.split('.nii.gz')[0] + '.qc'
- wf.connect(prep, ('fsl_eddy.out_corrected', get_path), eddy_quad, 'base_name')
- wf.connect(inputspec, 'bval_file', eddy_quad, 'bval_file')
- wf.connect(prep, 'Rotate_Bvec.out_file', eddy_quad, 'bvec_file')
- wf.connect(prep, 'peb_correction.topup.out_field', eddy_quad, 'field')
- wf.connect(prep, 'gen_index.out_file', eddy_quad, 'idx_file')
- wf.connect(prep, 'peb_correction.topup.out_enc_file', eddy_quad, 'param_file')
- wf.connect(prep, ('fsl_eddy.out_corrected', get_qc_path), eddy_quad, 'output_dir')
-
- # need a mask file for eddy_quad. lets get it from the B0.
- def get_b0_mask_fn(b0_file):
- import nibabel as nib
- from nipype.utils.filemanip import fname_presuffix
- from dipy.segment.mask import median_otsu
- import os
-
- mask_file = fname_presuffix(b0_file, suffix="_mask", newpath=os.path.abspath('.'))
- img = nib.load(b0_file)
- data, aff = img.get_data(), img.affine
- _, mask = median_otsu(data, 2, 1)
- nib.Nifti1Image(mask.astype(float), aff).to_filename(mask_file)
- return mask_file
-
-
- def id_outliers_fn(outlier_report, threshold, dwi_file):
- """Get list of scans that exceed threshold for number of outliers
-
- Parameters
- ----------
- outlier_report: string
- Path to the fsl_eddy outlier report
-
- threshold: int or float
-            If threshold is an int, it is treated as the number of allowed
-            outlier slices. If threshold is a float between 0 and 1
-            (exclusive), it is treated as the fraction of allowed outlier
-            slices before we drop the whole volume.
-
- dwi_file: string
- Path to nii dwi file to determine total number of slices
-
- Returns
- -------
- drop_scans: numpy.ndarray
- List of scan indices to drop
- """
- import nibabel as nib
- import numpy as np
- import os.path as op
- import parse
-
- with open(op.abspath(outlier_report), 'r') as fp:
- lines = fp.readlines()
-
- p = parse.compile(
- "Slice {slice:d} in scan {scan:d} is an outlier with "
- "mean {mean_sd:f} standard deviations off, and mean "
- "squared {mean_sq_sd:f} standard deviations off."
- )
-
- outliers = [p.parse(l).named for l in lines]
- scans = {d['scan'] for d in outliers}
-
- def num_outliers(scan, outliers):
- return len([d for d in outliers if d['scan'] == scan])
-
- if 0 < threshold < 1:
- img = nib.load(dwi_file)
- try:
- threshold *= img.header.get_n_slices()
- except nib.spatialimages.HeaderDataError:
- print('WARNING. We are not sure which dimension has the '
- 'slices in this image. So we are using the 3rd dim.', img.shape)
- threshold *= img.shape[2]
-
- drop_scans = np.array([
- s for s in scans
- if num_outliers(s, outliers) > threshold
- ])
-
- outpath = op.abspath("dropped_scans.txt")
- np.savetxt(outpath, drop_scans, fmt="%d")
-
- return drop_scans, outpath
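-        # Worked example (hypothetical numbers): with threshold=0.02 and a
-        # 70-slice acquisition, threshold becomes 1.4, so any volume with two
-        # or more outlier slices is dropped.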
-
- id_outliers_node = pe.Node(niu.Function(
- input_names=["outlier_report", "threshold", "dwi_file"],
- output_names=["drop_scans", "outpath"],
- function=id_outliers_fn),
- name="id_outliers_node"
- )
-
- wf.connect(inputspec, 'dwi_file', id_outliers_node, 'dwi_file')
- wf.connect(inputspec, 'slice_outlier_threshold', id_outliers_node, 'threshold')
-
- wf.connect(prep, "fsl_eddy.out_outlier_report",
- id_outliers_node, "outlier_report")
-
- list_merge = pe.Node(niu.Merge(numinputs=2), name="list_merge")
- wf.connect(inputspec, 'dwi_file_ap', list_merge, 'in1')
- wf.connect(inputspec, 'dwi_file_pa', list_merge, 'in2')
-
- merge = pe.Node(fsl.Merge(dimension='t'), name="mergeAPPA")
- wf.connect(merge, 'merged_file', prep, 'inputnode.alt_file')
- wf.connect(list_merge, 'out', merge, 'in_files')
-
- fslroi = pe.Node(fsl.ExtractROI(t_min=0, t_size=1), name="fslroi")
- wf.connect(prep, "outputnode.out_file", fslroi, "in_file")
-
- b0mask_node = pe.Node(niu.Function(input_names=['b0_file'],
- output_names=['mask_file'],
- function=get_b0_mask_fn),
- name="getB0Mask")
- wf.connect(fslroi, 'roi_file', b0mask_node, 'b0_file')
- wf.connect(b0mask_node, 'mask_file', eddy_quad, 'mask_file')
-
- bbreg = pe.Node(fs.BBRegister(contrast_type="t2", init="coreg",
- out_fsl_file=True,
- # subjects_dir=subjects_dir,
- epi_mask=True),
- name="bbreg")
- bbreg.inputs.subject_id = 'freesurfer' # bids_sub_name
- wf.connect(fslroi, "roi_file", bbreg, "source_file")
- wf.connect(inputspec, 'subjects_dir', bbreg, 'subjects_dir')
-
- def drop_outliers_fn(in_file, in_bval, in_bvec, drop_scans):
- """Drop outlier volumes from dwi file
-
- Parameters
- ----------
- in_file: string
- Path to nii dwi file to drop outlier volumes from
-
- in_bval: string
- Path to bval file to drop outlier volumes from
-
- in_bvec: string
- Path to bvec file to drop outlier volumes from
-
- drop_scans: numpy.ndarray
- List of scan indices to drop
-
- Returns
- -------
- out_file: string
- Path to "thinned" output dwi file
-
- out_bval: string
- Path to "thinned" output bval file
-
- out_bvec: string
- Path to "thinned" output bvec file
- """
- import nibabel as nib
- import numpy as np
- import os.path as op
- from nipype.utils.filemanip import fname_presuffix
-
- img = nib.load(op.abspath(in_file))
- img_data = img.get_data()
- img_data_thinned = np.delete(img_data,
- drop_scans,
- axis=3)
- if isinstance(img, nib.nifti1.Nifti1Image):
- img_thinned = nib.Nifti1Image(img_data_thinned.astype(np.float64),
- img.affine,
- header=img.header)
- elif isinstance(img, nib.nifti2.Nifti2Image):
- img_thinned = nib.Nifti2Image(img_data_thinned.astype(np.float64),
- img.affine,
- header=img.header)
- else:
- raise TypeError("in_file does not contain Nifti image datatype.")
-
- out_file = fname_presuffix(in_file, suffix="_thinned", newpath=op.abspath('.'))
- nib.save(img_thinned, op.abspath(out_file))
-
- bval = np.loadtxt(in_bval)
- bval_thinned = np.delete(bval, drop_scans, axis=0)
- out_bval = fname_presuffix(in_bval, suffix="_thinned", newpath=op.abspath('.'))
- np.savetxt(out_bval, bval_thinned)
-
- bvec = np.loadtxt(in_bvec)
- bvec_thinned = np.delete(bvec, drop_scans, axis=1)
- out_bvec = fname_presuffix(in_bvec, suffix="_thinned", newpath=op.abspath('.'))
- np.savetxt(out_bvec, bvec_thinned)
-
- return out_file, out_bval, out_bvec
-
- drop_outliers_node = pe.Node(niu.Function(
- input_names=["in_file", "in_bval", "in_bvec", "drop_scans"],
- output_names=["out_file", "out_bval", "out_bvec"],
- function=drop_outliers_fn),
- name="drop_outliers_node"
- )
-
-    # Align the output of drop_outliers_node and also the eddy-corrected version to the anatomical space
-    # without resampling. Then, for aparc+aseg and the mask, resample to the larger voxel size of the B0 image from
-    # fslroi. We also need to apply the transformation to both bvecs (dropped & eddied); I think we can just load
-    # the affine from bbreg (sio.loadmat) and np.dot(coord, aff) for each coord in bvec.
-
- def get_orig(subjects_dir, sub='freesurfer'):
- import os.path as op
- return op.join(subjects_dir, sub, "mri", "orig.mgz")
-
- def get_aparc_aseg(subjects_dir, sub='freesurfer'):
- import os.path as op
- return op.join(subjects_dir, sub, "mri", "aparc+aseg.mgz")
-
- # transform the dropped volume version to anat space w/ out resampling
- voltransform = pe.Node(fs.ApplyVolTransform(no_resample=True),
- iterfield=['source_file', 'reg_file'],
- name='transform')
-
- wf.connect(inputspec, 'subjects_dir', voltransform, 'subjects_dir')
- wf.connect(inputspec, ('subjects_dir', get_aparc_aseg), voltransform, 'target_file')
- wf.connect(prep, "outputnode.out_file", voltransform, "source_file")
- wf.connect(bbreg, "out_reg_file", voltransform, "reg_file")
-
- def apply_transform_to_bvecs_fn(bvec_file, reg_mat_file):
- import numpy as np
- import nipype.utils.filemanip as fm
- import os
-
- aff = np.loadtxt(reg_mat_file)
- bvecs = np.loadtxt(bvec_file)
- bvec_trans = []
- for bvec in bvecs.T:
- coord = np.hstack((bvec, [1]))
- coord_trans = np.dot(coord, aff)[:-1]
- bvec_trans.append(coord_trans)
- out_bvec = fm.fname_presuffix(bvec_file, suffix="anat_space", newpath=os.path.abspath('.'))
- np.savetxt(out_bvec, np.asarray(bvec_trans).T)
- return out_bvec
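-        # Each gradient vector is lifted to homogeneous coordinates and
-        # multiplied with the bbreg FSL affine, giving bvecs in anatomical
-        # space.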
-
- apply_transform_to_bvecs_node = pe.Node(niu.Function(input_names=['bvec_file', 'reg_mat_file'],
- output_names=['out_bvec'],
- function=apply_transform_to_bvecs_fn),
- name="applyTransformToBvecs")
- wf.connect(bbreg, 'out_fsl_file', apply_transform_to_bvecs_node, 'reg_mat_file')
- wf.connect(prep, 'outputnode.out_bvec', apply_transform_to_bvecs_node, 'bvec_file')
-
- # ok cool, now lets do the thresholding.
-
- wf.connect(id_outliers_node, "drop_scans", drop_outliers_node, "drop_scans")
- wf.connect(voltransform, "transformed_file", drop_outliers_node, "in_file")
- wf.connect(inputspec, 'bval_file', drop_outliers_node, 'in_bval')
- wf.connect(apply_transform_to_bvecs_node, "out_bvec", drop_outliers_node, "in_bvec")
-
- # lets compute the tensor on both the dropped volume scan
- # and also the original, eddy corrected one.
- get_tensor = pe.Node(DTI(), name="dipy_tensor")
- wf.connect(drop_outliers_node, "out_file", get_tensor, "in_file")
- wf.connect(drop_outliers_node, "out_bval", get_tensor, "in_bval")
- wf.connect(drop_outliers_node, "out_bvec", get_tensor, "in_bvec")
-
- get_tensor_eddy = get_tensor.clone('dipy_tensor_eddy')
- wf.connect(voltransform, 'transformed_file', get_tensor_eddy, "in_file")
- wf.connect(apply_transform_to_bvecs_node, 'out_bvec', get_tensor_eddy, "in_bvec")
- wf.connect(inputspec, 'bval_file', get_tensor_eddy, 'in_bval')
-
-    # AK: What is this, some vestigial node from a previous workflow?
-    # I'm not sure why the tensor gets scaled, but I guess let's scale it for
-    # both the dropped & eddy-corrected versions.
- scale_tensor = pe.Node(name='scale_tensor', interface=fsl.BinaryMaths())
- scale_tensor.inputs.operation = 'mul'
- scale_tensor.inputs.operand_value = 1000
- wf.connect(get_tensor, 'out_file', scale_tensor, 'in_file')
-
- scale_tensor_eddy = scale_tensor.clone('scale_tensor_eddy')
- wf.connect(get_tensor_eddy, 'out_file', scale_tensor_eddy, 'in_file')
-
- # OK now that anatomical stuff (segmentation & mask)
- # We'll need:
- # 1. the B0 image in anat space (fslroi the 'transformed file' of voltransform
- # 2. the aparc aseg resampled-like the B0 image (mri_convert)
- # 3. the resample aparc_aseg binarized to be the mask image.
-
- def binarize_aparc(aparc_aseg):
- import nibabel as nib
- from nipype.utils.filemanip import fname_presuffix
- import os.path as op
-
- img = nib.load(aparc_aseg)
- data, aff = img.get_data(), img.affine
- outfile = fname_presuffix(
- aparc_aseg, suffix="bin.nii.gz",
- newpath=op.abspath("."), use_ext=False
- )
- nib.Nifti1Image((data > 0).astype(float), aff).to_filename(outfile)
- return outfile
-
- create_mask = pe.Node(niu.Function(input_names=["aparc_aseg"],
- output_names=["outfile"],
- function=binarize_aparc),
- name="bin_aparc")
-
- get_b0_anat = fslroi.clone('get_b0_anat')
- wf.connect(voltransform, 'transformed_file', get_b0_anat, 'in_file')
-
- # reslice the anat-space aparc+aseg to the DWI resolution
- reslice_to_dwi = pe.Node(fs.MRIConvert(resample_type="nearest"),
- name="reslice_to_dwi")
- wf.connect(get_b0_anat, 'roi_file', reslice_to_dwi, 'reslice_like')
- wf.connect(inputspec, ('subjects_dir', get_aparc_aseg), reslice_to_dwi, 'in_file')
-
- # also reslice the orig i suppose
- reslice_orig_to_dwi = reslice_to_dwi.clone('resliceT1wToDwi')
- wf.connect(inputspec, ('subjects_dir', get_orig), reslice_orig_to_dwi, 'in_file')
- # reslice_orig_to_dwi.inputs.in_file = get_orig(subjects_dir, 'freesurfer')
- reslice_orig_to_dwi.inputs.out_type = 'niigz'
- wf.connect(get_b0_anat, 'roi_file', reslice_orig_to_dwi, 'reslice_like')
-
- # we assume the freesurfer is the output of BIDS
- # so the freesurfer output is in /path/to/derivatives/sub-whatever/freesurfer
- # which means the subject_dir is /path/to/derivatives/sub-whatever
- # reslice_to_dwi.inputs.in_file = get_aparc_aseg(subjects_dir, 'freesurfer')
-
- # now we have a nice aparc+aseg still in anat space but resliced like the dwi file
- # lets create a mask file from it.
-
- wf.connect(reslice_to_dwi, 'out_file', create_mask, 'aparc_aseg')
-
- # save all the things
- datasink = pe.Node(nio.DataSink(), name="sinker")
- wf.connect(inputspec, 'out_dir', datasink, 'base_directory')
- wf.connect(inputspec, 'subject_id', datasink, 'container')
-
- wf.connect(drop_outliers_node, "out_file", datasink, "dmriprep.dwi.@thinned")
- wf.connect(drop_outliers_node, "out_bval", datasink, "dmriprep.dwi.@bval_thinned")
- wf.connect(drop_outliers_node, "out_bvec", datasink, "dmriprep.dwi.@bvec_thinned")
-
- # eddy corrected files
- wf.connect(prep, "outputnode.out_file", datasink, "dmriprep.dwi_eddy.@corrected")
- wf.connect(prep, "outputnode.out_bvec", datasink, "dmriprep.dwi_eddy.@rotated")
- wf.connect(inputspec, 'bval_file', datasink, 'dmriprep.dwi_eddy.@bval')
-
- # all the eddy stuff except the corrected files
- wf.connect(prep, "fsl_eddy.out_movement_rms",
- datasink, "dmriprep.qc.@eddyparamsrms")
- wf.connect(prep, "fsl_eddy.out_outlier_report",
- datasink, "dmriprep.qc.@eddyparamsreport")
- wf.connect(prep, "fsl_eddy.out_restricted_movement_rms",
- datasink, "dmriprep.qc.@eddyparamsrestrictrms")
- wf.connect(prep, "fsl_eddy.out_shell_alignment_parameters",
- datasink, "dmriprep.qc.@eddyparamsshellalign")
- wf.connect(prep, "fsl_eddy.out_parameter",
- datasink, "dmriprep.qc.@eddyparams")
- wf.connect(prep, "fsl_eddy.out_cnr_maps",
- datasink, "dmriprep.qc.@eddycndr")
- wf.connect(prep, "fsl_eddy.out_residuals",
- datasink, "dmriprep.qc.@eddyresid")
-
- # the file that told us which volumes to drop
- wf.connect(id_outliers_node, "outpath", datasink, "dmriprep.qc.@droppedscans")
-
- # the tensors of the dropped volumes dwi
- wf.connect(get_tensor, "out_file", datasink, "dmriprep.dti.@tensor")
- wf.connect(get_tensor, "fa_file", datasink, "dmriprep.dti.@fa")
- wf.connect(get_tensor, "md_file", datasink, "dmriprep.dti.@md")
- wf.connect(get_tensor, "ad_file", datasink, "dmriprep.dti.@ad")
- wf.connect(get_tensor, "rd_file", datasink, "dmriprep.dti.@rd")
- wf.connect(get_tensor, "color_fa_file", datasink, "dmriprep.dti.@colorfa")
- wf.connect(scale_tensor, "out_file", datasink, "dmriprep.dti.@scaled_tensor")
-
- # the tensors of the eddied volumes dwi
- wf.connect(get_tensor_eddy, "out_file", datasink, "dmriprep.dti_eddy.@tensor")
- wf.connect(get_tensor_eddy, "fa_file", datasink, "dmriprep.dti_eddy.@fa")
- wf.connect(get_tensor_eddy, "md_file", datasink, "dmriprep.dti_eddy.@md")
- wf.connect(get_tensor_eddy, "ad_file", datasink, "dmriprep.dti_eddy.@ad")
- wf.connect(get_tensor_eddy, "rd_file", datasink, "dmriprep.dti_eddy.@rd")
- wf.connect(get_tensor_eddy, "color_fa_file", datasink, "dmriprep.dti_eddy.@colorfa")
- wf.connect(scale_tensor_eddy, "out_file", datasink, "dmriprep.dti_eddy.@scaled_tensor")
-
- # all the eddy_quad stuff
- wf.connect(eddy_quad, 'qc_json', datasink, "dmriprep.qc.@eddyquad_json")
- wf.connect(eddy_quad, 'qc_pdf', datasink, "dmriprep.qc.@eddyquad_pdf")
- wf.connect(eddy_quad, 'avg_b_png', datasink, "dmriprep.qc.@eddyquad_bpng")
- wf.connect(eddy_quad, 'avg_b0_pe_png',
- datasink, "dmriprep.qc.@eddyquad_b0png")
- wf.connect(eddy_quad, 'cnr_png', datasink, "dmriprep.qc.@eddyquad_cnr")
- wf.connect(eddy_quad, 'vdm_png', datasink, "dmriprep.qc.@eddyquad_vdm")
- wf.connect(eddy_quad, 'residuals', datasink, 'dmriprep.qc.@eddyquad_resid')
-
- # anatomical registration stuff
- wf.connect(bbreg, "min_cost_file", datasink, "dmriprep.reg.@mincost")
- wf.connect(bbreg, "out_fsl_file", datasink, "dmriprep.reg.@fslfile")
- wf.connect(bbreg, "out_reg_file", datasink, "dmriprep.reg.@reg")
-
- # anatomical files resliced
- wf.connect(reslice_to_dwi, 'out_file', datasink, 'dmriprep.anat.@segmentation')
- wf.connect(create_mask, 'outfile', datasink, 'dmriprep.anat.@mask')
- wf.connect(reslice_orig_to_dwi, 'out_file', datasink, 'dmriprep.anat.@T1w')
-
- def report_fn(dwi_corrected_file, eddy_rms, eddy_report,
- color_fa_file, anat_mask_file, outlier_indices,
- eddy_qc_file):
- from dmriprep.qc import create_report_json
-
- report = create_report_json(dwi_corrected_file, eddy_rms, eddy_report,
- color_fa_file, anat_mask_file, outlier_indices,
- eddy_qc_file)
- return report
-
- report_node = pe.Node(niu.Function(
- input_names=['dwi_corrected_file', 'eddy_rms',
- 'eddy_report', 'color_fa_file',
- 'anat_mask_file', 'outlier_indices', 'eddy_qc_file'],
- output_names=['report'],
- function=report_fn
- ), name="reportJSON")
-
- # for the report, show the eddy-corrected (full volume) image
- wf.connect(voltransform, "transformed_file", report_node, 'dwi_corrected_file')
- wf.connect(eddy_quad, 'qc_json', report_node, 'eddy_qc_file')
-
- # add the rms movement output from eddy
- wf.connect(prep, "fsl_eddy.out_movement_rms", report_node, 'eddy_rms')
- wf.connect(prep, "fsl_eddy.out_outlier_report", report_node, 'eddy_report')
- wf.connect(id_outliers_node, 'drop_scans', report_node, 'outlier_indices')
-
- # the mask file (to check our registration) and the colorFA file go into the report
- wf.connect(create_mask, "outfile", report_node, 'anat_mask_file')
- wf.connect(get_tensor, "color_fa_file", report_node, 'color_fa_file')
-
- # save that report!
- wf.connect(report_node, 'report', datasink, 'dmriprep.report.@report')
-
- # This part is done last, to get the output filenames *just right*.
- # It is admittedly fiddly.
- def name_files_nicely(dwi_file, subject_id):
- import os.path as op
-
- dwi_fname = op.split(dwi_file)[1].split(".nii.gz")[0]
- substitutions = [
- ("vol0000_flirt_merged.nii.gz", dwi_fname + '.nii.gz'),
- ("stats.vol0000_flirt_merged.txt", dwi_fname + ".art.json"),
- ("motion_parameters.par", dwi_fname + ".motion.txt"),
- ("_rotated.bvec", ".bvec"),
- ("art.vol0000_flirt_merged_outliers.txt", dwi_fname + ".outliers.txt"),
- ("vol0000_flirt_merged", dwi_fname),
- ("_roi_bbreg_freesurfer", "_register"),
- ("dwi/eddy_corrected", "dwi/%s" % dwi_fname),
- ("dti/eddy_corrected", "dti/%s" % dwi_fname.replace("_dwi", "")),
- ("reg/eddy_corrected", "reg/%s" % dwi_fname.replace("_dwi", "")),
- ("aparc+aseg_outbin", dwi_fname.replace("_dwi", "_mask")),
- ("aparc+aseg_out", dwi_fname.replace("_dwi", "_aparc+aseg")),
- ("art.eddy_corrected_outliers", dwi_fname.replace("dwi", "outliers")),
- ("color_fa", "colorfa"),
- ("orig_out", dwi_fname.replace("_dwi", "_T1w")),
- ("stats.eddy_corrected", dwi_fname.replace("dwi", "artStats")),
- ("eddy_corrected.eddy_parameters", dwi_fname + ".eddy_parameters"),
- ("qc/eddy_corrected", "qc/" + dwi_fname),
- ("derivatives/dmriprep", "derivatives/{}/dmriprep".format(subject_id)),
- ("_rotatedanat_space_thinned", ""),
- ("_thinned", ""),
- ("eddy_corrected", dwi_fname),
- ("_warped", ""),
- ("_maths", "_scaled"),
- ("dropped_scans", dwi_fname.replace("_dwi", "_dwi_dropped_scans")),
- ("report.json", dwi_fname.replace("_dwi", "_dwi_report.json"))
- ]
- return substitutions
-
- node_name_files_nicely = pe.Node(niu.Function(input_names=['dwi_file', 'subject_id'],
- output_names=['substitutions'],
- function=name_files_nicely),
- name="name_files_nicely")
- wf.connect(inputspec, 'dwi_file', node_name_files_nicely, 'dwi_file')
- wf.connect(inputspec, 'subject_id', node_name_files_nicely, 'subject_id')
- wf.connect(node_name_files_nicely, 'substitutions', datasink, 'substitutions')
-
- return wf
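Note on the renaming mechanism above: nipype's DataSink applies each (old, new) pair returned by ``name_files_nicely``, in order, as a plain string replacement on every output path it writes. A minimal sketch, with hypothetical paths and pairs:

```python
# Minimal sketch (hypothetical paths and pairs) of how DataSink consumes the
# substitutions produced by name_files_nicely: each (old, new) pair is applied,
# in order, as a plain string replacement on every path the sink writes out.
from nipype.interfaces.io import DataSink
from nipype.pipeline import engine as pe

sinker = pe.Node(DataSink(), name="sinker")
sinker.inputs.base_directory = "/tmp/derivatives"    # hypothetical output root
sinker.inputs.substitutions = [
    ("eddy_corrected", "sub-01_ses-01_dwi"),         # rename eddy outputs
    ("_maths", "_scaled"),                           # drop the FSL maths suffix
]
# e.g. ".../dwi/eddy_corrected_maths.nii.gz" would be written as
#      ".../dwi/sub-01_ses-01_dwi_scaled.nii.gz"
```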
diff --git a/dmriprep/workflows/_afq/utils.py b/dmriprep/workflows/_afq/utils.py
deleted file mode 100644
index cd92cc17..00000000
--- a/dmriprep/workflows/_afq/utils.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""
-Utility functions for other submodules
-
-"""
-import itertools
-import logging
-
-import numpy as np
-
-
-mod_logger = logging.getLogger(__name__)
-
-
-def is_hemispherical(vecs):
- """Test whether all points on a unit sphere lie in the same hemisphere.
-
- Parameters
- ----------
- vecs : numpy.ndarray
- 2D numpy array with shape (N, 3) where N is the number of points.
- All points must lie on the unit sphere.
-
- Returns
- -------
- is_hemi : bool
- If True, one can find a hemisphere that contains all the points.
- If False, then the points do not lie in any hemisphere
-
- pole : numpy.ndarray
- If `is_hemi == True`, then pole is the "central" pole of the
- input vectors. Otherwise, pole is the zero vector.
-
- References
- ----------
- https://rstudio-pubs-static.s3.amazonaws.com/27121_a22e51b47c544980bad594d5e0bb2d04.html # noqa
- """
- if vecs.shape[1] != 3:
- raise ValueError("Input vectors must be 3D vectors")
- if not np.allclose(1, np.linalg.norm(vecs, axis=1)):
- raise ValueError("Input vectors must be unit vectors")
-
- # Generate all pairwise cross products
- v0, v1 = zip(*[p for p in itertools.permutations(vecs, 2)])
- cross_prods = np.cross(v0, v1)
-
- # Normalize them
- cross_prods /= np.linalg.norm(cross_prods, axis=1)[:, np.newaxis]
-
- # `cross_prods` now contains all candidate vertex points for "the polygon"
- # in the reference. "The polygon" is a subset. Find which points belong to
- # the polygon using a dot product test with each of the original vectors
- angles = np.arccos(np.dot(cross_prods, vecs.transpose()))
-
- # And test whether it is orthogonal or less
- dot_prod_test = angles <= np.pi / 2.0
-
- # If there is at least one point that is orthogonal or less to each
- # input vector, then the points lie on some hemisphere
- is_hemi = len(vecs) in np.sum(dot_prod_test.astype(int), axis=1)
-
- if is_hemi:
- vertices = cross_prods[np.sum(dot_prod_test.astype(int), axis=1) == len(vecs)]
- pole = np.mean(vertices, axis=0)
- pole /= np.linalg.norm(pole)
- else:
- pole = np.array([0.0, 0.0, 0.0])
- return is_hemi, pole
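As a quick, illustrative sanity check of the helper above (assuming ``is_hemispherical`` as defined in this removed module is available in the session):

```python
# Unit vectors confined to z >= 0 all lie within one hemisphere, so the check
# should return True with a pole pointing roughly toward +z. Illustrative only.
import numpy as np

rng = np.random.default_rng(0)
vecs = rng.normal(size=(20, 3))
vecs[:, 2] = np.abs(vecs[:, 2])                # push every point into the upper hemisphere
vecs /= np.linalg.norm(vecs, axis=1)[:, None]  # project onto the unit sphere

is_hemi, pole = is_hemispherical(vecs)
print(is_hemi, pole)                           # expected: True, pole roughly [0, 0, 1]
```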
diff --git a/dmriprep/workflows/base.py b/dmriprep/workflows/base.py
deleted file mode 100755
index 2474204f..00000000
--- a/dmriprep/workflows/base.py
+++ /dev/null
@@ -1,506 +0,0 @@
-# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
-# vi: set ft=python sts=4 ts=4 sw=4 et:
-"""
-dMRIPrep base processing workflows.
-
-.. autofunction:: init_dmriprep_wf
-.. autofunction:: init_single_subject_wf
-
-"""
-
-import sys
-import os
-from collections import OrderedDict
-from copy import deepcopy
-
-from nipype import __version__ as nipype_ver
-from nipype.pipeline import engine as pe
-from nipype.interfaces import utility as niu
-from nilearn import __version__ as nilearn_ver
-
-from niworkflows.engine.workflows import LiterateWorkflow as Workflow
-from niworkflows.interfaces.bids import (
- BIDSInfo, BIDSFreeSurferDir
-)
-from niworkflows.utils.misc import fix_multi_T1w_source_name
-from smriprep.workflows.anatomical import init_anat_preproc_wf
-
-from ..interfaces import DerivativesDataSink, BIDSDataGrabber
-from ..interfaces.reports import SubjectSummary, AboutSummary
-from ..utils.bids import collect_data
-from ..__about__ import __version__
-# from .dwi import init_dwi_preproc_wf
-
-
-def init_dmriprep_wf(
- anat_only,
- debug,
- force_syn,
- freesurfer,
- hires,
- ignore,
- layout,
- longitudinal,
- low_mem,
- omp_nthreads,
- output_dir,
- output_spaces,
- run_uuid,
- skull_strip_fixed_seed,
- skull_strip_template,
- subject_list,
- use_syn,
- work_dir,
-):
- """
- Create the base workflow.
-
- This workflow organizes the execution of *dMRIPrep*, with a sub-workflow for
- each subject. If FreeSurfer's recon-all is to be run, a FreeSurfer derivatives folder is
- created and populated with any needed template subjects.
-
- .. workflow::
- :graph2use: orig
- :simple_form: yes
-
- import os
- from collections import namedtuple, OrderedDict
- BIDSLayout = namedtuple('BIDSLayout', ['root'])
- from dmriprep.workflows.base import init_dmriprep_wf
- os.environ['FREESURFER_HOME'] = os.getcwd()
- wf = init_dmriprep_wf(
- anat_only=False,
- debug=False,
- force_syn=True,
- freesurfer=True,
- hires=True,
- ignore=[],
- layout=BIDSLayout('.'),
- longitudinal=False,
- low_mem=False,
- omp_nthreads=1,
- output_dir='.',
- output_spaces=OrderedDict([
- ('MNI152Lin', {}), ('fsaverage', {'density': '10k'}),
- ('T1w', {}), ('fsnative', {})]),
- run_uuid='X',
- skull_strip_fixed_seed=False,
- skull_strip_template=('OASIS30ANTs', {}),
- subject_list=['dmripreptest'],
- use_syn=True,
- work_dir='.',
- )
-
-
- Parameters
- ----------
- anat_only : bool
- Disable diffusion MRI workflows
- debug : bool
- Enable debugging outputs
- force_syn : bool
- **Temporary**: Always run SyN-based SDC
- freesurfer : bool
- Enable FreeSurfer surface reconstruction (may increase runtime)
- hires : bool
- Enable sub-millimeter preprocessing in FreeSurfer
- ignore : list
- Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
- layout : BIDSLayout object
- BIDS dataset layout
- longitudinal : bool
- Treat multiple sessions as longitudinal (may increase runtime)
- See sub-workflows for specific differences
- low_mem : bool
- Write uncompressed .nii files in some cases to reduce memory usage
- omp_nthreads : int
- Maximum number of threads an individual process may use
- output_dir : str
- Directory in which to save derivatives
- output_spaces : OrderedDict
- Ordered dictionary where keys are TemplateFlow ID strings (e.g., ``MNI152Lin``,
- ``MNI152NLin6Asym``, ``MNI152NLin2009cAsym``, or ``fsLR``), strings designating
- nonstandard references (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.),
- or paths pointing to custom templates organized in a TemplateFlow-like structure.
- Values of the dictionary aggregate modifiers (e.g., the value for the key ``MNI152Lin``
- could be ``{'resolution': 2}`` if one wants the resampling to be done on the 2mm
- resolution version of the selected template).
- run_uuid : str
- Unique identifier for execution instance
- skull_strip_template : tuple
- Name of target template for brain extraction with ANTs' ``antsBrainExtraction``,
- and corresponding dictionary of output-space modifiers.
- skull_strip_fixed_seed : bool
- Do not use a random seed for skull-stripping - will ensure
- run-to-run replicability when used with --omp-nthreads 1
- subject_list : list
- List of subject labels
- use_syn : bool
- **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
- If fieldmaps are present and enabled, this is not run, by default.
- work_dir : str
- Directory in which to store workflow execution state and temporary files
-
- """
- dmriprep_wf = Workflow(name='dmriprep_wf')
- dmriprep_wf.base_dir = work_dir
-
- if freesurfer:
- fsdir = pe.Node(
- BIDSFreeSurferDir(
- derivatives=output_dir,
- freesurfer_home=os.getenv('FREESURFER_HOME'),
- spaces=[s for s in output_spaces.keys() if s.startswith('fsaverage')] + [
- 'fsnative'] * ('fsnative' in output_spaces)),
- name='fsdir_run_' + run_uuid.replace('-', '_'), run_without_submitting=True)
-
- reportlets_dir = os.path.join(work_dir, 'reportlets')
- for subject_id in subject_list:
- single_subject_wf = init_single_subject_wf(
- anat_only=anat_only,
- debug=debug,
- force_syn=force_syn,
- freesurfer=freesurfer,
- hires=hires,
- ignore=ignore,
- layout=layout,
- longitudinal=longitudinal,
- low_mem=low_mem,
- name="single_subject_" + subject_id + "_wf",
- omp_nthreads=omp_nthreads,
- output_dir=output_dir,
- output_spaces=output_spaces,
- reportlets_dir=reportlets_dir,
- skull_strip_fixed_seed=skull_strip_fixed_seed,
- skull_strip_template=skull_strip_template,
- subject_id=subject_id,
- use_syn=use_syn,
- )
-
- single_subject_wf.config['execution']['crashdump_dir'] = (
- os.path.join(output_dir, "dmriprep", "sub-" + subject_id, 'log', run_uuid)
- )
- for node in single_subject_wf._get_all_nodes():
- node.config = deepcopy(single_subject_wf.config)
- if freesurfer:
- dmriprep_wf.connect(fsdir, 'subjects_dir',
- single_subject_wf, 'inputnode.subjects_dir')
- else:
- dmriprep_wf.add_nodes([single_subject_wf])
-
- return dmriprep_wf
-
-
-def init_single_subject_wf(
- anat_only,
- debug,
- force_syn,
- freesurfer,
- hires,
- ignore,
- layout,
- longitudinal,
- low_mem,
- name,
- omp_nthreads,
- output_dir,
- output_spaces,
- reportlets_dir,
- skull_strip_fixed_seed,
- skull_strip_template,
- subject_id,
- use_syn,
-):
- """
- Set-up the preprocessing pipeline for a single subject.
-
- It collects and reports information about the subject, and prepares
- sub-workflows to perform anatomical and diffusion MRI preprocessing.
-
- Anatomical preprocessing is performed in a single workflow, regardless of
- the number of sessions.
- Diffusion MRI preprocessing is performed using a separate workflow for each
- individual DWI series.
-
- .. workflow::
- :graph2use: orig
- :simple_form: yes
-
- from dmriprep.workflows.base import init_single_subject_wf
- from collections import namedtuple, OrderedDict
- BIDSLayout = namedtuple('BIDSLayout', ['root'])
- wf = init_single_subject_wf(
- anat_only=False,
- debug=False,
- force_syn=True,
- freesurfer=True,
- hires=True,
- ignore=[],
- layout=BIDSLayout('.'),
- longitudinal=False,
- low_mem=False,
- name='single_subject_wf',
- omp_nthreads=1,
- output_dir='.',
- output_spaces=OrderedDict([
- ('MNI152Lin', {}), ('fsaverage', {'density': '10k'}),
- ('T1w', {}), ('fsnative', {})]),
- reportlets_dir='.',
- skull_strip_fixed_seed=False,
- skull_strip_template=('OASIS30ANTs', {}),
- subject_id='test',
- use_syn=True,
- )
-
-
- Parameters
- ----------
- anat_only : bool
- Disable diffusion MRI workflows
- debug : bool
- Enable debugging outputs
- force_syn : bool
- **Temporary**: Always run SyN-based SDC
- freesurfer : bool
- Enable FreeSurfer surface reconstruction (may increase runtime)
- hires : bool
- Enable sub-millimeter preprocessing in FreeSurfer
- ignore : list
- Preprocessing steps to skip (may include "slicetiming", "fieldmaps")
- layout : BIDSLayout object
- BIDS dataset layout
- longitudinal : bool
- Treat multiple sessions as longitudinal (may increase runtime)
- See sub-workflows for specific differences
- low_mem : bool
- Write uncompressed .nii files in some cases to reduce memory usage
- name : str
- Name of workflow
- omp_nthreads : int
- Maximum number of threads an individual process may use
- output_dir : str
- Directory in which to save derivatives
- output_spaces : OrderedDict
- Ordered dictionary where keys are TemplateFlow ID strings (e.g., ``MNI152Lin``,
- ``MNI152NLin6Asym``, ``MNI152NLin2009cAsym``, or ``fsLR``), strings designating
- nonstandard references (e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.),
- or paths pointing to custom templates organized in a TemplateFlow-like structure.
- Values of the dictionary aggregate modifiers (e.g., the value for the key ``MNI152Lin``
- could be ``{'resolution': 2}`` if one wants the resampling to be done on the 2mm
- resolution version of the selected template).
- reportlets_dir : str
- Directory in which to save reportlets
- skull_strip_fixed_seed : bool
- Do not use a random seed for skull-stripping - will ensure
- run-to-run replicability when used with --omp-nthreads 1
- skull_strip_template : tuple
- Name of target template for brain extraction with ANTs' ``antsBrainExtraction``,
- and corresponding dictionary of output-space modifiers.
- subject_id : str
- Subject label
- use_syn : bool
- **Experimental**: Enable ANTs SyN-based susceptibility distortion correction (SDC).
- If fieldmaps are present and enabled, this is not run, by default.
-
-
- Inputs
-
- subjects_dir
- FreeSurfer SUBJECTS_DIR
-
- """
- from ..config import NONSTANDARD_REFERENCES
- if name in ('single_subject_wf', 'single_subject_dmripreptest_wf'):
- # for documentation purposes
- subject_data = {
- 't1w': ['/completely/made/up/path/sub-01_T1w.nii.gz'],
- 'dwi': ['/completely/made/up/path/sub-01_dwi.nii.gz']
- }
- else:
- subject_data = collect_data(layout, subject_id)[0]
-
- # Make sure we always go through these two checks
- if not anat_only and subject_data['dwi'] == []:
- raise Exception("No DWI data found for participant {}. "
- "All workflows require DWI images.".format(subject_id))
-
- if not subject_data['t1w']:
- raise Exception("No T1w images found for participant {}. "
- "All workflows require T1w images.".format(subject_id))
-
- workflow = Workflow(name=name)
- workflow.__desc__ = """
-Results included in this manuscript come from preprocessing
-performed using *dMRIPrep* {dmriprep_ver}
-(@dmriprep; RRID:SCR_017412),
-which is based on *Nipype* {nipype_ver}
-(@nipype1; @nipype2; RRID:SCR_002502).
-
-""".format(dmriprep_ver=__version__, nipype_ver=nipype_ver)
- workflow.__postdesc__ = """
-
-Many internal operations of *dMRIPrep* use
-*Nilearn* {nilearn_ver} [@nilearn, RRID:SCR_001362],
-mostly within the diffusion MRI processing workflow.
-For more details of the pipeline, see [the section corresponding
-to workflows in *dMRIPrep*'s documentation]\
-(https://dmriprep.readthedocs.io/en/latest/workflows.html \
-"dMRIPrep's documentation").
-
-
-### Copyright Waiver
-
-The above boilerplate text was automatically generated by dMRIPrep
-with the express intention that users should copy and paste this
-text into their manuscripts *unchanged*.
-It is released under the [CC0]\
-(https://creativecommons.org/publicdomain/zero/1.0/) license.
-
-### References
-
-""".format(nilearn_ver=nilearn_ver)
-
- # Filter out standard spaces to a separate dict
- std_spaces = OrderedDict([
- (key, modifiers) for key, modifiers in output_spaces.items()
- if key not in NONSTANDARD_REFERENCES])
-
- inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']),
- name='inputnode')
-
- bidssrc = pe.Node(BIDSDataGrabber(subject_data=subject_data, anat_only=anat_only),
- name='bidssrc')
-
- bids_info = pe.Node(BIDSInfo(
- bids_dir=layout.root, bids_validate=False), name='bids_info')
-
- summary = pe.Node(SubjectSummary(
- std_spaces=list(std_spaces.keys()),
- nstd_spaces=list(set(NONSTANDARD_REFERENCES).intersection(output_spaces.keys()))),
- name='summary', run_without_submitting=True)
-
- about = pe.Node(AboutSummary(version=__version__,
- command=' '.join(sys.argv)),
- name='about', run_without_submitting=True)
-
- ds_report_summary = pe.Node(
- DerivativesDataSink(base_directory=reportlets_dir,
- desc='summary', keep_dtype=True),
- name='ds_report_summary', run_without_submitting=True)
-
- ds_report_about = pe.Node(
- DerivativesDataSink(base_directory=reportlets_dir,
- desc='about', keep_dtype=True),
- name='ds_report_about', run_without_submitting=True)
-
- # Preprocessing of T1w (includes registration to MNI)
- anat_preproc_wf = init_anat_preproc_wf(
- bids_root=layout.root,
- debug=debug,
- freesurfer=freesurfer,
- hires=hires,
- longitudinal=longitudinal,
- name="anat_preproc_wf",
- num_t1w=len(subject_data['t1w']),
- omp_nthreads=omp_nthreads,
- output_dir=output_dir,
- output_spaces=std_spaces,
- reportlets_dir=reportlets_dir,
- skull_strip_fixed_seed=skull_strip_fixed_seed,
- skull_strip_template=skull_strip_template,
- )
-
- workflow.connect([
- (inputnode, anat_preproc_wf, [('subjects_dir', 'inputnode.subjects_dir')]),
- (bidssrc, bids_info, [(('t1w', fix_multi_T1w_source_name), 'in_file')]),
- (inputnode, summary, [('subjects_dir', 'subjects_dir')]),
- (bidssrc, summary, [('t1w', 't1w'),
- ('t2w', 't2w'),
- ('dwi', 'dwi')]),
- (bids_info, summary, [('subject', 'subject_id')]),
- (bids_info, anat_preproc_wf, [(('subject', _prefix), 'inputnode.subject_id')]),
- (bidssrc, anat_preproc_wf, [('t1w', 'inputnode.t1w'),
- ('t2w', 'inputnode.t2w'),
- ('roi', 'inputnode.roi'),
- ('flair', 'inputnode.flair')]),
- (bidssrc, ds_report_summary, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
- (summary, ds_report_summary, [('out_report', 'in_file')]),
- (bidssrc, ds_report_about, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
- (about, ds_report_about, [('out_report', 'in_file')]),
- ])
-
- # Overwrite ``out_path_base`` of smriprep's DataSinks
- for node in workflow.list_node_names():
- if node.split('.')[-1].startswith('ds_'):
- workflow.get_node(node).interface.out_path_base = 'dmriprep'
-
- if anat_only:
- return workflow
-
- # for dwi_file in subject_data['dwi']:
- # dwi_preproc_wf = init_dwi_preproc_wf(
- # aroma_melodic_dim=aroma_melodic_dim,
- # bold2t1w_dof=bold2t1w_dof,
- # bold_file=bold_file,
- # cifti_output=cifti_output,
- # debug=debug,
- # dummy_scans=dummy_scans,
- # err_on_aroma_warn=err_on_aroma_warn,
- # fmap_bspline=fmap_bspline,
- # fmap_demean=fmap_demean,
- # force_syn=force_syn,
- # freesurfer=freesurfer,
- # ignore=ignore,
- # layout=layout,
- # low_mem=low_mem,
- # medial_surface_nan=medial_surface_nan,
- # num_bold=len(subject_data['bold']),
- # omp_nthreads=omp_nthreads,
- # output_dir=output_dir,
- # output_spaces=output_spaces,
- # reportlets_dir=reportlets_dir,
- # regressors_all_comps=regressors_all_comps,
- # regressors_fd_th=regressors_fd_th,
- # regressors_dvars_th=regressors_dvars_th,
- # t2s_coreg=t2s_coreg,
- # use_aroma=use_aroma,
- # use_syn=use_syn,
- # )
-
- # workflow.connect([
- # (anat_preproc_wf, dwi_preproc_wf,
- # [(('outputnode.t1_preproc', _pop), 'inputnode.t1_preproc'),
- # ('outputnode.t1_brain', 'inputnode.t1_brain'),
- # ('outputnode.t1_mask', 'inputnode.t1_mask'),
- # ('outputnode.t1_seg', 'inputnode.t1_seg'),
- # ('outputnode.t1_aseg', 'inputnode.t1_aseg'),
- # ('outputnode.t1_aparc', 'inputnode.t1_aparc'),
- # ('outputnode.t1_tpms', 'inputnode.t1_tpms'),
- # ('outputnode.template', 'inputnode.template'),
- # ('outputnode.forward_transform', 'inputnode.anat2std_xfm'),
- # ('outputnode.reverse_transform', 'inputnode.std2anat_xfm'),
- # ('outputnode.joint_template', 'inputnode.joint_template'),
- # ('outputnode.joint_forward_transform', 'inputnode.joint_anat2std_xfm'),
- # ('outputnode.joint_reverse_transform', 'inputnode.joint_std2anat_xfm'),
- # # Undefined if --no-freesurfer, but this is safe
- # ('outputnode.subjects_dir', 'inputnode.subjects_dir'),
- # ('outputnode.subject_id', 'inputnode.subject_id'),
- # ('outputnode.t1_2_fsnative_forward_transform',
- # 'inputnode.t1_2_fsnative_forward_transform'),
- # ('outputnode.t1_2_fsnative_reverse_transform',
- # 'inputnode.t1_2_fsnative_reverse_transform')]),
- # ])
-
- return workflow
-
-
-def _prefix(subid):
- if subid.startswith('sub-'):
- return subid
- return '-'.join(('sub', subid))
-
-
-def _pop(inlist):
- if isinstance(inlist, (list, tuple)):
- return inlist[0]
- return inlist
diff --git a/dmriprep/workflows/dwi/__init__.py b/dmriprep/workflows/dwi/__init__.py
index e69de29b..a55fa8c8 100644
--- a/dmriprep/workflows/dwi/__init__.py
+++ b/dmriprep/workflows/dwi/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+"""
+
+Pre-processing dMRI workflows
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. automodule:: dmriprep.workflows.dwi.base
+.. automodule:: dmriprep.workflows.dwi.util
+
+"""
+
+from .base import init_dwi_preproc_wf
+from .util import init_dwi_concat_wf
+
+__all__ = [
+ 'init_dwi_preproc_wf',
+ 'init_dwi_concat_wf'
+]
diff --git a/dmriprep/workflows/dwi/base.py b/dmriprep/workflows/dwi/base.py
new file mode 100755
index 00000000..e7b31899
--- /dev/null
+++ b/dmriprep/workflows/dwi/base.py
@@ -0,0 +1,1089 @@
+# -*- coding: utf-8 -*-
+import os
+
+
+def init_dwi_preproc_wf(
+ sub,
+ ses,
+ dwi_file,
+ fbval,
+ fbvec,
+ metadata,
+ out_dir,
+ sdc_method,
+ denoise_strategy,
+ vox_size,
+ outlier_thresh,
+ omp_nthreads,
+ eddy_mem_gb
+):
+ import json
+ import shutil
+ from numba import cuda
+ import pkg_resources
+ from nipype.pipeline import engine as pe
+ from nipype.interfaces import utility as niu
+ from nipype.interfaces import fsl
+ from nipype.algorithms.rapidart import ArtifactDetect
+ from dmriprep.interfaces import fsl_extensions
+ from dmriprep.utils import core, qc
+ from shutil import which
+
+ import_list = [
+ "import sys",
+ "import os",
+ "import numpy as np",
+ "import nibabel as nib",
+ "import warnings",
+ 'warnings.filterwarnings("ignore")',
+ ]
+
+ subdir = os.path.join(out_dir, sub)
+ if not os.path.isdir(subdir):
+ os.mkdir(subdir)
+ sesdir = os.path.join(out_dir, sub, "ses-%s" % ses)
+ if not os.path.isdir(sesdir):
+ os.mkdir(sesdir)
+
+ eddy_cfg_file = pkg_resources.resource_filename('dmriprep.config', "eddy_params.json")
+
+ # Create dictionary of eddy args
+ with open(eddy_cfg_file, "r") as f:
+ eddy_args = json.load(f)
+
+ wf = pe.Workflow(name="single_subject_dmri_" + str(ses))
+ wf.base_dir = sesdir
+ inputnode = pe.Node(
+ niu.IdentityInterface(
+ fields=[
+ "dwi_file",
+ "fbvec",
+ "fbval",
+ "metadata",
+ "sub",
+ "ses",
+ "sesdir",
+ "denoise_strategy",
+ "vox_size",
+ "outlier_thresh",
+ "eddy_cfg_file",
+ "omp_nthreads"
+ ]
+ ),
+ name="inputnode",
+ )
+ inputnode.inputs.dwi_file = dwi_file
+ inputnode.inputs.fbvec = fbvec
+ inputnode.inputs.fbval = fbval
+ inputnode.inputs.metadata = metadata
+ inputnode.inputs.sub = sub
+ inputnode.inputs.ses = ses
+ inputnode.inputs.sesdir = sesdir
+ inputnode.inputs.denoise_strategy = denoise_strategy
+ inputnode.inputs.vox_size = vox_size
+ inputnode.inputs.outlier_thresh = outlier_thresh
+ inputnode.inputs.eddy_cfg_file = eddy_cfg_file
+ inputnode.inputs.omp_nthreads = omp_nthreads
+
+ check_orient_and_dims_dwi_node = pe.Node(
+ niu.Function(
+ input_names=["infile", "vox_size", "bvecs", "outdir"],
+ output_names=["outfile", "bvecs"],
+ function=qc.check_orient_and_dims,
+ imports=import_list,
+ ),
+ name="check_orient_and_dims_dwi_node",
+ )
+ check_orient_and_dims_dwi_node._mem_gb = 1
+
+ # Make gtab and get b0 indices
+ correct_vecs_and_make_b0s_node = pe.Node(
+ niu.Function(
+ input_names=["fbval", "fbvec", "dwi_file", "sesdir"],
+ output_names=["initial_mean_b0", "gtab_file", "b0_vols", "b0s", "slm"],
+ function=core.correct_vecs_and_make_b0s,
+ imports=import_list,
+ ),
+ name="correct_vecs_and_make_b0s",
+ )
+ correct_vecs_and_make_b0s_node._mem_gb = 1
+
+ btr_node_premoco = pe.Node(fsl.BET(), name="bet_pre_moco")
+ btr_node_premoco.inputs.mask = True
+ btr_node_premoco.inputs.frac = 0.2
+ btr_node_premoco._mem_gb = 1
+
+ apply_mask_premoco_node = pe.Node(fsl.ApplyMask(), name="apply_mask_pre_moco")
+ apply_mask_premoco_node._mem_gb = 1
+
+ # Detect and remove motion outliers
+ fsl_split_node = pe.Node(fsl.Split(dimension="t"), name="fsl_split")
+ fsl_split_node._mem_gb = 1
+
+ coreg = pe.MapNode(
+ fsl.FLIRT(no_search=True, interp="spline", padding_size=1, dof=6),
+ name="coregistration",
+ iterfield=["in_file"],
+ )
+ coreg._mem_gb = 1
+
+ get_motion_params_node = pe.Node(
+ niu.Function(
+ input_names=["flirt_mats"],
+ output_names=["motion_params"],
+ function=core.get_flirt_motion_parameters,
+ imports=import_list,
+ ),
+ name="get_motion_params",
+ )
+ get_motion_params_node._mem_gb = 0.5
+
+ fsl_merge_node = pe.Node(fsl.Merge(dimension="t"), name="fsl_merge")
+ fsl_merge_node._mem_gb = 2
+
+ art_node = pe.Node(interface=ArtifactDetect(), name="art")
+ art_node.inputs.use_differences = [True, True]
+ art_node.inputs.save_plot = False
+ art_node.inputs.use_norm = True
+ # scan-to-scan head-motion composite changes
+ art_node.inputs.norm_threshold = 3
+ # z-score scan-to-scan global signal changes
+ art_node.inputs.zintensity_threshold = 9
+ art_node.inputs.mask_type = "spm_global"
+ art_node.inputs.parameter_source = "FSL"
+ art_node._mem_gb = 1
+
+ drop_outliers_fn_node = pe.Node(
+ niu.Function(
+ input_names=["in_file", "in_bval", "in_bvec", "drop_scans", "in_sigma"],
+ output_names=["out_file", "out_bval", "out_bvec", "out_sigma"],
+ function=core.drop_outliers_fn,
+ imports=import_list,
+ ),
+ name="drop_outliers_fn",
+ )
+ drop_outliers_fn_node._mem_gb = 1
+
+ make_gtab_node = pe.Node(
+ niu.Function(
+ input_names=["fbval", "fbvec", "sesdir", "final"],
+ output_names=["gtab_file", "gtab", "final_bval_path", "final_bvec_path"],
+ function=core.make_gtab,
+ imports=import_list,
+ ),
+ name="make_gtab",
+ )
+ make_gtab_node.inputs.final = False
+ make_gtab_node._mem_gb = 1
+
+ estimate_noise_node = pe.Node(
+ niu.Function(
+ input_names=["in_file", "gtab_file", "mask", "denoise_strategy"],
+ output_names=["sigma_path"],
+ function=core.estimate_sigma,
+ imports=import_list,
+ ),
+ name="estimate_noise",
+ )
+ estimate_noise_node._mem_gb = omp_nthreads*2
+ estimate_noise_node.n_procs = omp_nthreads
+
+ # Suppress gibbs ringing
+ suppress_gibbs_node = pe.Node(
+ niu.Function(
+ input_names=["in_file", "sesdir"],
+ output_names=["gibbs_free_file"],
+ function=core.suppress_gibbs,
+ imports=import_list,
+ ),
+ name="suppress_gibbs",
+ )
+ suppress_gibbs_node._mem_gb = 12
+ suppress_gibbs_node.n_procs = 6
+
+ extract_metadata_node = pe.Node(
+ niu.Function(
+ input_names=["metadata"],
+ output_names=["spec_acqps", "vol_legend"],
+ function=core.extract_metadata,
+ imports=import_list,
+ ),
+ name="extract_metadata",
+ )
+ extract_metadata_node._mem_gb = 0.5
+
+ # Gather TOPUP/EDDY inputs
+ check_shelled_node = pe.Node(
+ niu.Function(
+ input_names=["gtab_file"],
+ output_names=["check_shelled"],
+ function=core.check_shelled,
+ imports=import_list,
+ ),
+ name="check_shelled",
+ )
+ check_shelled_node._mem_gb = 0.5
+
+ get_topup_inputs_node = pe.Node(
+ niu.Function(
+ input_names=["dwi_file", "sesdir", "spec_acqp", "b0_vols", "b0s", "vol_legend"],
+ output_names=[
+ "datain_file",
+ "imain_output",
+ "example_b0",
+ "datain_lines",
+ "topup_config",
+ "susceptibility_args"
+ ],
+ function=core.topup_inputs_from_dwi_files,
+ imports=import_list,
+ ),
+ name="get_topup_inputs",
+ )
+ get_topup_inputs_node._mem_gb = 0.5
+
+ get_eddy_inputs_node = pe.Node(
+ niu.Function(
+ input_names=["sesdir", "gtab_file"],
+ output_names=["index_file"],
+ function=core.eddy_inputs_from_dwi_files,
+ imports=import_list,
+ ),
+ name="get_eddy_inputs",
+ )
+ get_eddy_inputs_node._mem_gb = 0.5
+
+ # Run TOPUP
+ topup_node = pe.Node(fsl.TOPUP(), name="topup")
+ topup_node._mem_gb = 14
+ topup_node.n_procs = 8
+ topup_node.interface.mem_gb = 14
+ topup_node.interface.n_procs = 8
+
+ # Run BET on mean b0 of TOPUP-corrected output
+ make_mean_b0_node = pe.Node(
+ niu.Function(
+ input_names=["in_file"],
+ output_names=["mean_file_out"],
+ function=core.make_mean_b0,
+ imports=import_list,
+ ),
+ name="make_mean_b0",
+ )
+ btr_node = pe.Node(fsl.BET(), name="bet")
+ btr_node.inputs.mask = True
+ btr_node.inputs.frac = 0.2
+ btr_node._mem_gb = 1
+
+ # Run EDDY
+ eddy_node = pe.Node(fsl_extensions.ExtendedEddy(**eddy_args), name="eddy")
+ eddy_node.inputs.num_threads = omp_nthreads
+ eddy_node._mem_gb = eddy_mem_gb
+ eddy_node.n_procs = omp_nthreads
+ eddy_node.interface.mem_gb = eddy_mem_gb
+ eddy_node.interface.n_procs = omp_nthreads
+
+ make_basename_node = pe.Node(
+ niu.Function(
+ input_names=["out_corrected"],
+ output_names=["base_name"],
+ function=core.make_basename,
+ imports=import_list,
+ ),
+ name="make_basename",
+ )
+ make_basename_node._mem_gb = 0.5
+
+ # Handle the GPU case: enable CUDA-accelerated eddy if a CUDA device is detected
+ try:
+ if cuda.gpus:
+ eddy_node.inputs.use_cuda = True
+ except Exception:
+ eddy_node.inputs.use_cuda = False
+
+ eddy_quad = pe.Node(fsl.EddyQuad(verbose=True), name='eddy_quad')
+ eddy_quad.inputs.verbose = True
+ eddy_qc_outdir = sesdir + '/eddy_quad'
+ if os.path.isdir(eddy_qc_outdir):
+ shutil.rmtree(eddy_qc_outdir)
+ eddy_quad.inputs.output_dir = eddy_qc_outdir
+ eddy_quad._mem_gb = 1
+
+ make_gtab_node_final = pe.Node(
+ niu.Function(
+ input_names=["fbval", "fbvec", "sesdir", "final"],
+ output_names=["gtab_file", "gtab", "final_bval_path", "final_bvec_path"],
+ function=core.make_gtab,
+ imports=import_list,
+ ),
+ name="make_gtab_final",
+ )
+ make_gtab_node_final.inputs.final = True
+ make_gtab_node_final._mem_gb = 1
+
+ apply_mask_node = pe.Node(fsl.ApplyMask(), name="apply_mask")
+ apply_mask_node._mem_gb = 1
+
+ id_outliers_from_eddy_report_node = pe.Node(
+ niu.Function(
+ input_names=["outlier_report", "threshold", "dwi_file"],
+ output_names=["drop_scans", "outpath"],
+ function=core.id_outliers_fn,
+ imports=import_list,
+ ),
+ name="id_outliers_from_eddy_report",
+ )
+ id_outliers_from_eddy_report_node._mem_gb = 1
+
+ drop_outliers_from_eddy_report_node = pe.Node(
+ niu.Function(
+ input_names=["in_file", "in_bval", "in_bvec", "drop_scans", "in_sigma"],
+ output_names=["out_file", "out_bval", "out_bvec", "out_sigma"],
+ function=core.drop_outliers_fn,
+ imports=import_list,
+ ),
+ name="drop_outliers_from_eddy_report",
+ )
+ drop_outliers_from_eddy_report_node._mem_gb = 1
+
+ denoise_node = pe.Node(
+ niu.Function(
+ input_names=["in_file", "sesdir", "gtab_file", "mask", "denoise_strategy", "sigma_path", "omp_nthreads"],
+ output_names=["denoised_file"],
+ function=core.denoise,
+ imports=import_list,
+ ),
+ name="denoise",
+ )
+ if denoise_strategy == 'nlsam':
+ denoise_node._mem_gb = omp_nthreads*6
+ else:
+ denoise_node._mem_gb = omp_nthreads*4
+ denoise_node.n_procs = omp_nthreads
+
+ # N4 bias-field correction requires the ANTs binaries to be available on the PATH
+ if which('N4BiasFieldCorrection') is not None:
+ no_ants = False
+ else:
+ print('Warning: N4BiasFieldCorrection not found on PATH; bias-field correction will be skipped.')
+ no_ants = True
+
+ rename_final_preprocessed_file_node = pe.Node(
+ niu.Function(
+ input_names=["in_file", "sesdir"],
+ output_names=["out_file"],
+ function=core.rename_final_preprocessed_file,
+ imports=import_list,
+ ),
+ name="rename_final_preprocessed_file",
+ )
+
+ outputnode = pe.Node(
+ niu.IdentityInterface(fields=["preprocessed_data", "final_bvec", "final_bval",
+ "out_eddy_quad_pdf", "out_eddy_quad_json"]),
+ name="outputnode",
+ )
+
+ wf.connect([(inputnode, check_orient_and_dims_dwi_node, [("fbvec", "bvecs"),
+ ("dwi_file", "infile"),
+ ("vox_size", "vox_size"),
+ ("sesdir", "outdir")]),
+ (inputnode, correct_vecs_and_make_b0s_node, [("fbval", "fbval"),
+ ("sesdir", "sesdir")]),
+ (check_orient_and_dims_dwi_node, correct_vecs_and_make_b0s_node, [("bvecs", "fbvec"),
+ ("outfile", "dwi_file")]),
+ (correct_vecs_and_make_b0s_node, btr_node_premoco, [("initial_mean_b0", "in_file")]),
+ (btr_node_premoco, apply_mask_premoco_node, [("mask_file", "mask_file")]),
+ (check_orient_and_dims_dwi_node, apply_mask_premoco_node, [("outfile", "in_file")]),
+ (apply_mask_premoco_node, fsl_split_node, [("out_file", "in_file")]),
+ (correct_vecs_and_make_b0s_node, coreg, [("initial_mean_b0", "reference")]),
+ (fsl_split_node, coreg, [("out_files", "in_file")]),
+ (coreg, get_motion_params_node, [("out_matrix_file", "flirt_mats")]),
+ (coreg, fsl_merge_node, [("out_file", "in_files")]),
+ (get_motion_params_node, art_node, [("motion_params", "realignment_parameters")]),
+ (fsl_merge_node, art_node, [("merged_file", "realigned_files")]),
+ (check_orient_and_dims_dwi_node, drop_outliers_fn_node, [("bvecs", "in_bvec"),
+ ("outfile", "in_file")]),
+ (inputnode, drop_outliers_fn_node, [("fbval", "in_bval")]),
+ (art_node, drop_outliers_fn_node, [("outlier_files", "drop_scans")]),
+ (inputnode, estimate_noise_node, [("denoise_strategy", "denoise_strategy")]),
+ (drop_outliers_fn_node, make_gtab_node, [("out_bvec", "fbvec"),
+ ("out_bval", "fbval")]),
+ (inputnode, make_gtab_node, [("sesdir", "sesdir")]),
+ (drop_outliers_fn_node, estimate_noise_node, [("out_file", "in_file")]),
+ (btr_node_premoco, estimate_noise_node, [("mask_file", "mask")]),
+ (make_gtab_node, estimate_noise_node, [("gtab_file", "gtab_file")]),
+ (inputnode, suppress_gibbs_node, [("sesdir", "sesdir")]),
+ (drop_outliers_fn_node, suppress_gibbs_node, [("out_file", "in_file")]),
+ (suppress_gibbs_node, eddy_node, [("gibbs_free_file", "in_file")]),
+ (inputnode, extract_metadata_node, [("metadata", "metadata")]),
+ (make_gtab_node, check_shelled_node, [("gtab_file", "gtab_file")]),
+ (inputnode, get_eddy_inputs_node, [("sesdir", "sesdir")]),
+ (make_gtab_node, get_eddy_inputs_node, [("gtab_file", "gtab_file")]),
+ (extract_metadata_node, get_eddy_inputs_node, [("spec_acqps", "spec_acqp")]),
+ (make_mean_b0_node, btr_node, [("mean_file_out", "in_file")]),
+ (check_shelled_node, eddy_node, [("check_shelled", "is_shelled")]),
+ (correct_vecs_and_make_b0s_node, eddy_node, [("slm", "slm")]),
+ (btr_node, eddy_node, [("mask_file", "in_mask")]),
+ (get_eddy_inputs_node, eddy_node, [("index_file", "in_index")]),
+ (drop_outliers_fn_node, eddy_node, [("out_bval", "in_bval"),
+ ("out_bvec", "in_bvec")]),
+ (eddy_node, id_outliers_from_eddy_report_node, [("out_corrected", "dwi_file"),
+ ("out_outlier_report", "outlier_report")]),
+ (inputnode, id_outliers_from_eddy_report_node, [("outlier_thresh", "threshold")]),
+ (id_outliers_from_eddy_report_node, drop_outliers_from_eddy_report_node, [("drop_scans", "drop_scans")]),
+ (eddy_node, drop_outliers_from_eddy_report_node, [("out_corrected", "in_file"),
+ ("out_rotated_bvecs", "in_bvec")]),
+ (estimate_noise_node, drop_outliers_from_eddy_report_node, [("sigma_path", "in_sigma")]),
+ (drop_outliers_fn_node, drop_outliers_from_eddy_report_node, [("out_bval", "in_bval")]),
+ (drop_outliers_from_eddy_report_node, apply_mask_node, [("out_file", "in_file")]),
+ (btr_node, apply_mask_node, [("mask_file", "mask_file")]),
+ (inputnode, denoise_node, [("sesdir", "sesdir"),
+ ('omp_nthreads', 'omp_nthreads'),
+ ("denoise_strategy", "denoise_strategy")]),
+ (drop_outliers_from_eddy_report_node, denoise_node, [("out_sigma", "sigma_path")]),
+ (btr_node, denoise_node, [("mask_file", "mask")]),
+ (make_gtab_node_final, denoise_node, [("gtab_file", "gtab_file")]),
+ (apply_mask_node, denoise_node, [("out_file", "in_file")]),
+ (drop_outliers_from_eddy_report_node, make_gtab_node_final, [("out_bvec", "fbvec")]),
+ (drop_outliers_from_eddy_report_node, make_gtab_node_final, [("out_bval", "fbval")]),
+ (inputnode, make_gtab_node_final, [("sesdir", "sesdir")]),
+ (make_gtab_node_final, outputnode, [("final_bvec_path", "final_bvec")]),
+ (inputnode, rename_final_preprocessed_file_node, [("sesdir", "sesdir")]),
+ (eddy_node, eddy_quad, [("out_rotated_bvecs", "bvec_file")]),
+ (eddy_node, make_basename_node, [("out_corrected", "out_corrected")]),
+ (make_basename_node, eddy_quad, [("base_name", "base_name")]),
+ (drop_outliers_fn_node, eddy_quad, [("out_bval", "bval_file")]),
+ (make_gtab_node_final, outputnode, [("final_bval_path", "final_bval")]),
+ (btr_node, eddy_quad, [("mask_file", "mask_file")]),
+ (get_eddy_inputs_node, eddy_quad, [("index_file", "idx_file")]),
+ (eddy_quad, outputnode, [('qc_json', 'out_eddy_quad_json'),
+ ('qc_pdf', 'out_eddy_quad_pdf')])
+ ])
+
+ if no_ants is True:
+ wf.connect([
+ (denoise_node, rename_final_preprocessed_file_node, [("denoised_file", "in_file")]),
+ (rename_final_preprocessed_file_node, outputnode, [("out_file", "preprocessed_data")])
+ ])
+ else:
+ from nipype.interfaces import ants
+ n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3, save_bias=True,
+ bspline_fitting_distance=600, bspline_order=4,
+ n_iterations=[50, 50, 40, 30], shrink_factor=2,
+ convergence_threshold=1e-6),
+ name='Bias_b0')
+ n4.inputs.num_threads = omp_nthreads
+ n4.n_procs = omp_nthreads
+ n4._mem_gb = omp_nthreads*4
+ n4.interface.n_procs = omp_nthreads
+ n4.interface.mem_gb = omp_nthreads*4
+
+ split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs')
+ split._mem_gb = 1
+ mult = pe.MapNode(fsl.MultiImageMaths(op_string='-div %s'), iterfield=['in_file'],
+ name='RemoveBiasOfDWIs')
+ mult._mem_gb = 1
+ thres = pe.MapNode(fsl.Threshold(thresh=0.0), iterfield=['in_file'], name='RemoveNegative')
+ thres._mem_gb = 1
+ merge = pe.Node(fsl.utils.Merge(dimension='t'), name='MergeDWIs')
+ merge._mem_gb = 1
+
+ wf.connect([
+ (denoise_node, split, [("denoised_file", "in_file")]),
+ (make_mean_b0_node, n4, [("mean_file_out", 'input_image')]),
+ (btr_node, n4, [("mask_file", 'mask_image')]),
+ (n4, mult, [('bias_image', 'operand_files')]),
+ (split, mult, [('out_files', 'in_file')]),
+ (mult, thres, [('out_file', 'in_file')]),
+ (thres, merge, [('out_file', 'in_files')]),
+ (merge, rename_final_preprocessed_file_node, [('merged_file', "in_file")]),
+ (rename_final_preprocessed_file_node, outputnode, [("out_file", "preprocessed_data")])
+ ])
+
+ # TODO: Flesh out the sdc-fieldmap option
+ # if sdc_method == 'fieldmap':
+ # wf.connect([
+ # (inputnode, eddy_node, [('fieldmap_file', 'field')])
+ # ])
+
+ if sdc_method == 'topup':
+ wf.connect([
+ (drop_outliers_fn_node, get_topup_inputs_node, [("out_file", "dwi_file")]),
+ (inputnode, get_topup_inputs_node, [("sesdir", "sesdir")]),
+ (correct_vecs_and_make_b0s_node, get_topup_inputs_node, [("b0_vols", "b0_vols"),
+ ("b0s", "b0s")]),
+ (extract_metadata_node, get_topup_inputs_node, [("spec_acqps", "spec_acqp"),
+ ("vol_legend", "vol_legend")]),
+ (get_topup_inputs_node, topup_node, [("datain_file", "encoding_file"),
+ ("imain_output", "in_file"),
+ ("topup_config", "config")]),
+ (topup_node, make_mean_b0_node, [("out_corrected", "in_file")]),
+ (topup_node, eddy_node, [("out_movpar", "in_topup_movpar"),
+ ("out_fieldcoef", "in_topup_fieldcoef")]),
+ (get_topup_inputs_node, eddy_node, [("datain_file", "in_acqp"), ("susceptibility_args", "args")]),
+ (topup_node, eddy_quad, [("out_field", "field")]),
+ (get_topup_inputs_node, eddy_quad, [("datain_file", "param_file")]),
+ ])
+
+ return wf
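For orientation, ``eddy_params.json`` (read near the top of this workflow) is simply a JSON dictionary of keyword arguments that gets splatted into the eddy node as ``ExtendedEddy(**eddy_args)``. A hypothetical example, restricted to option names that exist on nipype's ``fsl.Eddy`` interface (these are not the packaged defaults):

```python
# Hypothetical eddy_params.json contents (illustrative only, not the shipped config).
# Every key must be a valid input of the eddy interface, since the dict is passed
# as **eddy_args when the node is constructed.
import json

eddy_args = {
    "flm": "quadratic",   # first-level eddy-current model
    "repol": True,        # replace outlier slices
    "cnr_maps": True,     # write contrast-to-noise maps
    "residuals": True,    # write residual maps
}
print(json.dumps(eddy_args, indent=2))
```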
+
+
+def init_base_wf(
+ bids_dict,
+ output_dir,
+ sdc_method,
+ denoise_strategy,
+ vox_size,
+ outlier_thresh,
+ omp_nthreads,
+ work_dir
+):
+ import os
+ import nibabel as nib
+ from nipype.pipeline import engine as pe
+ from nipype.interfaces import utility as niu
+ from dmriprep.workflows.dwi.base import init_dwi_preproc_wf, wf_multi_session
+ from dmriprep.workflows.dwi.util import init_dwi_concat_wf
+ from dmriprep.utils import core
+
+ participant = list(bids_dict.keys())[0]
+ sessions = list(bids_dict[participant].keys())
+
+ # Multiple sessions case
+ if len(sessions) > 1:
+ wf = wf_multi_session(bids_dict,
+ participant,
+ sessions,
+ output_dir,
+ sdc_method,
+ denoise_strategy,
+ vox_size,
+ outlier_thresh,
+ omp_nthreads,
+ work_dir)
+ else:
+ # Single session case
+ session = sessions[0]
+ if len(bids_dict[participant][session].keys()) == 1:
+ dwi_file = bids_dict[participant][session][1]['dwi_file']
+ fbvec = bids_dict[participant][session][1]['fbvec']
+ fbval = bids_dict[participant][session][1]['fbval']
+ metadata = bids_dict[participant][session][1]['metadata']
+
+ dwi_img = nib.load(dwi_file)
+ if vox_size == '2mm':
+ res_factor = dwi_img.header.get_zooms()[1]/2
+ elif vox_size == '1mm':
+ res_factor = dwi_img.header.get_zooms()[1]/1
+ else:
+ res_factor = 1
+ exp_bytes = res_factor * 24 * dwi_img.shape[0] * dwi_img.shape[1] * dwi_img.shape[2] * dwi_img.shape[3]
+ eddy_mem_gb = core.bytesto(exp_bytes, to='g', bsize=1024)
+
+ wf = init_dwi_preproc_wf(participant,
+ session,
+ dwi_file,
+ fbval,
+ fbvec,
+ metadata,
+ output_dir,
+ sdc_method,
+ denoise_strategy,
+ vox_size,
+ outlier_thresh,
+ omp_nthreads,
+ eddy_mem_gb)
+ else:
+ # Multiple runs case
+ dwi_files = []
+ fbvecs = []
+ fbvals = []
+ metadata_files = []
+ for acq in bids_dict[participant][session].keys():
+ dwi_files.append(bids_dict[participant][session][acq]['dwi_file'])
+ fbvecs.append(bids_dict[participant][session][acq]['fbvec'])
+ fbvals.append(bids_dict[participant][session][acq]['fbval'])
+ metadata_files.append(bids_dict[participant][session][acq]['metadata'])
+
+ wf_multi_run_name = "%s%s%s%s" % ('wf_multi_run_', participant, '_', session)
+ wf = pe.Workflow(name=wf_multi_run_name)
+ wf.base_dir = work_dir + '/' + wf_multi_run_name
+ if not os.path.isdir(wf.base_dir):
+ os.mkdir(wf.base_dir)
+
+ dwi_img = nib.load(dwi_files[0])
+ if vox_size == '2mm':
+ res_factor = dwi_img.header.get_zooms()[1]/2
+ elif vox_size == '1mm':
+ res_factor = dwi_img.header.get_zooms()[1]/1
+ else:
+ res_factor = 1
+ exp_bytes = res_factor * 24 * dwi_img.shape[0] * dwi_img.shape[1] * dwi_img.shape[2] * dwi_img.shape[3]
+ eddy_mem_gb = core.bytesto(exp_bytes, to='g', bsize=1024)
+
+ meta_inputnode = pe.Node(niu.IdentityInterface(fields=["dwi_files",
+ "fbvecs",
+ "fbvals",
+ "metadata_files",
+ "sub",
+ "ses",
+ "output_dir",
+ "sdc_method",
+ "denoise_strategy",
+ "vox_size",
+ "outlier_thresh",
+ "omp_nthreads",
+ "eddy_mem_gb"]),
+ name='meta_inputnode')
+
+ meta_inputnode.inputs.dwi_files = dwi_files
+ meta_inputnode.inputs.fbvecs = fbvecs
+ meta_inputnode.inputs.fbvals = fbvals
+ meta_inputnode.inputs.metadata_files = metadata_files
+ meta_inputnode.inputs.sub = participant
+ meta_inputnode.inputs.ses = session
+ meta_inputnode.inputs.output_dir = output_dir
+ meta_inputnode.inputs.sdc_method = sdc_method
+ meta_inputnode.inputs.denoise_strategy = denoise_strategy
+ meta_inputnode.inputs.vox_size = vox_size
+ meta_inputnode.inputs.outlier_thresh = outlier_thresh
+ meta_inputnode.inputs.omp_nthreads = omp_nthreads
+ meta_inputnode.inputs.eddy_mem_gb = eddy_mem_gb
+
+ wf_concat = init_dwi_concat_wf(dwi_files, fbvals, fbvecs, metadata_files, participant, session, output_dir)
+
+ wf_dwi_preproc = init_dwi_preproc_wf(participant,
+ session,
+ dwi_files[0],
+ fbvals[0],
+ fbvecs[0],
+ metadata_files[0],
+ output_dir,
+ sdc_method,
+ denoise_strategy,
+ vox_size,
+ outlier_thresh,
+ omp_nthreads,
+ eddy_mem_gb)
+
+ meta_outputnode = pe.Node(
+ niu.IdentityInterface(fields=["preprocessed_data", "final_bvec", "final_bval",
+ "out_eddy_quad_json", "out_eddy_quad_pdf"]),
+ name="meta_outputnode",
+ )
+ wf.add_nodes([meta_inputnode])
+ wf.add_nodes([wf_concat])
+ wf.add_nodes([wf_dwi_preproc])
+ wf.add_nodes([meta_outputnode])
+
+ wf.connect([(meta_inputnode, wf.get_node('dwi_concat_wf').get_node('inputnode'),
+ [('dwi_files', 'dwi_files'),
+ ('fbvecs', 'fbvecs'),
+ ('fbvals', 'fbvals'),
+ ('metadata_files', 'metadata_files'),
+ ('sub', 'participant'),
+ ('ses', 'session'),
+ ('output_dir', 'output_dir')]),
+ (wf.get_node('dwi_concat_wf').get_node('outputnode'), wf_dwi_preproc.get_node('inputnode'),
+ [('dwi_file', 'dwi_file'),
+ ('bvec_file', 'fbvec'),
+ ('bval_file', 'fbval'),
+ ('metadata_file', 'metadata')]),
+ (meta_inputnode, wf_dwi_preproc.get_node('inputnode'),
+ [('sub', 'participant'),
+ ('ses', 'session'),
+ ('output_dir', 'output_dir'),
+ ('sdc_method', 'sdc_method'),
+ ('denoise_strategy', 'denoise_strategy'),
+ ('vox_size', 'vox_size'),
+ ('outlier_thresh', 'outlier_thresh'),
+ ('omp_nthreads', 'omp_nthreads'),
+ ('eddy_mem_gb', 'eddy_mem_gb')]),
+ (wf_dwi_preproc.get_node('outputnode'), meta_outputnode,
+ [("final_bval", "final_bval"),
+ ("preprocessed_data", "preprocessed_data"),
+ ("final_bvec", "final_bvec"),
+ ("out_eddy_quad_json", "out_eddy_quad_json"),
+ ("out_eddy_quad_pdf", "out_eddy_quad_pdf")])
+ ])
+
+ wf.get_node(wf_dwi_preproc.name).get_node('check_orient_and_dims_dwi_node')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('correct_vecs_and_make_b0s')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('bet_pre_moco')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('bet_pre_moco').interface._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('apply_mask_pre_moco')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('apply_mask_pre_moco').interface._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('fsl_split')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('fsl_split').interface._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('coregistration')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('coregistration').interface._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('get_motion_params')._mem_gb = 0.5
+ wf.get_node(wf_dwi_preproc.name).get_node('fsl_merge')._mem_gb = 2
+ wf.get_node(wf_dwi_preproc.name).get_node('fsl_merge').interface._mem_gb = 2
+ wf.get_node(wf_dwi_preproc.name).get_node('art')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('art').interface._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('drop_outliers_fn')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('make_gtab')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('extract_metadata')._mem_gb = 0.5
+ wf.get_node(wf_dwi_preproc.name).get_node('check_shelled')._mem_gb = 0.5
+ wf.get_node(wf_dwi_preproc.name).get_node('get_topup_inputs')._mem_gb = 0.5
+ wf.get_node(wf_dwi_preproc.name).get_node('get_eddy_inputs')._mem_gb = 0.5
+ wf.get_node(wf_dwi_preproc.name).get_node('make_mean_b0')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('make_basename')._mem_gb = 0.5
+ wf.get_node(wf_dwi_preproc.name).get_node('eddy_quad')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('eddy_quad').interface._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('make_gtab_final')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('apply_mask')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('apply_mask').interface._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('id_outliers_from_eddy_report')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('drop_outliers_from_eddy_report')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('SplitDWIs')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('SplitDWIs').interface._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('RemoveBiasOfDWIs')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('RemoveBiasOfDWIs').interface._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('RemoveNegative')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('RemoveNegative').interface._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('MergeDWIs')._mem_gb = 1
+ wf.get_node(wf_dwi_preproc.name).get_node('MergeDWIs').interface._mem_gb = 1
+
+ wf.get_node(wf_dwi_preproc.name).get_node('topup')._n_procs = 8
+ wf.get_node(wf_dwi_preproc.name).get_node('topup')._mem_gb = 14
+ wf.get_node(wf_dwi_preproc.name).get_node('topup').interface.n_procs = 8
+ wf.get_node(wf_dwi_preproc.name).get_node('topup').interface.mem_gb = 14
+ wf.get_node(wf_dwi_preproc.name).get_node('Bias_b0')._n_procs = omp_nthreads
+ wf.get_node(wf_dwi_preproc.name).get_node('Bias_b0')._mem_gb = omp_nthreads*4
+ wf.get_node(wf_dwi_preproc.name).get_node('Bias_b0').interface.n_procs = omp_nthreads
+ wf.get_node(wf_dwi_preproc.name).get_node('Bias_b0').interface.mem_gb = omp_nthreads*4
+ wf.get_node(wf_dwi_preproc.name).get_node('eddy')._n_procs = omp_nthreads
+ wf.get_node(wf_dwi_preproc.name).get_node('eddy')._mem_gb = eddy_mem_gb
+ wf.get_node(wf_dwi_preproc.name).get_node('eddy').interface.n_procs = omp_nthreads
+ wf.get_node(wf_dwi_preproc.name).get_node('eddy').interface.mem_gb = eddy_mem_gb
+ wf.get_node(wf_dwi_preproc.name).get_node('suppress_gibbs')._n_procs = 6
+ wf.get_node(wf_dwi_preproc.name).get_node('suppress_gibbs')._mem_gb = 12
+ wf.get_node(wf_dwi_preproc.name).get_node('estimate_noise')._n_procs = omp_nthreads
+ wf.get_node(wf_dwi_preproc.name).get_node('estimate_noise')._mem_gb = omp_nthreads*2
+ wf.get_node(wf_dwi_preproc.name).get_node('denoise')._n_procs = omp_nthreads
+ if denoise_strategy == 'nlsam':
+ wf.get_node(wf_dwi_preproc.name).get_node('denoise')._mem_gb = omp_nthreads*6
+ else:
+ wf.get_node(wf_dwi_preproc.name).get_node('denoise')._mem_gb = omp_nthreads*4
+
+
+ cfg = dict(execution={'stop_on_first_crash': False, 'crashfile_format': 'txt', 'parameterize_dirs': True,
+ 'display_variable': ':0', 'job_finished_timeout': 120, 'matplotlib_backend': 'Agg',
+ 'plugin': 'MultiProc', 'use_relative_paths': True, 'remove_unnecessary_outputs': False,
+ 'remove_node_directories': False})
+ for key in cfg.keys():
+ for setting, value in cfg[key].items():
+ wf.config[key][setting] = value
+
+ return wf
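To make the eddy memory heuristic used above concrete, here is a quick worked example with hypothetical image dimensions; it assumes ``core.bytesto(..., to='g', bsize=1024)`` converts a byte count to gibibytes:

```python
# Worked example of the eddy memory estimate (hypothetical dimensions).
def bytes_to_gib(n_bytes, bsize=1024):
    # stand-in for core.bytesto(n_bytes, to='g', bsize=1024): bytes -> GiB
    return n_bytes / (bsize ** 3)

shape = (96, 96, 60, 64)   # x, y, z voxels and number of volumes
res_factor = 2.0 / 2       # e.g. 2.0 mm native zooms resampled onto the 2mm grid
exp_bytes = res_factor * 24 * shape[0] * shape[1] * shape[2] * shape[3]
print(round(bytes_to_gib(exp_bytes), 2))   # ~0.79 GB requested for the eddy node
```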
+
+
+# Multi-session pipeline
+def wf_multi_session(bids_dict,
+ participant,
+ sessions,
+ output_dir,
+ sdc_method,
+ denoise_strategy,
+ vox_size,
+ outlier_thresh,
+ omp_nthreads,
+ work_dir):
+ """A function interface for generating multiple single-session workflows -- i.e. a 'multi-session' workflow"""
+ import os
+ import nibabel as nib
+ from nipype.pipeline import engine as pe
+ from nipype.interfaces import utility as niu
+ from dmriprep.workflows.dwi.base import init_dwi_preproc_wf
+ from dmriprep.workflows.dwi.util import init_dwi_concat_wf
+ from dmriprep.utils import core
+
+ wf_multi_session_name = "%s%s%s" % ('wf_multi_session_', participant, '_multi_session')
+ wf_multi = pe.Workflow(name=wf_multi_session_name)
+ wf_multi.base_dir = work_dir + '/' + wf_multi_session_name
+ if not os.path.isdir(wf_multi.base_dir):
+ os.mkdir(wf_multi.base_dir)
+
+ i = 0
+ if len(bids_dict[participant][sessions[0]].keys()) == 1:
+ for session in sessions:
+ dwi_file = bids_dict[participant][session][1]['dwi_file']
+ dwi_img = nib.load(dwi_file)
+ if vox_size == '2mm':
+ res_factor = dwi_img.header.get_zooms()[1]/2
+ elif vox_size == '1mm':
+ res_factor = dwi_img.header.get_zooms()[1]/1
+ else:
+ res_factor = 1
+ exp_bytes = res_factor * 24 * dwi_img.shape[0] * dwi_img.shape[1] * dwi_img.shape[2] * dwi_img.shape[3]
+ eddy_mem_gb = core.bytesto(exp_bytes, to='g', bsize=1024)
+ fbvec = bids_dict[participant][session][1]['fbvec']
+ fbval = bids_dict[participant][session][1]['fbval']
+ metadata = bids_dict[participant][session][1]['metadata']
+ wf = init_dwi_preproc_wf(participant,
+ session,
+ dwi_file,
+ fbval,
+ fbvec,
+ metadata,
+ output_dir,
+ sdc_method,
+ denoise_strategy,
+ vox_size,
+ outlier_thresh,
+ omp_nthreads,
+ eddy_mem_gb)
+ wf_multi.add_nodes([wf])
+ i = i + 1
+
+ wf_multi.get_node(wf.name).get_node('check_orient_and_dims_dwi_node')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('correct_vecs_and_make_b0s')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('bet_pre_moco')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('bet_pre_moco').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('apply_mask_pre_moco')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('apply_mask_pre_moco').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('fsl_split')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('fsl_split').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('coregistration')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('coregistration').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('get_motion_params')._mem_gb = 0.5
+ wf_multi.get_node(wf.name).get_node('fsl_merge')._mem_gb = 2
+ wf_multi.get_node(wf.name).get_node('fsl_merge').interface._mem_gb = 2
+ wf_multi.get_node(wf.name).get_node('art')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('art').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('drop_outliers_fn')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('make_gtab')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('extract_metadata')._mem_gb = 0.5
+ wf_multi.get_node(wf.name).get_node('check_shelled')._mem_gb = 0.5
+ wf_multi.get_node(wf.name).get_node('get_topup_inputs')._mem_gb = 0.5
+ wf_multi.get_node(wf.name).get_node('get_eddy_inputs')._mem_gb = 0.5
+ wf_multi.get_node(wf.name).get_node('make_mean_b0')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('make_basename')._mem_gb = 0.5
+ wf_multi.get_node(wf.name).get_node('eddy_quad')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('eddy_quad').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('make_gtab_final')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('apply_mask')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('apply_mask').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('id_outliers_from_eddy_report')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('drop_outliers_from_eddy_report')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('SplitDWIs')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('SplitDWIs').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('RemoveBiasOfDWIs')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('RemoveBiasOfDWIs').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('RemoveNegative')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('RemoveNegative').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('MergeDWIs')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node('MergeDWIs').interface._mem_gb = 1
+
+ wf_multi.get_node(wf.name).get_node('topup')._n_procs = 8
+ wf_multi.get_node(wf.name).get_node('topup')._mem_gb = 14
+ wf_multi.get_node(wf.name).get_node('topup').interface.n_procs = 8
+ wf_multi.get_node(wf.name).get_node('topup').interface.mem_gb = 14
+ wf_multi.get_node(wf.name).get_node('Bias_b0')._n_procs = omp_nthreads
+ wf_multi.get_node(wf.name).get_node('Bias_b0')._mem_gb = omp_nthreads*4
+ wf_multi.get_node(wf.name).get_node('Bias_b0').interface.n_procs = omp_nthreads
+ wf_multi.get_node(wf.name).get_node('Bias_b0').interface.mem_gb = omp_nthreads*4
+ wf_multi.get_node(wf.name).get_node('eddy')._n_procs = omp_nthreads
+ wf_multi.get_node(wf.name).get_node('eddy')._mem_gb = eddy_mem_gb
+ wf_multi.get_node(wf.name).get_node('eddy').interface.n_procs = omp_nthreads
+ wf_multi.get_node(wf.name).get_node('eddy').interface.mem_gb = eddy_mem_gb
+ wf_multi.get_node(wf.name).get_node('suppress_gibbs')._n_procs = 6
+ wf_multi.get_node(wf.name).get_node('suppress_gibbs')._mem_gb = 12
+ wf_multi.get_node(wf.name).get_node('estimate_noise')._n_procs = omp_nthreads
+ wf_multi.get_node(wf.name).get_node('estimate_noise')._mem_gb = omp_nthreads*2
+ wf_multi.get_node(wf.name).get_node('denoise')._n_procs = omp_nthreads
+ if denoise_strategy == 'nlsam':
+ wf_multi.get_node(wf.name).get_node('denoise')._mem_gb = omp_nthreads*6
+ else:
+ wf_multi.get_node(wf.name).get_node('denoise')._mem_gb = omp_nthreads*4
+
+ else:
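+        # Multiple acquisitions (runs) per session: concatenate the runs with
+        # init_dwi_concat_wf before running the single-session preprocessing.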
+ for session in sessions:
+ dwi_files = []
+ fbvecs = []
+ fbvals = []
+ metadata_files = []
+ for acq in bids_dict[participant][session].keys():
+ dwi_files.append(bids_dict[participant][session][acq]['dwi_file'])
+ fbvecs.append(bids_dict[participant][session][acq]['fbvec'])
+ fbvals.append(bids_dict[participant][session][acq]['fbval'])
+ metadata_files.append(bids_dict[participant][session][acq]['metadata'])
+
+ wf_multi_run_name = "%s%s%s%s%s%s" % ('wf_multi_run_', participant, '_', session, '_', acq)
+ wf = pe.Workflow(name=wf_multi_run_name)
+ wf.base_dir = work_dir + '/' + wf_multi_run_name
+ if not os.path.isdir(wf.base_dir):
+ os.mkdir(wf.base_dir)
+
+ dwi_img = nib.load(dwi_files[0])
+ if vox_size == '2mm':
+                res_factor = dwi_img.header.get_zooms()[1] / 2
+ elif vox_size == '1mm':
+ res_factor = dwi_img.header.get_zooms()[1]/1
+ else:
+ res_factor = 1
+ exp_bytes = res_factor * 24 * dwi_img.shape[0] * dwi_img.shape[1] * dwi_img.shape[2] * dwi_img.shape[3]
+ eddy_mem_gb = core.bytesto(exp_bytes, to='g', bsize=1024)
+
+ meta_inputnode = pe.Node(niu.IdentityInterface(fields=["dwi_files",
+ "fbvecs",
+ "fbvals",
+ "metadata_files",
+ "sub",
+ "ses",
+ "output_dir",
+ "sdc_method",
+ "denoise_strategy",
+ "vox_size",
+ "outlier_thresh",
+ "omp_nthreads",
+ "eddy_mem_gb"]),
+ name='meta_inputnode')
+
+ meta_inputnode.inputs.dwi_files = dwi_files
+ meta_inputnode.inputs.fbvecs = fbvecs
+ meta_inputnode.inputs.fbvals = fbvals
+ meta_inputnode.inputs.metadata_files = metadata_files
+ meta_inputnode.inputs.sub = participant
+ meta_inputnode.inputs.ses = session
+ meta_inputnode.inputs.output_dir = output_dir
+ meta_inputnode.inputs.sdc_method = sdc_method
+ meta_inputnode.inputs.denoise_strategy = denoise_strategy
+ meta_inputnode.inputs.vox_size = vox_size
+ meta_inputnode.inputs.outlier_thresh = outlier_thresh
+ meta_inputnode.inputs.omp_nthreads = omp_nthreads
+ meta_inputnode.inputs.eddy_mem_gb = eddy_mem_gb
+
+ wf_concat = init_dwi_concat_wf(dwi_files, fbvals, fbvecs, metadata_files, participant, session, output_dir)
+
+ wf_dwi_preproc = init_dwi_preproc_wf(participant,
+ session,
+ dwi_files[0],
+ fbvals[0],
+ fbvecs[0],
+ metadata_files[0],
+ output_dir,
+ sdc_method,
+ denoise_strategy,
+ vox_size,
+ outlier_thresh,
+ omp_nthreads,
+ eddy_mem_gb)
+
+ meta_outputnode = pe.Node(
+ niu.IdentityInterface(fields=["preprocessed_data", "final_bvec", "final_bval",
+ "out_eddy_quad_json", "out_eddy_quad_pdf"]),
+ name="meta_outputnode",
+ )
+ wf.add_nodes([meta_inputnode])
+ wf.add_nodes([wf_concat])
+ wf.add_nodes([wf_dwi_preproc])
+ wf.add_nodes([meta_outputnode])
+
+ wf.connect([(meta_inputnode, wf.get_node('dwi_concat_wf').get_node('inputnode'),
+ [('dwi_files', 'dwi_files'),
+ ('fbvecs', 'fbvecs'),
+ ('fbvals', 'fbvals'),
+ ('metadata_files', 'metadata_files'),
+ ('sub', 'participant'),
+ ('ses', 'session'),
+ ('output_dir', 'output_dir')]),
+ (wf.get_node('dwi_concat_wf').get_node('outputnode'), wf_dwi_preproc.get_node('inputnode'),
+ [('dwi_file', 'dwi_file'),
+ ('bvec_file', 'fbvec'),
+ ('bval_file', 'fbval'),
+ ('metadata_file', 'metadata')]),
+ (meta_inputnode, wf_dwi_preproc.get_node('inputnode'),
+ [('sub', 'participant'),
+ ('ses', 'session'),
+ ('output_dir', 'output_dir'),
+ ('sdc_method', 'sdc_method'),
+ ('denoise_strategy', 'denoise_strategy'),
+ ('vox_size', 'vox_size'),
+ ('outlier_thresh', 'outlier_thresh'),
+ ('omp_nthreads', 'omp_nthreads'),
+ ('eddy_mem_gb', 'eddy_mem_gb')]),
+ (wf_dwi_preproc.get_node('outputnode'), meta_outputnode,
+ [("final_bval", "final_bval"),
+ ("preprocessed_data", "preprocessed_data"),
+ ("final_bvec", "final_bvec"),
+ ("out_eddy_quad_json", "out_eddy_quad_json"),
+ ("out_eddy_quad_pdf", "out_eddy_quad_pdf")])
+ ])
+
+ wf_multi.add_nodes([wf])
+ i = i + 1
+
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('check_orient_and_dims_dwi_node')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('correct_vecs_and_make_b0s')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('bet_pre_moco')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('bet_pre_moco').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('apply_mask_pre_moco')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('apply_mask_pre_moco').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('fsl_split')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('fsl_split').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('coregistration')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('coregistration').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('get_motion_params')._mem_gb = 0.5
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('fsl_merge')._mem_gb = 2
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('fsl_merge').interface._mem_gb = 2
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('art')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('art').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('drop_outliers_fn')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('make_gtab')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('extract_metadata')._mem_gb = 0.5
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('check_shelled')._mem_gb = 0.5
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('get_topup_inputs')._mem_gb = 0.5
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('get_eddy_inputs')._mem_gb = 0.5
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('make_mean_b0')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('make_basename')._mem_gb = 0.5
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('eddy_quad')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('eddy_quad').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('make_gtab_final')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('apply_mask')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('apply_mask').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('id_outliers_from_eddy_report')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('drop_outliers_from_eddy_report')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('SplitDWIs')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('SplitDWIs').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('RemoveBiasOfDWIs')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('RemoveBiasOfDWIs').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('RemoveNegative')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('RemoveNegative').interface._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('MergeDWIs')._mem_gb = 1
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('MergeDWIs').interface._mem_gb = 1
+
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('topup')._n_procs = 8
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('topup')._mem_gb = 14
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('topup').interface.n_procs = 8
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('topup').interface.mem_gb = 14
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('Bias_b0')._n_procs = omp_nthreads
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('Bias_b0')._mem_gb = omp_nthreads*4
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('Bias_b0').interface.n_procs = omp_nthreads
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('Bias_b0').interface.mem_gb = omp_nthreads*4
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('eddy')._n_procs = omp_nthreads
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('eddy')._mem_gb = eddy_mem_gb
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('eddy').interface.n_procs = omp_nthreads
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('eddy').interface.mem_gb = eddy_mem_gb
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('suppress_gibbs')._n_procs = 6
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('suppress_gibbs')._mem_gb = 12
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('estimate_noise')._n_procs = omp_nthreads
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('estimate_noise')._mem_gb = omp_nthreads*2
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('denoise')._n_procs = omp_nthreads
+ if denoise_strategy == 'nlsam':
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('denoise')._mem_gb = omp_nthreads*6
+ else:
+ wf_multi.get_node(wf.name).get_node(wf_dwi_preproc.name).get_node('denoise')._mem_gb = omp_nthreads*4
+
+ cfg = dict(execution={'stop_on_first_crash': False, 'crashfile_format': 'txt', 'parameterize_dirs': True,
+ 'display_variable': ':0', 'job_finished_timeout': 120, 'matplotlib_backend': 'Agg',
+ 'plugin': 'MultiProc', 'use_relative_paths': True, 'remove_unnecessary_outputs': False,
+ 'remove_node_directories': False})
+ for key in cfg.keys():
+ for setting, value in cfg[key].items():
+ wf_multi.config[key][setting] = value
+
+ return wf_multi
diff --git a/dmriprep/workflows/dwi/util.py b/dmriprep/workflows/dwi/util.py
new file mode 100644
index 00000000..8893471d
--- /dev/null
+++ b/dmriprep/workflows/dwi/util.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+
+"""
+Utility workflows
+^^^^^^^^^^^^^^^^^
+
+.. autofunction:: init_dwi_concat_wf
+
+"""
+import os
+from nipype.pipeline import engine as pe
+from nipype.interfaces import utility as niu
+
+
+def init_dwi_concat_wf(dwi_files, fbvals, fbvecs, metadata_files, sub, ses, out_dir):
+ """
+ This workflow concatenates a list of dwi images as well as their associated
+ bvecs and bvals.
+
+ .. workflow::
+ :graph2use: orig
+ :simple_form: yes
+
+ from dmriprep.workflows.dwi import init_dwi_concat_wf
+        wf = init_dwi_concat_wf(dwi_files=['/madeup/path/sub-01_run-01_dwi.nii.gz'],
+                                fbvals=['/madeup/path/sub-01_run-01_dwi.bval'],
+                                fbvecs=['/madeup/path/sub-01_run-01_dwi.bvec'],
+                                metadata_files=['/madeup/path/sub-01_run-01_dwi.json'],
+                                sub='01', ses='01', out_dir='.')
+
+ **Inputs**
+
+        dwi_list : :obj:`list`
+            list of dwi NIfTI files
+        bvec_list : :obj:`list`
+            list of associated bvec files
+        bval_list : :obj:`list`
+            list of associated bval files
+        metadata_list : :obj:`list`
+            list of associated BIDS sidecar JSON files
+        sesdir : :obj:`str`
+            output directory for the session
+
+    **Outputs**
+
+        dwi_file : :obj:`str`
+            concatenated dwi NIfTI file
+        bvec_file : :obj:`str`
+            concatenated bvec file
+        bval_file : :obj:`str`
+            concatenated bval file
+        metadata_file : :obj:`str`
+            merged BIDS sidecar JSON file
+
+ """
+ import_list = [
+ "import sys",
+ "import os",
+ "import numpy as np",
+ "import nibabel as nib",
+ "import warnings",
+ 'warnings.filterwarnings("ignore")',
+ ]
+
+ subdir = "%s%s%s" % (out_dir, "/", sub)
+ if not os.path.isdir(subdir):
+ os.mkdir(subdir)
+ sesdir = "%s%s%s%s%s" % (out_dir, "/", sub, "/ses-", ses)
+ if not os.path.isdir(sesdir):
+ os.mkdir(sesdir)
+
+ wf = pe.Workflow(name='dwi_concat_wf')
+ wf.base_dir = '/tmp'
+
+ inputnode = pe.Node(niu.IdentityInterface(fields=['dwi_list',
+ 'bvec_list',
+ 'bval_list',
+ 'metadata_list',
+ 'sesdir']),
+ name='inputnode')
+
+ inputnode.inputs.bval_list = fbvals
+ inputnode.inputs.bvec_list = fbvecs
+ inputnode.inputs.dwi_list = dwi_files
+ inputnode.inputs.metadata_list = metadata_files
+ inputnode.inputs.sesdir = sesdir
+
+ outputnode = pe.Node(niu.IdentityInterface(fields=['dwi_file',
+ 'bvec_file',
+ 'bval_file',
+ 'metadata_file']),
+ name='outputnode')
+
+ def concat_dwis(sesdir, dwi_list):
+ import nibabel as nib
+ import numpy as np
+ import os
+ from nilearn.image import concat_imgs
+
+ out_file = sesdir + '/' + os.path.basename(dwi_list[0]).split('_acq')[0] + '_concat.nii.gz'
+
+ dwi_data = [nib.load(dwi_file) for dwi_file in dwi_list]
+
+ new_nii = concat_imgs(dwi_data)
+
+ hdr = dwi_data[0].header.copy()
+ hdr.set_data_shape(new_nii.shape)
+ hdr.set_xyzt_units('mm')
+ hdr.set_data_dtype(np.float32)
+ nib.Nifti1Image(new_nii.get_data(), dwi_data[0].affine, hdr).to_filename(out_file)
+ return out_file
+
+ concat_dwis = pe.Node(
+ niu.Function(
+ input_names=['sesdir', 'dwi_list'],
+ output_names=['out_file'],
+ function=concat_dwis
+ ),
+ imports=import_list,
+ name='concat_dwis')
+
+ def concat_bvecs(sesdir, bvec_list, bvec_norm_epsilon=0.1):
+ import numpy as np
+ import os
+ out_file = sesdir + '/' + os.path.basename(bvec_list[0]).split('_acq')[0] + '_concat.bvec'
+
+ bvec_vals = []
+ for bvec in bvec_list:
+ bvecs = np.genfromtxt(bvec)
+ b0s = np.linalg.norm(bvecs, axis=1) < bvec_norm_epsilon
+ bvecs[~b0s] /= np.linalg.norm(bvecs[~b0s], axis=1)[..., np.newaxis]
+ bvec_vals.append(bvecs)
+ np.savetxt(out_file,
+ np.concatenate(bvec_vals, axis=1),
+ fmt='%.4f',
+ delimiter=' ')
+ return out_file
+
+ concat_bvecs = pe.Node(
+ niu.Function(
+ input_names=['sesdir', 'bvec_list'],
+ output_names=['out_file'],
+ function=concat_bvecs
+ ),
+ imports=import_list,
+ name='concat_bvecs')
+
+ def concat_bvals(sesdir, bval_list):
+ import numpy as np
+ import os
+ out_file = sesdir + '/' + os.path.basename(bval_list[0]).split('_acq')[0] + '_concat.bval'
+
+ bval_vals = []
+ for bval in bval_list:
+ bval_vals.append(np.genfromtxt(bval))
+ np.savetxt(out_file,
+ np.concatenate((bval_vals), axis=0),
+ fmt='%i',
+ delimiter=' ',
+ newline=' ')
+ return out_file
+
+ concat_bvals = pe.Node(
+ niu.Function(
+ input_names=['sesdir', 'bval_list'],
+ output_names=['out_file'],
+ function=concat_bvals
+ ),
+ imports=import_list,
+ name='concat_bvals')
+
+ def summarize_metadata(sesdir, metadata_list, bval_list):
+ import numpy as np
+ import json
+ import os
+ out_file = sesdir + '/' + os.path.basename(metadata_list[0]).split('_acq')[0] + '_metadata_concat_summary.json'
+
+ bval_vals = []
+ for bval in bval_list:
+ bval_vals.append(np.genfromtxt(bval).astype('bool').astype('int').tolist())
+
+ bval_config = [sum(bval) for bval in bval_vals]
+
+ metadata_dicts = []
+ i = 1
+ for json_file in metadata_list:
+ with open(json_file, "r") as f:
+ metadata_dict = json.load(f)
+ for param in list(metadata_dict.keys()):
+ metadata_dict[param + '_' + str(i)] = metadata_dict.pop(param)
+ metadata_dicts.append(metadata_dict)
+ i = i + 1
+
+ metadata_megadict = dict(metadata_dicts[0])
+ for dic in metadata_dicts:
+ metadata_megadict.update(dic)
+
+ metadata_megadict['vol_legend'] = bval_config
+
+ with open(out_file, 'w') as json_file:
+ json.dump(metadata_megadict, json_file)
+
+ return out_file
+
+ summarize_metadata = pe.Node(
+ niu.Function(
+ input_names=['sesdir', 'metadata_list', 'bval_list'],
+ output_names=['out_file'],
+ function=summarize_metadata
+ ),
+ imports=import_list,
+ name='summarize_metadata')
+
+ wf.connect([
+ (inputnode, concat_dwis, [('sesdir', 'sesdir'),
+ ('dwi_list', 'dwi_list')]),
+ (inputnode, concat_bvecs, [('sesdir', 'sesdir'),
+ ('bvec_list', 'bvec_list')]),
+ (inputnode, concat_bvals, [('sesdir', 'sesdir'),
+ ('bval_list', 'bval_list')]),
+ (inputnode, summarize_metadata, [('sesdir', 'sesdir'),
+ ('metadata_list', 'metadata_list'),
+ ('bval_list', 'bval_list')]),
+ (concat_dwis, outputnode, [('out_file', 'dwi_file')]),
+ (concat_bvecs, outputnode, [('out_file', 'bvec_file')]),
+ (concat_bvals, outputnode, [('out_file', 'bval_file')]),
+ (summarize_metadata, outputnode, [('out_file', 'metadata_file')])
+ ])
+
+ return wf
diff --git a/dmriprep/workflows/fieldmap/__init__.py b/dmriprep/workflows/fieldmap/__init__.py
new file mode 100644
index 00000000..6683dfb4
--- /dev/null
+++ b/dmriprep/workflows/fieldmap/__init__.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+
+from .base import init_sdc_wf
+from .pepolar import init_pepolar_wf
+from .fmap import init_fmap_wf
+from .phasediff import init_phase_wf, init_phdiff_wf
+from .brainsuite import init_brainsuite_wf
diff --git a/dmriprep/workflows/fieldmap/base.py b/dmriprep/workflows/fieldmap/base.py
new file mode 100644
index 00000000..604f184d
--- /dev/null
+++ b/dmriprep/workflows/fieldmap/base.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+
+from nipype.pipeline import engine as pe
+from nipype.interfaces import utility as niu
+
+FMAP_PRIORITY = {'epi': 0, 'fieldmap': 1, 'phasediff': 2, 'phase': 3, 'syn': 4}
+
+
+def init_sdc_wf(
+ subject_id,
+ fmaps,
+ metadata,
+ layout,
+ bet_mag
+ ):
+
+ sdc_wf = pe.Workflow(name='sdc_wf')
+
+ inputnode = pe.Node(
+ niu.IdentityInterface(fields=['b0_stripped']), name='inputnode')
+
+ outputnode = pe.Node(
+ niu.IdentityInterface(
+ fields=[
+ 'out_fmap',
+ 'out_topup',
+ 'bold_ref',
+ 'bold_mask',
+ 'bold_ref_brain',
+ 'out_warp',
+ 'syn_bold_ref',
+ 'method',
+ 'out_movpar',
+ 'out_enc_file'
+ ]
+ ),
+ name='outputnode',
+ )
+ # if synb0:
+ # from .pepolar import init_synb0_wf
+ #
+ # synb0_wf = init_synb0_wf(subject_id, metadata, synb0)
+ #
+ # sdc_prep_wf.connect(
+ # [
+ # (
+ # inputnode,
+ # synb0_wf,
+ # [('b0_stripped', 'inputnode.b0_stripped')],
+ # ),
+ # (
+ # synb0_wf,
+ # outputnode,
+ # [
+ # ('outputnode.out_topup', 'out_topup'),
+ # ('outputnode.out_movpar', 'out_movpar'),
+ # ('outputnode.out_enc_file', 'out_enc_file'),
+ # ('outputnode.out_fmap', 'out_fmap'),
+ # ],
+ # ),
+ # ]
+ # )
+ # else:
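+    # Pick the best available fieldmap type according to FMAP_PRIORITY
+    # (lower value = higher priority, so 'epi' is preferred); if no fieldmaps
+    # were found, bail out early.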
+ fmaps.sort(key=lambda fmap: FMAP_PRIORITY[fmap['suffix']])
+ try:
+ fmap = fmaps[0]
+    except IndexError:
+ return
+
+ if fmap['suffix'] == 'epi':
+ from .pepolar import init_pepolar_wf
+
+ epi_fmaps = [
+ (fmap_['epi'], fmap_['metadata']['PhaseEncodingDirection'])
+ for fmap_ in fmaps
+ if fmap_['suffix'] == 'epi'
+ ]
+
+ pepolar_wf = init_pepolar_wf(subject_id, metadata, epi_fmaps)
+
+ sdc_wf.connect([
+ (inputnode, pepolar_wf, [('b0_stripped', 'inputnode.b0_stripped')]),
+ (pepolar_wf, outputnode, [('outputnode.out_topup', 'out_topup'),
+ ('outputnode.out_movpar', 'out_movpar'),
+ ('outputnode.out_enc_file', 'out_enc_file'),
+ ('outputnode.out_fmap', 'out_fmap')])
+ ])
+
+ elif fmap['suffix'] == 'fieldmap':
+ from .fmap import init_fmap_wf
+
+ fmap_wf = init_fmap_wf(bet_mag)
+ fmap_wf.inputs.inputnode.fieldmap = fmap['fieldmap']
+ fmap_wf.inputs.inputnode.magnitude = fmap['magnitude']
+
+ sdc_wf.connect([
+ (inputnode, fmap_wf, [('b0_stripped', 'inputnode.b0_stripped')]),
+ (fmap_wf, outputnode, [('outputnode.out_fmap', 'out_fmap')])
+ ])
+
+ elif fmap['suffix'] in ('phasediff', 'phase'):
+ from .phasediff import init_phase_wf, init_phdiff_wf
+ from .fmap import init_fmap_wf
+
+ if fmap['suffix'] == 'phasediff':
+ phase_wf = init_phdiff_wf(bet_mag)
+ phase_wf.inputs.inputnode.phasediff = fmap['phasediff']
+ phase_wf.inputs.inputnode.magnitude1 = [
+ fmap_
+ for key, fmap_ in sorted(fmap.items())
+ if key.startswith('magnitude1')
+ ][0]
+ phase_wf.inputs.inputnode.phases_meta = layout.get_metadata(
+ phase_wf.inputs.inputnode.phasediff)
+
+ fmap_wf = init_fmap_wf(bet_mag)
+
+ sdc_wf.connect([
+ (inputnode, fmap_wf, [('b0_stripped', 'inputnode.b0_stripped')]),
+ (phase_wf, fmap_wf, [('outputnode.out_fmap', 'inputnode.fieldmap')]),
+ (phase_wf, fmap_wf, [('outputnode.out_mag', 'inputnode.magnitude')]),
+ (fmap_wf, outputnode, [('outputnode.out_fmap', 'out_fmap')])
+ ])
+
+ elif fmap['suffix'] == 'phase':
+ phase_wf = init_phase_wf(bet_mag)
+ phase_wf.inputs.inputnode.phasediff = [
+ fmap['phase1'],
+ fmap['phase2']
+ ]
+ phase_wf.inputs.inputnode.magnitude1 = [
+ fmap_
+ for key, fmap_ in sorted(fmap.items())
+ if key.startswith('magnitude1')
+ ][0]
+ phase_wf.inputs.inputnode.phases_meta = [
+ layout.get_metadata(i)
+ for i in phase_wf.inputs.inputnode.phasediff
+ ]
+
+            fmap_wf = init_fmap_wf(bet_mag)
+
+ sdc_wf.connect([
+ (inputnode, fmap_wf, [('b0_stripped', 'inputnode.b0_stripped')]),
+ (phase_wf, fmap_wf, [('outputnode.out_fmap', 'inputnode.fieldmap')]),
+ (phase_wf, fmap_wf, [('outputnode.out_mag', 'inputnode.magnitude')]),
+ (fmap_wf, outputnode, [('outputnode.out_fmap', 'out_fmap')])
+ ])
+ else:
+ print('No sdc correction')
+ return sdc_wf
diff --git a/dmriprep/workflows/fieldmap/fmap.py b/dmriprep/workflows/fieldmap/fmap.py
new file mode 100644
index 00000000..84443d01
--- /dev/null
+++ b/dmriprep/workflows/fieldmap/fmap.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+from nipype.pipeline import engine as pe
+from nipype.interfaces import fsl, utility as niu
+
+
+def init_fmap_wf(bet_mag):
+
+ wf = pe.Workflow(name='fmap_prep_wf')
+
+ inputnode = pe.Node(
+ niu.IdentityInterface(fields=['fieldmap', 'magnitude', 'b0_stripped']),
+ name='inputnode',
+ )
+
+ outputnode = pe.Node(
+ niu.IdentityInterface(fields=['out_fmap']), name='outputnode'
+ )
+
+ rad_to_hz = pe.Node(
+ fsl.BinaryMaths(operation='div', operand_value=6.28), name='radToHz'
+ )
+
+ mag_bet = pe.Node(fsl.BET(frac=bet_mag, mask=True, robust=True), name='mag_bet')
+
+ mag_flirt = pe.Node(fsl.FLIRT(dof=6), name='magFlirt')
+
+ fmap_flirt = pe.Node(fsl.FLIRT(apply_xfm=True), name='fmapFlirt')
+
+ wf.connect(
+ [
+ (inputnode, rad_to_hz, [('fieldmap', 'in_file')]),
+ (inputnode, mag_bet, [('magnitude', 'in_file')]),
+ (mag_bet, mag_flirt, [('out_file', 'in_file')]),
+ (inputnode, mag_flirt, [('b0_stripped', 'reference')]),
+ (rad_to_hz, fmap_flirt, [('out_file', 'in_file')]),
+ (inputnode, fmap_flirt, [('b0_stripped', 'reference')]),
+ (mag_flirt, fmap_flirt, [('out_matrix_file', 'in_matrix_file')]),
+ (fmap_flirt, outputnode, [('out_file', 'out_fmap')]),
+ ]
+ )
+
+ return wf
+
diff --git a/dmriprep/workflows/fieldmap/pepolar.py b/dmriprep/workflows/fieldmap/pepolar.py
new file mode 100644
index 00000000..03788b97
--- /dev/null
+++ b/dmriprep/workflows/fieldmap/pepolar.py
@@ -0,0 +1,210 @@
+#!/usr/bin/env python
+
+from nipype.pipeline import engine as pe
+from nipype.interfaces import fsl, utility as niu
+
+
+def init_pepolar_wf(subject_id, dwi_meta, epi_fmaps):
+
+ dwi_file_pe = dwi_meta['PhaseEncodingDirection']
+
+ file2dir = dict()
+
+ usable_fieldmaps_matching_pe = []
+ usable_fieldmaps_opposite_pe = []
+
+ for fmap, pe_dir in epi_fmaps:
+ if pe_dir == dwi_file_pe:
+ usable_fieldmaps_matching_pe.append(fmap)
+ file2dir[fmap] = pe_dir
+ elif pe_dir[0] == dwi_file_pe[0]:
+ usable_fieldmaps_opposite_pe.append(fmap)
+ file2dir[fmap] = pe_dir
+
+ if not usable_fieldmaps_opposite_pe:
+ raise Exception(
+ 'None of the discovered fieldmaps for '
+ 'participant {} has the right phase '
+ 'encoding direction'.format(subject_id)
+ )
+
+ wf = pe.Workflow(name='pepolar_wf')
+
+ inputnode = pe.Node(
+ niu.IdentityInterface(fields=['b0_stripped']), name='inputnode'
+ )
+
+ outputnode = pe.Node(
+ niu.IdentityInterface(
+ fields=['out_topup', 'out_movpar', 'out_fmap', 'out_enc_file']
+ ),
+ name='outputnode',
+ )
+
+ topup_wf = init_topup_wf()
+ topup_wf.inputs.inputnode.altepi_file = usable_fieldmaps_opposite_pe[0]
+ wf.add_nodes([inputnode])
+
+ if not usable_fieldmaps_matching_pe:
+ wf.connect(
+ [(inputnode, topup_wf, [('b0_stripped', 'inputnode.epi_file')])]
+ )
+ else:
+ topup_wf.inputs.inputnode.epi_file = usable_fieldmaps_matching_pe[0]
+
+ epi_list = [
+ topup_wf.inputs.inputnode.epi_file,
+ topup_wf.inputs.inputnode.altepi_file,
+ ]
+ dir_map = {
+ 'i': 'x',
+ 'i-': 'x-',
+ 'j': 'y',
+ 'j-': 'y-',
+ 'k': 'z',
+ 'k-': 'z-',
+ }
+ topup_wf.inputs.inputnode.encoding_directions = [
+ dir_map[file2dir[file]] for file in epi_list
+ ]
+
+ wf.connect(
+ [
+ (
+ topup_wf,
+ outputnode,
+ [
+ ('outputnode.out_fmap', 'out_fmap'),
+ ('outputnode.out_movpar', 'out_movpar'),
+ ('outputnode.out_base', 'out_topup'),
+ ('outputnode.out_enc_file', 'out_enc_file'),
+ ],
+ )
+ ]
+ )
+
+ return wf
+
+
+def init_synb0_wf(subject_id, dwi_meta, synb0):
+ dwi_file_pe = dwi_meta['PhaseEncodingDirection']
+
+ file2dir = dict()
+
+ usable_fieldmaps_matching_pe = []
+ usable_fieldmaps_opposite_pe = []
+
+ wf = pe.Workflow(name='synb0_wf')
+
+ inputnode = pe.Node(
+ niu.IdentityInterface(fields=['b0_stripped']), name='inputnode'
+ )
+
+ outputnode = pe.Node(
+ niu.IdentityInterface(
+ fields=['out_topup', 'out_movpar', 'out_fmap', 'out_enc_file']
+ ),
+ name='outputnode',
+ )
+
+ topup_wf = init_topup_wf(use_acqp=True)
+ topup_wf.inputs.inputnode.altepi_file = synb0
+ wf.add_nodes([inputnode])
+ wf.connect(
+ [(inputnode, topup_wf, [('b0_stripped', 'inputnode.epi_file')])]
+ )
+ topup_wf.inputs.inputnode.acqp = '/scratch/smansour/synb0_acqp.txt'
+
+ wf.connect(
+ [
+ (
+ topup_wf,
+ outputnode,
+ [
+ ('outputnode.out_fmap', 'out_fmap'),
+ ('outputnode.out_movpar', 'out_movpar'),
+ ('outputnode.out_base', 'out_topup'),
+ ],
+ )
+ ]
+ )
+
+ return wf
+
+
+def init_topup_wf(use_acqp=False):
+
+ wf = pe.Workflow(name='topup_wf')
+
+ inputnode = pe.Node(
+ niu.IdentityInterface(
+ fields=[
+ 'epi_file',
+ 'altepi_file',
+ 'encoding_directions',
+ 'topup_name',
+ 'acqp',
+ ]
+ ),
+ name='inputnode',
+ )
+ inputnode.inputs.topup_name = 'topup_base'
+
+ epi_flirt = pe.Node(fsl.FLIRT(), name='epi_flirt')
+
+ outputnode = pe.Node(
+ niu.IdentityInterface(
+ fields=['out_fmap', 'out_movpar', 'out_base', 'out_enc_file']
+ ),
+ name='outputnode',
+ )
+
+ list_merge = pe.Node(niu.Merge(numinputs=2), name='list_merge')
+
+ merge = pe.Node(fsl.Merge(dimension='t'), name='mergeAPPA')
+
+ topup = pe.Node(fsl.TOPUP(), name='topup')
+
+ get_base_movpar = lambda x: x.split('_movpar.txt')[0]
+
+ if use_acqp:
+ wf.connect([(inputnode, topup, [('acqp', 'encoding_file')])])
+ else:
+ topup.inputs.readout_times = [0.05, 0.05]
+ wf.connect(
+ [
+ (
+ inputnode,
+ topup,
+ [('encoding_directions', 'encoding_direction')],
+ )
+ ]
+ )
+
+ wf.connect(
+ [
+ (inputnode, list_merge, [('epi_file', 'in1')]),
+ (
+ inputnode,
+ epi_flirt,
+ [('altepi_file', 'in_file'), ('epi_file', 'reference')],
+ ),
+ (epi_flirt, list_merge, [('out_file', 'in2')]),
+ (list_merge, merge, [('out', 'in_files')]),
+ # (merge, resize, [('merged_file', 'in_file')]),
+ # (resize, topup, [('out_file', 'in_file')]),
+ (merge, topup, [('merged_file', 'in_file')]),
+ (
+ topup,
+ outputnode,
+ [
+ ('out_field', 'out_fmap'),
+ ('out_movpar', 'out_movpar'),
+ ('out_enc_file', 'out_enc_file'),
+ (('out_movpar', get_base_movpar), 'out_base'),
+ ],
+ ),
+ ]
+ )
+
+ return wf
diff --git a/dmriprep/workflows/fieldmap/phasediff.py b/dmriprep/workflows/fieldmap/phasediff.py
new file mode 100644
index 00000000..04c3af33
--- /dev/null
+++ b/dmriprep/workflows/fieldmap/phasediff.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+
+from nipype.pipeline import engine as pe
+from nipype.interfaces import fsl, ants, utility as niu
+from ...interfaces.fmap import Phases2Fieldmap
+
+
+def init_phase_wf(bet_mag):
+
+ wf = pe.Workflow(name='phase_prep_wf')
+
+ inputnode = pe.Node(
+ niu.IdentityInterface(
+ fields=['magnitude1', 'phasediff', 'b0_stripped', 'phases_meta']),
+ name='inputnode')
+
+ outputnode = pe.Node(
+ niu.IdentityInterface(fields=['out_fmap', 'out_mag']),
+ name='outputnode')
+
+ phases2fmap = pe.Node(Phases2Fieldmap(), name='phases2fmap')
+
+ phdiff_wf = init_phdiff_wf(bet_mag)
+
+ wf.connect([
+ (inputnode, phases2fmap, [('phases_meta', 'metadatas')]),
+ (inputnode, phases2fmap, [('phasediff', 'phase_files')]),
+ (inputnode, phdiff_wf, [('magnitude1', 'inputnode.magnitude1'),
+ ('phases_meta', 'inputnode.phases_meta')]),
+ (phases2fmap, phdiff_wf, [('out_file', 'inputnode.phasediff')]),
+ (phdiff_wf, outputnode, [('outputnode.out_fmap', 'out_fmap'),
+ ('outputnode.out_mag', 'out_mag')])
+ ])
+
+ return wf
+
+
+def init_phdiff_wf(bet_mag):
+
+ wf = pe.Workflow(name='phdiff_prep_wf')
+
+ inputnode = pe.Node(
+ niu.IdentityInterface(
+ fields=['magnitude1', 'phasediff', 'phases_meta']),
+ name='inputnode')
+
+ outputnode = pe.Node(
+ niu.IdentityInterface(fields=['out_fmap', 'out_mag']),
+ name='outputnode')
+
+ n4_correct = pe.Node(
+ ants.N4BiasFieldCorrection(dimension=3, copy_header=True),
+ name='n4_correct')
+
+ mag_bet = pe.Node(
+ fsl.BET(frac=bet_mag, robust=True, mask=True), name='mag_bet')
+
+ prep_fmap = pe.Node(
+ fsl.PrepareFieldmap(scanner='SIEMENS'), name='prep_fmap')
+
+ fslroi = pe.Node(fsl.ExtractROI(t_min=0, t_size=1), name='fslroi_phase')
+
+ delta = pe.Node(
+ niu.Function(
+ input_names=['in_values'],
+ output_names=['out_value'],
+ function=delta_te),
+ name='delta')
+
+ wf.connect([
+ (inputnode, n4_correct, [('magnitude1', 'input_image')]),
+ (n4_correct, mag_bet, [('output_image', 'in_file')]),
+ (inputnode, delta, [('phases_meta', 'in_values')]),
+ (mag_bet, prep_fmap, [('out_file', 'in_magnitude')]),
+ (inputnode, prep_fmap, [('phasediff', 'in_phase')]),
+ (delta, prep_fmap, [('out_value', 'delta_TE')]),
+ (prep_fmap, fslroi, [('out_fieldmap', 'in_file')]),
+ (fslroi, outputnode, [('roi_file', 'out_fmap')]),
+ (mag_bet, outputnode, [('out_file', 'out_mag')])
+ ])
+
+ return wf
+
+
+def delta_te(in_values, te1=None, te2=None):
+    r"""
+    Read :math:`\Delta_\text{TE}` (in milliseconds) from the BIDS metadata dict.
+    """
+ if isinstance(in_values, float):
+ te2 = in_values
+ te1 = 0.0
+
+ if isinstance(in_values, dict):
+ te1 = in_values.get('EchoTime1')
+ te2 = in_values.get('EchoTime2')
+
+ if not all((te1, te2)):
+ te2 = in_values.get('EchoTimeDifference')
+ te1 = 0
+
+ if isinstance(in_values, list):
+ te2, te1 = in_values
+ if isinstance(te1, list):
+ te1 = te1[1]
+ if isinstance(te2, list):
+ te2 = te2[1]
+
+    # For convenience, raise a single error when both fields are missing
+ if te1 is None and te2 is None:
+ raise RuntimeError(
+ 'EchoTime1 and EchoTime2 metadata fields not found. '
+ 'Please consult the BIDS specification.'
+ )
+ if te1 is None:
+ raise RuntimeError(
+ 'EchoTime1 metadata field not found. Please consult the BIDS specification.'
+ )
+ if te2 is None:
+ raise RuntimeError(
+ 'EchoTime2 metadata field not found. Please consult the BIDS specification.'
+ )
+
+ return 1000 * abs(float(te2) - float(te1))
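+
+
+# Example (illustrative values): a phasediff acquisition whose BIDS sidecar
+# reports EchoTime1=0.00492 s and EchoTime2=0.00738 s yields
+# delta_te({'EchoTime1': 0.00492, 'EchoTime2': 0.00738}) == 2.46 (milliseconds).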
diff --git a/docker/files/neurodebian.gpg b/docker/files/neurodebian.gpg
new file mode 100644
index 00000000..c546d45d
--- /dev/null
+++ b/docker/files/neurodebian.gpg
@@ -0,0 +1,71 @@
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mQGiBEQ7TOgRBADvaRsIZ3VZ6Qy7PlDpdMm97m0OfvouOj/HhjOM4M3ECbGn4cYh
+vN1gK586s3sUsUcNQ8LuWvNsYhxYsVTZymCReJMEDxod0U6/z/oIbpWv5svF3kpl
+ogA66Ju/6cZx62RiCSOkskI6A3Waj6xHyEo8AGOPfzbMoOOQ1TS1u9s2FwCgxziL
+wADvKYlDZnWM03QtqIJVD8UEAOks9Q2OqFoqKarj6xTRdOYIBVEp2jhozZUZmLmz
+pKL9E4NKGfixqxdVimFcRUGM5h7R2w7ORqXjCzpiPmgdv3jJLWDnmHLmMYRYQc8p
+5nqo8mxuO3zJugxBemWoacBDd1MJaH7nK20Hsk9L/jvU/qLxPJotMStTnwO+EpsK
+HlihA/9ZpvzR1QWNUd9nSuNR3byJhaXvxqQltsM7tLqAT4qAOJIcMjxr+qESdEbx
+NHM5M1Y21ZynrsQw+Fb1WHXNbP79vzOxHoZR0+OXe8uUpkri2d9iOocre3NUdpOO
+JHtl6cGGTFILt8tSuOVxMT/+nlo038JQB2jARe4B85O0tkPIPbQybmV1cm8uZGVi
+aWFuLm5ldCBhcmNoaXZlIDxtaWNoYWVsLmhhbmtlQGdtYWlsLmNvbT6IRgQQEQgA
+BgUCTVHJKwAKCRCNEUVjdcAkyOvzAJ0abJz+f2a6VZG1c9T8NHMTYh1atwCgt0EE
+3ZZd/2in64jSzu0miqhXbOKISgQQEQIACgUCSotRlwMFAXgACgkQ93+NsjFEvg8n
+JgCfWcdJbILBtpLZCocvOzlLPqJ0Fn0AoI4EpJRxoUnrtzBGUC1MqecU7WsDiGAE
+ExECACAFAkqLUWcCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRCl0y8BJkml
+qVklAJ4h2V6MdQkSAThF5c2Gkq6eSoIQYQCeM0DWyB9Bl+tTPSTYXwwZi2uoif20
+QmFwc3kuZ3NlLnVuaS1tYWdkZWJ1cmcuZGUgRGViaWFuIEFyY2hpdmUgPG1pY2hh
+ZWwuaGFua2VAZ21haWwuY29tPohGBBARAgAGBQJEO03FAAoJEPd/jbIxRL4PU18A
+n3tn7i4qdlMi8kHbYWFoabsKc9beAJ9sl/leZNCYNMGhz+u6BQgyeLKw94heBBMR
+AgAeBQJEO0zoAhsDBgsJCAcDAgMVAgMDFgIBAh4BAheAAAoJEKXTLwEmSaWpVdoA
+n27DvtZizNEbhz3wRUPQMiQjtqdvAJ9rS9YdPe5h5o5gHx3mw3BSkOttdYheBBMR
+AgAeBQJEO0zoAhsDBgsJCAcDAgMVAgMDFgIBAh4BAheAAAoJEKXTLwEmSaWpVdoA
+oLhwWL+E+2I9lrUf4Lf26quOK9vLAKC9ZpIF2tUirFFkBWnQvu13/TA0SokCHAQQ
+AQIABgUCTSNBgQAKCRDAc9Iof/uem4NpEACQ8jxmaCaS/qk/Y4GiwLA5bvKosG3B
+iARZ2v5UWqCZQ1tS56yKse/lCIzXQqU9BnYW6wOI2rvFf9meLfd8h96peG6oKscs
+fbclLDIf68bBvGBQaD0VYFi/Fk/rxmTQBOCQ3AJZs8O5rIM4gPGE0QGvSZ1h7VRw
+3Uyeg4jKXLIeJn2xEmOJgt3auAR2FyKbzHaX9JCoByJZ/eU23akNl9hgt7ePlpXo
+74KNYC58auuMUhCq3BQDB+II4ERYMcmFp1N5ZG05Cl6jcaRRHDXz+Ax6DWprRI1+
+RH/Yyae6LmKpeJNwd+vM14aawnNO9h8IAQ+aJ3oYZdRhGyybbin3giJ10hmWveg/
+Pey91Nh9vBCHdDkdPU0s9zE7z/PHT0c5ccZRukxfZfkrlWQ5iqu3V064ku5f4PBy
+8UPSkETcjYgDnrdnwqIAO+oVg/SFlfsOzftnwUrvwIcZlXAgtP6MEEAs/38e/JIN
+g4VrpdAy7HMGEUsh6Ah6lvGQr+zBnG44XwKfl7e0uCYkrAzUJRGM5vx9iXvFMcMu
+jv9EBNNBOU8/Y6MBDzGZhgaoeI27nrUvaveJXjAiDKAQWBLjtQjINZ8I9uaSGOul
+8kpbFavE4eS3+KhISrSHe4DuAa3dk9zI+FiPvXY1ZyfQBtNpR+gYFY6VxMbHhY1U
+lSLHO2eUIQLdYbRITmV1cm9EZWJpYW4gQXJjaGl2ZSBLZXkgPHBrZy1leHBwc3kt
+bWFpbnRhaW5lcnNAbGlzdHMuYWxpb3RoLmRlYmlhbi5vcmc+iEYEEBEIAAYFAk1R
+yQYACgkQjRFFY3XAJMgEWwCggx4Gqlcrt76TSMlbU94cESo55AEAoJ3asQEMpe8t
+QUX+5aikw3z1AUoCiEoEEBECAAoFAkqf/3cDBQF4AAoJEPd/jbIxRL4PxyMAoKUI
+RPWlHCj/+HSFfwhos68wcSwmAKChuC00qutDro+AOo+uuq6YoHXj+ohgBBMRAgAg
+BQJKn/8bAhsDBgsJCAcDAgQVAggDBBYCAwECHgECF4AACgkQpdMvASZJpalDggCe
+KF9KOgOPdQbFnKXl8KtHory4EEwAnA7jxgorE6kk2QHEXFSF8LzOOH4GiGMEExEC
+ACMCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAUCSp//RgIZAQAKCRCl0y8BJkml
+qekFAKCRyt4+FoCzmBbRUUP3Cr8PzH++IgCgkno4vdjsWdyAey8e0KpITTXMFrmJ
+AhwEEAECAAYFAk0jQYEACgkQwHPSKH/7npsFfw/+P8B8hpM3+T1fgboBa4R32deu
+n8m6b8vZMXwuo/awQtMpzjem8JGXSUQm8iiX4hDtjq6ZoPrlN8T4jNmviBt/F5jI
+Jji/PYmhq+Zn9s++mfx+aF4IJrcHJWFkg/6kJzn4oSdl/YlvKf4VRCcQNtj4xV87
+GsdamnzU17XapLVMbSaVKh+6Af7ZLDerEH+iAq733HsYaTK+1xKmN7EFVXgS7bZ1
+9C4LTzc97bVHSywpT9yIrg9QQs/1kshfVIHDKyhjF6IwzSVbeGAIL3Oqo5zOMkWv
+7JlEIkkhTyl+FETxNMTMYjAk+Uei3kRodneq3YBF2uFYSEzrXQgHAyn37geiaMYj
+h8wu6a85nG1NS0SdxiZDIePmbvD9vWxFZUWYJ/h9ifsLivWcVXlvHoQ0emd+n2ai
+FhAck2xsuyHgnGIZMHww5IkQdu/TMqvbcR6d8Xulh+C4Tq7ppy+oTLADSBKII++p
+JQioYydRD529EUJgVlhyH27X6YAk3FuRD3zYZRYS2QECiKXvS665o3JRJ0ZSqNgv
+YOom8M0zz6bI9grnUoivMI4o7ISpE4ZwffEd37HVzmraaUHDXRhkulFSf1ImtXoj
+V9nNSM5p/+9eP7OioTZhSote6Vj6Ja1SZeRkXZK7BwqPbdO0VsYOb7G//ZiOlqs+
+paRr92G/pwBfj5Dq8EK5Ag0ERDtM9RAIAN0EJqBPvLN0tEin/y4Fe0R4n+E+zNXg
+bBsq4WidwyUFy3h/6u86FYvegXwUqVS2OsEs5MwPcCVJOfaEthF7I89QJnP9Nfx7
+V5I9yFB53o9ii38BN7X+9gSjpfwXOvf/wIDfggxX8/wRFel37GRB7TiiABRArBez
+s5x+zTXvT++WPhElySj0uY8bjVR6tso+d65K0UesvAa7PPWeRS+3nhqABSFLuTTT
+MMbnVXCGesBrYHlFVXClAYrSIOX8Ub/UnuEYs9+hIV7U4jKzRF9WJhIC1cXHPmOh
+vleAf/I9h/0KahD7HLYud40pNBo5tW8jSfp2/Q8TIE0xxshd51/xy4MAAwUH+wWn
+zsYVk981OKUEXul8JPyPxbw05fOd6gF4MJ3YodO+6dfoyIl3bewk+11KXZQALKaO
+1xmkAEO1RqizPeetoadBVkQBp5xPudsVElUTOX0pTYhkUd3iBilsCYKK1/KQ9KzD
+I+O/lRsm6L9lc6rV0IgPU00P4BAwR+x8Rw7TJFbuS0miR3lP1NSguz+/kpjxzmGP
+LyHJ+LVDYFkk6t0jPXhqFdUY6McUTBDEvavTGlVO062l9APTmmSMVFDsPN/rBes2
+rYhuuT+lDp+gcaS1UoaYCIm9kKOteQBnowX9V74Z+HKEYLtwILaSnNe6/fNSTvyj
+g0z+R+sPCY4nHewbVC+ISQQYEQIACQUCRDtM9QIbDAAKCRCl0y8BJkmlqbecAJ9B
+UdSKVg9H+fQNyP5sbOjj4RDtdACfXHrRHa2+XjJP0dhpvJ8IfvYnQsU=
+=fAJZ
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/docs/changes.rst b/docs/changes.rst
new file mode 100644
index 00000000..1a4f845b
--- /dev/null
+++ b/docs/changes.rst
@@ -0,0 +1,5 @@
+----------
+What's new
+----------
+
+.. include:: ../CHANGES.rst
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100755
index 00000000..463b3576
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# dmriprep documentation build configuration file, created by
+# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another
+# directory, add these directories to sys.path here. If the directory is
+# relative to the documentation root, use os.path.abspath to make it
+# absolute, like shown here.
+#
+import os
+import sys
+from datetime import datetime
+
+sys.path.insert(0, os.path.abspath('..'))
+
+# -- General configuration ---------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.doctest',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.viewcode',
+ 'sphinxarg.ext', # argparse extension
+ 'nipype.sphinxext.plot_workflow'
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'dmriprep'
+author = 'The dMRIPrep developers'
+copyright = '2018-%s, %s' % (datetime.now().year, author)
+
+
+# The version info for the project you're documenting, acts as replacement
+# for |version| and |release|, also used in various other places throughout
+# the built documents.
+#
+# The short X.Y version.
+version = u'version'
+# The full version, including alpha/beta/rc tags.
+release = u'version'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set 'language' from the command line for these cases.
+language = None
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This patterns also effect to html_static_path and html_extra_path
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output -------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'sphinx_rtd_theme'
+
+# Theme options are theme-specific and customize the look and feel of a
+# theme further. For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named 'default.css' will overwrite the builtin 'default.css'.
+html_static_path = ['_static']
+
+
+# -- Options for HTMLHelp output ---------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'dmriprepdoc'
+
+
+# -- Options for LaTeX output ------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+ # Latex figure (float) alignment
+ #
+ # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass
+# [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'dmriprep.tex', 'dMRIPrep Documentation', author, 'manual')
+]
+
+
+# -- Options for manual page output ------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [(master_doc, 'dmriprep', u'dMRIPrep Documentation', [author], 1)]
+
+
+# -- Options for Texinfo output ----------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (
+ master_doc,
+ 'dmriprep',
+ 'dMRIPrep Documentation',
+ author,
+ 'dmriprep',
+ 'One line description of project.',
+        'Preprocessing workflows for diffusion MRI data.',
+ )
+]
diff --git a/docs/dmriprep.interfaces.rst b/docs/dmriprep.interfaces.rst
new file mode 100644
index 00000000..70866b7d
--- /dev/null
+++ b/docs/dmriprep.interfaces.rst
@@ -0,0 +1,30 @@
+dmriprep.interfaces package
+===========================
+
+Submodules
+----------
+
+dmriprep.interfaces.fmap module
+-------------------------------
+
+.. automodule:: dmriprep.interfaces.fmap
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.interfaces.reportlets module
+-------------------------------------
+
+.. automodule:: dmriprep.interfaces.reportlets
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: dmriprep.interfaces
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/dmriprep.rst b/docs/dmriprep.rst
new file mode 100644
index 00000000..459f093d
--- /dev/null
+++ b/docs/dmriprep.rst
@@ -0,0 +1,48 @@
+dmriprep package
+================
+
+Subpackages
+-----------
+
+.. toctree::
+
+ dmriprep.interfaces
+ dmriprep.utils
+ dmriprep.viz
+ dmriprep.workflows
+
+Submodules
+----------
+
+dmriprep.cli module
+-------------------
+
+.. automodule:: dmriprep.cli
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.due module
+-------------------
+
+.. automodule:: dmriprep.due
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.qc module
+------------------
+
+.. automodule:: dmriprep.qc
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: dmriprep
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/dmriprep.utils.rst b/docs/dmriprep.utils.rst
new file mode 100644
index 00000000..d7abef5a
--- /dev/null
+++ b/docs/dmriprep.utils.rst
@@ -0,0 +1,30 @@
+dmriprep.utils package
+======================
+
+Submodules
+----------
+
+dmriprep.utils.bids module
+--------------------------
+
+.. automodule:: dmriprep.utils.bids
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.utils.hemisphere module
+--------------------------------
+
+.. automodule:: dmriprep.utils.hemisphere
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: dmriprep.utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/dmriprep.viz.rst b/docs/dmriprep.viz.rst
new file mode 100644
index 00000000..d972ce7a
--- /dev/null
+++ b/docs/dmriprep.viz.rst
@@ -0,0 +1,22 @@
+dmriprep.viz package
+====================
+
+Submodules
+----------
+
+dmriprep.viz.utils module
+-------------------------
+
+.. automodule:: dmriprep.viz.utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: dmriprep.viz
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/dmriprep.workflows.dwi.rst b/docs/dmriprep.workflows.dwi.rst
new file mode 100644
index 00000000..015977ea
--- /dev/null
+++ b/docs/dmriprep.workflows.dwi.rst
@@ -0,0 +1,70 @@
+dmriprep.workflows.dwi package
+==============================
+
+Submodules
+----------
+
+dmriprep.workflows.dwi.artifacts module
+---------------------------------------
+
+.. automodule:: dmriprep.workflows.dwi.artifacts
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.workflows.dwi.base module
+----------------------------------
+
+.. automodule:: dmriprep.workflows.dwi.base
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.workflows.dwi.eddy module
+----------------------------------
+
+.. automodule:: dmriprep.workflows.dwi.eddy
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.workflows.dwi.outputs module
+-------------------------------------
+
+.. automodule:: dmriprep.workflows.dwi.outputs
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.workflows.dwi.qc module
+--------------------------------
+
+.. automodule:: dmriprep.workflows.dwi.qc
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.workflows.dwi.tensor module
+------------------------------------
+
+.. automodule:: dmriprep.workflows.dwi.tensor
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.workflows.dwi.util module
+----------------------------------
+
+.. automodule:: dmriprep.workflows.dwi.util
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: dmriprep.workflows.dwi
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/dmriprep.workflows.fieldmap.rst b/docs/dmriprep.workflows.fieldmap.rst
new file mode 100644
index 00000000..cd4eda34
--- /dev/null
+++ b/docs/dmriprep.workflows.fieldmap.rst
@@ -0,0 +1,70 @@
+dmriprep.workflows.fieldmap package
+===================================
+
+Submodules
+----------
+
+dmriprep.workflows.fieldmap.ants module
+---------------------------------------
+
+.. automodule:: dmriprep.workflows.fieldmap.ants
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.workflows.fieldmap.base module
+---------------------------------------
+
+.. automodule:: dmriprep.workflows.fieldmap.base
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.workflows.fieldmap.brainsuite module
+---------------------------------------------
+
+.. automodule:: dmriprep.workflows.fieldmap.brainsuite
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.workflows.fieldmap.fmap module
+---------------------------------------
+
+.. automodule:: dmriprep.workflows.fieldmap.fmap
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.workflows.fieldmap.pepolar module
+------------------------------------------
+
+.. automodule:: dmriprep.workflows.fieldmap.pepolar
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.workflows.fieldmap.phasediff module
+--------------------------------------------
+
+.. automodule:: dmriprep.workflows.fieldmap.phasediff
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.workflows.fieldmap.synb0 module
+----------------------------------------
+
+.. automodule:: dmriprep.workflows.fieldmap.synb0
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: dmriprep.workflows.fieldmap
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/dmriprep.workflows.rst b/docs/dmriprep.workflows.rst
new file mode 100644
index 00000000..c2d91d7e
--- /dev/null
+++ b/docs/dmriprep.workflows.rst
@@ -0,0 +1,38 @@
+dmriprep.workflows package
+==========================
+
+Subpackages
+-----------
+
+.. toctree::
+
+ dmriprep.workflows.dwi
+ dmriprep.workflows.fieldmap
+
+Submodules
+----------
+
+dmriprep.workflows.anatomical module
+------------------------------------
+
+.. automodule:: dmriprep.workflows.anatomical
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+dmriprep.workflows.base module
+------------------------------
+
+.. automodule:: dmriprep.workflows.base
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+
+Module contents
+---------------
+
+.. automodule:: dmriprep.workflows
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 00000000..d35d1723
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,14 @@
+.. include:: links.rst
+.. include:: ../README.rst
+.. include:: license.rst
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Contents
+
+ installation
+ changes
+ usage
+ workflows
+ outputs
+ contributing
diff --git a/docs/installation.rst b/docs/installation.rst
new file mode 100644
index 00000000..2c94a180
--- /dev/null
+++ b/docs/installation.rst
@@ -0,0 +1,89 @@
+.. highlight:: shell
+
+============
+Installation
+============
+
+Docker Container
+================
+
+.. code-block:: console
+
+ $ git clone https://github.com/nipy/dmriprep
+ $ cd dmriprep
+ $ make docker
+
+If you don't want to log into the docker image:
+
+.. code-block:: console
+
+ $ docker run -ti -v $BIDS_INPUT_DIR:/inputs -v $OUTPUT_DIR:/outputs dmriprep:prod dmriprep /inputs /outputs
+
+If you want to log into the image:
+
+.. code-block:: console
+
+ $ docker run -ti -v $BIDS_INPUT_DIR:/inputs -v $OUTPUT_DIR:/outputs dmriprep:prod
+
+Run this inside the docker image:
+
+.. code-block:: console
+
+    $ dmriprep /inputs /outputs --participant-label 01
+
+Singularity Container
+=====================
+
+Preparing a Singularity image (Singularity version < 2.5)
+---------------------------------------------------------
+
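+For Singularity versions older than 2.5, the image cannot be pulled directly
+from Docker Hub. A common workaround (sketched below, assuming you have already
+built the ``dmriprep:prod`` image locally as in the Docker section above) is to
+export it with ``docker2singularity``:
+
+.. code-block:: console
+
+    $ docker run --privileged -t --rm \
+        -v /var/run/docker.sock:/var/run/docker.sock \
+        -v $PWD:/output \
+        singularityware/docker2singularity \
+        dmriprep:prod
+
+The resulting Singularity image file is written to the current working directory
+(the exact file name includes a version/date suffix).
+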
+Running a Singularity Image
+---------------------------
+
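+Assuming an image built as described above (the image file name and bind paths
+below are illustrative), a run could look like:
+
+.. code-block:: console
+
+    $ singularity run --cleanenv \
+        -B $BIDS_INPUT_DIR:/inputs -B $OUTPUT_DIR:/outputs \
+        dmriprep_prod.simg \
+        /inputs /outputs participant --participant-label 01
+
+``--cleanenv`` avoids leaking host environment variables into the container.
+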
+Manually Prepared Environment (Python 3.5+)
+===========================================
+
+To install dmriprep, run this command in your terminal:
+
+.. code-block:: console
+
+ $ pip install dmriprep
+
+This is the preferred method to install dmriprep, as it will always install the most recent stable release.
+
+If you don't have `pip`_ installed, this `Python installation guide`_ can guide
+you through the process.
+
+.. _pip: https://pip.pypa.io
+.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
+
+
+From sources
+------------
+
+The sources for dmriprep can be downloaded from the `Github repo`_.
+
+You can either clone the public repository:
+
+.. code-block:: console
+
+ $ git clone git://github.com/nipy/dmriprep
+
+Or download the `tarball`_:
+
+.. code-block:: console
+
+ $ curl -OL https://github.com/nipy/dmriprep/tarball/master
+
+Once you have a copy of the source, you can install it with:
+
+.. code-block:: console
+
+ $ python setup.py install
+
+
+.. _Github repo: https://github.com/nipy/dmriprep
+.. _tarball: https://github.com/nipy/dmriprep/tarball/master
+
+External Dependencies
+---------------------
diff --git a/docs/license.rst b/docs/license.rst
new file mode 100644
index 00000000..2c1de27b
--- /dev/null
+++ b/docs/license.rst
@@ -0,0 +1,11 @@
+License information
+-------------------
+
+We use the 3-clause BSD license; the full license may be found in the
+``LICENSE`` file in the ``dmriprep`` distribution.
+
+All trademarks referenced herein are property of their respective holders.
+
+Copyright (c) 2018-2019, the dMRIPrep developers.
+All rights reserved.
diff --git a/docs/links.rst b/docs/links.rst
new file mode 100644
index 00000000..32dca760
--- /dev/null
+++ b/docs/links.rst
@@ -0,0 +1,6 @@
+.. _Nipype: http://nipype.readthedocs.io/en/latest/
+.. _BIDS: http://bids.neuroimaging.io
+.. _Usage: usage.html
+.. _Installation: installation.html
+.. _workflows: workflows.html
+.. _guidelines: contributing.html
diff --git a/docs/modules.rst b/docs/modules.rst
new file mode 100644
index 00000000..05dd5a95
--- /dev/null
+++ b/docs/modules.rst
@@ -0,0 +1,7 @@
+dmriprep
+========
+
+.. toctree::
+ :maxdepth: 4
+
+ dmriprep
diff --git a/docs/outputs.rst b/docs/outputs.rst
new file mode 100644
index 00000000..1b449523
--- /dev/null
+++ b/docs/outputs.rst
@@ -0,0 +1,46 @@
+Outputs of dMRIPrep
+===================
+
+dMRIPrep generates:
+
+ 1. **Visual QA (quality assurance) reports**
+
+ 2. **Pre-processed imaging data**
+
+Visual Reports
+--------------
+
+Preprocessed data (dMRIPrep *derivatives*)
+------------------------------------------
+
+Anatomical derivatives are placed in each subject's ``anat`` subfolder:
+
+Diffusion derivatives are stored in the ``dwi`` subfolder.
+
+.. code-block:: console
+
+    dmriprep/
+        sub-001/
+            ses-01/
+                dwi/
+                    # preprocessed diffusion weighted images
+                    sub-001_ses-01_desc-preproc_dwi.nii.gz
+                    sub-001_ses-01_desc-preproc_dwi.bval
+                    sub-001_ses-01_desc-preproc_dwi.bvec
+                    sub-001_ses-01_desc-preproc_dwi.json
+                    # mask
+                    sub-001_ses-01_desc-brain_mask.nii.gz
+                    # model-derived maps
+                    sub-001_ses-01_model-DTI_desc-preproc_FA.nii.gz
+                    sub-001_ses-01_model-DTI_desc-preproc_MD.nii.gz
+                    sub-001_ses-01_model-DTI_desc-preproc_AD.nii.gz
+                    sub-001_ses-01_model-DTI_desc-preproc_RD.nii.gz
+                    # directionally-encoded colour (DEC) maps
+                    sub-001_ses-01_model-DTI_desc-DEC_FA.nii.gz
+                    # grey/white matter masks
+                    sub-001_ses-01_desc-aparcaseg_dseg.nii.gz
+                    sub-001_ses-01_desc-aseg_dseg.nii.gz
+            anat/
+        dataset_description.json
+        sub-001.html
+        index.html
diff --git a/docs/usage.rst b/docs/usage.rst
new file mode 100644
index 00000000..d4c3e902
--- /dev/null
+++ b/docs/usage.rst
@@ -0,0 +1,60 @@
+.. include:: links.rst
+
+Usage
+-----
+
+Execution and the BIDS format
+=============================
+
+The ``dmriprep`` workflow takes as principal input the path of the dataset
+that is to be processed.
+The input dataset is required to be in valid :abbr:`BIDS (Brain Imaging Data
+Structure)` format, and it must include at least one T1w structural image and
+a dwi series.
+
+.. code-block:: console
+
+ bids
+ └── sub-01
+ ├── anat
+ │ └── sub-01_T1w.json
+ │ └── sub-01_T1w.nii.gz
+ ├── dwi
+ │ ├── sub-01_dwi.bval
+ │ ├── sub-01_dwi.bvec
+ │ ├── sub-01_dwi.json
+ │ └── sub-01_dwi.nii.gz
+ └── fmap
+ ├── sub-01_acq-dwi_dir-AP_epi.json
+ ├── sub-01_acq-dwi_dir-AP_epi.nii.gz
+ ├── sub-01_acq-dwi_dir-PA_epi.json
+ └── sub-01_acq-dwi_dir-PA_epi.nii.gz
+
+We highly recommend that you validate your dataset with the free, online
+`BIDS Validator <https://bids-standard.github.io/bids-validator/>`_.
+
+The exact command to run ``dmriprep`` depends on the Installation_ method.
+The common parts of the command follow the `BIDS-Apps
+<https://bids-apps.neuroimaging.io>`_ definition.
+
+Example: ::
+
+ dmriprep data/bids out/ participant -w work/
+
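+To restrict the run to a single participant (using the ``--participant-label``
+flag, as shown in the installation instructions): ::
+
+    dmriprep data/bids out/ participant --participant-label 01 -w work/
+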
+
+Command-Line Arguments
+======================
+
+.. click:: dmriprep.cli:main
+ :prog: dmriprep
+ :show-nested:
+
+Debugging
+=========
+
+Logs and crash files are output into the
+``/dmriprep/sub-/log`` directory.
+Information on how to customize and understand these files can be found on the
+`nipype errors and crashes `_
+and `nipype debugging `_
+pages.
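+
+As a sketch of a typical debugging session, the ``.pklz`` crash files written
+into that directory can be inspected with nipype's ``nipypecli`` helper (the
+crash-file name below is hypothetical):
+
+.. code-block:: console
+
+    nipypecli crash crash-20190101-000000-user-node_name.pklz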
diff --git a/docs/workflows.rst b/docs/workflows.rst
new file mode 100644
index 00000000..471f5f38
--- /dev/null
+++ b/docs/workflows.rst
@@ -0,0 +1,207 @@
+Processing pipeline details
+===========================
+
+dMRIPrep adapts its pipeline depending on what data and metadata are available
+and used as input.
+
+A (very) high-level view of the simplest pipeline is presented below.
+
+.. workflow::
+ :graph2use: orig
+ :simple_form: yes
+
+ from collections import namedtuple, OrderedDict
+ from dmriprep.workflows.base import init_single_subject_wf
+ BIDSLayout = namedtuple('BIDSLayout', ['root'])
+ wf = init_single_subject_wf(subject_id='test', session_list=[], name='single_subject_wf', layout=BIDSLayout, output_dir='.', work_dir='.', ignore=['fieldmaps'], b0_thresh=5, output_resolution=(1, 1, 1), bet_dwi=0.3, bet_mag=0.3, omp_nthreads=1, synb0_dir='')
+
+T1w preprocessing
+-----------------
+:mod:`dmriprep.workflows.anatomical.init_anat_preproc_wf`
+
+The T1w preprocessing workflow skull-strips the T1w scan for later use in
+tractography or susceptibility distortion correction if using ANTs or
+BrainSuite. It uses ``init_brain_extraction_wf`` from
+`niworkflows <https://github.com/nipreps/niworkflows>`_.
+
+.. workflow::
+ :graph2use: orig
+ :simple_form: yes
+
+ from niworkflows.anat.ants import init_brain_extraction_wf
+ wf = init_brain_extraction_wf()
+
+DWI preprocessing
+-----------------
+:mod:`dmriprep.workflows.dwi.base.init_dwi_preproc_wf`
+
+Preprocessing of DWI files is split into multiple sub-workflows described below.
+
+.. workflow::
+ :graph2use: orig
+ :simple_form: yes
+
+ from collections import namedtuple
+ from dmriprep.workflows.dwi import init_dwi_preproc_wf
+ BIDSLayout = namedtuple('BIDSLayout', ['root'])
+ wf = init_dwi_preproc_wf(layout=BIDSLayout('.'), subject_id='dmripreptest', dwi_file='/madeup/path/sub-01_dwi.nii.gz', metadata={'PhaseEncodingDirection': 'j-', 'TotalReadoutTime': 0.05}, b0_thresh=5, output_resolution=(1, 1, 1), bet_dwi=0.3, bet_mag=0.3, omp_nthreads=1, ignore=['fieldmaps'], synb0_dir='')
+
+Concatenating Scans
+^^^^^^^^^^^^^^^^^^^
+:mod:`dmriprep.workflows.dwi.util.init_dwi_concat_wf`
+
+By default, each scan in the ``dwi`` folder will get preprocessed separately.
+However, there are some cases where multiple scans should be concatenated before
+head motion, eddy current distortion, and susceptibility distortion correction
+(e.g., a single-shell or multi-shell acquisition split across separate runs).
+
+.. code-block:: console
+
+ bids
+ └── sub-01
+ └── dwi
+ ├── sub-01_acq-multishelldir30b1000_dwi.nii.gz
+ ├── sub-01_acq-multishelldir30b3000_dwi.nii.gz
+ ├── sub-01_acq-multishelldir30b4500_dwi.nii.gz
+ └── sub-01_acq-singleshelldir60b1000_dwi.nii.gz
+
+In the above example, the multi-shell scans should be concatenated and the
+single-shell scan should be left alone. This is done by using the
+``--concat_dwis`` flag as shown below:
+
+``--concat_dwis multishelldir30b1000 multishelldir30b3000 multishelldir30b4500``
+
+The unique ``acq`` entities are passed on so that the pipeline knows which scans to
+concatenate; the associated bvecs and bvals are concatenated as well.
+
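+For instance, a full invocation for the layout above could look like the
+following (the input, output, and working paths are placeholders):
+
+.. code-block:: console
+
+    dmriprep data/bids out/ participant -w work/ \
+        --concat_dwis multishelldir30b1000 multishelldir30b3000 multishelldir30b4500
+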
+.. workflow::
+ :graph2use: flat
+ :simple_form: no
+
+ from dmriprep.workflows.dwi import init_dwi_concat_wf
+ wf = init_dwi_concat_wf(ref_file='/madeup/path/sub-01_dwi.nii.gz')
+
+Artifact Removal
+^^^^^^^^^^^^^^^^
+:mod:`dmriprep.workflows.dwi.artifacts.init_dwi_artifacts_wf`
+
+By default, dMRIPrep performs denoising and unringing on each DWI scan. These
+steps can be skipped by passing ``--ignore denoising unringing`` on the command line.
+
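+For example, to skip both steps in a run (the paths are placeholders):
+
+.. code-block:: console
+
+    dmriprep data/bids out/ participant -w work/ --ignore denoising unringing
+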
+.. workflow::
+ :graph2use: flat
+ :simple_form: no
+
+ from dmriprep.workflows.dwi import init_dwi_artifacts_wf
+ wf = init_dwi_artifacts_wf(ignore=[], output_resolution=(1, 1, 1))
+
+Motion and Susceptibility Correction
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+:mod:`dmriprep.workflows.dwi.eddy.init_dwi_eddy_wf`
+
+.. workflow::
+ :graph2use: flat
+ :simple_form: no
+
+ from dmriprep.workflows.dwi import init_dwi_eddy_wf
+ wf = init_dwi_eddy_wf(1, 'topup')
+
+Tensor Estimates
+^^^^^^^^^^^^^^^^
+:mod:`dmriprep.workflows.dwi.tensor.init_dwi_tensor_wf`
+
+.. workflow::
+ :graph2use: flat
+ :simple_form: no
+
+ from dmriprep.workflows.dwi import init_dwi_tensor_wf
+ wf = init_dwi_tensor_wf()
+
+Susceptibility Distortion Correction (SDC)
+------------------------------------------
+
+Introduction
+^^^^^^^^^^^^
+
+Correction Methods
+^^^^^^^^^^^^^^^^^^
+
+1. topup
+2. fieldmap
+3. phasediff
+4. phase1/phase2
+5. nonlinear registration
+6. synthetic b0
+
+Topup
+~~~~~
+:mod:`dmriprep.workflows.fieldmap.pepolar.init_pepolar_wf`
+
+.. workflow::
+ :graph2use: flat
+ :simple_form: no
+
+ from dmriprep.workflows.fieldmap import init_pepolar_wf
+ wf = init_pepolar_wf('dmripreptest', 0.3, ['/madeup/path/sub-01_dir-AP_epi.nii.gz', '/madeup/path/sub-01_dir-PA_epi.nii.gz'])
+
+Fieldmap
+~~~~~~~~
+:mod:`dmriprep.workflows.fieldmap.fmap.init_fmap_wf`
+
+All of the fieldmap-related workflows end with ``init_fmap_wf``. Depending on
+the type of fieldmap, other upstream workflows are run. If a phasediff scan is
+given, the phasediff is converted to a proper fieldmap using ``fsl_prepare_fieldmap``.
+If phase1 and phase2 scans are given, they are first converted to a phasediff scan.
+The code for this conversion was derived from `sdcflows <https://github.com/nipreps/sdcflows>`_.
+
+Synthetic b0
+~~~~~~~~~~~~
+
+The synthetic b0 (synb0) approach offers an alternative method of SDC that uses
+deep learning on an anatomical image (T1w).
+You can use it in this pipeline by generating the synb0s for the subject(s) and
+passing the BIDS-like directory containing them to the ``--synb0_dir`` parameter.
+To find out how to generate the synb0s, you can visit our
+`forked repo `_.
+Once you have a directory of synb0s (recommended to be placed as derivatives of the
+BIDS folder, e.g. ``bids/derivatives/synb0/sub-XX``), you are ready to run the
+pipeline using them. Just run dmriprep as you usually would, with ``bids_dir``
+and ``output_dir``, but now add ``--synb0_dir <synb0 directory>`` to your command.
+The synb0 acquisition parameters for topup and eddy will be automatically generated in the
+pipeline in the following format:
+
+.. code-block:: console
+
+    0 -1 0 <total readout time>
+    0 1 0 0
+
+If you want to override the ``total_readout_time`` with one of your own,
+simply add ``--total_readout <your total readout time>`` to your command.
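+
+As an illustration only (this is not part of dMRIPrep's code), a file in the
+format above could be assembled with a few lines of Python; the phase-encoding
+vector and readout time here are assumed example values:
+
+.. code-block:: python
+
+    # Hypothetical sketch: write a topup/eddy acqparams.txt containing one row
+    # for the acquired b0 and one row for the synthetic (distortion-free) b0.
+    pe_vector = (0, -1, 0)        # assumed phase-encoding direction
+    total_readout_time = 0.05     # assumed total readout time, in seconds
+
+    rows = [
+        (*pe_vector, total_readout_time),  # acquired b0 (distorted)
+        (0, 1, 0, 0),                      # synthetic b0 (readout time set to 0)
+    ]
+    with open("acqparams.txt", "w") as f:
+        for row in rows:
+            f.write(" ".join(str(v) for v in row) + "\n")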
diff --git a/setup.cfg b/setup.cfg
index f632aae3..160ee558 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,9 +1,6 @@
[metadata]
url = https://github.com/nipreps/dmriprep
author = The dMRIPrep developers
-author_email = code@oscaresteban.es
-maintainer = Oscar Esteban
-maintainer_email = code@oscaresteban.es
description = dMRIPrep is a robust and easy-to-use pipeline for preprocessing of diverse dMRI data.
long_description = file:README.rst
long_description_content_type = text/x-rst; charset=UTF-8
@@ -20,18 +17,19 @@ classifiers =
[options]
python_requires = >=3.5
install_requires =
+ dipy ~=1.0.0
indexed_gzip >=0.8.8
nibabel >=2.2.1
nilearn !=0.5.0, !=0.5.1
- nipype >=1.2.0
+ nipype >=1.2.1
niworkflows ~= 0.10.1
+ numba ~= 0.44.1
numpy
pandas
psutil >=5.4
- pybids ~= 0.9.2
+ pybids ~= 0.9.3
pyyaml
scikit-image
- smriprep ~= 0.3.0
statsmodels
templateflow ~= 0.4.1
test_requires =
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..8b68ca39
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+"""Unit test package for dmriprep."""
diff --git a/tests/test_dmriprep.py b/tests/test_dmriprep.py
new file mode 100644
index 00000000..f789dd58
--- /dev/null
+++ b/tests/test_dmriprep.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""Tests for `dmriprep` package."""
+
+import pytest
+from click.testing import CliRunner
+
+from dmriprep import cli
+
+
+@pytest.fixture
+def response():
+ """Sample pytest fixture.
+
+ See more at: http://doc.pytest.org/en/latest/fixture.html
+ """
+ # import requests
+ # return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
+
+
+def test_content(response):
+ """Sample pytest test function with the pytest fixture as an argument."""
+ # from bs4 import BeautifulSoup
+ # assert 'GitHub' in BeautifulSoup(response.content).title.string
+
+
+def test_command_line_interface():
+ """Test the CLI."""
+ runner = CliRunner()
+ result = runner.invoke(cli.main)
+ assert result.exit_code == 0
+ assert "cli.main" in result.output
+ help_result = runner.invoke(cli.main, ["--help"])
+ assert help_result.exit_code == 0
+ assert "--help Show this message and exit." in help_result.output
diff --git a/tests/test_utils.py b/tests/test_utils.py
new file mode 100644
index 00000000..271e3518
--- /dev/null
+++ b/tests/test_utils.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""Tests for `dmriprep` package."""
+
+import numpy as np
+
+from dmriprep.utils.hemisphere import is_hemispherical
+
+
+def uniform_points_on_sphere(npoints=1, hemisphere=True, rotate=(0, 0, 0)):
+ """Generate random uniform points on a unit (hemi)sphere."""
+ r = 1.0
+ if hemisphere:
+ theta = np.random.uniform(0, np.pi / 2, npoints)
+ else:
+ theta = np.random.uniform(0, np.pi, npoints)
+ phi = np.random.uniform(0, 2 * np.pi, npoints)
+
+ x = r * np.sin(theta) * np.cos(phi)
+ y = r * np.sin(theta) * np.sin(phi)
+ z = r * np.cos(theta)
+
+ vecs = np.stack([x, y, z])
+
+ rot_x = np.array(
+ [
+ [1.0, 0.0, 0.0],
+ [0.0, np.cos(rotate[0]), -np.sin(rotate[0])],
+ [0.0, np.sin(rotate[0]), np.cos(rotate[0])],
+ ]
+ )
+
+ rot_y = np.array(
+ [
+ [np.cos(rotate[1]), 0.0, np.sin(rotate[1])],
+ [0.0, 1.0, 0.0],
+ [-np.sin(rotate[1]), 0.0, np.cos(rotate[1])],
+ ]
+ )
+
+ rot_z = np.array(
+ [
+ [np.cos(rotate[2]), -np.sin(rotate[2]), 0.0],
+ [np.sin(rotate[2]), np.cos(rotate[2]), 0.0],
+ [0.0, 0.0, 1.0],
+ ]
+ )
+
+ vecs = np.dot(rot_z, np.dot(rot_y, np.dot(rot_x, vecs)))
+
+ return vecs.transpose()
+
+
+def test_is_hemispherical():
+ vecs = uniform_points_on_sphere(
+ npoints=100,
+ hemisphere=True,
+ rotate=(
+ np.random.uniform(0, np.pi),
+ np.random.uniform(0, np.pi),
+ np.random.uniform(0, np.pi),
+ ),
+ )
+
+ assert is_hemispherical(vecs)[0]
+ vecs = uniform_points_on_sphere(npoints=100, hemisphere=False)
+ assert not is_hemispherical(vecs)[0]
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..4347a530
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,19 @@
+[tox]
+envlist = py35, py36, py37, flake8
+
+[travis]
+python =
+ 3.7: py37
+ 3.6: py36
+ 3.5: py35
+
+[testenv:flake8]
+basepython = python
+deps = flake8
+commands = flake8 dmriprep
+
+[testenv]
+setenv =
+ PYTHONPATH = {toxinidir}
+
+commands = python setup.py test
diff --git a/update_changes.sh b/update_changes.sh
new file mode 100644
index 00000000..49c607d5
--- /dev/null
+++ b/update_changes.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# Collects the pull-requests since the latest release and
+# arranges them in the CHANGES.rst file.
+#
+# This is a script to be run before releasing a new version.
+# Authored by Oscar Esteban
+#
+# Usage /bin/bash update_changes.sh 1.0.1
+#
+# Copyright (c) 2015-2019, the CRN developers team.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * Neither the name of dmriprep nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# Setting # $ help set
+# set -u # Treat unset variables as an error when substituting.
+set -x # Print command traces before executing command.
+
+# Check whether the Upcoming release header is present
+head -1 CHANGES.rst | grep -q Upcoming
+UPCOMING=$?
+if [[ "$UPCOMING" == "0" ]]; then
+ head -n3 CHANGES.rst >> newchanges
+fi
+
+# Elaborate today's release header
+HEADER="$1 ($(date '+%B %d, %Y'))"
+echo $HEADER >> newchanges
+echo $( printf "%${#HEADER}s" | tr " " "=" ) >> newchanges
+echo "" >> newchanges
+
+# Search for PRs since previous release
+git log --grep="Merge pull request" `git describe --tags --abbrev=0`..HEAD --pretty='format: * %b %s' | sed 's/Merge pull request \#\([^\d]*\)\ from\ .*/(\#\1)/' >> newchanges
+echo "" >> newchanges
+echo "" >> newchanges
+
+# Add back the Upcoming header if it was present
+if [[ "$UPCOMING" == "0" ]]; then
+ tail -n+4 CHANGES.rst >> newchanges
+else
+ cat CHANGES.rst >> newchanges
+fi
+
+# Replace old CHANGES.rst with new file
+mv newchanges CHANGES.rst