diff --git a/.circleci/artifact_path b/.circleci/artifact_path new file mode 100644 index 0000000000..602b4f54eb --- /dev/null +++ b/.circleci/artifact_path @@ -0,0 +1 @@ +/0/tmp/gh-pages/docs/_build/no_version_html/html/index.html \ No newline at end of file diff --git a/.circleci/config.yml b/.circleci/config.yml index e2c3e55f07..a18f3885e9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,88 +1,349 @@ +docs_deploy: &docs + docker: + - image: node:8.10.0 + working_directory: /tmp/gh-pages + steps: + - run: + name: Check whether this is the original repo + command: | + if [[ "$CIRCLE_PROJECT_USERNAME" != "poldracklab" ]]; then + echo "Not in poldracklab/sdcflows - skipping docs deploy." + circleci step halt + fi + - add_ssh_keys: + fingerprints: + - "46:48:1e:6d:00:0e:f2:f8:e5:aa:b9:aa:da:eb:59:4e" + - run: + name: Install gh-pages tool + command: | + npm install -g --silent gh-pages@2.0.1 + - checkout + - run: + name: Set git settings + command: | + git config user.email "crn.poldracklab@gmail.com" + git config user.name "Documentation Push" + - attach_workspace: + at: docs/_build + - run: + name: Disable jekyll builds + command: touch docs/_build/html/.nojekyll + - run: + name: Deploy docs to gh-pages branch + command: gh-pages --dotfiles --message "doc(update) [skip ci]" --dist docs/_build/html + version: 2 jobs: - build: + machine: + image: circleci/classic:201711-01 + working_directory: /tmp/src/sdcflows environment: - - TZ: "/usr/share/zoneinfo/America/Los_Angeles" - - docker: - - image: docker:18.01.0-ce-git - working_directory: /tmp/src/ + TZ: "/usr/share/zoneinfo/America/Los_Angeles" + SCRATCH: "/scratch" steps: - - run: - name: Install parallel gzip and python3 - command: apk add --no-cache pigz python3 - restore_cache: keys: - - docker-v1-{{ .Branch }}-{{ epoch }} - - docker-v1-{{ .Branch }}- - - docker-v1-master- - - docker-v1- + - build-v1-{{ .Branch }}-{{ epoch }} + - build-v1-{{ .Branch }}- + - build-v1-master- + - build-v1- paths: - - /tmp/cache/docker.tar.gz - - - checkout - - setup_remote_docker + - /tmp/docker - run: - name: Load Docker image layer cache - no_output_timeout: 30m + name: Set-up a Docker registry command: | - docker info - set +o pipefail - if [ -f /tmp/cache/docker.tar.gz ]; then - pigz -d --stdout /tmp/cache/docker.tar.gz | docker load + docker run -d -p 5000:5000 --restart=always --name=registry \ + -v /tmp/docker:/var/lib/registry registry:2 + - run: + name: Pull images + command: | + set +e + docker pull localhost:5000/ubuntu + success=$? + set -e + if [[ "$success" = "0" ]]; then + echo "Pulling from local registry" + docker tag localhost:5000/ubuntu ubuntu:xenial-20191010 + docker pull localhost:5000/sdcflows + docker tag localhost:5000/sdcflows poldracklab/sdcflows:latest + docker tag localhost:5000/sdcflows poldracklab/sdcflows + else + echo "Pulling from Docker Hub" + docker pull ubuntu:xenial-20191010 + docker tag ubuntu:xenial-20191010 localhost:5000/ubuntu + docker push localhost:5000/ubuntu + docker pull poldracklab/sdcflows:latest fi - docker images + - checkout - run: name: Build Docker image no_output_timeout: 60m command: | + export PY3=$(pyenv versions | grep '3\.' | + sed -e 's/.* 3\./3./' -e 's/ .*//') + pyenv local $PY3 + # Get version, update files. 
+ THISVERSION=$( python3 get_version.py ) + if [[ ${THISVERSION:0:1} == "0" ]] ; then + echo "WARNING: latest git tag could not be found" + echo "Please, make sure you fetch all tags from upstream with" + echo "the command ``git fetch --tags --verbose`` and push" + echo "them to your fork with ``git push origin --tags``" + fi # Build docker image e=1 && for i in {1..5}; do - docker build \ + docker build --rm \ --cache-from=poldracklab/sdcflows \ - --rm=false \ -t poldracklab/sdcflows:latest \ --build-arg BUILD_DATE=`date -u +"%Y-%m-%dT%H:%M:%SZ"` \ --build-arg VCS_REF=`git rev-parse --short HEAD` \ - --build-arg VERSION="${CIRCLE_TAG:-unstable}" . \ + --build-arg VERSION="${CIRCLE_TAG:-$THISVERSION}" . \ && e=0 && break || sleep 15 done && [ "$e" -eq "0" ] - run: - name: Docker save + name: Docker push to local registry no_output_timeout: 40m command: | - mkdir -p /tmp/cache - docker save ubuntu:xenial-20161213 poldracklab/sdcflows:latest \ - | pigz -2 -p 3 > /tmp/cache/docker.tar.gz + docker tag poldracklab/sdcflows:latest localhost:5000/sdcflows + docker push localhost:5000/sdcflows + - run: + name: Docker registry garbage collection + command: | + docker exec -it registry /bin/registry garbage-collect --delete-untagged \ + /etc/docker/registry/config.yml + - save_cache: + key: build-v1-{{ .Branch }}-{{ epoch }} + paths: + - /tmp/docker - persist_to_workspace: root: /tmp paths: - - cache/docker.tar.gz + - src/sdcflows + + cache_test_data: + machine: + image: circleci/classic:201711-01 + working_directory: /tmp/data + steps: + - restore_cache: + keys: + - data-v1-{{ .Branch }}- + - data-v1-master- + - data-v1- + - run: + name: Setup git-annex + command: | + mkdir -p /tmp/cache + if [[ ! -e "/tmp/cache/git-annex-standalone.tar.gz" ]]; then + wget -O- http://neuro.debian.net/lists/trusty.us-ca.full | sudo tee /etc/apt/sources.list.d/neurodebian.sources.list + sudo apt-key adv --recv-keys --keyserver hkp://pool.sks-keyservers.net:80 0xA5D32F012649A5A9 + sudo apt update && sudo apt-get install -y --no-install-recommends git-annex-standalone + mkdir -p /tmp/cache + tar czvf /tmp/cache/git-annex-standalone.tar.gz /usr/bin/git-annex /usr/bin/git-annex-shell /usr/lib/git-annex.linux + else + sudo tar xzfv /tmp/cache/git-annex-standalone.tar.gz -C / + fi + git config --global user.name 'CRN' + git config --global user.email 'crn.poldracklab@gmail.com' + + - run: + name: Setup DataLad + command: | + pyenv global 3.5.2 + virtualenv venv + pip install --no-cache-dir -U pip + pip install --no-cache-dir -U datalad + - run: + name: Install ds001600 + command: | + datalad install https://github.com/OpenNeuroDatasets/ds001600.git + datalad update --merge ds001600/ + datalad get -r ds001600/* + - run: + name: Get testdata + command: | + if [[ ! -d /tmp/data/testdata ]]; then + wget --retry-connrefused --waitretry=5 --read-timeout=20 --timeout=15 -t 0 -q \ + -O testdata.zip "https://files.osf.io/v1/resources/9sy2a/providers/osfstorage/5d44b940bcd6d900198ed6be/?zip=" + unzip testdata.zip -d /tmp/data/testdata + fi + + - run: + name: Get clean copy of workdir + command: | + if [[ ! 
-f /tmp/data/workdir.tar.gz ]]; then + wget --retry-connrefused --waitretry=5 --read-timeout=20 --timeout=15 -t 0 -q \ + -O workdir.tar.gz "https://files.osf.io/v1/resources/9sy2a/providers/osfstorage/5dcabd60a1cd9e000c751b3c" + fi + + - run: + name: Store FreeSurfer license file + command: | + mkdir -p /tmp/data/ + echo "b2VzdGViYW5Ac3RhbmZvcmQuZWR1CjMwNzU2CiAqQ1MzYkJ5VXMxdTVNCiBGU2kvUGJsejJxR1V3Cg==" | base64 -d > /tmp/data/fslicense.txt + + - run: + name: Create Nipype config files + command: | + printf "[execution]\nstop_on_first_crash = true\n" > /tmp/data/nipype.cfg + echo "poll_sleep_duration = 0.01" >> /tmp/data/nipype.cfg + echo "hash_method = content" >> /tmp/data/nipype.cfg + echo "crashfile_format = txt" >> /tmp/data/nipype.cfg - save_cache: - key: docker-v1-{{ .Branch }}-{{ epoch }} + key: data-v1-{{ .Branch }}-{{ .BuildNum }} paths: - - /tmp/cache/docker.tar.gz + - "/opt/circleci/.pyenv/versions/3.5.2" + - /tmp/data + - /tmp/cache/git-annex-standalone.tar.gz + - persist_to_workspace: + root: /tmp + paths: + - data - deploy_docker: + test_sdcflows: machine: image: circleci/classic:201711-01 - working_directory: /tmp/src/ + working_directory: /tmp/tests steps: - attach_workspace: at: /tmp + - restore_cache: + keys: + - workdir-v2-{{ .Branch }}- + - workdir-v2-master- + - workdir-v2- + - restore_cache: + keys: + - build-v1-{{ .Branch }}-{{ epoch }} + - build-v1-{{ .Branch }}- + - build-v1-master- + - build-v1- + paths: + - /tmp/docker + - run: + name: Set-up a Docker registry + command: | + docker run -d -p 5000:5000 --restart=always --name=registry \ + -v /tmp/docker:/var/lib/registry registry:2 + - run: + name: Pull images from local registry + command: | + docker pull localhost:5000/sdcflows + docker tag localhost:5000/sdcflows poldracklab/sdcflows:latest + - run: + name: Refresh work directory? + command: | + set +e + do_refresh="$( git log --format=oneline -n 1 $CIRCLE_SHA1 | grep -i -E '\[fresh[ _]?workdir\]' )" + set -e + if [[ "x${do_refresh}" = "x" ]]; then + echo "Did not refresh the workdir." 
+ else + rm -rf /tmp/work + mkdir -p /tmp/work + cd /tmp/work + tar xzfv /tmp/data/workdir.tar.gz + fi + - run: + name: Run tests + no_output_timeout: 2h + command: | + mkdir -p /tmp/work + docker run -it --rm -e TEST_DATA_HOME=/data/ -e TEST_OUTPUT_DIR=/out \ + -v /tmp/data/fslicense.txt:/opt/freesurfer/license.txt:ro -e FS_LICENSE=/opt/freesurfer/license.txt \ + -v /tmp/work:/work -e TEST_WORK_DIR=/work \ + -v /tmp/data/nipype.cfg:/home/sdcflows/.nipype/nipype.cfg \ + -v /tmp/data:/data:ro -v /tmp/src:/src -v /tmp/tests:/out \ + -w /work poldracklab/sdcflows:latest \ + pytest -v --junit-xml=/out/pytest.xml /src/sdcflows/sdcflows + - save_cache: + key: workdir-v2-{{ .Branch }}-{{ .BuildNum }} + paths: + - /tmp/work + - store_artifacts: + path: /tmp/tests + - store_test_results: + path: /tmp/tests + + build_docs: + docker: + - image: python:3.7.4 + working_directory: /tmp/gh-pages + environment: + - FSLOUTPUTTYPE: NIFTI + - SUBJECTS_DIR: /tmp/subjects + steps: + - checkout + - run: + name: Create subjects folder + command: mkdir -p $SUBJECTS_DIR + - run: + name: Install Graphviz + command: apt update && apt -y install graphviz - run: - name: Load Docker image layer cache - no_output_timeout: 30m + name: Install deps + command: pip install --no-cache-dir -r docs/requirements.txt + - run: + name: Build only this commit + command: make -C docs SPHINXOPTS="-W" BUILDDIR="_build/no_version_html" html + - store_artifacts: + path: ./docs/_build/no_version_html + - run: + name: Stop or generate versioned docs? command: | - docker info - set +o pipefail - if [ -f /tmp/cache/docker.tar.gz ]; then - sudo apt update && sudo apt -y install pigz - pigz -d --stdout /tmp/cache/docker.tar.gz | docker load - docker images + set +e + force_versioned="$( git log --format=oneline -n 1 $CIRCLE_SHA1 | grep -i -E '\[docs?[ _]?versions?\]' )" + set -e + if [[ "x${CIRCLE_TAG}" = "x" && "${CIRCLE_BRANCH}" != "master" && "x${force_versioned}" = "x" ]]; then + echo "Not a tag or master branch - skipping versioned docs." 
+              circleci step halt
+            fi
+      - restore_cache:
+          keys:
+            - docs-v1-{{ .Branch }}-{{ .Revision }}
+            - docs-v1-{{ .Branch }}-
+            - docs-v1-master
+            - docs-v1-
+          paths:
+            - ./docs/_build/_html
+      - run:
+          name: Generate Versioned Docs
+          command: make -f ./docs/Makefile versioned CURBRANCH=${CIRCLE_TAG:-$CIRCLE_BRANCH}
+      - save_cache:
+          key: docs-v1-{{ .Branch }}-{{ .Revision }}
+          paths:
+            - ./docs/_build/_html
+      - persist_to_workspace:
+          root: docs/_build
+          paths: html
+      - store_artifacts:
+          path: ./docs/_build/html
+
+  deploy_docker:
+    machine:
+      image: circleci/classic:201711-01
+    working_directory: /tmp/src/
+    steps:
+      - restore_cache:
+          keys:
+            - build-v1-{{ .Branch }}-{{ epoch }}
+            - build-v1-{{ .Branch }}-
+            - build-v1-master-
+            - build-v1-
+          paths:
+            - /tmp/docker
+      - run:
+          name: Set-up a Docker registry
+          command: |
+            docker run -d -p 5000:5000 --restart=always --name=registry \
+                -v /tmp/docker:/var/lib/registry registry:2
+      - run:
+          name: Pull images from local registry
+          command: |
+            docker pull localhost:5000/sdcflows
+            docker tag localhost:5000/sdcflows poldracklab/sdcflows:latest
       - run:
           name: Deploy to Docker Hub
           no_output_timeout: 40m
@@ -199,23 +460,57 @@ jobs:
             source /tmp/build/bin/activate
             twine upload dist/sdcflows*

+  deploy_docs_tag:
+    <<: *docs
+
+  deploy_docs_master:
+    <<: *docs
+
 workflows:
   version: 2
   build_deploy:
     jobs:
       - build:
           filters:
+            branches:
+              ignore:
+                - /docs?\/.*/
+            tags:
+              only: /.*/
+      - cache_test_data:
+          filters:
+            branches:
+              ignore:
+                - /docs?\/.*/
             tags:
               only: /.*/
+
+      - test_sdcflows:
+          requires:
+            - build
+            - cache_test_data
+          filters:
+            branches:
+              ignore:
+                - /docs?\/.*/
+            tags:
+              only: /.*/
+
       - test_package:
           filters:
+            branches:
+              ignore:
+                - /docs?\/.*/
+                - /tests?\/.*/
             tags:
               only: /.*/
       - deploy_pypi:
           requires:
             - build
+            - build_docs
             - test_package
+            - test_sdcflows
           filters:
             branches:
               ignore: /.*/
@@ -224,10 +519,39 @@ workflows:
       - deploy_docker:
           requires:
-            - build
             - deploy_pypi
          filters:
             branches:
               ignore: /.*/
             tags:
               only: /.*/
+
+      - build_docs:
+          filters:
+            branches:
+              ignore:
+                - /tests?\/.*/
+                - /ds005\/.*/
+                - /ds054\/.*/
+            tags:
+              only: /.*/
+
+      - deploy_docs_master:
+          requires:
+            - test_sdcflows
+            - test_package
+            - build_docs
+          filters:
+            branches:
+              only: /master/
+            tags:
+              ignore: /.*/
+
+      - deploy_docs_tag:
+          requires:
+            - deploy_docker
+          filters:
+            branches:
+              ignore: /.*/
+            tags:
+              only: /.*/
diff --git a/.codecov.yml b/.codecov.yml
new file mode 100644
index 0000000000..110b4bae5e
--- /dev/null
+++ b/.codecov.yml
@@ -0,0 +1,4 @@
+coverage:
+  range: "0...100"
+  ignore:  # files and folders that will be removed during processing
+    - "sdcflows/_version.py"
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000000..1a7acb7238
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,12 @@
+[run]
+branch = True
+omit =
+    */tests/*
+    sdcflows/conftest.py
+    sdcflows/_version.py
+
+[report]
+# Regexes for lines to exclude from consideration
+exclude_lines =
+    raise NotImplementedError
+    warnings\.warn
diff --git a/.dockerignore b/.dockerignore
index b5bfff59d9..ae1b997a15 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -24,6 +24,8 @@ src/

 # git
 .gitignore
+.github/**/*
+.github
 .git/**/*
 .git

@@ -34,9 +36,12 @@ out/**/*
 out/

 # CI, etc.
-.circleci
 .circleci/**/*
-.zenodo.json
-.travis.yml
+.circleci
+.codecov.yml
+.coveragerc
+.pep8speaks.yml
 .readthedocs.yml
+.travis.yml
+.zenodo.json
 CONTRIBUTING.md
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 894a44cc06..96474def60 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 # Byte-compiled / optimized / DLL files
+pip-wheel-metadata
 __pycache__/
 *.py[cod]
 *$py.class
@@ -23,7 +24,8 @@ wheels/
 *.egg-info/
 .installed.cfg
 *.egg
-MANIFEST
+docs/_build
+docs/api

 # PyInstaller
 # Usually these files are written by a python script from a template
diff --git a/.pep8speaks.yml b/.pep8speaks.yml
new file mode 100644
index 0000000000..252b49c93f
--- /dev/null
+++ b/.pep8speaks.yml
@@ -0,0 +1,12 @@
+scanner:
+    diff_only: True  # Only show errors caused by the patch
+    linter: flake8
+
+message:  # Customize the comment made by the bot
+    opened:  # Messages when a new PR is submitted
+        header: "Hello @{name}, thank you for submitting the Pull Request!"
+        footer: "To test for issues locally, `pip install flake8` and then run `flake8 sdcflows`."
+    updated:  # Messages when new commits are added to the PR
+        header: "Hello @{name}, Thank you for updating!"
+        footer: "To test for issues locally, `pip install flake8` and then run `flake8 sdcflows`."
+    no_errors: "Cheers! There are no style issues detected in this Pull Request. :beers: "
diff --git a/Dockerfile b/Dockerfile
index 9ecdcbc969..d86eb83d84 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
 # Use Ubuntu 16.04 LTS
-FROM ubuntu:xenial-20161213
+FROM ubuntu:xenial-20191010

 # Pre-cache neurodebian key
 COPY .docker/files/neurodebian.gpg /usr/local/etc/neurodebian.gpg
@@ -19,6 +19,44 @@ RUN apt-get update && \
                     git && \
     apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

+# Installing freesurfer
+RUN curl -sSL https://surfer.nmr.mgh.harvard.edu/pub/dist/freesurfer/6.0.1/freesurfer-Linux-centos6_x86_64-stable-pub-v6.0.1.tar.gz | tar zxv --no-same-owner -C /opt \
+    --exclude='freesurfer/diffusion' \
+    --exclude='freesurfer/docs' \
+    --exclude='freesurfer/fsfast' \
+    --exclude='freesurfer/lib/cuda' \
+    --exclude='freesurfer/lib/qt' \
+    --exclude='freesurfer/matlab' \
+    --exclude='freesurfer/mni/share/man' \
+    --exclude='freesurfer/subjects/fsaverage_sym' \
+    --exclude='freesurfer/subjects/fsaverage3' \
+    --exclude='freesurfer/subjects/fsaverage4' \
+    --exclude='freesurfer/subjects/cvs_avg35' \
+    --exclude='freesurfer/subjects/cvs_avg35_inMNI152' \
+    --exclude='freesurfer/subjects/bert' \
+    --exclude='freesurfer/subjects/lh.EC_average' \
+    --exclude='freesurfer/subjects/rh.EC_average' \
+    --exclude='freesurfer/subjects/sample-*.mgz' \
+    --exclude='freesurfer/subjects/V1_average' \
+    --exclude='freesurfer/trctrain'
+
+ENV FSL_DIR="/usr/share/fsl/5.0" \
+    OS="Linux" \
+    FS_OVERRIDE=0 \
+    FIX_VERTEX_AREA="" \
+    FSF_OUTPUT_FORMAT="nii.gz" \
+    FREESURFER_HOME="/opt/freesurfer"
+ENV SUBJECTS_DIR="$FREESURFER_HOME/subjects" \
+    FUNCTIONALS_DIR="$FREESURFER_HOME/sessions" \
+    MNI_DIR="$FREESURFER_HOME/mni" \
+    LOCAL_DIR="$FREESURFER_HOME/local" \
+    MINC_BIN_DIR="$FREESURFER_HOME/mni/bin" \
+    MINC_LIB_DIR="$FREESURFER_HOME/mni/lib" \
+    MNI_DATAPATH="$FREESURFER_HOME/mni/data"
+ENV PERL5LIB="$MINC_LIB_DIR/perl5/5.8.5" \
+    MNI_PERL5LIB="$MINC_LIB_DIR/perl5/5.8.5" \
+    PATH="$FREESURFER_HOME/bin:$FSFAST_HOME/bin:$FREESURFER_HOME/tktools:$MINC_BIN_DIR:$PATH"
+
 # Installing Neurodebian packages (FSL, AFNI, git)
 RUN curl -sSL "http://neuro.debian.net/lists/$( lsb_release -c | cut -f2 ).us-ca.full" >> /etc/apt/sources.list.d/neurodebian.sources.list && \
     apt-key add /usr/local/etc/neurodebian.gpg && \
@@ -32,8 +70,7 @@ RUN apt-get update && \
                     git-annex-standalone && \
     apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

-ENV FSL_DIR="/usr/share/fsl/5.0" \
-    FSLDIR="/usr/share/fsl/5.0" \
+ENV FSLDIR="/usr/share/fsl/5.0" \
     FSLOUTPUTTYPE="NIFTI_GZ" \
     FSLMULTIFILEQUIT="TRUE" \
     POSSUMDIR="/usr/share/fsl/5.0" \
@@ -117,7 +154,6 @@ ENV IS_DOCKER_8395080871=1

 RUN ldconfig
 WORKDIR /tmp/
-ENTRYPOINT ["/usr/local/miniconda/bin/sdcflows"]

 ARG BUILD_DATE
 ARG VCS_REF
diff --git a/README.md b/README.md
deleted file mode 100644
index 2c0ec0184a..0000000000
--- a/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-# sdcflows
-Susceptibility Distortion Correction (SDC) workflows for EPI MR schemes
diff --git a/long_description.rst b/README.rst
similarity index 66%
rename from long_description.rst
rename to README.rst
index ff2c5b6899..69024041eb 100644
--- a/long_description.rst
+++ b/README.rst
@@ -1,3 +1,14 @@
+SDCflows
+--------
+
+.. image:: https://circleci.com/gh/poldracklab/sdcflows.svg?style=svg
+  :target: https://circleci.com/gh/poldracklab/sdcflows
+
+.. image:: https://img.shields.io/pypi/v/sdcflows.svg
+  :target: https://pypi.python.org/pypi/sdcflows/
+  :alt: Latest Version
+
+
 SDCflows (susceptibility distortion correction workflows) is a library of workflows
 to correct for distortions typically remaining after reconstruction in images acquired
 with EPI (echo-planar imaging) MR schemes.
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000000..5fc6ad0cbb
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,184 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+CURBRANCH     = master
+PYTHONPATH    = $(PWD)
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+ +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + + +clean: + rm -rf $(BUILDDIR)/* + rm -rf reference/* + +html: + PYTHONPATH=$(PYTHONPATH) $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/sdcflows.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/sdcflows.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/sdcflows" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/sdcflows" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." + +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." + +versioned: + PYTHONPATH=$(PYTHONPATH) sphinx-versioning -vv -l ./docs/conf.py build -r $(CURBRANCH) ./docs/ docs/$(BUILDDIR)/html/ diff --git a/docs/_static/RTD-advanced-conf.png b/docs/_static/RTD-advanced-conf.png new file mode 100644 index 0000000000..ae066a7417 Binary files /dev/null and b/docs/_static/RTD-advanced-conf.png differ diff --git a/docs/_static/git.png b/docs/_static/git.png new file mode 100644 index 0000000000..dcd8420210 Binary files /dev/null and b/docs/_static/git.png differ diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000000..e90144f809 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,216 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/master/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. 
If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +from packaging.version import Version + +from sdcflows import __version__, __copyright__, __packagename__ + +sys.path.append(os.path.abspath('sphinxext')) + +# -- Project information ----------------------------------------------------- +project = __packagename__ +copyright = __copyright__ +author = 'The SDCflows Developers' + +# The short X.Y version +version = Version(__version__).public +# The full version, including alpha/beta/rc tags +release = version + + +# -- General configuration --------------------------------------------------- +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.doctest', + 'sphinx.ext.intersphinx', + 'sphinx.ext.coverage', + 'sphinx.ext.mathjax', + 'sphinx.ext.ifconfig', + 'sphinx.ext.viewcode', + 'sphinx.ext.githubpages', + 'nipype.sphinxext.plot_workflow', + 'sphinxcontrib.apidoc', + 'sphinxcontrib.napoleon' +] + +autodoc_mock_imports = [ + 'matplotlib', + 'nilearn', + 'nipy', + 'nitime', + 'numpy', + 'pandas', + 'seaborn', + 'skimage', + 'svgutils', + 'transforms3d', +] + +# Accept custom section names to be parsed for numpy-style docstrings +# of parameters. +# Requires pinning sphinxcontrib-napoleon to a specific commit while +# https://github.com/sphinx-contrib/napoleon/pull/10 is merged. +napoleon_use_param = False +napoleon_custom_sections = [ + ('Inputs', 'Parameters'), + ('Outputs', 'Parameters'), +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'api/modules.rst'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = None + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. 
+# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'sdcflowsdoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'smriprep.tex', 'sMRIPrep Documentation', + 'The sMRIPrep Developers', 'manual'), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'smriprep', 'sMRIPrep Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'smriprep', 'sMRIPrep Documentation', + author, 'sMRIPrep', 'One line description of project.', + 'Miscellaneous'), +] + + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + + +# -- Extension configuration ------------------------------------------------- + +apidoc_module_dir = '../sdcflows' +apidoc_output_dir = 'api' +apidoc_excluded_paths = ['conftest.py', '*/tests/*', 'tests/*', 'data/*'] +apidoc_separate_modules = True +apidoc_extra_args = ['--module-first', '-d 1', '-T'] + +# -- Options for intersphinx extension --------------------------------------- + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = {'https://docs.python.org/': None} + +# -- Options for versioning extension ---------------------------------------- +scv_show_banner = True diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000000..84067f8a54 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,9 @@ +.. include:: links.rst +.. include:: ../README.rst + +Contents +-------- +.. 
toctree:: + :maxdepth: 3 + + api/sdcflows diff --git a/docs/links.rst b/docs/links.rst new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/reference/.gitignore b/docs/reference/.gitignore new file mode 100644 index 0000000000..30d85567b5 --- /dev/null +++ b/docs/reference/.gitignore @@ -0,0 +1 @@ +*.rst diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000000..309f50d57e --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,14 @@ +git+https://github.com/AleksandarPetrov/napoleon.git@0dc3f28a309ad602be5f44a9049785a1026451b3#egg=sphinxcontrib-napoleon +git+https://github.com/rwblair/sphinxcontrib-versioning.git@39b40b0b84bf872fc398feff05344051bbce0f63#egg=sphinxcontrib-versioning +nbsphinx +niflow-nipype1-workflows ~= 0.0.1 +nipype>=1.3.0rc1 +niworkflows ~= 1.0.0rc1 +packaging +pydot>=1.2.3 +pydotplus +sphinx-argparse +sphinx>=2.1.2 +sphinx_rtd_theme +sphinxcontrib-apidoc ~= 0.3.0 +templateflow \ No newline at end of file diff --git a/docs/sphinxext/docscrape.py b/docs/sphinxext/docscrape.py new file mode 100644 index 0000000000..470badd335 --- /dev/null +++ b/docs/sphinxext/docscrape.py @@ -0,0 +1,570 @@ +"""Extract reference documentation from the NumPy source tree. + +""" +from __future__ import division, absolute_import, print_function + +import inspect +import textwrap +import re +import pydoc +from warnings import warn +import collections +import sys + + +class Reader(object): + """A line-based string reader. + + """ + def __init__(self, data): + """ + Parameters + ---------- + data : str + String with lines separated by '\n'. + + """ + if isinstance(data, list): + self._str = data + else: + self._str = data.split('\n') # store string as list of lines + + self.reset() + + def __getitem__(self, n): + return self._str[n] + + def reset(self): + self._l = 0 # current line nr + + def read(self): + if not self.eof(): + out = self[self._l] + self._l += 1 + return out + else: + return '' + + def seek_next_non_empty_line(self): + for l in self[self._l:]: + if l.strip(): + break + else: + self._l += 1 + + def eof(self): + return self._l >= len(self._str) + + def read_to_condition(self, condition_func): + start = self._l + for line in self[start:]: + if condition_func(line): + return self[start:self._l] + self._l += 1 + if self.eof(): + return self[start:self._l+1] + return [] + + def read_to_next_empty_line(self): + self.seek_next_non_empty_line() + + def is_empty(line): + return not line.strip() + + return self.read_to_condition(is_empty) + + def read_to_next_unindented_line(self): + def is_unindented(line): + return (line.strip() and (len(line.lstrip()) == len(line))) + return self.read_to_condition(is_unindented) + + def peek(self, n=0): + if self._l + n < len(self._str): + return self[self._l + n] + else: + return '' + + def is_empty(self): + return not ''.join(self._str).strip() + + +class NumpyDocString(collections.Mapping): + def __init__(self, docstring, config={}): + docstring = textwrap.dedent(docstring).split('\n') + + self._doc = Reader(docstring) + self._parsed_data = { + 'Signature': '', + 'Summary': [''], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Yields': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Attributes': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'Warnings': [], + 'References': '', + 'Examples': '', + 'index': {} + } + + self._parse() + + def __getitem__(self, key): + return self._parsed_data[key] + + def __setitem__(self, key, val): + if key not in 
self._parsed_data: + warn("Unknown section %s" % key) + else: + self._parsed_data[key] = val + + def __iter__(self): + return iter(self._parsed_data) + + def __len__(self): + return len(self._parsed_data) + + def _is_at_section(self): + self._doc.seek_next_non_empty_line() + + if self._doc.eof(): + return False + + l1 = self._doc.peek().strip() # e.g. Parameters + + if l1.startswith('.. index::'): + return True + + l2 = self._doc.peek(1).strip() # ---------- or ========== + return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) + + def _strip(self, doc): + i = 0 + j = 0 + for i, line in enumerate(doc): + if line.strip(): + break + + for j, line in enumerate(doc[::-1]): + if line.strip(): + break + + return doc[i:len(doc)-j] + + def _read_to_next_section(self): + section = self._doc.read_to_next_empty_line() + + while not self._is_at_section() and not self._doc.eof(): + if not self._doc.peek(-1).strip(): # previous line was empty + section += [''] + + section += self._doc.read_to_next_empty_line() + + return section + + def _read_sections(self): + while not self._doc.eof(): + data = self._read_to_next_section() + name = data[0].strip() + + if name.startswith('..'): # index section + yield name, data[1:] + elif len(data) < 2: + yield StopIteration + else: + yield name, self._strip(data[2:]) + + def _parse_param_list(self, content): + r = Reader(content) + params = [] + while not r.eof(): + header = r.read().strip() + if ' : ' in header: + arg_name, arg_type = header.split(' : ')[:2] + else: + arg_name, arg_type = header, '' + + desc = r.read_to_next_unindented_line() + desc = dedent_lines(desc) + + params.append((arg_name, arg_type, desc)) + + return params + + _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" + r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) + + def _parse_see_also(self, content): + """ + func_name : Descriptive text + continued text + another_func_name : Descriptive text + func_name1, func_name2, :meth:`func_name`, func_name3 + + """ + items = [] + + def parse_item_name(text): + """Match ':role:`name`' or 'name'""" + m = self._name_rgx.match(text) + if m: + g = m.groups() + if g[1] is None: + return g[3], None + else: + return g[2], g[1] + raise ValueError("%s is not a item name" % text) + + def push_item(name, rest): + if not name: + return + name, role = parse_item_name(name) + items.append((name, list(rest), role)) + del rest[:] + + current_func = None + rest = [] + + for line in content: + if not line.strip(): + continue + + m = self._name_rgx.match(line) + if m and line[m.end():].strip().startswith(':'): + push_item(current_func, rest) + current_func, line = line[:m.end()], line[m.end():] + rest = [line.split(':', 1)[1].strip()] + if not rest[0]: + rest = [] + elif not line.startswith(' '): + push_item(current_func, rest) + current_func = None + if ',' in line: + for func in line.split(','): + if func.strip(): + push_item(func, []) + elif line.strip(): + current_func = line + elif current_func is not None: + rest.append(line.strip()) + push_item(current_func, rest) + return items + + def _parse_index(self, section, content): + """ + .. 
index: default + :refguide: something, else, and more + + """ + def strip_each_in(lst): + return [s.strip() for s in lst] + + out = {} + section = section.split('::') + if len(section) > 1: + out['default'] = strip_each_in(section[1].split(','))[0] + for line in content: + line = line.split(':') + if len(line) > 2: + out[line[1]] = strip_each_in(line[2].split(',')) + return out + + def _parse_summary(self): + """Grab signature (if given) and summary""" + if self._is_at_section(): + return + + # If several signatures present, take the last one + while True: + summary = self._doc.read_to_next_empty_line() + summary_str = " ".join([s.strip() for s in summary]).strip() + if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): + self['Signature'] = summary_str + if not self._is_at_section(): + continue + break + + if summary is not None: + self['Summary'] = summary + + if not self._is_at_section(): + self['Extended Summary'] = self._read_to_next_section() + + def _parse(self): + self._doc.reset() + self._parse_summary() + + sections = list(self._read_sections()) + section_names = set([section for section, content in sections]) + + has_returns = 'Returns' in section_names + has_yields = 'Yields' in section_names + # We could do more tests, but we are not. Arbitrarily. + if has_returns and has_yields: + msg = 'Docstring contains both a Returns and Yields section.' + raise ValueError(msg) + + for (section, content) in sections: + if not section.startswith('..'): + section = (s.capitalize() for s in section.split(' ')) + section = ' '.join(section) + if section in ('Parameters', 'Returns', 'Yields', 'Raises', + 'Warns', 'Other Parameters', 'Attributes', + 'Methods'): + self[section] = self._parse_param_list(content) + elif section.startswith('.. 
index::'): + self['index'] = self._parse_index(section, content) + elif section == 'See Also': + self['See Also'] = self._parse_see_also(content) + else: + self[section] = content + + # string conversion routines + + def _str_header(self, name, symbol='-'): + return [name, len(name)*symbol] + + def _str_indent(self, doc, indent=4): + out = [] + for line in doc: + out += [' '*indent + line] + return out + + def _str_signature(self): + if self['Signature']: + return [self['Signature'].replace('*', '\*')] + [''] + else: + return [''] + + def _str_summary(self): + if self['Summary']: + return self['Summary'] + [''] + else: + return [] + + def _str_extended_summary(self): + if self['Extended Summary']: + return self['Extended Summary'] + [''] + else: + return [] + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_header(name) + for param, param_type, desc in self[name]: + if param_type: + out += ['%s : %s' % (param, param_type)] + else: + out += [param] + out += self._str_indent(desc) + out += [''] + return out + + def _str_section(self, name): + out = [] + if self[name]: + out += self._str_header(name) + out += self[name] + out += [''] + return out + + def _str_see_also(self, func_role): + if not self['See Also']: + return [] + out = [] + out += self._str_header("See Also") + last_had_desc = True + for func, desc, role in self['See Also']: + if role: + link = ':%s:`%s`' % (role, func) + elif func_role: + link = ':%s:`%s`' % (func_role, func) + else: + link = "`%s`_" % func + if desc or last_had_desc: + out += [''] + out += [link] + else: + out[-1] += ", %s" % link + if desc: + out += self._str_indent([' '.join(desc)]) + last_had_desc = True + else: + last_had_desc = False + out += [''] + return out + + def _str_index(self): + idx = self['index'] + out = [] + out += ['.. index:: %s' % idx.get('default', '')] + for section, references in idx.items(): + if section == 'default': + continue + out += [' :%s: %s' % (section, ', '.join(references))] + return out + + def __str__(self, func_role=''): + out = [] + out += self._str_signature() + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Returns', 'Yields', + 'Other Parameters', 'Raises', 'Warns'): + out += self._str_param_list(param_list) + out += self._str_section('Warnings') + out += self._str_see_also(func_role) + for s in ('Notes', 'References', 'Examples'): + out += self._str_section(s) + for param_list in ('Attributes', 'Methods'): + out += self._str_param_list(param_list) + out += self._str_index() + return '\n'.join(out) + + +def indent(str, indent=4): + indent_str = ' '*indent + if str is None: + return indent_str + lines = str.split('\n') + return '\n'.join(indent_str + l for l in lines) + + +def dedent_lines(lines): + """Deindent a list of lines maximally""" + return textwrap.dedent("\n".join(lines)).split("\n") + + +def header(text, style='-'): + return text + '\n' + style*len(text) + '\n' + + +class FunctionDoc(NumpyDocString): + def __init__(self, func, role='func', doc=None, config={}): + self._f = func + self._role = role # e.g. 
"func" or "meth" + + if doc is None: + if func is None: + raise ValueError("No function or docstring given") + doc = inspect.getdoc(func) or '' + NumpyDocString.__init__(self, doc) + + if not self['Signature'] and func is not None: + func, func_name = self.get_func() + try: + # try to read signature + if sys.version_info[0] >= 3: + argspec = inspect.getfullargspec(func) + else: + argspec = inspect.getargspec(func) + argspec = inspect.formatargspec(*argspec) + argspec = argspec.replace('*', '\*') + signature = '%s%s' % (func_name, argspec) + except TypeError as e: + signature = '%s()' % func_name + self['Signature'] = signature + + def get_func(self): + func_name = getattr(self._f, '__name__', self.__class__.__name__) + if inspect.isclass(self._f): + func = getattr(self._f, '__call__', self._f.__init__) + else: + func = self._f + return func, func_name + + def __str__(self): + out = '' + + func, func_name = self.get_func() + signature = self['Signature'].replace('*', '\*') + + roles = {'func': 'function', + 'meth': 'method'} + + if self._role: + if self._role not in roles: + print("Warning: invalid role %s" % self._role) + out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''), + func_name) + + out += super(FunctionDoc, self).__str__(func_role=self._role) + return out + + +class ClassDoc(NumpyDocString): + + extra_public_methods = ['__call__'] + + def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, + config={}): + if not inspect.isclass(cls) and cls is not None: + raise ValueError("Expected a class or None, but got %r" % cls) + self._cls = cls + + self.show_inherited_members = config.get( + 'show_inherited_class_members', True) + + if modulename and not modulename.endswith('.'): + modulename += '.' + self._mod = modulename + + if doc is None: + if cls is None: + raise ValueError("No class or documentation string given") + doc = pydoc.getdoc(cls) + + NumpyDocString.__init__(self, doc) + + if config.get('show_class_members', True): + def splitlines_x(s): + if not s: + return [] + else: + return s.splitlines() + + for field, items in [('Methods', self.methods), + ('Attributes', self.properties)]: + if not self[field]: + doc_list = [] + for name in sorted(items): + try: + doc_item = pydoc.getdoc(getattr(self._cls, name)) + doc_list.append((name, '', splitlines_x(doc_item))) + except AttributeError: + pass # method doesn't exist + self[field] = doc_list + + @property + def methods(self): + if self._cls is None: + return [] + return [name for name, func in inspect.getmembers(self._cls) + if ((not name.startswith('_') + or name in self.extra_public_methods) + and isinstance(func, collections.Callable) + and self._is_show_member(name))] + + @property + def properties(self): + if self._cls is None: + return [] + return [name for name, func in inspect.getmembers(self._cls) + if (not name.startswith('_') and + (func is None or isinstance(func, property) or + inspect.isgetsetdescriptor(func)) + and self._is_show_member(name))] + + def _is_show_member(self, name): + if self.show_inherited_members: + return True # show all class members + if name not in self._cls.__dict__: + return False # class member is inherited, we do not show it + return True diff --git a/docs/sphinxext/docscrape_sphinx.py b/docs/sphinxext/docscrape_sphinx.py new file mode 100644 index 0000000000..e44e770ef8 --- /dev/null +++ b/docs/sphinxext/docscrape_sphinx.py @@ -0,0 +1,227 @@ +import re, inspect, textwrap, pydoc +import sphinx +from docscrape import NumpyDocString, FunctionDoc, ClassDoc + +class 
SphinxDocString(NumpyDocString): + def __init__(self, docstring, config={}): + self.use_plots = config.get('use_plots', False) + NumpyDocString.__init__(self, docstring, config=config) + + # string conversion routines + def _str_header(self, name, symbol='`'): + return ['.. rubric:: ' + name, ''] + + def _str_field_list(self, name): + return [':' + name + ':'] + + def _str_indent(self, doc, indent=4): + out = [] + for line in doc: + out += [' '*indent + line] + return out + + def _str_signature(self): + return [''] + if self['Signature']: + return ['``%s``' % self['Signature']] + [''] + else: + return [''] + + def _str_summary(self): + return self['Summary'] + [''] + + def _str_extended_summary(self): + return self['Extended Summary'] + [''] + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_field_list(name) + out += [''] + for param,param_type,desc in self[name]: + out += self._str_indent(['**%s** : %s' % (param.strip(), + param_type)]) + out += [''] + out += self._str_indent(desc,8) + out += [''] + return out + + @property + def _obj(self): + if hasattr(self, '_cls'): + return self._cls + elif hasattr(self, '_f'): + return self._f + return None + + def _str_member_list(self, name): + """ + Generate a member listing, autosummary:: table where possible, + and a table where not. + + """ + out = [] + if self[name]: + out += ['.. rubric:: %s' % name, ''] + prefix = getattr(self, '_name', '') + + if prefix: + prefix = '~%s.' % prefix + + autosum = [] + others = [] + for param, param_type, desc in self[name]: + param = param.strip() + if not self._obj or hasattr(self._obj, param): + autosum += [" %s%s" % (prefix, param)] + else: + others.append((param, param_type, desc)) + + if autosum: + out += ['.. autosummary::', ' :toctree:', ''] + out += autosum + + if others: + maxlen_0 = max([len(x[0]) for x in others]) + maxlen_1 = max([len(x[1]) for x in others]) + hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10 + fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) + n_indent = maxlen_0 + maxlen_1 + 4 + out += [hdr] + for param, param_type, desc in others: + out += [fmt % (param.strip(), param_type)] + out += self._str_indent(desc, n_indent) + out += [hdr] + out += [''] + return out + + def _str_section(self, name): + out = [] + if self[name]: + out += self._str_header(name) + out += [''] + content = textwrap.dedent("\n".join(self[name])).split("\n") + out += content + out += [''] + return out + + def _str_see_also(self, func_role): + out = [] + if self['See Also']: + see_also = super(SphinxDocString, self)._str_see_also(func_role) + out = ['.. seealso::', ''] + out += self._str_indent(see_also[2:]) + return out + + def _str_warnings(self): + out = [] + if self['Warnings']: + out = ['.. warning::', ''] + out += self._str_indent(self['Warnings']) + return out + + def _str_index(self): + idx = self['index'] + out = [] + if len(idx) == 0: + return out + + out += ['.. 
index:: %s' % idx.get('default','')] + for section, references in idx.iteritems(): + if section == 'default': + continue + elif section == 'refguide': + out += [' single: %s' % (', '.join(references))] + else: + out += [' %s: %s' % (section, ','.join(references))] + return out + + def _str_references(self): + out = [] + if self['References']: + out += self._str_header('References') + if isinstance(self['References'], str): + self['References'] = [self['References']] + out.extend(self['References']) + out += [''] + # Latex collects all references to a separate bibliography, + # so we need to insert links to it + if sphinx.__version__ >= "0.6": + out += ['.. only:: latex',''] + else: + out += ['.. latexonly::',''] + items = [] + for line in self['References']: + m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) + if m: + items.append(m.group(1)) + out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] + return out + + def _str_examples(self): + examples_str = "\n".join(self['Examples']) + + if (self.use_plots and 'import matplotlib' in examples_str + and 'plot::' not in examples_str): + out = [] + out += self._str_header('Examples') + out += ['.. plot::', ''] + out += self._str_indent(self['Examples']) + out += [''] + return out + else: + return self._str_section('Examples') + + def __str__(self, indent=0, func_role="obj"): + out = [] + out += self._str_signature() + out += self._str_index() + [''] + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Returns', 'Other Parameters', + 'Raises', 'Warns'): + out += self._str_param_list(param_list) + out += self._str_warnings() + out += self._str_see_also(func_role) + out += self._str_section('Notes') + out += self._str_references() + out += self._str_examples() + for param_list in ('Attributes', 'Methods'): + out += self._str_member_list(param_list) + out = self._str_indent(out,indent) + return '\n'.join(out) + +class SphinxFunctionDoc(SphinxDocString, FunctionDoc): + def __init__(self, obj, doc=None, config={}): + self.use_plots = config.get('use_plots', False) + FunctionDoc.__init__(self, obj, doc=doc, config=config) + +class SphinxClassDoc(SphinxDocString, ClassDoc): + def __init__(self, obj, doc=None, func_doc=None, config={}): + self.use_plots = config.get('use_plots', False) + ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) + +class SphinxObjDoc(SphinxDocString): + def __init__(self, obj, doc=None, config={}): + self._f = obj + SphinxDocString.__init__(self, doc, config=config) + +def get_doc_object(obj, what=None, doc=None, config={}): + if what is None: + if inspect.isclass(obj): + what = 'class' + elif inspect.ismodule(obj): + what = 'module' + elif callable(obj): + what = 'function' + else: + what = 'object' + if what == 'class': + return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, + config=config) + elif what in ('function', 'method'): + return SphinxFunctionDoc(obj, doc=doc, config=config) + else: + if doc is None: + doc = pydoc.getdoc(obj) + return SphinxObjDoc(obj, doc, config=config) diff --git a/docs/sphinxext/github.py b/docs/sphinxext/github.py new file mode 100644 index 0000000000..519e146d19 --- /dev/null +++ b/docs/sphinxext/github.py @@ -0,0 +1,155 @@ +"""Define text roles for GitHub + +* ghissue - Issue +* ghpull - Pull Request +* ghuser - User + +Adapted from bitbucket example here: +https://bitbucket.org/birkenfeld/sphinx-contrib/src/tip/bitbucket/sphinxcontrib/bitbucket.py + +Authors +------- + +* Doug Hellmann +* Min RK +""" +# 
+# Original Copyright (c) 2010 Doug Hellmann. All rights reserved. +# + +from docutils import nodes, utils +from docutils.parsers.rst.roles import set_classes + +def make_link_node(rawtext, app, type, slug, options): + """Create a link to a github resource. + + :param rawtext: Text being replaced with link node. + :param app: Sphinx application context + :param type: Link type (issues, changeset, etc.) + :param slug: ID of the thing to link to + :param options: Options dictionary passed to role func. + """ + + try: + base = app.config.github_project_url + if not base: + raise AttributeError + if not base.endswith('/'): + base += '/' + except AttributeError as err: + raise ValueError('github_project_url configuration value is not set (%s)' % str(err)) + + ref = base + type + '/' + slug + '/' + set_classes(options) + prefix = "#" + if type == 'pull': + prefix = "PR " + prefix + node = nodes.reference(rawtext, prefix + utils.unescape(slug), refuri=ref, + **options) + return node + +def ghissue_role(name, rawtext, text, lineno, inliner, options={}, content=[]): + """Link to a GitHub issue. + + Returns 2 part tuple containing list of nodes to insert into the + document and a list of system messages. Both are allowed to be + empty. + + :param name: The role name used in the document. + :param rawtext: The entire markup snippet, with role. + :param text: The text marked with the role. + :param lineno: The line number where rawtext appears in the input. + :param inliner: The inliner instance that called us. + :param options: Directive options for customization. + :param content: The directive content for customization. + """ + + try: + issue_num = int(text) + if issue_num <= 0: + raise ValueError + except ValueError: + msg = inliner.reporter.error( + 'GitHub issue number must be a number greater than or equal to 1; ' + '"%s" is invalid.' % text, line=lineno) + prb = inliner.problematic(rawtext, rawtext, msg) + return [prb], [msg] + app = inliner.document.settings.env.app + #app.info('issue %r' % text) + if 'pull' in name.lower(): + category = 'pull' + elif 'issue' in name.lower(): + category = 'issues' + else: + msg = inliner.reporter.error( + 'GitHub roles include "ghpull" and "ghissue", ' + '"%s" is invalid.' % name, line=lineno) + prb = inliner.problematic(rawtext, rawtext, msg) + return [prb], [msg] + node = make_link_node(rawtext, app, category, str(issue_num), options) + return [node], [] + +def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]): + """Link to a GitHub user. + + Returns 2 part tuple containing list of nodes to insert into the + document and a list of system messages. Both are allowed to be + empty. + + :param name: The role name used in the document. + :param rawtext: The entire markup snippet, with role. + :param text: The text marked with the role. + :param lineno: The line number where rawtext appears in the input. + :param inliner: The inliner instance that called us. + :param options: Directive options for customization. + :param content: The directive content for customization. + """ + app = inliner.document.settings.env.app + #app.info('user link %r' % text) + ref = 'https://www.github.com/' + text + node = nodes.reference(rawtext, text, refuri=ref, **options) + return [node], [] + +def ghcommit_role(name, rawtext, text, lineno, inliner, options={}, content=[]): + """Link to a GitHub commit. + + Returns 2 part tuple containing list of nodes to insert into the + document and a list of system messages. Both are allowed to be + empty. 
+ + :param name: The role name used in the document. + :param rawtext: The entire markup snippet, with role. + :param text: The text marked with the role. + :param lineno: The line number where rawtext appears in the input. + :param inliner: The inliner instance that called us. + :param options: Directive options for customization. + :param content: The directive content for customization. + """ + app = inliner.document.settings.env.app + #app.info('user link %r' % text) + try: + base = app.config.github_project_url + if not base: + raise AttributeError + if not base.endswith('/'): + base += '/' + except AttributeError as err: + raise ValueError('github_project_url configuration value is not set (%s)' % str(err)) + + ref = base + text + node = nodes.reference(rawtext, text[:6], refuri=ref, **options) + return [node], [] + + +def setup(app): + """Install the plugin. + + :param app: Sphinx application context. + """ + app.info('Initializing GitHub plugin') + app.add_role('ghissue', ghissue_role) + app.add_role('ghpull', ghissue_role) + app.add_role('ghuser', ghuser_role) + app.add_role('ghcommit', ghcommit_role) + app.add_config_value('github_project_url', None, 'env') + return diff --git a/docs/sphinxext/math_dollar.py b/docs/sphinxext/math_dollar.py new file mode 100644 index 0000000000..ad415deb90 --- /dev/null +++ b/docs/sphinxext/math_dollar.py @@ -0,0 +1,63 @@ +import re + +def dollars_to_math(source): + r""" + Replace dollar signs with backticks. + + More precisely, do a regular expression search. Replace a plain + dollar sign ($) by a backtick (`). Replace an escaped dollar sign + (\$) by a dollar sign ($). Don't change a dollar sign preceded or + followed by a backtick (`$ or $`), because of strings like + "``$HOME``". Don't make any changes on lines starting with + spaces, because those are indented and hence part of a block of + code or examples. + + This also doesn't replaces dollar signs enclosed in curly braces, + to avoid nested math environments, such as :: + + $f(n) = 0 \text{ if $n$ is prime}$ + + Thus the above line would get changed to + + `f(n) = 0 \text{ if $n$ is prime}` + """ + s = "\n".join(source) + if s.find("$") == -1: + return + # This searches for "$blah$" inside a pair of curly braces -- + # don't change these, since they're probably coming from a nested + # math environment. So for each match, we replace it with a temporary + # string, and later on we substitute the original back. 
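+    # Illustrative (hedged) example of the intended rewrite:
+    #     "Solve $ax + b = 0$ for $x$."  ->  "Solve `ax + b = 0` for `x`."
+    # while lines such as "``$HOME``" or indented code/example lines are left
+    # alone, per the rules documented in the docstring above.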
+ global _data + _data = {} + def repl(matchobj): + global _data + s = matchobj.group(0) + t = "___XXX_REPL_%d___" % len(_data) + _data[t] = s + return t + s = re.sub(r"({[^{}$]*\$[^{}$]*\$[^{}]*})", repl, s) + # matches $...$ + dollars = re.compile(r"(?= 3: + sixu = lambda s: s +else: + sixu = lambda s: unicode(s, 'unicode_escape') + + +def mangle_docstrings(app, what, name, obj, options, lines, + reference_offset=[0]): + + cfg = {'use_plots': app.config.numpydoc_use_plots, + 'show_class_members': app.config.numpydoc_show_class_members, + 'show_inherited_class_members': + app.config.numpydoc_show_inherited_class_members, + 'class_members_toctree': app.config.numpydoc_class_members_toctree} + + u_NL = sixu('\n') + if what == 'module': + # Strip top title + pattern = '^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*' + title_re = re.compile(sixu(pattern), re.I | re.S) + lines[:] = title_re.sub(sixu(''), u_NL.join(lines)).split(u_NL) + else: + doc = get_doc_object(obj, what, u_NL.join(lines), config=cfg) + if sys.version_info[0] >= 3: + doc = str(doc) + else: + doc = unicode(doc) + lines[:] = doc.split(u_NL) + + if (app.config.numpydoc_edit_link and hasattr(obj, '__name__') and + obj.__name__): + if hasattr(obj, '__module__'): + v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__)) + else: + v = dict(full_name=obj.__name__) + lines += [sixu(''), sixu('.. htmlonly::'), sixu('')] + lines += [sixu(' %s') % x for x in + (app.config.numpydoc_edit_link % v).split("\n")] + + # replace reference numbers so that there are no duplicates + references = [] + for line in lines: + line = line.strip() + m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I) + if m: + references.append(m.group(1)) + + # start renaming from the longest string, to avoid overwriting parts + references.sort(key=lambda x: -len(x)) + if references: + for i, line in enumerate(lines): + for r in references: + if re.match(sixu('^\\d+$'), r): + new_r = sixu("R%d") % (reference_offset[0] + int(r)) + else: + new_r = sixu("%s%d") % (r, reference_offset[0]) + lines[i] = lines[i].replace(sixu('[%s]_') % r, + sixu('[%s]_') % new_r) + lines[i] = lines[i].replace(sixu('.. [%s]') % r, + sixu('.. 
[%s]') % new_r) + + reference_offset[0] += len(references) + + +def mangle_signature(app, what, name, obj, options, sig, retann): + # Do not try to inspect classes that don't define `__init__` + if (inspect.isclass(obj) and + (not hasattr(obj, '__init__') or + 'initializes x; see ' in pydoc.getdoc(obj.__init__))): + return '', '' + + if not (isinstance(obj, collections.Callable) or + hasattr(obj, '__argspec_is_invalid_')): + return + + if not hasattr(obj, '__doc__'): + return + + doc = SphinxDocString(pydoc.getdoc(obj)) + if doc['Signature']: + sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature']) + return sig, sixu('') + + +def setup(app, get_doc_object_=get_doc_object): + if not hasattr(app, 'add_config_value'): + return # probably called by nose, better bail out + + global get_doc_object + get_doc_object = get_doc_object_ + + app.connect('autodoc-process-docstring', mangle_docstrings) + app.connect('autodoc-process-signature', mangle_signature) + app.add_config_value('numpydoc_edit_link', None, False) + app.add_config_value('numpydoc_use_plots', None, False) + app.add_config_value('numpydoc_show_class_members', True, True) + app.add_config_value('numpydoc_show_inherited_class_members', True, True) + app.add_config_value('numpydoc_class_members_toctree', True, True) + + # Extra mangling domains + app.add_domain(NumpyPythonDomain) + app.add_domain(NumpyCDomain) + +# ------------------------------------------------------------------------------ +# Docstring-mangling domains +# ------------------------------------------------------------------------------ + +from docutils.statemachine import ViewList +from sphinx.domains.c import CDomain +from sphinx.domains.python import PythonDomain + + +class ManglingDomainBase(object): + directive_mangling_map = {} + + def __init__(self, *a, **kw): + super(ManglingDomainBase, self).__init__(*a, **kw) + self.wrap_mangling_directives() + + def wrap_mangling_directives(self): + for name, objtype in list(self.directive_mangling_map.items()): + self.directives[name] = wrap_mangling_directive( + self.directives[name], objtype) + + +class NumpyPythonDomain(ManglingDomainBase, PythonDomain): + name = 'np' + directive_mangling_map = { + 'function': 'function', + 'class': 'class', + 'exception': 'class', + 'method': 'function', + 'classmethod': 'function', + 'staticmethod': 'function', + 'attribute': 'attribute', + } + indices = [] + + +class NumpyCDomain(ManglingDomainBase, CDomain): + name = 'np-c' + directive_mangling_map = { + 'function': 'function', + 'member': 'attribute', + 'macro': 'function', + 'type': 'class', + 'var': 'object', + } + + +def wrap_mangling_directive(base_directive, objtype): + class directive(base_directive): + def run(self): + env = self.state.document.settings.env + + name = None + if self.arguments: + m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) + name = m.group(2).strip() + + if not name: + name = self.arguments[0] + + lines = list(self.content) + mangle_docstrings(env.app, objtype, name, None, None, lines) + self.content = ViewList(lines, self.content.parent) + + return base_directive.run(self) + + return directive diff --git a/docs/tools/LICENSE.txt b/docs/tools/LICENSE.txt new file mode 100644 index 0000000000..9e1d415af8 --- /dev/null +++ b/docs/tools/LICENSE.txt @@ -0,0 +1,7 @@ +These files were obtained from + +https://www.mail-archive.com/sphinx-dev@googlegroups.com/msg02472.html + +and were released under a BSD/MIT license by Fernando Perez, Matthew Brett and +the PyMVPA folks. 
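For reference, a hedged conf.py sketch of the switches that the numpydoc setup() above registers (the values are illustrative, not necessarily this project's settings):

# conf.py (sketch)
extensions = ['numpydoc']
numpydoc_use_plots = True                      # wrap matplotlib Examples in .. plot::
numpydoc_show_class_members = True             # document methods/attributes of classes
numpydoc_show_inherited_class_members = False
numpydoc_class_members_toctree = False         # do not add member stubs to the toctree
numpydoc_edit_link = None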
Further cleanups by the scikit-image crew. + diff --git a/docs/tools/apigen.py b/docs/tools/apigen.py new file mode 100644 index 0000000000..758671fa3b --- /dev/null +++ b/docs/tools/apigen.py @@ -0,0 +1,509 @@ +""" +Attempt to generate templates for module reference with Sphinx + +To include extension modules, first identify them as valid in the +``_uri2path`` method, then handle them in the ``_parse_module_with_import`` +script. + +Notes +----- +This parsing is based on import and introspection of modules. +Previously functions and classes were found by parsing the text of .py files. + +Extension modules should be discovered and included as well. + +This is a modified version of a script originally shipped with the PyMVPA +project, then adapted for use first in NIPY and then in skimage. PyMVPA +is an MIT-licensed project. +""" + +# Stdlib imports +import os +import re +from inspect import getmodule + +from types import BuiltinFunctionType, FunctionType + +# suppress print statements (warnings for empty files) +DEBUG = True + +class ApiDocWriter(object): + ''' Class for automatic detection and parsing of API docs + to Sphinx-parsable reST format''' + + # only separating first two levels + rst_section_levels = ['*', '=', '-', '~', '^'] + + def __init__(self, + package_name, + rst_extension='.txt', + package_skip_patterns=None, + module_skip_patterns=None, + other_defines = True + ): + ''' Initialize package for parsing + + Parameters + ---------- + package_name : string + Name of the top-level package. *package_name* must be the + name of an importable package + rst_extension : string, optional + Extension for reST files, default '.rst' + package_skip_patterns : None or sequence of {strings, regexps} + Sequence of strings giving URIs of packages to be excluded + Operates on the package path, starting at (including) the + first dot in the package path, after *package_name* - so, + if *package_name* is ``sphinx``, then ``sphinx.util`` will + result in ``.util`` being passed for searching by these + regexps. If is None, gives default. Default is: + ['\.tests$'] + module_skip_patterns : None or sequence + Sequence of strings giving URIs of modules to be excluded + Operates on the module name including preceding URI path, + back to the first dot after *package_name*. For example + ``sphinx.util.console`` results in the string to search of + ``.util.console`` + If is None, gives default. Default is: + ['\.setup$', '\._'] + other_defines : {True, False}, optional + Whether to include classes and functions that are imported in a + particular module but not defined there. 
+ ''' + if package_skip_patterns is None: + package_skip_patterns = ['\\.tests$'] + if module_skip_patterns is None: + module_skip_patterns = ['\\.setup$', '\\._'] + self.package_name = package_name + self.rst_extension = rst_extension + self.package_skip_patterns = package_skip_patterns + self.module_skip_patterns = module_skip_patterns + self.other_defines = other_defines + + def get_package_name(self): + return self._package_name + + def set_package_name(self, package_name): + ''' Set package_name + + >>> docwriter = ApiDocWriter('sphinx') + >>> import sphinx + >>> docwriter.root_path == sphinx.__path__[0] + True + >>> docwriter.package_name = 'docutils' + >>> import docutils + >>> docwriter.root_path == docutils.__path__[0] + True + ''' + # It's also possible to imagine caching the module parsing here + self._package_name = package_name + root_module = self._import(package_name) + self.root_path = root_module.__path__[-1] + self.written_modules = None + + package_name = property(get_package_name, set_package_name, None, + 'get/set package_name') + + def _import(self, name): + ''' Import namespace package ''' + mod = __import__(name) + components = name.split('.') + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + + def _get_object_name(self, line): + ''' Get second token in line + >>> docwriter = ApiDocWriter('sphinx') + >>> docwriter._get_object_name(" def func(): ") + 'func' + >>> docwriter._get_object_name(" class Klass(object): ") + 'Klass' + >>> docwriter._get_object_name(" class Klass: ") + 'Klass' + ''' + name = line.split()[1].split('(')[0].strip() + # in case we have classes which are not derived from object + # ie. old style classes + return name.rstrip(':') + + def _uri2path(self, uri): + ''' Convert uri to absolute filepath + + Parameters + ---------- + uri : string + URI of python module to return path for + + Returns + ------- + path : None or string + Returns None if there is no valid path for this URI + Otherwise returns absolute file system path for URI + + Examples + -------- + >>> docwriter = ApiDocWriter('sphinx') + >>> import sphinx + >>> modpath = sphinx.__path__[0] + >>> res = docwriter._uri2path('sphinx.builder') + >>> res == os.path.join(modpath, 'builder.py') + True + >>> res = docwriter._uri2path('sphinx') + >>> res == os.path.join(modpath, '__init__.py') + True + >>> docwriter._uri2path('sphinx.does_not_exist') + + ''' + if uri == self.package_name: + return os.path.join(self.root_path, '__init__.py') + path = uri.replace(self.package_name + '.', '') + path = path.replace('.', os.path.sep) + path = os.path.join(self.root_path, path) + # XXX maybe check for extensions as well? + if os.path.exists(path + '.py'): # file + path += '.py' + elif os.path.exists(os.path.join(path, '__init__.py')): + path = os.path.join(path, '__init__.py') + else: + return None + return path + + def _path2uri(self, dirpath): + ''' Convert directory path to uri ''' + package_dir = self.package_name.replace('.', os.path.sep) + relpath = dirpath.replace(self.root_path, package_dir) + if relpath.startswith(os.path.sep): + relpath = relpath[1:] + return relpath.replace(os.path.sep, '.') + + def _parse_module(self, uri): + ''' Parse module defined in *uri* ''' + filename = self._uri2path(uri) + if filename is None: + print(filename, 'erk') + # nothing that we could handle here. 
+ return ([],[]) + + f = open(filename, 'rt') + functions, classes = self._parse_lines(f) + f.close() + return functions, classes + + def _parse_module_with_import(self, uri): + """Look for functions and classes in an importable module. + + Parameters + ---------- + uri : str + The name of the module to be parsed. This module needs to be + importable. + + Returns + ------- + functions : list of str + A list of (public) function names in the module. + classes : list of str + A list of (public) class names in the module. + """ + mod = __import__(uri, fromlist=[uri]) + # find all public objects in the module. + obj_strs = [obj for obj in dir(mod) if not obj.startswith('_')] + functions = [] + classes = [] + for obj_str in obj_strs: + # find the actual object from its string representation + if obj_str not in mod.__dict__: + continue + obj = mod.__dict__[obj_str] + # Check if function / class defined in module + if not self.other_defines and not getmodule(obj) == mod: + continue + # figure out if obj is a function or class + if hasattr(obj, 'func_name') or \ + isinstance(obj, BuiltinFunctionType) or \ + isinstance(obj, FunctionType): + functions.append(obj_str) + else: + try: + issubclass(obj, object) + classes.append(obj_str) + except TypeError: + # not a function or class + pass + return functions, classes + + def _parse_lines(self, linesource): + ''' Parse lines of text for functions and classes ''' + functions = [] + classes = [] + for line in linesource: + if line.startswith('def ') and line.count('('): + # exclude private stuff + name = self._get_object_name(line) + if not name.startswith('_'): + functions.append(name) + elif line.startswith('class '): + # exclude private stuff + name = self._get_object_name(line) + if not name.startswith('_'): + classes.append(name) + else: + pass + functions.sort() + classes.sort() + return functions, classes + + def generate_api_doc(self, uri): + '''Make autodoc documentation template string for a module + + Parameters + ---------- + uri : string + python location of module - e.g 'sphinx.builder' + + Returns + ------- + head : string + Module name, table of contents. + body : string + Function and class docstrings. + ''' + # get the names of all classes and functions + functions, classes = self._parse_module_with_import(uri) + if not len(functions) and not len(classes) and DEBUG: + print('WARNING: Empty -', uri) # dbg + + # Make a shorter version of the uri that omits the package name for + # titles + uri_short = re.sub(r'^%s\.' % self.package_name,'',uri) + + head = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n' + body = '' + + # Set the chapter title to read 'module' for all modules except for the + # main packages + if '.' in uri_short: + title = 'Module: :mod:`' + uri_short + '`' + head += title + '\n' + self.rst_section_levels[2] * len(title) + else: + title = ':mod:`' + uri_short + '`' + head += title + '\n' + self.rst_section_levels[1] * len(title) + + head += '\n.. automodule:: ' + uri + '\n' + head += '\n.. currentmodule:: ' + uri + '\n' + body += '\n.. currentmodule:: ' + uri + '\n\n' + for c in classes: + body += '\n:class:`' + c + '`\n' \ + + self.rst_section_levels[3] * \ + (len(c)+9) + '\n\n' + body += '\n.. autoclass:: ' + c + '\n' + # must NOT exclude from index to keep cross-refs working + body += ' :members:\n' \ + ' :undoc-members:\n' \ + ' :show-inheritance:\n' \ + '\n' \ + ' .. automethod:: __init__\n\n' + head += '.. 
autosummary::\n\n' + for f in classes + functions: + head += ' ' + f + '\n' + head += '\n' + + for f in functions: + # must NOT exclude from index to keep cross-refs working + body += f + '\n' + body += self.rst_section_levels[3] * len(f) + '\n' + body += '\n.. autofunction:: ' + f + '\n\n' + + return head, body + + def _survives_exclude(self, matchstr, match_type): + ''' Returns True if *matchstr* does not match patterns + + ``self.package_name`` removed from front of string if present + + Examples + -------- + >>> dw = ApiDocWriter('sphinx') + >>> dw._survives_exclude('sphinx.okpkg', 'package') + True + >>> dw.package_skip_patterns.append('^\\.badpkg$') + >>> dw._survives_exclude('sphinx.badpkg', 'package') + False + >>> dw._survives_exclude('sphinx.badpkg', 'module') + True + >>> dw._survives_exclude('sphinx.badmod', 'module') + True + >>> dw.module_skip_patterns.append('^\\.badmod$') + >>> dw._survives_exclude('sphinx.badmod', 'module') + False + ''' + if match_type == 'module': + patterns = self.module_skip_patterns + elif match_type == 'package': + patterns = self.package_skip_patterns + else: + raise ValueError('Cannot interpret match type "%s"' + % match_type) + # Match to URI without package name + L = len(self.package_name) + if matchstr[:L] == self.package_name: + matchstr = matchstr[L:] + for pat in patterns: + try: + pat.search + except AttributeError: + pat = re.compile(pat) + if pat.search(matchstr): + return False + + return True + + def discover_modules(self): + ''' Return module sequence discovered from ``self.package_name`` + + + Parameters + ---------- + None + + Returns + ------- + mods : sequence + Sequence of module names within ``self.package_name`` + + Examples + -------- + >>> dw = ApiDocWriter('sphinx') + >>> mods = dw.discover_modules() + >>> 'sphinx.util' in mods + True + >>> dw.package_skip_patterns.append('\.util$') + >>> 'sphinx.util' in dw.discover_modules() + False + >>> + ''' + modules = [self.package_name] + # raw directory parsing + for dirpath, dirnames, filenames in os.walk(self.root_path): + # Check directory names for packages + root_uri = self._path2uri(os.path.join(self.root_path, + dirpath)) + + # Normally, we'd only iterate over dirnames, but since + # dipy does not import a whole bunch of modules we'll + # include those here as well (the *.py filenames). 
+ filenames = [f[:-3] for f in filenames if + f.endswith('.py') and not f.startswith('__init__')] + for filename in filenames: + package_uri = '/'.join((dirpath, filename)) + + for subpkg_name in dirnames + filenames: + package_uri = '.'.join((root_uri, subpkg_name)) + package_path = self._uri2path(package_uri) + if (package_path and + self._survives_exclude(package_uri, 'package')): + modules.append(package_uri) + + return sorted(modules) + + def write_modules_api(self, modules, outdir): + # upper-level modules + main_module = modules[0].split('.')[0] + ulms = ['.'.join(m.split('.')[:2]) if m.count('.') >= 1 + else m.split('.')[0] for m in modules] + + from collections import OrderedDict + module_by_ulm = OrderedDict() + + for v, k in zip(modules, ulms): + if k in module_by_ulm: + module_by_ulm[k].append(v) + else: + module_by_ulm[k] = [v] + + written_modules = [] + + for ulm, mods in module_by_ulm.items(): + print("Generating docs for %s:" % ulm) + document_head = [] + document_body = [] + + for m in mods: + print(" -> " + m) + head, body = self.generate_api_doc(m) + + document_head.append(head) + document_body.append(body) + + out_module = ulm + self.rst_extension + outfile = os.path.join(outdir, out_module) + fileobj = open(outfile, 'wt') + + fileobj.writelines(document_head + document_body) + fileobj.close() + written_modules.append(out_module) + + self.written_modules = written_modules + + def write_api_docs(self, outdir): + """Generate API reST files. + + Parameters + ---------- + outdir : string + Directory name in which to store files + We create automatic filenames for each module + + Returns + ------- + None + + Notes + ----- + Sets self.written_modules to list of written modules + """ + if not os.path.exists(outdir): + os.mkdir(outdir) + # compose list of modules + modules = self.discover_modules() + self.write_modules_api(modules,outdir) + + def write_index(self, outdir, froot='gen', relative_to=None): + """Make a reST API index file from written files + + Parameters + ---------- + path : string + Filename to write index to + outdir : string + Directory to which to write generated index file + froot : string, optional + root (filename without extension) of filename to write to + Defaults to 'gen'. We add ``self.rst_extension``. + relative_to : string + path to which written filenames are relative. This + component of the written file path will be removed from + outdir, in the generated index. Default is None, meaning, + leave path as it is. + """ + if self.written_modules is None: + raise ValueError('No modules written') + # Get full filename path + path = os.path.join(outdir, froot+self.rst_extension) + # Path written into index is relative to rootpath + if relative_to is not None: + relpath = (outdir + os.path.sep).replace(relative_to + os.path.sep, '') + else: + relpath = outdir + idx = open(path,'wt') + w = idx.write + w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n') + + title = "API Reference" + w(title + "\n") + w("=" * len(title) + "\n\n") + w('.. toctree::\n\n') + for f in self.written_modules: + w(' %s\n' % os.path.join(relpath,f)) + idx.close() diff --git a/docs/tools/buildmodref.py b/docs/tools/buildmodref.py new file mode 100755 index 0000000000..e2a7b9f614 --- /dev/null +++ b/docs/tools/buildmodref.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +"""Script to auto-generate API docs. 
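A hedged sketch of driving the writeapi() helper defined below (the output folder and the direct call are illustrative; source_version must match the installed package version or generation is skipped):

import sdcflows
from buildmodref import writeapi

# Writes one .rst file per discovered module, plus an index, under docs/api/
# (hypothetical output path).
writeapi('sdcflows', 'docs/api', source_version=sdcflows.__version__)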
+""" +from __future__ import print_function, division + +# stdlib imports +import sys +import re + +# local imports +from apigen import ApiDocWriter + +# version comparison +from distutils.version import LooseVersion as V + +#***************************************************************************** + +def abort(error): + print('*WARNING* API documentation not generated: %s' % error) + exit() + + +def writeapi(package, outdir, source_version, other_defines=True): + # Check that the package is available. If not, the API documentation is not + # (re)generated and existing API documentation sources will be used. + + try: + __import__(package) + except ImportError: + abort("Can not import " + package) + + module = sys.modules[package] + + # Check that the source version is equal to the installed + # version. If the versions mismatch the API documentation sources + # are not (re)generated. This avoids automatic generation of documentation + # for older or newer versions if such versions are installed on the system. + + installed_version = V(module.__version__) + if source_version != installed_version: + abort("Installed version does not match source version") + + docwriter = ApiDocWriter(package, rst_extension='.rst', + other_defines=other_defines) + + docwriter.package_skip_patterns += [r'\.%s$' % package, + r'.*test.*$', + r'\.version.*$'] + docwriter.write_api_docs(outdir) + docwriter.write_index(outdir, 'index', relative_to=outdir) + print('%d files written' % len(docwriter.written_modules)) + + +if __name__ == '__main__': + package = sys.argv[1] + outdir = sys.argv[2] + try: + other_defines = sys.argv[3] + except IndexError: + other_defines = True + else: + other_defines = other_defines in ('True', 'true', '1') + + writeapi(package, outdir, other_defines=other_defines) diff --git a/sdcflows/__init__.py b/sdcflows/__init__.py index 63de387669..cbe97dc87a 100644 --- a/sdcflows/__init__.py +++ b/sdcflows/__init__.py @@ -1,6 +1,14 @@ -from .__about__ import ( # noqa +"""SDCflows - :abbr:`SDC (susceptibility distortion correction)` by DUMMIES, for dummies.""" +from .__about__ import ( __version__, __copyright__, __credits__, __packagename__, ) + +__all__ = [ + '__version__', + '__copyright__', + '__credits__', + '__packagename__', +] diff --git a/sdcflows/cli/run.py b/sdcflows/cli/run.py index 255b4389c1..96394ddd29 100644 --- a/sdcflows/cli/run.py +++ b/sdcflows/cli/run.py @@ -31,11 +31,17 @@ def get_parser(): # Options that affect how pyBIDS is configured g_bids = parser.add_argument_group('Options for filtering BIDS queries') g_bids.add_argument('--participant-label', action='store', type=str, - nargs='*', help='process only particular subjects') + nargs='*', dest='subject', help='process only particular subjects') g_bids.add_argument('--task', action='store', type=str, nargs='*', help='select a specific task to be processed') + g_bids.add_argument('--dir', action='store', type=str, nargs='*', + help='select a specific direction entity to be processed') + g_bids.add_argument('--acq', action='store', type=str, nargs='*', dest='acquisition', + help='select a specific acquisition entity to be processed') g_bids.add_argument('--run', action='store', type=int, nargs='*', help='select a specific run identifier to be processed') + g_bids.add_argument('--suffix', action='store', type=str, nargs='*', default='bold', + help='select a specific run identifier to be processed') g_perfm = parser.add_argument_group('Options to handle performance') g_perfm.add_argument("-v", "--verbose", 
dest="verbose_count", action="count", default=0, @@ -56,8 +62,9 @@ def main(): """Entry point""" from os import cpu_count from multiprocessing import set_start_method - from bids.layout import BIDSLayout + # from bids.layout import BIDSLayout from nipype import logging as nlogging + from ..workflows.base import init_sdc_wf set_start_method('forkserver') opts = get_parser().parse_args() @@ -88,21 +95,18 @@ def main(): if not nthreads or nthreads < 1: nthreads = cpu_count() - derivatives_dir = opts.derivatives_dir.resolve() - bids_dir = opts.bids_dir or derivatives_dir.parent + # output_dir = opts.output_dir.resolve() + # bids_dir = opts.bids_dir or output_dir.parent # Get absolute path to BIDS directory - bids_dir = opts.bids_dir.resolve() - layout = BIDSLayout(str(bids_dir), validate=False, derivatives=str(derivatives_dir)) - query = {'domains': 'derivatives', 'desc': 'preproc', - 'suffix': 'bold', 'extensions': ['.nii', '.nii.gz']} - - if opts.participant_label: - query['subject'] = '|'.join(opts.participant_label) - if opts.run: - query['run'] = '|'.join(opts.run) - if opts.task: - query['task'] = '|'.join(opts.task) + # bids_dir = opts.bids_dir.resolve() + # layout = BIDSLayout(str(bids_dir), validate=False, derivatives=str(output_dir)) + # query = {'suffix': opts.suffix, 'extension': ['.nii', '.nii.gz']} + + # for entity in ('subject', 'task', 'dir', 'acquisition', 'run'): + # arg = getattr(opts, entity, None) + # if arg is not None: + # query[entity] = arg if __name__ == '__main__': diff --git a/sdcflows/conftest.py b/sdcflows/conftest.py new file mode 100644 index 0000000000..1ffdb109d8 --- /dev/null +++ b/sdcflows/conftest.py @@ -0,0 +1,44 @@ +"""py.test configuration""" +import os +from pathlib import Path +import numpy +import pytest +from bids.layout import BIDSLayout + +test_data_env = os.getenv('TEST_DATA_HOME', str(Path.home() / 'sdcflows-tests')) +test_output_dir = os.getenv('TEST_OUTPUT_DIR') +test_workdir = os.getenv('TEST_WORK_DIR') + +layouts = {p.name: BIDSLayout(str(p), validate=False, derivatives=True) + for p in Path(test_data_env).glob('*') if p.is_dir()} + + +def pytest_report_header(config): + msg = "Datasets found: %s" % ', '.join([v.root for v in layouts.values()]) + if test_output_dir is not None: + msg += '\nOutput folder: %s' % Path(test_output_dir).resolve() + return msg + + +@pytest.fixture(autouse=True) +def add_np(doctest_namespace): + doctest_namespace['np'] = numpy + doctest_namespace['os'] = os + doctest_namespace['Path'] = Path + for key, val in list(layouts.items()): + doctest_namespace[key] = Path(val.root) + + +@pytest.fixture +def workdir(): + return None if test_workdir is None else Path(test_workdir) + + +@pytest.fixture +def output_path(): + return None if test_output_dir is None else Path(test_output_dir) + + +@pytest.fixture +def bids_layouts(): + return layouts diff --git a/sdcflows/interfaces/fmap.py b/sdcflows/interfaces/fmap.py index 79fe14c74b..98b43acee1 100644 --- a/sdcflows/interfaces/fmap.py +++ b/sdcflows/interfaces/fmap.py @@ -1,19 +1,17 @@ -# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ -Interfaces to deal with the various types of fieldmap sources +Interfaces to deal with the various types of fieldmap sources. - .. testsetup:: - - >>> tmpdir = getfixture('tmpdir') - >>> tmp = tmpdir.chdir() # changing to a temporary directory - >>> nb.Nifti1Image(np.zeros((90, 90, 60)), None, None).to_filename( - ... 
tmpdir.join('epi.nii.gz').strpath) +.. testsetup:: + >>> tmpdir = getfixture('tmpdir') + >>> tmp = tmpdir.chdir() # changing to a temporary directory + >>> nb.Nifti1Image(np.zeros((90, 90, 60)), None, None).to_filename( + ... tmpdir.join('epi.nii.gz').strpath) """ - +from copy import deepcopy import numpy as np import nibabel as nb from nipype import logging @@ -25,7 +23,103 @@ LOGGER = logging.getLogger('nipype.interface') -class FieldEnhanceInputSpec(BaseInterfaceInputSpec): +class _ProcessPhasesInputSpec(BaseInterfaceInputSpec): + phase1_file = File(exists=True, mandatory=True, desc='phase1 file') + phase2_file = File(exists=True, mandatory=True, desc='phase2 file') + phase1_metadata = traits.Dict(mandatory=True, desc='phase1 metadata dict') + phase2_metadata = traits.Dict(mandatory=True, desc='phase2 metadata dict') + + +class _ProcessPhasesOutputSpec(TraitedSpec): + short_te_phase_image = File(exists=True, desc='short TE phase image scaled for unwrapping') + short_te_phase_metadata = traits.Dict(desc='short TE phase image metadata') + long_te_phase_image = File(exists=True, desc='long TE phase image scaled for unwrapping') + long_te_phase_metadata = traits.Dict(desc='long TE phase image metadata') + phasediff_metadata = traits.Dict(desc='the phasediff metadata') + + +class ProcessPhases(SimpleInterface): + """ + Process phase1, phase2 images so they can be unwrapped. + + Steps are taken from https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FUGUE/Guide + + """ + + input_spec = _ProcessPhasesInputSpec + output_spec = _ProcessPhasesOutputSpec + + def _run_interface(self, runtime): + images = (self.inputs.phase1_file, self.inputs.phase2_file) + metadatas = (self.inputs.phase1_metadata, self.inputs.phase2_metadata) + echo_times = [meta.get("EchoTime") for meta in metadatas] + if None in echo_times or echo_times[0] == echo_times[1]: + raise ValueError('Echo time metadata are missing or invalid') + + # Order the images by echo time + short_echo_index = echo_times.index(min(echo_times)) + long_echo_index = echo_times.index(max(echo_times)) + short_echo_image = images[short_echo_index] + long_echo_image = images[long_echo_index] + + # Rescale and save the short TE image + se_phase_image = nb.load(short_echo_image) + se_phase_data = se_phase_image.get_fdata() + se_rescaled_data = rescale_phase_image(se_phase_data) + se_rescaled_output = fname_presuffix(short_echo_image, suffix='_shortTE_scaled', + newpath=runtime.cwd) + nb.Nifti1Image(se_rescaled_data, se_phase_image.affine, se_phase_image.header + ).to_filename(se_rescaled_output) + self._results['short_te_phase_image'] = se_rescaled_output + self._results['short_te_phase_metadata'] = metadatas[short_echo_index] + + # Rescale and save the long TE image + le_phase_image = nb.load(long_echo_image) + le_phase_data = le_phase_image.get_fdata() + le_rescaled_data = rescale_phase_image(le_phase_data) + le_rescaled_output = fname_presuffix(long_echo_image, suffix='_longTE_scaled', + newpath=runtime.cwd) + nb.Nifti1Image(le_rescaled_data, le_phase_image.affine, le_phase_image.header + ).to_filename(le_rescaled_output) + self._results['long_te_phase_image'] = le_rescaled_output + self._results['long_te_phase_metadata'] = metadatas[long_echo_index] + + merged_metadata = deepcopy(metadatas[0]) + del merged_metadata['EchoTime'] + merged_metadata['EchoTime1'] = float(echo_times[short_echo_index]) + merged_metadata['EchoTime2'] = float(echo_times[long_echo_index]) + self._results['phasediff_metadata'] = merged_metadata + return runtime + + +class 
_DiffPhasesInputSpec(BaseInterfaceInputSpec): + long_te_phase_image = File(exists=True, mandatory=True, + desc="Phase image with longest echo time") + short_te_phase_image = File(exists=True, mandatory=True, + desc="Phase image with shortest echo time") + + +class _DiffPhasesOutputSpec(TraitedSpec): + phasediff_file = File(exists=True) + + +class DiffPhases(SimpleInterface): + """Substract two input phase maps.""" + input_spec = _DiffPhasesInputSpec + output_spec = _DiffPhasesOutputSpec + + def _run_interface(self, runtime): + # Save the subtraction image + phasediff_output = fname_presuffix(self.inputs.long_te_phase_image, suffix='_phasediff', + newpath=runtime.cwd) + phasediff_image = _subtract_phases(short_te_phase_image=self.inputs.short_te_phase_image, + long_te_phase_image=self.inputs.long_te_phase_image) + phasediff_image.to_filename(phasediff_output) + self._results['phasediff_file'] = phasediff_output + return runtime + + +class _FieldEnhanceInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='input fieldmap') in_mask = File(exists=True, desc='brain mask') in_magnitude = File(exists=True, desc='input magnitude') @@ -37,18 +131,16 @@ class FieldEnhanceInputSpec(BaseInterfaceInputSpec): num_threads = traits.Int(1, usedefault=True, nohash=True, desc='number of jobs') -class FieldEnhanceOutputSpec(TraitedSpec): +class _FieldEnhanceOutputSpec(TraitedSpec): out_file = File(desc='the output fieldmap') out_unwrapped = File(desc='unwrapped fieldmap') class FieldEnhance(SimpleInterface): - """ - The FieldEnhance interface wraps a workflow to massage the input fieldmap - and return it masked, despiked, etc. - """ - input_spec = FieldEnhanceInputSpec - output_spec = FieldEnhanceOutputSpec + """Massage the input fieldmap (masking, despiking, etc.).""" + + input_spec = _FieldEnhanceInputSpec + output_spec = _FieldEnhanceOutputSpec def _run_interface(self, runtime): from scipy import ndimage as sim @@ -134,22 +226,21 @@ def _run_interface(self, runtime): return runtime -class FieldToRadSInputSpec(BaseInterfaceInputSpec): +class _FieldToRadSInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='input fieldmap') fmap_range = traits.Float(desc='range of input field map') -class FieldToRadSOutputSpec(TraitedSpec): +class _FieldToRadSOutputSpec(TraitedSpec): out_file = File(desc='the output fieldmap') fmap_range = traits.Float(desc='range of input field map') class FieldToRadS(SimpleInterface): - """ - The FieldToRadS converts from arbitrary units to rad/s - """ - input_spec = FieldToRadSInputSpec - output_spec = FieldToRadSOutputSpec + """Convert from arbitrary units to rad/s.""" + + input_spec = _FieldToRadSInputSpec + output_spec = _FieldToRadSOutputSpec def _run_interface(self, runtime): fmap_range = None @@ -160,21 +251,20 @@ def _run_interface(self, runtime): return runtime -class FieldToHzInputSpec(BaseInterfaceInputSpec): +class _FieldToHzInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='input fieldmap') range_hz = traits.Float(mandatory=True, desc='range of input field map') -class FieldToHzOutputSpec(TraitedSpec): +class _FieldToHzOutputSpec(TraitedSpec): out_file = File(desc='the output fieldmap') class FieldToHz(SimpleInterface): - """ - The FieldToHz converts from arbitrary units to Hz - """ - input_spec = FieldToHzInputSpec - output_spec = FieldToHzOutputSpec + """Convert from arbitrary units to Hz.""" + + input_spec = _FieldToHzInputSpec + output_spec = _FieldToHzOutputSpec def 
_run_interface(self, runtime): self._results['out_file'] = _tohz( @@ -182,21 +272,20 @@ def _run_interface(self, runtime): return runtime -class Phasediff2FieldmapInputSpec(BaseInterfaceInputSpec): +class _Phasediff2FieldmapInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, desc='input fieldmap') metadata = traits.Dict(mandatory=True, desc='BIDS metadata dictionary') -class Phasediff2FieldmapOutputSpec(TraitedSpec): +class _Phasediff2FieldmapOutputSpec(TraitedSpec): out_file = File(desc='the output fieldmap') class Phasediff2Fieldmap(SimpleInterface): - """ - Convert a phase difference map into a fieldmap in Hz - """ - input_spec = Phasediff2FieldmapInputSpec - output_spec = Phasediff2FieldmapOutputSpec + """Convert a phase difference map into a fieldmap in Hz.""" + + input_spec = _Phasediff2FieldmapInputSpec + output_spec = _Phasediff2FieldmapOutputSpec def _run_interface(self, runtime): self._results['out_file'] = phdiff2fmap( @@ -207,10 +296,7 @@ def _run_interface(self, runtime): def _despike2d(data, thres, neigh=None): - """ - despiking as done in FSL fugue - """ - + """Despike axial slices, as done in FSL's ``epiunwarp``.""" if neigh is None: neigh = [-1, 0, 1] nslices = data.shape[-1] @@ -263,8 +349,10 @@ def _unwrap(fmap_data, mag_file, mask=None): def get_ees(in_meta, in_file=None): - """ - Calculate the *effective echo spacing* :math:`t_\\text{ees}` + r""" + Extract the *effective echo spacing* :math:`t_\text{ees}` from BIDS. + + Calculate the *effective echo spacing* :math:`t_\text{ees}` for an input :abbr:`EPI (echo-planar imaging)` scan. @@ -279,18 +367,18 @@ def get_ees(in_meta, in_file=None): >>> get_ees(meta) 0.00059 - If the *total readout time* :math:`T_\\text{ro}` (``TotalReadoutTime`` + If the *total readout time* :math:`T_\text{ro}` (``TotalReadoutTime`` BIDS field) is provided, then the effective echo spacing can be - calculated reading the number of voxels :math:`N_\\text{PE}` along the + calculated reading the number of voxels :math:`N_\text{PE}` along the readout direction and the parallel acceleration factor of the EPI .. math :: - = T_\\text{ro} \\, (N_\\text{PE} / f_\\text{acc} - 1)^{-1} + = T_\text{ro} \, (N_\text{PE} / f_\text{acc} - 1)^{-1} where :math:`N_y` is the number of pixels along the phase-encoding direction - :math:`y`, and :math:`f_\\text{acc}` is the parallel imaging acceleration factor + :math:`y`, and :math:`f_\text{acc}` is the parallel imaging acceleration factor (:abbr:`GRAPPA (GeneRalized Autocalibrating Partial Parallel Acquisition)`, :abbr:`ARC (Autocalibrating Reconstruction for Cartesian imaging)`, etc.). @@ -300,9 +388,9 @@ def get_ees(in_meta, in_file=None): >>> get_ees(meta, in_file='epi.nii.gz') 0.00059 - Some vendors, like Philips, store different parameter names - (see http://dbic.dartmouth.edu/pipermail/mrusers/attachments/\ -20141112/eb1d20e6/attachment.pdf): + Some vendors, like Philips, store different parameter names (see + http://dbic.dartmouth.edu/pipermail/mrusers/attachments/20141112/eb1d20e6/attachment.pdf + ): >>> meta = {'WaterFatShift': 8.129, ... 'MagneticFieldStrength': 3, @@ -344,11 +432,12 @@ def get_ees(in_meta, in_file=None): def get_trt(in_meta, in_file=None): - """ + r""" + Extract the *total readout time* :math:`t_\text{RO}` from BIDS. + Calculate the *total readout time* for an input :abbr:`EPI (echo-planar imaging)` scan. - There are several procedures to calculate the total readout time. The basic one is that a ``TotalReadoutTime`` field is set in the JSON sidecar. 
The following examples @@ -359,15 +448,15 @@ def get_trt(in_meta, in_file=None): >>> get_trt(meta) 0.02596 - If the *effective echo spacing* :math:`t_\\text{ees}` + If the *effective echo spacing* :math:`t_\text{ees}` (``EffectiveEchoSpacing`` BIDS field) is provided, then the total readout time can be calculated reading the number - of voxels along the readout direction :math:`T_\\text{ro}` - and the parallel acceleration factor of the EPI :math:`f_\\text{acc}`. + of voxels along the readout direction :math:`T_\text{ro}` + and the parallel acceleration factor of the EPI :math:`f_\text{acc}`. .. math :: - T_\\text{ro} = t_\\text{ees} \\, (N_\\text{PE} / f_\\text{acc} - 1) + T_\text{ro} = t_\text{ees} \, (N_\text{PE} / f_\text{acc} - 1) >>> meta = {'EffectiveEchoSpacing': 0.00059, ... 'PhaseEncodingDirection': 'j-', @@ -385,7 +474,6 @@ def get_trt(in_meta, in_file=None): 0.018721183563864822 """ - # Use case 1: TRT is defined trt = in_meta.get('TotalReadoutTime', None) if trt is not None: @@ -423,12 +511,13 @@ def _get_pe_index(meta): def _torads(in_file, fmap_range=None, newpath=None): """ - Convert a field map to rad/s units + Convert a field map to rad/s units. If fmap_range is None, the range of the fieldmap will be automatically calculated. Use fmap_range=0.5 to convert from Hz to rad/s + """ from math import pi import nibabel as nb @@ -448,7 +537,7 @@ def _torads(in_file, fmap_range=None, newpath=None): def _tohz(in_file, range_hz, newpath=None): - """Convert a field map to Hz units""" + """Convert a field map to Hz units.""" from math import pi import nibabel as nb from nipype.utils.filemanip import fname_presuffix @@ -465,8 +554,9 @@ def _tohz(in_file, range_hz, newpath=None): def phdiff2fmap(in_file, delta_te, newpath=None): r""" - Converts the input phase-difference map into a fieldmap in Hz, - using the eq. (1) of [Hutton2002]_: + Convert the input phase-difference map into a fieldmap in Hz. + + Uses eq. (1) of [Hutton2002]_: .. math:: @@ -480,8 +570,14 @@ def phdiff2fmap(in_file, delta_te, newpath=None): \Delta B_0 (\text{Hz}) = \frac{\Delta \Theta}{2\pi \Delta\text{TE}} + References + ---------- + .. [Hutton2002] Hutton et al., Image Distortion Correction in fMRI: A Quantitative + Evaluation, NeuroImage 16(1):217-240, 2002. doi:`10.1006/nimg.2001.1054 + `_. + + """ - import math import numpy as np import nibabel as nb from nipype.utils.filemanip import fname_presuffix @@ -489,15 +585,45 @@ def phdiff2fmap(in_file, delta_te, newpath=None): out_file = fname_presuffix(in_file, suffix='_fmap', newpath=newpath) image = nb.load(in_file) - data = (image.get_data().astype(np.float32) / (2. * math.pi * delta_te)) + data = (image.get_data().astype(np.float32) / (2. * np.pi * delta_te)) nii = nb.Nifti1Image(data, image.affine, image.header) nii.set_data_dtype(np.float32) nii.to_filename(out_file) return out_file +def rescale_phase_image(phase_data): + """ + Ensure that phase images are in a usable range for unwrapping. + + From the `FUGUE User guide + `__: + + | If you have separate phase volumes that are in integer format then do: + + .. code-block:: shell + + fslmaths orig_phase0 -mul 3.14159 -div 2048 phase0_rad -odt float + fslmaths orig_phase1 -mul 3.14159 -div 2048 phase1_rad -odt float + + | Note that the value of 2048 needs to be adjusted for each different + | site/scanner/sequence in order to be correct. The final range of the + | phase0_rad image should be approximately 0 to 6.28. If this is not the + | case then this scaling is wrong. 
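To make the arithmetic concrete, a hedged numeric sketch of the rescaling quoted here and of eq. (1) above (all values invented):

import numpy as np

# Integer-valued phase image (e.g. 0..4095) mapped onto [0, 2*pi] radians,
# mirroring the fslmaths recipe quoted above.
phase = np.array([0., 1024., 2048., 4095.])
phase_rad = 2 * np.pi * (phase - phase.min()) / (phase.max() - phase.min())

# Eq. (1): an unwrapped phase difference of pi/2 rad with a hypothetical
# echo-time difference of 2.46 ms corresponds to ~101.6 Hz of field offset.
delta_te = 0.00246                                   # seconds (EchoTime2 - EchoTime1)
fmap_hz = (np.pi / 2) / (2 * np.pi * delta_te)       # ~101.6 Hz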
If you have separate phase volumes are + | not in integer format, you must still check that the units are in radians, + | and if not scale them appropriately using fslmaths. + + .. + + """ + imax = phase_data.max() + imin = phase_data.min() + scaled = (phase_data - imin) / (imax - imin) + return 2 * np.pi * scaled + + def _delta_te(in_values, te1=None, te2=None): - """Read :math:`\Delta_\text{TE}` from BIDS metadata dict""" + r"""Read :math:`\Delta_\text{TE}` from BIDS metadata dict.""" if isinstance(in_values, float): te2 = in_values te1 = 0. @@ -529,3 +655,13 @@ def _delta_te(in_values, te1=None, te2=None): 'EchoTime2 metadata field not found. Please consult the BIDS specification.') return abs(float(te2) - float(te1)) + + +def _subtract_phases(short_te_phase_image, long_te_phase_image): + """Subtract two unwrapped phase images to produce a phasediff image.""" + short_te_img = nb.load(short_te_phase_image) + long_te_img = nb.load(long_te_phase_image) + phasediff_data = long_te_img.get_fdata() - short_te_img.get_fdata() + phasediff_img = nb.Nifti1Image(phasediff_data, short_te_img.affine, + header=short_te_img.header) + return phasediff_img diff --git a/sdcflows/interfaces/reportlets.py b/sdcflows/interfaces/reportlets.py new file mode 100644 index 0000000000..90b7b99fa4 --- /dev/null +++ b/sdcflows/interfaces/reportlets.py @@ -0,0 +1,66 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Interfaces to generate speciality reportlets.""" +from nilearn.image import threshold_img, load_img +from niworkflows import NIWORKFLOWS_LOG +from niworkflows.viz.utils import cuts_from_bbox, compose_view +from nipype.interfaces.base import File, isdefined +from nipype.interfaces.mixins import reporting + +from ..viz.utils import plot_registration, coolwarm_transparent + + +class _FieldmapReportletInputSpec(reporting.ReportCapableInputSpec): + reference = File(exists=True, mandatory=True, desc='input reference') + fieldmap = File(exists=True, mandatory=True, desc='input fieldmap') + mask = File(exists=True, desc='brain mask') + out_report = File('report.svg', usedefault=True, + desc='filename for the visual report') + + +class FieldmapReportlet(reporting.ReportCapableInterface): + """An abstract mixin to registration nipype interfaces.""" + + _n_cuts = 7 + input_spec = _FieldmapReportletInputSpec + output_spec = reporting.ReportCapableOutputSpec + + def __init__(self, **kwargs): + """Instantiate FieldmapReportlet.""" + self._n_cuts = kwargs.pop('n_cuts', self._n_cuts) + super(FieldmapReportlet, self).__init__(generate_report=True, **kwargs) + + def _run_interface(self, runtime): + return runtime + + def _generate_report(self): + """Generate a reportlet.""" + NIWORKFLOWS_LOG.info('Generating visual report') + + refnii = load_img(self.inputs.reference) + fmapnii = load_img(self.inputs.fieldmap) + contour_nii = load_img(self.inputs.mask) if isdefined(self.inputs.mask) else None + mask_nii = threshold_img(refnii, 1e-3) + cuts = cuts_from_bbox(contour_nii or mask_nii, cuts=self._n_cuts) + fmapdata = fmapnii.get_fdata() + vmax = max(fmapdata.max(), abs(fmapdata.min())) + + # Call composer + compose_view( + plot_registration(refnii, 'fixed-image', + estimate_brightness=True, + cuts=cuts, + label='reference', + contour=contour_nii, + compress=False), + plot_registration(fmapnii, 'moving-image', + estimate_brightness=True, + cuts=cuts, + label='fieldmap (Hz)', + contour=contour_nii, + compress=False, + plot_params={'cmap': 
coolwarm_transparent(), + 'vmax': vmax, + 'vmin': -vmax}), + out_file=self._out_report + ) diff --git a/sdcflows/viz/__init__.py b/sdcflows/viz/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sdcflows/viz/utils.py b/sdcflows/viz/utils.py new file mode 100644 index 0000000000..d17cb38e36 --- /dev/null +++ b/sdcflows/viz/utils.py @@ -0,0 +1,80 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Visualization tooling.""" + + +def plot_registration(anat_nii, div_id, plot_params=None, + order=('z', 'x', 'y'), cuts=None, + estimate_brightness=False, label=None, contour=None, + compress='auto'): + """ + Plot the foreground and background views. + + Default order is: axial, coronal, sagittal + """ + from uuid import uuid4 + + from lxml import etree + from nilearn.plotting import plot_anat + from svgutils.transform import SVGFigure + from niworkflows.viz.utils import robust_set_limits, extract_svg, SVGNS + + plot_params = plot_params or {} + + # Use default MNI cuts if none defined + if cuts is None: + raise NotImplementedError # TODO + + out_files = [] + if estimate_brightness: + plot_params = robust_set_limits(anat_nii.get_data().reshape(-1), + plot_params) + + # Plot each cut axis + for i, mode in enumerate(list(order)): + plot_params['display_mode'] = mode + plot_params['cut_coords'] = cuts[mode] + if i == 0: + plot_params['title'] = label + else: + plot_params['title'] = None + + # Generate nilearn figure + display = plot_anat(anat_nii, **plot_params) + if contour is not None: + display.add_contours(contour, colors='g', levels=[0.5], + linewidths=0.5) + + svg = extract_svg(display, compress=compress) + display.close() + + # Find and replace the figure_1 id. + xml_data = etree.fromstring(svg) + find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS) + find_text(xml_data)[0].set('id', '%s-%s-%s' % (div_id, mode, uuid4())) + + svg_fig = SVGFigure() + svg_fig.root = xml_data + out_files.append(svg_fig) + + return out_files + + +def coolwarm_transparent(): + """Modify the coolwarm color scale to have full transparency around the middle.""" + import numpy as np + import matplotlib.pylab as pl + from matplotlib.colors import ListedColormap + + # Choose colormap + cmap = pl.cm.coolwarm + + # Get the colormap colors + my_cmap = cmap(np.arange(cmap.N)) + + # Set alpha + alpha = np.ones(cmap.N) + alpha[128:160] = np.linspace(0, 1, len(alpha[128:160])) + alpha[96:128] = np.linspace(1, 0, len(alpha[96:128])) + my_cmap[:, -1] = alpha + return ListedColormap(my_cmap) diff --git a/sdcflows/workflows/__init__.py b/sdcflows/workflows/__init__.py index f90f217faf..80f9302559 100644 --- a/sdcflows/workflows/__init__.py +++ b/sdcflows/workflows/__init__.py @@ -1,55 +1,37 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ +Fieldmap estimation and unwarping workflows. -.. _sdc_estimation : +.. _sdc_base : -Fieldmap estimation and unwarping workflows -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Automatic selection of the appropriate susceptibility-distortion correction (SDC) method +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +If the dataset metadata indicate that more than one field map acquisition is +``IntendedFor`` (see the `BIDS Specification +`__), +the following priority will be used: -.. automodule:: sdcflows.workflows.base - :members: - :undoc-members: - :show-inheritance: + 1. 
:ref:`sdc_pepolar` (or **blip-up/blip-down**) -.. automodule:: sdcflows.workflows.fmap - :members: - :undoc-members: - :show-inheritance: + 2. :ref:`sdc_direct_b0` -.. automodule:: sdcflows.workflows.phdiff - :members: - :undoc-members: - :show-inheritance: + 3. :ref:`sdc_phasediff` -.. automodule:: sdcflows.workflows.pepolar - :members: - :undoc-members: - :show-inheritance: + 4. :ref:`sdc_fieldmapless` -.. automodule:: sdcflows.workflows.syn - :members: - :undoc-members: - :show-inheritance: -.. automodule:: sdcflows.workflows.unwarp - :members: - :undoc-members: - :show-inheritance: +Table of behavior (fieldmap use-cases): +=============== =========== ============= =============== +Fieldmaps found ``use_syn`` ``force_syn`` Action +=============== =========== ============= =============== +True * True Fieldmaps + SyN +True * False Fieldmaps +False * True SyN +False True False SyN +False False False HMC only +=============== =========== ============= =============== """ - -from .base import init_sdc_wf -from .unwarp import init_sdc_unwarp_wf, init_fmap_unwarp_report_wf -from .pepolar import init_pepolar_unwarp_wf -from .syn import init_syn_sdc_wf - -__all__ = [ - 'init_sdc_wf', - 'init_sdc_unwarp_wf', - 'init_fmap_unwarp_report_wf', - 'init_pepolar_unwarp_wf', - 'init_syn_sdc_wf', -] diff --git a/sdcflows/workflows/base.py b/sdcflows/workflows/base.py index 7103d83e43..8a10716859 100644 --- a/sdcflows/workflows/base.py +++ b/sdcflows/workflows/base.py @@ -1,64 +1,34 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -""" -.. _sdc_base : - -Automatic selection of the appropriate SDC method -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If the dataset metadata indicate tha more than one field map acquisition is -``IntendedFor`` (see BIDS Specification section 8.9) the following priority will -be used: - - 1. :ref:`sdc_pepolar` (or **blip-up/blip-down**) - - 2. :ref:`sdc_direct_b0` - - 3. :ref:`sdc_phasediff` - - 4. :ref:`sdc_fieldmapless` - - -Table of behavior (fieldmap use-cases): - -=============== =========== ============= =============== -Fieldmaps found ``use_syn`` ``force_syn`` Action -=============== =========== ============= =============== -True * True Fieldmaps + SyN -True * False Fieldmaps -False * True SyN -False True False SyN -False False False HMC only -=============== =========== ============= =============== - - -""" +"""SDC workflows coordination.""" +from collections import defaultdict from nipype.pipeline import engine as pe from nipype.interfaces import utility as niu from nipype import logging from niworkflows.engine.workflows import LiterateWorkflow as Workflow -from niworkflows.interfaces.utility import KeySelect # Fieldmap workflows from .pepolar import init_pepolar_unwarp_wf -from .syn import init_syn_sdc_wf -from .unwarp import init_sdc_unwarp_wf LOGGER = logging.getLogger('nipype.workflow') FMAP_PRIORITY = { 'epi': 0, 'fieldmap': 1, 'phasediff': 2, - 'syn': 3 + 'phase1': 3, + 'phase2': 3, + 'syn': 4, } + DEFAULT_MEMORY_MIN_GB = 0.01 -def init_sdc_wf(fmaps, bold_meta, omp_nthreads=1, - debug=False, fmap_bspline=False, fmap_demean=True): +def init_sdc_wf(distorted_ref, omp_nthreads=1, debug=False, ignore=None): """ + Build a :abbr:`SDC (susceptibility distortion correction)` workflow. + This workflow implements the heuristics to choose a :abbr:`SDC (susceptibility distortion correction)` strategy. 
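As a hedged illustration of the ordering encoded in FMAP_PRIORITY above (the set of available suffixes is invented):

FMAP_PRIORITY = {'epi': 0, 'fieldmap': 1, 'phasediff': 2, 'phase1': 3, 'phase2': 3, 'syn': 4}

available = ['phasediff', 'epi', 'syn']            # suffixes associated with this run
preferred = min(available, key=FMAP_PRIORITY.get)  # -> 'epi': PEPOLAR takes precedence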
When no field map information is present within the BIDS inputs, @@ -71,99 +41,85 @@ def init_sdc_wf(fmaps, bold_meta, omp_nthreads=1, distortion correction)` method applied is that with the highest priority. - .. workflow:: - :graph2use: orig - :simple_form: yes - - from sdcflows.workflows.base import init_sdc_wf - wf = init_sdc_wf( - fmaps=[{ - 'suffix': 'phasediff', - 'phasediff': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_phasediff.nii.gz', - 'magnitude1': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude1.nii.gz', - 'magnitude2': 'sub-03/ses-2/fmap/sub-03_ses-2_run-1_magnitude2.nii.gz', - }], - bold_meta={ - 'RepetitionTime': 2.0, - 'SliceTiming': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], - 'PhaseEncodingDirection': 'j', - }, - ) - - **Parameters** - - fmaps : list of pybids dicts - A list of dictionaries with the available fieldmaps - (and their metadata using the key ``'metadata'`` for the - case of *epi* fieldmaps) - bold_meta : dict - BIDS metadata dictionary corresponding to the BOLD run - omp_nthreads : int - Maximum number of threads an individual process may use - fmap_bspline : bool - **Experimental**: Fit B-Spline field using least-squares - fmap_demean : bool - Demean voxel-shift map during unwarp - debug : bool - Enable debugging outputs - - **Inputs** - bold_ref - A BOLD reference calculated at a previous stage - bold_ref_brain - Same as above, but brain-masked - bold_mask - Brain mask for the BOLD run - t1_brain - T1w image, brain-masked, for the fieldmap-less SyN method - std2anat_xfm - List of standard-to-T1w transforms generated during spatial - normalization (only for the fieldmap-less SyN method). - template : str - Name of template from which prior knowledge will be mapped - into the subject's T1w reference - (only for the fieldmap-less SyN method) - templates : str - Name of templates that index the ``std2anat_xfm`` input list - (only for the fieldmap-less SyN method). - - - **Outputs** - bold_ref - An unwarped BOLD reference - bold_mask - The corresponding new mask after unwarping - bold_ref_brain - Brain-extracted, unwarped BOLD reference - out_warp - The deformation field to unwarp the susceptibility distortions - syn_bold_ref - If ``--force-syn``, an unwarped BOLD reference with this - method (for reporting purposes) + Parameters + ---------- + distorted_ref : pybids.BIDSFile + A BIDSFile object with suffix ``bold``, ``sbref`` or ``dwi``. + omp_nthreads : int + Maximum number of threads an individual process may use + debug : bool + Enable debugging outputs + + Inputs + ------ + distorted_ref + A reference image calculated at a previous stage + ref_brain + Same as above, but brain-masked + ref_mask + Brain mask for the run + t1_brain + T1w image, brain-masked, for the fieldmap-less SyN method + std2anat_xfm + List of standard-to-T1w transforms generated during spatial + normalization (only for the fieldmap-less SyN method). + template : str + Name of template from which prior knowledge will be mapped + into the subject's T1w reference + (only for the fieldmap-less SyN method) + templates : str + Name of templates that index the ``std2anat_xfm`` input list + (only for the fieldmap-less SyN method). 
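A hedged sketch of how this workflow factory might be invoked (the dataset path and the selected file are invented; ``distorted_ref`` must be a pybids file object so that its metadata and fieldmap associations can be queried):

from bids.layout import BIDSLayout
from sdcflows.workflows.base import init_sdc_wf

layout = BIDSLayout('/data/bids_dataset', validate=False)
distorted_ref = layout.get(suffix='bold', extension=['.nii', '.nii.gz'])[0]
sdc_wf = init_sdc_wf(distorted_ref, omp_nthreads=8, debug=False, ignore=())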
+ + Outputs + ------- + distorted_ref + An unwarped BOLD reference + bold_mask + The corresponding new mask after unwarping + bold_ref_brain + Brain-extracted, unwarped BOLD reference + out_warp + The deformation field to unwarp the susceptibility distortions + syn_bold_ref + If ``--force-syn``, an unwarped BOLD reference with this + method (for reporting purposes) """ + if ignore is None: + ignore = tuple() + + if not isinstance(ignore, (list, tuple)): + ignore = tuple(ignore) - # TODO: To be removed (filter out unsupported fieldmaps): - fmaps = [fmap for fmap in fmaps if fmap['suffix'] in FMAP_PRIORITY] + fmaps = defaultdict(list, []) + for associated in distorted_ref.get_associations(kind='InformedBy'): + if associated.suffix in FMAP_PRIORITY: + fmaps[associated.suffix].append(associated) - workflow = Workflow(name='sdc_wf' if fmaps else 'sdc_bypass_wf') + workflow = Workflow(name='sdc_wf' if distorted_ref else 'sdc_bypass_wf') inputnode = pe.Node(niu.IdentityInterface( - fields=['bold_ref', 'bold_ref_brain', 'bold_mask', + fields=['distorted_ref', 'ref_brain', 'ref_mask', 't1_brain', 'std2anat_xfm', 'template', 'templates']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface( - fields=['bold_ref', 'bold_mask', 'bold_ref_brain', - 'out_warp', 'syn_bold_ref', 'method']), + fields=['output_ref', 'ref_mask', 'ref_brain', + 'out_warp', 'syn_ref', 'method']), name='outputnode') # No fieldmaps - forward inputs to outputs - if not fmaps: + if not fmaps or 'fieldmaps' in ignore: + workflow.__postdesc__ = """\ +Susceptibility distortion correction (SDC) has been skipped because the +dataset does not contain extra field map acquisitions correctly described +with metadata, and the experimental SDC-SyN method was not explicitly selected. +""" outputnode.inputs.method = 'None' workflow.connect([ - (inputnode, outputnode, [('bold_ref', 'bold_ref'), - ('bold_mask', 'bold_mask'), - ('bold_ref_brain', 'bold_ref_brain')]), + (inputnode, outputnode, [('distorted_ref', 'output_ref'), + ('ref_mask', 'ref_mask'), + ('ref_brain', 'ref_brain')]), ]) return workflow @@ -173,111 +129,107 @@ def init_sdc_wf(fmaps, bold_meta, omp_nthreads=1, co-registration with the anatomical reference. """ - # In case there are multiple fieldmaps prefer EPI - fmaps.sort(key=lambda fmap: FMAP_PRIORITY[fmap['suffix']]) - fmap = fmaps[0] - # PEPOLAR path - if fmap['suffix'] == 'epi': + if 'epi' in fmaps: outputnode.inputs.method = 'PEB/PEPOLAR (phase-encoding based / PE-POLARity)' # Get EPI polarities and their metadata - epi_fmaps = [(fmap_['epi'], fmap_['metadata']["PhaseEncodingDirection"]) - for fmap_ in fmaps if fmap_['suffix'] == 'epi'] sdc_unwarp_wf = init_pepolar_unwarp_wf( - bold_meta=bold_meta, - epi_fmaps=epi_fmaps, + bold_meta=distorted_ref.get_metadata(), + epi_fmaps=[(fmap, fmap.get_metadata()["PhaseEncodingDirection"]) + for fmap in fmaps['epi']], omp_nthreads=omp_nthreads, name='pepolar_unwarp_wf') workflow.connect([ (inputnode, sdc_unwarp_wf, [ - ('bold_ref', 'inputnode.in_reference'), + ('distorted_ref', 'inputnode.in_reference'), ('bold_mask', 'inputnode.in_mask'), ('bold_ref_brain', 'inputnode.in_reference_brain')]), ]) # FIELDMAP path - if fmap['suffix'] in ['fieldmap', 'phasediff']: - outputnode.inputs.method = 'FMB (%s-based)' % fmap['suffix'] - # Import specific workflows here, so we don't break everything with one - # unused workflow. 
- if fmap['suffix'] == 'fieldmap': - from .fmap import init_fmap_wf - fmap_estimator_wf = init_fmap_wf( - omp_nthreads=omp_nthreads, - fmap_bspline=fmap_bspline) - # set inputs - fmap_estimator_wf.inputs.inputnode.fieldmap = fmap['fieldmap'] - fmap_estimator_wf.inputs.inputnode.magnitude = fmap['magnitude'] - - if fmap['suffix'] == 'phasediff': - from .phdiff import init_phdiff_wf - fmap_estimator_wf = init_phdiff_wf(omp_nthreads=omp_nthreads) - # set inputs - fmap_estimator_wf.inputs.inputnode.phasediff = fmap['phasediff'] - fmap_estimator_wf.inputs.inputnode.magnitude = [ - fmap_ for key, fmap_ in sorted(fmap.items()) - if key.startswith("magnitude") - ] - - sdc_unwarp_wf = init_sdc_unwarp_wf( - omp_nthreads=omp_nthreads, - fmap_demean=fmap_demean, - debug=debug, - name='sdc_unwarp_wf') - sdc_unwarp_wf.inputs.inputnode.metadata = bold_meta - - workflow.connect([ - (inputnode, sdc_unwarp_wf, [ - ('bold_ref', 'inputnode.in_reference'), - ('bold_ref_brain', 'inputnode.in_reference_brain'), - ('bold_mask', 'inputnode.in_mask')]), - (fmap_estimator_wf, sdc_unwarp_wf, [ - ('outputnode.fmap', 'inputnode.fmap'), - ('outputnode.fmap_ref', 'inputnode.fmap_ref'), - ('outputnode.fmap_mask', 'inputnode.fmap_mask')]), - ]) - - # FIELDMAP-less path - if any(fm['suffix'] == 'syn' for fm in fmaps): - # Select template - sdc_select_std = pe.Node(KeySelect( - fields=['std2anat_xfm']), - name='sdc_select_std', run_without_submitting=True) - - syn_sdc_wf = init_syn_sdc_wf( - bold_pe=bold_meta.get('PhaseEncodingDirection', None), - omp_nthreads=omp_nthreads) - - workflow.connect([ - (inputnode, sdc_select_std, [ - ('template', 'key'), - ('templates', 'keys'), - ('std2anat_xfm', 'std2anat_xfm')]), - (sdc_select_std, syn_sdc_wf, [ - ('std2anat_xfm', 'inputnode.std2anat_xfm')]), - (inputnode, syn_sdc_wf, [ - ('t1_brain', 'inputnode.t1_brain'), - ('bold_ref', 'inputnode.bold_ref'), - ('bold_ref_brain', 'inputnode.bold_ref_brain'), - ('template', 'inputnode.template')]), - ]) - - # XXX Eliminate branch when forcing isn't an option - if fmap['suffix'] == 'syn': # No fieldmaps, but --use-syn - outputnode.inputs.method = 'FLB ("fieldmap-less", SyN-based)' - sdc_unwarp_wf = syn_sdc_wf - else: # --force-syn was called when other fieldmap was present - sdc_unwarp_wf.__desc__ = None - workflow.connect([ - (syn_sdc_wf, outputnode, [ - ('outputnode.out_reference', 'syn_bold_ref')]), - ]) + # elif 'fieldmap' in fmaps: + # # Import specific workflows here, so we don't break everything with one + # # unused workflow. 
+ # suffices = {f.suffix for f in fmaps['fieldmap']} + # if 'fieldmap' in suffices: + # from .fmap import init_fmap_wf + # outputnode.inputs.method = 'FMB (fieldmap-based)' + # fmap_estimator_wf = init_fmap_wf( + # omp_nthreads=omp_nthreads, + # fmap_bspline=False) + # # set inputs + # fmap_estimator_wf.inputs.inputnode.fieldmap = fmap['fieldmap'] + # fmap_estimator_wf.inputs.inputnode.magnitude = fmap['magnitude'] + + # if fmap['suffix'] == 'phasediff': + # from .phdiff import init_phdiff_wf + # fmap_estimator_wf = init_phdiff_wf(omp_nthreads=omp_nthreads) + # # set inputs + # fmap_estimator_wf.inputs.inputnode.phasediff = fmap['phasediff'] + # fmap_estimator_wf.inputs.inputnode.magnitude = [ + # fmap_ for key, fmap_ in sorted(fmap.items()) + # if key.startswith("magnitude") + # ] + + # sdc_unwarp_wf = init_sdc_unwarp_wf( + # omp_nthreads=omp_nthreads, + # fmap_demean=fmap_demean, + # debug=debug, + # name='sdc_unwarp_wf') + # sdc_unwarp_wf.inputs.inputnode.metadata = bold_meta + + # workflow.connect([ + # (inputnode, sdc_unwarp_wf, [ + # ('distorted_ref', 'inputnode.in_reference'), + # ('bold_ref_brain', 'inputnode.in_reference_brain'), + # ('bold_mask', 'inputnode.in_mask')]), + # (fmap_estimator_wf, sdc_unwarp_wf, [ + # ('outputnode.fmap', 'inputnode.fmap'), + # ('outputnode.fmap_ref', 'inputnode.fmap_ref'), + # ('outputnode.fmap_mask', 'inputnode.fmap_mask')]), + # ]) + + # # FIELDMAP-less path + # if any(fm['suffix'] == 'syn' for fm in fmaps): + # # Select template + # sdc_select_std = pe.Node(KeySelect( + # fields=['std2anat_xfm']), + # name='sdc_select_std', run_without_submitting=True) + + # syn_sdc_wf = init_syn_sdc_wf( + # bold_pe=bold_meta.get('PhaseEncodingDirection', None), + # omp_nthreads=omp_nthreads) + + # workflow.connect([ + # (inputnode, sdc_select_std, [ + # ('template', 'key'), + # ('templates', 'keys'), + # ('std2anat_xfm', 'std2anat_xfm')]), + # (sdc_select_std, syn_sdc_wf, [ + # ('std2anat_xfm', 'inputnode.std2anat_xfm')]), + # (inputnode, syn_sdc_wf, [ + # ('t1_brain', 'inputnode.t1_brain'), + # ('distorted_ref', 'inputnode.distorted_ref'), + # ('bold_ref_brain', 'inputnode.bold_ref_brain'), + # ('template', 'inputnode.template')]), + # ]) + + # # XXX Eliminate branch when forcing isn't an option + # if fmap['suffix'] == 'syn': # No fieldmaps, but --use-syn + # outputnode.inputs.method = 'FLB ("fieldmap-less", SyN-based)' + # sdc_unwarp_wf = syn_sdc_wf + # else: # --force-syn was called when other fieldmap was present + # sdc_unwarp_wf.__desc__ = None + # workflow.connect([ + # (syn_sdc_wf, outputnode, [ + # ('outputnode.out_reference', 'syn_bold_ref')]), + # ]) workflow.connect([ (sdc_unwarp_wf, outputnode, [ ('outputnode.out_warp', 'out_warp'), - ('outputnode.out_reference', 'bold_ref'), + ('outputnode.out_reference', 'distorted_ref'), ('outputnode.out_reference_brain', 'bold_ref_brain'), ('outputnode.out_mask', 'bold_mask')]), ]) diff --git a/sdcflows/workflows/fmap.py b/sdcflows/workflows/fmap.py index 8e12cde4bc..bdd3e6e938 100644 --- a/sdcflows/workflows/fmap.py +++ b/sdcflows/workflows/fmap.py @@ -1,121 +1,129 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ +Fieldmap-based estimation of susceptibility distortions. .. 
_sdc_direct_b0 : Direct B0 mapping sequences ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - When the fieldmap is directly measured with a prescribed sequence (such as :abbr:`SE (spin echo)`), we only need to calculate the corresponding B-Spline coefficients to adapt the fieldmap to the TOPUP tool. -This procedure is described with more detail `here `__. +This procedure is described with more detail `here +`__. -This corresponds to the section 8.9.3 --fieldmap image (and one magnitude image)-- -of the BIDS specification. +This corresponds to `this section of the BIDS specification +`__. """ from nipype.pipeline import engine as pe -from nipype.interfaces import utility as niu, fsl, ants -from nipype.workflows.dmri.fsl.utils import demean_image, cleanup_edge_pipeline +from nipype.interfaces import utility as niu, fsl from niworkflows.engine.workflows import LiterateWorkflow as Workflow -from niworkflows.interfaces.bids import DerivativesDataSink from niworkflows.interfaces.images import IntraModalMerge -from niworkflows.interfaces.masks import BETRPT +from .gre import init_prepare_magnitude_wf, init_fmap_postproc_wf -from ..interfaces.fmap import ( - FieldEnhance, FieldToRadS, FieldToHz -) +from ..interfaces.fmap import FieldToRadS, FieldToHz def init_fmap_wf(omp_nthreads, fmap_bspline, name='fmap_wf'): """ - Fieldmap workflow - when we have a sequence that directly measures the fieldmap - we just need to mask it (using the corresponding magnitude image) to remove the - noise in the surrounding air region, and ensure that units are Hz. - - .. workflow :: - :graph2use: orig - :simple_form: yes - - from sdcflows.workflows.fmap import init_fmap_wf - wf = init_fmap_wf(omp_nthreads=6, fmap_bspline=False) + Estimate the fieldmap based on a field-mapping MRI acquisition. + + When we have a sequence that directly measures the fieldmap, + we just need to mask it (using the corresponding magnitude image) + to remove the noise in the surrounding air region, and ensure that + units are Hz. + + This is split into separate workflows. The first prepares the magnitude image(s) + by aligning and averaging them, bias-correcting the average and applying + a skull-strip. The fieldmap is then unwrapped using PRELUDE, scaled to Hz, + and cleaned up using a configurable combination of smoothing, BSpline + regularization, local median filtering and edge-cleanup. + + Workflow Graph + .. workflow :: + :graph2use: orig + :simple_form: yes + + from sdcflows.workflows.fmap import init_fmap_wf + wf = init_fmap_wf(omp_nthreads=6, fmap_bspline=False) + + Parameters + ---------- + omp_nthreads : int + Maximum number of threads an individual process may use. + fmap_bspline : bool + Whether the fieldmap estimate will be smoothed using BSpline basis. + name : str + Unique name of this workflow. + + Inputs + ------ + magnitude : str + Path to the corresponding magnitude image for anatomical reference. + fieldmap : str + Path to the fieldmap acquisition, a B0 nonuniformity map (aka fieldmap) estimated + elsewhere (``*_fieldmap.nii[.gz]`` in BIDS). + + Outputs + ------- + fmap : str + Path to the estimated fieldmap. + fmap_ref : str + Path to a preprocessed magnitude image reference. + fmap_mask : str + Path to a binary brain mask corresponding to the ``fmap`` and ``fmap_ref`` + pair. 
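+    A minimal usage sketch wiring the two inputs listed above (the file names are
+    placeholders)::
+
+        from sdcflows.workflows.fmap import init_fmap_wf
+        wf = init_fmap_wf(omp_nthreads=1, fmap_bspline=False)
+        wf.inputs.inputnode.fieldmap = 'sub-01_fieldmap.nii.gz'
+        wf.inputs.inputnode.magnitude = 'sub-01_magnitude.nii.gz'
+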
""" - workflow = Workflow(name=name) inputnode = pe.Node(niu.IdentityInterface( fields=['magnitude', 'fieldmap']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface(fields=['fmap', 'fmap_ref', 'fmap_mask']), name='outputnode') - # Merge input magnitude images - magmrg = pe.Node(IntraModalMerge(), name='magmrg') + prepare_magnitude_wf = init_prepare_magnitude_wf(omp_nthreads=omp_nthreads) + # Merge input fieldmap images fmapmrg = pe.Node(IntraModalMerge(zero_based_avg=False, hmc=False), name='fmapmrg') - # de-gradient the fields ("bias/illumination artifact") - n4_correct = pe.Node(ants.N4BiasFieldCorrection(dimension=3, copy_header=True), - name='n4_correct', n_procs=omp_nthreads) - bet = pe.Node(BETRPT(generate_report=True, frac=0.6, mask=True), - name='bet') - ds_report_fmap_mask = pe.Node(DerivativesDataSink( - desc='brain', suffix='mask'), name='ds_report_fmap_mask', - run_without_submitting=True) + fmap_postproc_wf = init_fmap_postproc_wf(omp_nthreads=omp_nthreads, + fmap_bspline=fmap_bspline) workflow.connect([ - (inputnode, magmrg, [('magnitude', 'in_files')]), + (inputnode, prepare_magnitude_wf, [ + ('magnitude', 'inputnode.magnitude'), + ('fieldmap', 'inputnode.source_file')]), (inputnode, fmapmrg, [('fieldmap', 'in_files')]), - (magmrg, n4_correct, [('out_file', 'input_image')]), - (n4_correct, bet, [('output_image', 'in_file')]), - (bet, outputnode, [('mask_file', 'fmap_mask'), - ('out_file', 'fmap_ref')]), - (inputnode, ds_report_fmap_mask, [('fieldmap', 'source_file')]), - (bet, ds_report_fmap_mask, [('out_report', 'in_file')]), + (prepare_magnitude_wf, outputnode, [ + ('outputnode.fmap_mask', 'fmap_mask'), + ('outputnode.fmap_ref', 'fmap_ref')]), + (prepare_magnitude_wf, fmap_postproc_wf, [ + ('outputnode.fmap_mask', 'inputnode.fmap_mask'), + ('outputnode.fmap_ref', 'inputnode.fmap_ref')]), ]) if fmap_bspline: - # despike_threshold=1.0, mask_erode=1), - fmapenh = pe.Node(FieldEnhance(unwrap=False, despike=False), - name='fmapenh', mem_gb=4, n_procs=omp_nthreads) - + # Send a GE fieldmap directly to FieldEnhance workflow.connect([ - (bet, fmapenh, [('mask_file', 'in_mask'), - ('out_file', 'in_magnitude')]), - (fmapmrg, fmapenh, [('out_file', 'in_file')]), - (fmapenh, outputnode, [('out_file', 'fmap')]), + (fmapmrg, fmap_postproc_wf, [('out_file', 'inputnode.fmap')]) ]) - else: torads = pe.Node(FieldToRadS(), name='torads') prelude = pe.Node(fsl.PRELUDE(), name='prelude') tohz = pe.Node(FieldToHz(), name='tohz') - - denoise = pe.Node(fsl.SpatialFilter(operation='median', kernel_shape='sphere', - kernel_size=3), name='denoise') - demean = pe.Node(niu.Function(function=demean_image), name='demean') - cleanup_wf = cleanup_edge_pipeline(name='cleanup_wf') - - applymsk = pe.Node(fsl.ApplyMask(), name='applymsk') - workflow.connect([ - (bet, prelude, [('mask_file', 'mask_file'), - ('out_file', 'magnitude_file')]), + (prepare_magnitude_wf, prelude, [ + ('outputnode.fmap_mask', 'mask_file'), + ('outputnode.fmap_ref', 'magnitude_file')]), (fmapmrg, torads, [('out_file', 'in_file')]), (torads, tohz, [('fmap_range', 'range_hz')]), (torads, prelude, [('out_file', 'phase_file')]), (prelude, tohz, [('unwrapped_phase_file', 'in_file')]), - (tohz, denoise, [('out_file', 'in_file')]), - (denoise, demean, [('out_file', 'in_file')]), - (demean, cleanup_wf, [('out', 'inputnode.in_file')]), - (bet, cleanup_wf, [('mask_file', 'inputnode.in_mask')]), - (cleanup_wf, applymsk, [('outputnode.out_file', 'in_file')]), - (bet, applymsk, [('mask_file', 'mask_file')]), - (applymsk, outputnode, 
[('out_file', 'fmap')]), + (tohz, fmap_postproc_wf, [('out_file', 'inputnode.fmap')]) ]) return workflow diff --git a/sdcflows/workflows/gre.py b/sdcflows/workflows/gre.py new file mode 100644 index 0000000000..6113eeebce --- /dev/null +++ b/sdcflows/workflows/gre.py @@ -0,0 +1,167 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Processing phase-difference (aka :abbr:`GRE (gradient-recalled echo)`) fieldmaps. + +.. _gre-fieldmaps: + +Workflows for processing :abbr:`GRE (gradient recalled echo)` fieldmaps +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Workflows for preparing the magnitude part of :abbr:`GRE (gradient-recalled echo)` fieldmap +images and cleaning up the fieldmaps created from the phases or phasediff. + +""" + +from nipype.pipeline import engine as pe +from nipype.interfaces import utility as niu, fsl, ants +from niflow.nipype1.workflows.dmri.fsl.utils import demean_image, cleanup_edge_pipeline +from niworkflows.engine.workflows import LiterateWorkflow as Workflow +from niworkflows.interfaces.images import IntraModalMerge +from niworkflows.interfaces.masks import BETRPT +from ..interfaces.fmap import FieldEnhance + + +def init_prepare_magnitude_wf(omp_nthreads, name='magnitude_wf'): + """ + Prepare the magnitude part of :abbr:`GRE (gradient-recalled echo)` fieldmaps. + + Average (if not done already) the magnitude part of the + :abbr:`GRE (gradient recalled echo)` images, run N4 to + correct for B1 field nonuniformity, and skull-strip the + preprocessed magnitude. + + Workflow Graph + .. workflow :: + :graph2use: orig + :simple_form: yes + + from sdcflows.workflows.fmap import init_prepare_magnitude_wf + wf = init_prepare_magnitude_wf(omp_nthreads=6) + + Parameters + ---------- + omp_nthreads : int + Maximum number of threads an individual process may use + name : str + Name of workflow (default: ``magnitude_wf``) + + Inputs + ------ + magnitude : pathlike + Path to the corresponding magnitude path(s). + source_file : pathlike + Path to the corresponding phase-difference file. + + Outputs + ------- + fmap_ref : pathlike + Path to the fieldmap reference calculated in this workflow. + fmap_mask : pathlike + Path to a binary brain mask corresponding to the reference above. + + """ + workflow = Workflow(name=name) + inputnode = pe.Node( + niu.IdentityInterface(fields=['magnitude', 'source_file']), name='inputnode') + outputnode = pe.Node( + niu.IdentityInterface(fields=['fmap_ref', 'fmap_mask', 'mask_report']), + name='outputnode') + + # Merge input magnitude images + magmrg = pe.Node(IntraModalMerge(), name='magmrg') + + # de-gradient the fields ("bias/illumination artifact") + n4_correct = pe.Node(ants.N4BiasFieldCorrection(dimension=3, copy_header=True), + name='n4_correct', n_procs=omp_nthreads) + bet = pe.Node(BETRPT(generate_report=True, frac=0.6, mask=True), + name='bet') + + workflow.connect([ + (inputnode, magmrg, [('magnitude', 'in_files')]), + (magmrg, n4_correct, [('out_file', 'input_image')]), + (n4_correct, bet, [('output_image', 'in_file')]), + (bet, outputnode, [('mask_file', 'fmap_mask'), + ('out_file', 'fmap_ref'), + ('out_report', 'mask_report')]), + ]) + return workflow + + +def init_fmap_postproc_wf(omp_nthreads, fmap_bspline, median_kernel_size=3, + name='fmap_postproc_wf'): + """ + Postprocess a B0 map estimated elsewhere. + + This workflow denoises (mostly via smoothing) a B0 fieldmap. + + Workflow Graph + .. 
workflow :: + :graph2use: orig + :simple_form: yes + + from sdcflows.workflows.fmap import init_fmap_postproc_wf + wf = init_fmap_postproc_wf(omp_nthreads=6, fmap_bspline=False) + + Parameters + ---------- + omp_nthreads : int + Maximum number of threads an individual process may use + fmap_bspline : bool + Whether the fieldmap should be smoothed and extrapolated to off-brain regions + using B-Spline basis. + median_kernel_size : int + Size of the kernel when smoothing is done with a median filter. + name : str + Name of workflow (default: ``fmap_postproc_wf``) + + Inputs + ------ + fmap_mask : pathlike + A brain binary mask corresponding to this fieldmap. + fmap_ref : pathlike + A preprocessed magnitude/reference image for the fieldmap. + fmap : pathlike + A B0-field nonuniformity map (aka fieldmap) estimated elsewhere. + + Outputs + ------- + out_fmap : pathlike + Postprocessed fieldmap. + + """ + workflow = Workflow(name=name) + inputnode = pe.Node(niu.IdentityInterface( + fields=['fmap_mask', 'fmap_ref', 'fmap']), name='inputnode') + outputnode = pe.Node(niu.IdentityInterface(fields=['out_fmap']), + name='outputnode') + if fmap_bspline: + # despike_threshold=1.0, mask_erode=1), + fmapenh = pe.Node( + FieldEnhance(unwrap=False, despike=False), + name='fmapenh', mem_gb=4, n_procs=omp_nthreads) + + workflow.connect([ + (inputnode, fmapenh, [('fmap_mask', 'in_mask'), + ('fmap_ref', 'in_magnitude'), + ('fmap', 'in_file')]), + (fmapenh, outputnode, [('out_file', 'out_fmap')]), + ]) + + else: + denoise = pe.Node(fsl.SpatialFilter(operation='median', kernel_shape='sphere', + kernel_size=median_kernel_size), name='denoise') + demean = pe.Node(niu.Function(function=demean_image), name='demean') + cleanup_wf = cleanup_edge_pipeline(name='cleanup_wf') + applymsk = pe.Node(fsl.ApplyMask(), name='applymsk') + + workflow.connect([ + (inputnode, denoise, [('fmap', 'in_file')]), + (denoise, demean, [('out_file', 'in_file')]), + (demean, cleanup_wf, [('out', 'inputnode.in_file')]), + (inputnode, cleanup_wf, [('fmap_mask', 'inputnode.in_mask')]), + (cleanup_wf, applymsk, [('outputnode.out_file', 'in_file')]), + (inputnode, applymsk, [('fmap_mask', 'mask_file')]), + (applymsk, outputnode, [('out_file', 'out_fmap')]), + ]) + + return workflow diff --git a/sdcflows/workflows/pepolar.py b/sdcflows/workflows/pepolar.py index a3b02a9764..409024f1d7 100644 --- a/sdcflows/workflows/pepolar.py +++ b/sdcflows/workflows/pepolar.py @@ -1,103 +1,106 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ +Datasets with multiple phase encoded directions. + .. _sdc_pepolar : -Phase Encoding POLARity (*PEPOLAR*) techniques -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +:abbr:`PEPOLAR (Phase Encoding POLARity)` techniques +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This corresponds to `this section of the BIDS specification +`__. 
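+For example, two acquisitions with ``PhaseEncodingDirection`` values ``j`` and ``j-`` have
+opposed blips along the same axis, while ``j`` and ``i-`` do not. A minimal sketch of that
+comparison (a hypothetical helper for illustration, not part of this module's API)::
+
+    def _opposed_blips(pe_a, pe_b):
+        # same encoding axis (i, j or k) and exactly one of the two is reversed
+        return pe_a[0] == pe_b[0] and pe_a.endswith('-') != pe_b.endswith('-')
+
+    assert _opposed_blips('j', 'j-') is True
+    assert _opposed_blips('j', 'i-') is False
+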
""" import pkg_resources as pkgr -from nipype.pipeline import engine as pe -from nipype.interfaces import afni, ants, fsl, utility as niu from niworkflows.engine.workflows import LiterateWorkflow as Workflow from niworkflows.interfaces import CopyHeader from niworkflows.interfaces.freesurfer import StructuralReference from niworkflows.interfaces.registration import ANTSApplyTransformsRPT - from niworkflows.func.util import init_enhance_and_skullstrip_bold_wf +from nipype.pipeline import engine as pe +from nipype.interfaces import afni, ants, utility as niu + -def init_pepolar_unwarp_wf(bold_meta, epi_fmaps, omp_nthreads=1, +def init_pepolar_unwarp_wf(omp_nthreads=1, matched_pe=False, name="pepolar_unwarp_wf"): """ + Create the PE-Polar field estimation workflow. + This workflow takes in a set of EPI files with opposite phase encoding direction than the target file and calculates a displacements field (in other words, an ANTs-compatible warp file). - This procedure works if there is only one '_epi' file is present + This procedure works if there is only one ``_epi`` file is present (as long as it has the opposite phase encoding direction to the target file). The target file will be used to estimate the field distortion. - However, if there is another '_epi' file present with a matching + However, if there is another ``_epi`` file present with a matching phase encoding direction to the target it will be used instead. - Currently, different phase encoding dimension in the target file and the - '_epi' file(s) (for example 'i' and 'j') is not supported. + Currently, different phase encoding directions in the target file and the + ``_epi`` file(s) (for example, ``i`` and ``j``) is not supported. The warp field correcting for the distortions is estimated using AFNI's - 3dQwarp, with displacement estimation limited to the target file phase + ``3dQwarp``, with displacement estimation limited to the target file phase encoding direction. It also calculates a new mask for the input dataset that takes into account the distortions. - .. workflow :: - :graph2use: orig - :simple_form: yes - - from sdcflows.workflows.pepolar import init_pepolar_unwarp_wf - wf = init_pepolar_unwarp_wf( - bold_meta={'PhaseEncodingDirection': 'j'}, - epi_fmaps=[('/dataset/sub-01/fmap/sub-01_epi.nii.gz', 'j-')], - omp_nthreads=8) - + Workflow Graph + .. workflow :: + :graph2use: orig + :simple_form: yes + + from sdcflows.workflows.pepolar import init_pepolar_unwarp_wf + wf = init_pepolar_unwarp_wf() + + Parameters + ---------- + matched_pe : bool + Whether the input ``fmaps_epi`` will contain images with matched + PE blips or not. Please use :func:`sdcflows.workflows.pepolar.check_pes` + to determine whether they exist or not. + name : str + Name for this workflow + omp_nthreads : int + Parallelize internal tasks across the number of CPUs given by this option. Inputs - - in_reference - the reference image - in_reference_brain - the reference image skullstripped - in_mask - a brain mask corresponding to ``in_reference`` + ------ + fmaps_epi : list of tuple(pathlike, str) + The list of EPI images that will be used in PE-Polar correction, and + their corresponding ``PhaseEncodingDirection`` metadata. + The workflow will use the ``bold_pe_dir`` input to separate out those + EPI acquisitions with opposed PE blips and those with matched PE blips + (the latter could be none, and ``in_reference_brain`` would then be + used). The workflow raises a ``ValueError`` when no images with + opposed PE blips are found. 
+ bold_pe_dir : str + The baseline PE direction. + in_reference : pathlike + The baseline reference image (must correspond to ``bold_pe_dir``). + in_reference_brain : pathlike + The reference image above, but skullstripped. + in_mask : pathlike + Not used, present only for consistency across fieldmap estimation + workflows. Outputs - - out_reference - the ``in_reference`` after unwarping - out_reference_brain - the ``in_reference`` after unwarping and skullstripping - out_warp - the corresponding :abbr:`DFM (displacements field map)` compatible with - ANTs - out_mask - mask of the unwarped input file + ------- + out_reference : pathlike + The ``in_reference`` after unwarping + out_reference_brain : pathlike + The ``in_reference`` after unwarping and skullstripping + out_warp : pathlike + The corresponding :abbr:`DFM (displacements field map)` compatible with + ANTs. + out_mask : pathlike + Mask of the unwarped input file """ - bold_file_pe = bold_meta["PhaseEncodingDirection"] - - args = '-noXdis -noYdis -noZdis' - rm_arg = {'i': '-noXdis', - 'j': '-noYdis', - 'k': '-noZdis'}[bold_file_pe[0]] - args = args.replace(rm_arg, '') - - usable_fieldmaps_matching_pe = [] - usable_fieldmaps_opposite_pe = [] - for fmap, fmap_pe in epi_fmaps: - if fmap_pe == bold_file_pe: - usable_fieldmaps_matching_pe.append(fmap) - elif fmap_pe[0] == bold_file_pe[0]: - usable_fieldmaps_opposite_pe.append(fmap) - - if not usable_fieldmaps_opposite_pe: - raise Exception("None of the discovered fieldmaps has the right " - "phase encoding direction. Possibly a problem with " - "metadata. If not, rerun with '--ignore fieldmaps' to " - "skip distortion correction step.") - workflow = Workflow(name=name) workflow.__desc__ = """\ A deformation field to correct for susceptibility distortions was estimated @@ -106,41 +109,21 @@ def init_pepolar_unwarp_wf(bold_meta, epi_fmaps, omp_nthreads=1, """.format(afni_ver=''.join(['%02d' % v for v in afni.Info().version() or []])) inputnode = pe.Node(niu.IdentityInterface( - fields=['in_reference', 'in_reference_brain', 'in_mask']), name='inputnode') + fields=['fmaps_epi', 'in_reference', 'in_reference_brain', + 'in_mask', 'bold_pe_dir']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface( fields=['out_reference', 'out_reference_brain', 'out_warp', 'out_mask']), name='outputnode') - prepare_epi_opposite_wf = init_prepare_epi_wf(omp_nthreads=omp_nthreads, - name="prepare_epi_opposite_wf") - prepare_epi_opposite_wf.inputs.inputnode.fmaps = usable_fieldmaps_opposite_pe - - qwarp = pe.Node(afni.QwarpPlusMinus(pblur=[0.05, 0.05], - blur=[-1, -1], - noweight=True, - minpatch=9, - nopadWARP=True, - environ={'OMP_NUM_THREADS': '%d' % omp_nthreads}, - args=args), - name='qwarp', n_procs=omp_nthreads) - - workflow.connect([ - (inputnode, prepare_epi_opposite_wf, [('in_reference_brain', 'inputnode.ref_brain')]), - (prepare_epi_opposite_wf, qwarp, [('outputnode.out_file', 'base_file')]), - ]) + prepare_epi_wf = init_prepare_epi_wf(omp_nthreads=omp_nthreads, + matched_pe=matched_pe, + name="prepare_epi_wf") - if usable_fieldmaps_matching_pe: - prepare_epi_matching_wf = init_prepare_epi_wf(omp_nthreads=omp_nthreads, - name="prepare_epi_matching_wf") - prepare_epi_matching_wf.inputs.inputnode.fmaps = usable_fieldmaps_matching_pe - - workflow.connect([ - (inputnode, prepare_epi_matching_wf, [('in_reference_brain', 'inputnode.ref_brain')]), - (prepare_epi_matching_wf, qwarp, [('outputnode.out_file', 'in_file')]), - ]) - else: - workflow.connect([(inputnode, qwarp, [('in_reference_brain', 
'in_file')])]) + qwarp = pe.Node(afni.QwarpPlusMinus( + pblur=[0.05, 0.05], blur=[-1, -1], noweight=True, minpatch=9, nopadWARP=True, + environ={'OMP_NUM_THREADS': '%d' % omp_nthreads}), + name='qwarp', n_procs=omp_nthreads) to_ants = pe.Node(niu.Function(function=_fix_hdr), name='to_ants', mem_gb=0.01) @@ -153,10 +136,18 @@ def init_pepolar_unwarp_wf(bold_meta, epi_fmaps, omp_nthreads=1, interpolation='LanczosWindowedSinc'), name='unwarp_reference') - enhance_and_skullstrip_bold_wf = init_enhance_and_skullstrip_bold_wf(omp_nthreads=omp_nthreads) + enhance_and_skullstrip_bold_wf = init_enhance_and_skullstrip_bold_wf( + omp_nthreads=omp_nthreads) workflow.connect([ + (inputnode, qwarp, [(('bold_pe_dir', _qwarp_args), 'args')]), (inputnode, cphdr_warp, [('in_reference', 'hdr_file')]), + (inputnode, prepare_epi_wf, [ + ('fmaps_epi', 'inputnode.maps_pe'), + ('bold_pe_dir', 'inputnode.epi_pe'), + ('in_reference_brain', 'inputnode.ref_brain')]), + (prepare_epi_wf, qwarp, [('outputnode.opposed_pe', 'base_file'), + ('outputnode.matched_pe', 'in_file')]), (qwarp, cphdr_warp, [('source_warp', 'in_file')]), (cphdr_warp, to_ants, [('out_file', 'in_file')]), (to_ants, unwarp_reference, [('out', 'transforms')]), @@ -174,48 +165,68 @@ def init_pepolar_unwarp_wf(bold_meta, epi_fmaps, omp_nthreads=1, return workflow -def init_prepare_epi_wf(omp_nthreads, name="prepare_epi_wf"): +def init_prepare_epi_wf(omp_nthreads, matched_pe=False, + name="prepare_epi_wf"): """ - This workflow takes in a set of EPI files with with the same phase - encoding direction and returns a single 3D volume ready to be used in - field distortion estimation. + Prepare opposed-PE EPI images for PE-POLAR SDC. + + This workflow takes in a set of EPI files and returns two 3D volumes with + matching and opposed PE directions, ready to be used in field distortion + estimation. The procedure involves: estimating a robust template using FreeSurfer's - 'mri_robust_template', bias field correction using ANTs N4BiasFieldCorrection - and AFNI 3dUnifize, skullstripping using FSL BET and AFNI 3dAutomask, + ``mri_robust_template``, bias field correction using ANTs ``N4BiasFieldCorrection`` + and AFNI ``3dUnifize``, skullstripping using FSL BET and AFNI ``3dAutomask``, and rigid coregistration to the reference using ANTs. - .. workflow :: - :graph2use: orig - :simple_form: yes - - from sdcflows.workflows.pepolar import init_prepare_epi_wf - wf = init_prepare_epi_wf(omp_nthreads=8) - + Workflow Graph + .. workflow :: + :graph2use: orig + :simple_form: yes + + from sdcflows.workflows.pepolar import init_prepare_epi_wf + wf = init_prepare_epi_wf(omp_nthreads=8) + + Parameters + ---------- + matched_pe : bool + Whether the input ``fmaps_epi`` will contain images with matched + PE blips or not. Please use :func:`sdcflows.workflows.pepolar.check_pes` + to determine whether they exist or not. + name : str + Name for this workflow + omp_nthreads : int + Parallelize internal tasks across the number of CPUs given by this option. Inputs - - fmaps - list of 3D or 4D NIfTI images - ref_brain - coregistration reference (skullstripped and bias field corrected) + ------ + epi_pe : str + Phase-encoding direction of the EPI image to be corrected. 
+ maps_pe : list of tuple(pathlike, str) + list of 3D or 4D NIfTI images + ref_brain + coregistration reference (skullstripped and bias field corrected) Outputs - - out_file - single 3D NIfTI file + ------- + opposed_pe : pathlike + single 3D NIfTI file + matched_pe : pathlike + single 3D NIfTI file """ - inputnode = pe.Node(niu.IdentityInterface(fields=['fmaps', 'ref_brain']), + inputnode = pe.Node(niu.IdentityInterface(fields=['epi_pe', 'maps_pe', 'ref_brain']), name='inputnode') - outputnode = pe.Node(niu.IdentityInterface(fields=['out_file']), + outputnode = pe.Node(niu.IdentityInterface(fields=['opposed_pe', 'matched_pe']), name='outputnode') - split = pe.MapNode(fsl.Split(dimension='t'), iterfield='in_file', - name='split') + ants_settings = pkgr.resource_filename('sdcflows', + 'data/translation_rigid.json') + + split = pe.Node(niu.Function(function=_split_epi_lists), name='split') - merge = pe.Node( + merge_op = pe.Node( StructuralReference(auto_detect_sensitivity=True, initial_timepoint=1, fixed_timepoint=True, # Align to first image @@ -224,36 +235,74 @@ def init_prepare_epi_wf(omp_nthreads, name="prepare_epi_wf"): no_iteration=True, subsample_threshold=200, out_file='template.nii.gz'), - name='merge') + name='merge_op') - enhance_and_skullstrip_bold_wf = init_enhance_and_skullstrip_bold_wf( - omp_nthreads=omp_nthreads) + ref_op_wf = init_enhance_and_skullstrip_bold_wf( + omp_nthreads=omp_nthreads, name='ref_op_wf') - ants_settings = pkgr.resource_filename('sdcflows', - 'data/translation_rigid.json') - fmap2ref_reg = pe.Node(ants.Registration(from_file=ants_settings, - output_warped_image=True), - name='fmap2ref_reg', n_procs=omp_nthreads) + op2ref_reg = pe.Node(ants.Registration( + from_file=ants_settings, output_warped_image=True), + name='op2ref_reg', n_procs=omp_nthreads) workflow = Workflow(name=name) + workflow.connect([ + (inputnode, split, [('maps_pe', 'in_files'), + ('epi_pe', 'pe_dir')]), + (split, merge_op, [(('out', _front), 'in_files')]), + (merge_op, ref_op_wf, [('out_file', 'inputnode.in_file')]), + (ref_op_wf, op2ref_reg, [ + ('outputnode.skull_stripped_file', 'moving_image')]), + (inputnode, op2ref_reg, [('ref_brain', 'fixed_image')]), + (op2ref_reg, outputnode, [('warped_image', 'opposed_pe')]), + ]) + + if not matched_pe: + workflow.connect([ + (inputnode, outputnode, [('ref_brain', 'matched_pe')]), + ]) + return workflow + + merge_ma = pe.Node( + StructuralReference(auto_detect_sensitivity=True, + initial_timepoint=1, + fixed_timepoint=True, # Align to first image + intensity_scaling=True, + # 7-DOF (rigid + intensity) + no_iteration=True, + subsample_threshold=200, + out_file='template.nii.gz'), + name='merge_ma') - def _flatten(l): - from nipype.utils.filemanip import filename_to_list - return [item for sublist in l for item in filename_to_list(sublist)] + ref_ma_wf = init_enhance_and_skullstrip_bold_wf( + omp_nthreads=omp_nthreads, name='ref_ma_wf') + + ma2ref_reg = pe.Node(ants.Registration( + from_file=ants_settings, output_warped_image=True), + name='ma2ref_reg', n_procs=omp_nthreads) workflow.connect([ - (inputnode, split, [('fmaps', 'in_file')]), - (split, merge, [(('out_files', _flatten), 'in_files')]), - (merge, enhance_and_skullstrip_bold_wf, [('out_file', 'inputnode.in_file')]), - (enhance_and_skullstrip_bold_wf, fmap2ref_reg, [ + (split, merge_ma, [(('out', _last), 'in_files')]), + (merge_ma, ref_ma_wf, [('out_file', 'inputnode.in_file')]), + (ref_ma_wf, ma2ref_reg, [ ('outputnode.skull_stripped_file', 'moving_image')]), - (inputnode, fmap2ref_reg, 
[('ref_brain', 'fixed_image')]), - (fmap2ref_reg, outputnode, [('warped_image', 'out_file')]), + (inputnode, ma2ref_reg, [('ref_brain', 'fixed_image')]), + (ma2ref_reg, outputnode, [('warped_image', 'matched_pe')]), ]) - return workflow +def _front(inlist): + if isinstance(inlist, (list, tuple)): + return inlist[0] + return inlist + + +def _last(inlist): + if isinstance(inlist, (list, tuple)): + return inlist[-1] + return inlist + + def _fix_hdr(in_file, newpath=None): import nibabel as nb from nipype.utils.filemanip import fname_presuffix @@ -266,3 +315,69 @@ def _fix_hdr(in_file, newpath=None): nb.Nifti1Image(nii.get_data().astype('`__. -Fieldmap preprocessing workflow for fieldmap data structure -8.9.1 in BIDS 1.0.0: one phase diff and at least one magnitude image """ -from nipype.interfaces import ants, fsl, utility as niu +from nipype.interfaces import fsl, afni, utility as niu from nipype.pipeline import engine as pe -from nipype.workflows.dmri.fsl.utils import siemens2rads, demean_image, \ - cleanup_edge_pipeline +from niflow.nipype1.workflows.dmri.fsl.utils import siemens2rads from niworkflows.engine.workflows import LiterateWorkflow as Workflow -from niworkflows.interfaces.bids import ReadSidecarJSON, DerivativesDataSink -from niworkflows.interfaces.images import IntraModalMerge -from niworkflows.interfaces.masks import BETRPT -from ..interfaces.fmap import Phasediff2Fieldmap +from ..interfaces.fmap import Phasediff2Fieldmap, ProcessPhases, DiffPhases +from .gre import init_prepare_magnitude_wf, init_fmap_postproc_wf -def init_phdiff_wf(omp_nthreads, name='phdiff_wf'): +def init_calculate_phasediff_wf(omp_nthreads, name='create_phasediff_wf'): """ - Estimates the fieldmap using a phase-difference image and one or more - magnitude images corresponding to two or more :abbr:`GRE (Gradient Echo sequence)` - acquisitions. The `original code was taken from nipype - `_. + Create a phasediff image from two phase images. + + When two phase images are provided, the images are rescaled to range from 0 to 2*pi, + then unwrapped using FSL's PRELUDE. Finally, the short TE phase image is subtracted from + the long TE phase image. This workflow is based on the FSL FUGUE user guide. + + Workflow Graph + .. workflow :: + :graph2use: orig + :simple_form: yes + + from sdcflows.workflows.phdiff import init_calculate_phasediff_wf + wf = init_calculate_phasediff_wf(omp_nthreads=1) + + Parameters + ---------- + omp_nthreads : int + Maximum number of threads an individual process may use + difference : str + Either 'arctan' or 'unwrapped_subtraction' + + Inputs + ------ + phase1 : pathlike + Path to one of the phase images. + phase2 : pathlike + Path to the other phase image. + phase1_metadata : dict + Metadata dictionary corresponding to the phase1 input + phase2_metadata : dict + Metadata dictionary corresponding to the phase2 input + magnitude : pathlike + Preprocessed magnitude image + mask_file : pathlike + Brain mask image + + Outputs + ------- + phasediff : pathlike + A phasediff image created by subtracting two unwrapped phase images. + phasediff_metadata : dict + A dictionary containing the metadata for the calculated ``phasediff``. + It contains ``EchoTime1`` and ``EchoTime2`` from the original phase images. - .. 
workflow :: - :graph2use: orig - :simple_form: yes + """ + inputnode = pe.Node( + niu.IdentityInterface( + fields=['phase1', 'phase2', 'phase1_metadata', 'phase2_metadata', 'magnitude', + 'mask_file']), + name='inputnode') - from sdcflows.workflows.phdiff import init_phdiff_wf - wf = init_phdiff_wf(omp_nthreads=1) + outputnode = pe.Node(niu.IdentityInterface( + fields=['phasediff', 'phasediff_metadata']), name='outputnode') + workflow = Workflow(name=name) + workflow.__desc__ = """\ +Individual phase images were unwrapped using FSL's PRELUDE and subtracted to +create a phasediff image.""" + + # Scale to radians and unwrap phase1, phase2 + process_phases = pe.Node(ProcessPhases(), name='process_phases') + unwrap_phase1 = pe.Node(fsl.PRELUDE(), name='unwrap_phase1') + unwrap_phase2 = pe.Node(fsl.PRELUDE(), name='unwrap_phase2') + subtract_phases = pe.Node(DiffPhases(), name='subtract_phases') - Outputs:: + workflow.connect([ + (inputnode, unwrap_phase1, [('magnitude', 'magnitude_file')]), + (inputnode, unwrap_phase2, [('magnitude', 'magnitude_file')]), + (inputnode, unwrap_phase1, [('mask_file', 'mask_file')]), + (inputnode, unwrap_phase2, [('mask_file', 'mask_file')]), + (process_phases, unwrap_phase1, [('short_te_phase_image', 'phase_file')]), + (process_phases, unwrap_phase2, [('long_te_phase_image', 'phase_file')]), + (unwrap_phase1, subtract_phases, [('unwrapped_phase_file', 'short_te_phase_image')]), + (unwrap_phase2, subtract_phases, [('unwrapped_phase_file', 'long_te_phase_image')]), + (subtract_phases, outputnode, [('phasediff_file', 'phasediff')]), + (inputnode, process_phases, [ + ('phase1', 'phase1_file'), + ('phase2', 'phase2_file'), + ('phase1_metadata', 'phase1_metadata'), + ('phase2_metadata', 'phase2_metadata')]), + (process_phases, outputnode, [('phasediff_metadata', 'phasediff_metadata')]) + ]) - outputnode.fmap_ref - The average magnitude image, skull-stripped - outputnode.fmap_mask - The brain mask applied to the fieldmap - outputnode.fmap - The estimated fieldmap in Hz + return workflow +def init_phdiff_wf(omp_nthreads, fmap_bspline, create_phasediff=False, name='phdiff_wf'): """ + Distortion correction of EPI sequences using phase-difference maps. + Estimates the fieldmap using a phase-difference image and one or more + magnitude images corresponding to two or more :abbr:`GRE (Gradient Echo sequence)` + acquisitions. The `original code was taken from nipype + `_. + + Workflow Graph + .. workflow :: + :graph2use: orig + :simple_form: yes + + from sdcflows.workflows.phdiff import init_phdiff_wf + wf = init_phdiff_wf(omp_nthreads=1, fmap_bspline=False) + + Parameters + ---------- + omp_nthreads : int + Maximum number of threads an individual process may use + + Inputs + ------ + magnitude : pathlike + Path to the corresponding magnitude path(s). + phasediff : pathlike + Path to the corresponding phase-difference file. + metadata : dict + Metadata dictionary corresponding to the phasediff input + phase1 : pathlike + Path to one of the phase images. + phase2 : pathlike + Path to the another phase image. 
+ phase1_metadata : dict + Metadata dictionary corresponding to the phase1 input + phase2_metadata : dict + Metadata dictionary corresponding to the phase2 input + + Outputs + ------- + fmap_ref : pathlike + The average magnitude image, skull-stripped + fmap_mask : pathlike + The brain mask applied to the fieldmap + fmap : pathlike + The estimated fieldmap in Hz + + """ workflow = Workflow(name=name) workflow.__desc__ = """\ A deformation field to correct for susceptibility distortions was estimated @@ -61,73 +172,81 @@ def init_phdiff_wf(omp_nthreads, name='phdiff_wf'): further improvements of HCP Pipelines [@hcppipelines]. """ - inputnode = pe.Node(niu.IdentityInterface(fields=['magnitude', 'phasediff']), - name='inputnode') + inputnode = pe.Node( + niu.IdentityInterface( + fields=['magnitude', 'phasediff', 'metadata', 'phase1', 'phase2', 'phase1_metadata', + 'phase2_metadata']), + name='inputnode') outputnode = pe.Node(niu.IdentityInterface( fields=['fmap', 'fmap_ref', 'fmap_mask']), name='outputnode') - def _pick1st(inlist): - return inlist[0] - - # Read phasediff echo times - meta = pe.Node(ReadSidecarJSON(bids_validate=False), name='meta', mem_gb=0.01) - - # Merge input magnitude images - magmrg = pe.Node(IntraModalMerge(), name='magmrg') - - # de-gradient the fields ("bias/illumination artifact") - n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3, copy_header=True), - name='n4', n_procs=omp_nthreads) - bet = pe.Node(BETRPT(generate_report=True, frac=0.6, mask=True), - name='bet') - ds_report_fmap_mask = pe.Node(DerivativesDataSink( - desc='brain', suffix='mask'), name='ds_report_fmap_mask', - mem_gb=0.01, run_without_submitting=True) - # uses mask from bet; outputs a mask - # dilate = pe.Node(fsl.maths.MathsCommand( - # nan2zeros=True, args='-kernel sphere 5 -dilM'), name='MskDilate') - - # phase diff -> radians - pha2rads = pe.Node(niu.Function(function=siemens2rads), name='pha2rads') - - # FSL PRELUDE will perform phase-unwrapping - prelude = pe.Node(fsl.PRELUDE(), name='prelude') - - denoise = pe.Node(fsl.SpatialFilter(operation='median', kernel_shape='sphere', - kernel_size=3), name='denoise') - - demean = pe.Node(niu.Function(function=demean_image), name='demean') - - cleanup_wf = cleanup_edge_pipeline(name="cleanup_wf") + prepare_magnitude_wf = init_prepare_magnitude_wf(omp_nthreads=omp_nthreads) + + preprocessed_phasediff = pe.Node( + niu.IdentityInterface(fields=['phasediff', 'phasediff_metadata']), + name='preprocessed_phasediff') + + if create_phasediff: + calc_phdiff_wf = init_calculate_phasediff_wf(omp_nthreads=omp_nthreads) + workflow.connect([ + (inputnode, prepare_magnitude_wf, [('phase1', 'inputnode.source_file')]), + (prepare_magnitude_wf, calc_phdiff_wf, [ + ('outputnode.fmap_mask', 'inputnode.mask_file')]), + (prepare_magnitude_wf, calc_phdiff_wf, [ + ('outputnode.fmap_ref', 'inputnode.magnitude')]), + (inputnode, calc_phdiff_wf, [ + ('phase1', 'inputnode.phase1'), + ('phase2', 'inputnode.phase2'), + ('phase1_metadata', 'inputnode.phase1_metadata'), + ('phase2_metadata', 'inputnode.phase2_metadata')]), + (calc_phdiff_wf, preprocessed_phasediff, [ + ('outputnode.phasediff', 'phasediff'), + ('outputnode.phasediff_metadata', 'phasediff_metadata')]) + ]) + kernel_size = 5 + + else: + + # FSL PRELUDE will perform phase-unwrapping + prelude = pe.Node(fsl.PRELUDE(), name='prelude') + + # phase diff -> radians + pha2rads = pe.Node(niu.Function(function=siemens2rads), name='pha2rads') + + workflow.connect([ + (inputnode, prepare_magnitude_wf, [('phasediff', 
'inputnode.source_file')]), + (prepare_magnitude_wf, prelude, [('outputnode.fmap_ref', 'magnitude_file')]), + (prepare_magnitude_wf, prelude, [('outputnode.fmap_mask', 'mask_file')]), + (inputnode, pha2rads, [('phasediff', 'in_file')]), + (pha2rads, prelude, [('out', 'phase_file')]), + (prelude, preprocessed_phasediff, [('unwrapped_phase_file', 'phasediff')]), + (inputnode, preprocessed_phasediff, [('metadata', 'phasediff_metadata')]), + ]) + kernel_size = 3 + + fmap_postproc_wf = init_fmap_postproc_wf(omp_nthreads=omp_nthreads, + fmap_bspline=fmap_bspline, + median_kernel_size=kernel_size) compfmap = pe.Node(Phasediff2Fieldmap(), name='compfmap') # The phdiff2fmap interface is equivalent to: - # rad2rsec (using rads2radsec from nipype.workflows.dmri.fsl.utils) + # rad2rsec (using rads2radsec from niflow.nipype1.workflows.dmri.fsl.utils) # pre_fugue = pe.Node(fsl.FUGUE(save_fmap=True), name='ComputeFieldmapFUGUE') # rsec2hz (divide by 2pi) workflow.connect([ - (inputnode, meta, [('phasediff', 'in_file')]), - (inputnode, magmrg, [('magnitude', 'in_files')]), - (magmrg, n4, [('out_avg', 'input_image')]), - (n4, prelude, [('output_image', 'magnitude_file')]), - (n4, bet, [('output_image', 'in_file')]), - (bet, prelude, [('mask_file', 'mask_file')]), - (inputnode, pha2rads, [('phasediff', 'in_file')]), - (pha2rads, prelude, [('out', 'phase_file')]), - (meta, compfmap, [('out_dict', 'metadata')]), - (prelude, denoise, [('unwrapped_phase_file', 'in_file')]), - (denoise, demean, [('out_file', 'in_file')]), - (demean, cleanup_wf, [('out', 'inputnode.in_file')]), - (bet, cleanup_wf, [('mask_file', 'inputnode.in_mask')]), - (cleanup_wf, compfmap, [('outputnode.out_file', 'in_file')]), + (inputnode, prepare_magnitude_wf, [('magnitude', 'inputnode.magnitude')]), + (preprocessed_phasediff, fmap_postproc_wf, [('phasediff', 'inputnode.fmap')]), + (prepare_magnitude_wf, fmap_postproc_wf, [ + ('outputnode.fmap_mask', 'inputnode.fmap_mask')]), + (fmap_postproc_wf, compfmap, [('outputnode.out_fmap', 'in_file')]), + (preprocessed_phasediff, compfmap, [('phasediff_metadata', 'metadata')]), (compfmap, outputnode, [('out_file', 'fmap')]), - (bet, outputnode, [('mask_file', 'fmap_mask'), - ('out_file', 'fmap_ref')]), - (inputnode, ds_report_fmap_mask, [('phasediff', 'source_file')]), - (bet, ds_report_fmap_mask, [('out_report', 'in_file')]), + (prepare_magnitude_wf, outputnode, [ + ('outputnode.fmap_mask', 'fmap_mask'), + ('outputnode.fmap_ref', 'fmap_ref')]), ]) return workflow diff --git a/sdcflows/workflows/syn.py b/sdcflows/workflows/syn.py index 300a4a904b..e0442e4ab0 100644 --- a/sdcflows/workflows/syn.py +++ b/sdcflows/workflows/syn.py @@ -1,11 +1,12 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ +Estimating the susceptibility distortions without fieldmaps. + .. _sdc_fieldmapless : Fieldmap-less estimation (experimental) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - In the absence of direct measurements of fieldmap data, we provide an (experimental) option to estimate the susceptibility distortion based on the ANTs symmetric normalization (SyN) technique. @@ -41,6 +42,8 @@ def init_syn_sdc_wf(omp_nthreads, bold_pe=None, atlas_threshold=3, name='syn_sdc_wf'): """ + Build the *fieldmap-less* susceptibility-distortion estimation workflow. 
+ This workflow takes a skull-stripped T1w image and reference BOLD image and estimates a susceptibility distortion correction warp, using ANTs symmetric normalization (SyN) and the average fieldmap atlas described in @@ -55,42 +58,58 @@ def init_syn_sdc_wf(omp_nthreads, bold_pe=None, This technique is a variation on those developed in [Huntenburg2014]_ and [Wang2017]_. - .. workflow :: - :graph2use: orig - :simple_form: yes - - from sdcflows.workflows.syn import init_syn_sdc_wf - wf = init_syn_sdc_wf( - bold_pe='j', - omp_nthreads=8) - - **Inputs** - - bold_ref - reference image - bold_ref_brain - skull-stripped reference image - template : str - Name of template targeted by ``template`` output space - t1_brain - skull-stripped, bias-corrected structural image - std2anat_xfm - inverse registration transform of T1w image to MNI template - - **Outputs** - - out_reference - the ``bold_ref`` image after unwarping - out_reference_brain - the ``bold_ref_brain`` image after unwarping - out_warp - the corresponding :abbr:`DFM (displacements field map)` compatible with - ANTs - out_mask - mask of the unwarped input file + Workflow Graph + .. workflow :: + :graph2use: orig + :simple_form: yes + + from sdcflows.workflows.syn import init_syn_sdc_wf + wf = init_syn_sdc_wf( + bold_pe='j', + omp_nthreads=8) + + Inputs + ------ + bold_ref + reference image + bold_ref_brain + skull-stripped reference image + template : str + Name of template targeted by ``template`` output space + t1_brain + skull-stripped, bias-corrected structural image + std2anat_xfm + inverse registration transform of T1w image to MNI template + + Outputs + ------- + out_reference + the ``bold_ref`` image after unwarping + out_reference_brain + the ``bold_ref_brain`` image after unwarping + out_warp + the corresponding :abbr:`DFM (displacements field map)` compatible with + ANTs + out_mask + mask of the unwarped input file + + References + ---------- + .. [Treiber2016] Treiber, J. M. et al. (2016) Characterization and Correction + of Geometric Distortions in 814 Diffusion Weighted Images, + PLoS ONE 11(3): e0152472. doi:`10.1371/journal.pone.0152472 + `_. + .. [Wang2017] Wang S, et al. (2017) Evaluation of Field Map and Nonlinear + Registration Methods for Correction of Susceptibility Artifacts + in Diffusion MRI. Front. Neuroinform. 11:17. + doi:`10.3389/fninf.2017.00017 + `_. + .. [Huntenburg2014] Huntenburg, J. M. (2014) Evaluating Nonlinear + Coregistration of BOLD EPI and T1w Images. Berlin: Master + Thesis, Freie Universität. `PDF + `_. 
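+    When ``bold_pe`` is missing or does not name an in-plane axis (``i`` or ``j``), the
+    workflow emits a warning and falls back to ``j`` (posterior-anterior). For example
+    (argument values are only illustrative)::
+
+        wf = init_syn_sdc_wf(omp_nthreads=1, bold_pe='k')  # warns, then proceeds as 'j'
+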
""" - if bold_pe is None or bold_pe[0] not in ['i', 'j']: LOGGER.warning('Incorrect phase-encoding direction, assuming PA (posterior-to-anterior).') bold_pe = 'j' @@ -199,7 +218,7 @@ def init_syn_sdc_wf(omp_nthreads, bold_pe=None, def _prior_path(template): - """Selects an appropriate input xform, based on template""" + """Select an appropriate input xform, based on template.""" from pkg_resources import resource_filename return resource_filename( 'sdcflows', 'data/fmap_atlas_2_{}_affine.mat'.format(template)) diff --git a/sdcflows/workflows/tests/__init__.py b/sdcflows/workflows/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sdcflows/workflows/tests/test_pepolar.py b/sdcflows/workflows/tests/test_pepolar.py new file mode 100644 index 0000000000..c35021785a --- /dev/null +++ b/sdcflows/workflows/tests/test_pepolar.py @@ -0,0 +1,194 @@ +"""Test pepolar type of fieldmaps.""" +from os import cpu_count +import pytest +from niworkflows.interfaces.bids import DerivativesDataSink +from nipype.pipeline import engine as pe + +from ..pepolar import ( + check_pes, _split_epi_lists, init_prepare_epi_wf, init_pepolar_unwarp_wf +) + + +def test_split_epi_lists(bids_layouts, tmpdir): + """Test preparation workflow.""" + tmpdir.chdir() + + layout = bids_layouts['testdata'] + + bold = layout.get(suffix='bold', dir='LR', direction='LR', + extension=['.nii.gz', '.nii'])[0] + epidata = layout.get(suffix='epi', desc=None, extension=['.nii.gz', '.nii']) + + # EPI fmaps in HCP101006 have 3 volumes each + a, b = _split_epi_lists( + in_files=[(im.path, im.get_metadata()['PhaseEncodingDirection']) + for im in epidata], + pe_dir=bold.get_metadata()['PhaseEncodingDirection'] + ) + + assert len(a) == 3 + assert len(b) == 3 + + # If we append the BOLD aligned (not that you would do this), then the + # second array should have 53 volumes (50 from _bold, 3 from _epi) + a, b = _split_epi_lists( + in_files=[(im.path, im.get_metadata()['PhaseEncodingDirection']) + for im in epidata + [bold]], + pe_dir=bold.get_metadata()['PhaseEncodingDirection'] + ) + + assert len(a) == 3 + assert len(b) == 53 + + +def test_prepare_epi_wf0(bids_layouts, tmpdir): + """Test preparation workflow.""" + tmpdir.chdir() + + layout = bids_layouts['testdata'] + + bold = layout.get(suffix='bold', dir='LR', direction='LR', + extension=['.nii.gz', '.nii'])[0] + epidata = layout.get(suffix='epi', dir='LR', direction='LR', + desc=None, extension=['.nii.gz', '.nii']) + + with pytest.raises(ValueError): + check_pes([ + (im.path, im.get_metadata()['PhaseEncodingDirection']) + for im in epidata], bold.get_metadata()['PhaseEncodingDirection']) + + +def test_prepare_epi_wf1(bids_layouts, tmpdir): + """Test preparation workflow.""" + tmpdir.chdir() + + layout = bids_layouts['testdata'] + + bold = layout.get(suffix='bold', dir='LR', direction='LR', + extension=['.nii.gz', '.nii'])[0] + boldref = layout.get(suffix='boldref', dir='LR', direction='LR', + extension=['.nii.gz', '.nii'])[0] + epidata = layout.get(suffix='epi', desc=None, extension=['.nii.gz', '.nii']) + + matched_pe = check_pes([(im.path, im.get_metadata()['PhaseEncodingDirection']) + for im in epidata], bold.get_metadata()['PhaseEncodingDirection']) + + assert matched_pe is True + + wf = init_prepare_epi_wf(omp_nthreads=1, matched_pe=matched_pe) + wf.inputs.inputnode.maps_pe = [(im.path, im.get_metadata()['PhaseEncodingDirection']) + for im in epidata] + wf.inputs.inputnode.ref_brain = boldref.path + wf.inputs.inputnode.epi_pe = 
bold.get_metadata()['PhaseEncodingDirection'] + + +def test_prepare_epi_wf2(bids_layouts, tmpdir): + """Test preparation workflow.""" + tmpdir.chdir() + + layout = bids_layouts['testdata'] + + bold = layout.get(suffix='bold', dir='LR', direction='LR', + extension=['.nii.gz', '.nii'])[0] + boldref = layout.get(suffix='boldref', dir='LR', direction='LR', + extension=['.nii.gz', '.nii'])[0] + epidata = layout.get(suffix='epi', dir='RL', direction='RL', + desc=None, extension=['.nii.gz', '.nii']) + + matched_pe = check_pes([(im.path, im.get_metadata()['PhaseEncodingDirection']) + for im in epidata], bold.get_metadata()['PhaseEncodingDirection']) + + assert matched_pe is False + + wf = init_prepare_epi_wf(omp_nthreads=1, matched_pe=matched_pe) + wf.inputs.inputnode.maps_pe = [(im.path, im.get_metadata()['PhaseEncodingDirection']) + for im in epidata] + wf.inputs.inputnode.ref_brain = boldref.path + wf.inputs.inputnode.epi_pe = bold.get_metadata()['PhaseEncodingDirection'] + + +@pytest.mark.parametrize('dataset', [ + 'ds001600', + 'testdata', +]) +def test_pepolar_wf1(bids_layouts, output_path, dataset, workdir): + """Test preparation workflow.""" + layout = bids_layouts[dataset] + + if dataset == 'testdata': + bold = layout.get(suffix='bold', dir='LR', direction='LR', + extension=['.nii.gz', '.nii'])[0] + boldref = layout.get(suffix='boldref', dir='LR', direction='LR', desc='brain', + extension=['.nii.gz', '.nii'])[0] + elif dataset == 'ds001600': + bold = layout.get(suffix='bold', acquisition='AP', + extension=['.nii.gz', '.nii'])[0] + + epidata = layout.get(suffix='epi', desc=None, extension=['.nii.gz', '.nii']) + + matched_pe = check_pes([(im.path, im.get_metadata()['PhaseEncodingDirection']) + for im in epidata], bold.get_metadata()['PhaseEncodingDirection']) + + wf = init_pepolar_unwarp_wf(omp_nthreads=cpu_count(), matched_pe=matched_pe) + wf.inputs.inputnode.fmaps_epi = [(im.path, im.get_metadata()['PhaseEncodingDirection']) + for im in epidata] + wf.inputs.inputnode.bold_pe_dir = bold.get_metadata()['PhaseEncodingDirection'] + + if output_path: + from nipype.interfaces import utility as niu + from ..pepolar import Workflow + from ...interfaces.reportlets import FieldmapReportlet + + boiler = Workflow(name='boiler_%s' % dataset) + + split_field = pe.Node(niu.Function(function=_split_field), name='split_field') + + if dataset == 'ds001600': + from niworkflows.func.util import init_bold_reference_wf + gen_ref = init_bold_reference_wf( + omp_nthreads=cpu_count(), + bold_file=bold.path) + boiler.connect([ + (gen_ref, wf, [ + ('outputnode.ref_image', 'inputnode.in_reference'), + ('outputnode.ref_image_brain', 'inputnode.in_reference_brain')]) + ]) + else: + wf.inputs.inputnode.in_reference_brain = boldref.path + wf.inputs.inputnode.in_reference = boldref.path + + rep = pe.Node(FieldmapReportlet(), 'simple_report') + dsink = pe.Node(DerivativesDataSink( + base_directory=str(output_path), keep_dtype=True, + desc='pepolar'), name='dsink') + dsink.interface.out_path_base = 'sdcflows' + dsink.inputs.source_file = epidata[0].path + + boiler.connect([ + (wf, split_field, [ + ('inputnode.bold_pe_dir', 'pe_dir'), + ('outputnode.out_warp', 'in_field')]), + (split_field, rep, [ + ('out', 'fieldmap')]), + (wf, rep, [ + # ('outputnode.out_warp', 'fieldmap'), + ('outputnode.out_reference_brain', 'reference'), + ('outputnode.out_mask', 'mask')]), + (rep, dsink, [('out_report', 'in_file')]), + ]) + + if workdir: + boiler.base_dir = str(workdir) + boiler.run(plugin='MultiProc', plugin_args={'n_proc': 
+
+
+def _split_field(in_field, pe_dir):
+    from os.path import abspath
+    import numpy as np
+    import nibabel as nb
+    axis = 'ijk'.index(pe_dir[0])
+    im = nb.load(in_field)
+    data = np.squeeze(im.get_fdata())[..., axis]
+    dirnii = nb.Nifti1Image(data, im.affine, im.header)
+    dirnii.to_filename('fieldmap.nii.gz')
+    return abspath('fieldmap.nii.gz')
diff --git a/sdcflows/workflows/tests/test_phdiff.py b/sdcflows/workflows/tests/test_phdiff.py
new file mode 100644
index 0000000000..e55719fc3d
--- /dev/null
+++ b/sdcflows/workflows/tests/test_phdiff.py
@@ -0,0 +1,99 @@
+"""Test phase-difference type of fieldmaps."""
+import pytest
+from niworkflows.interfaces.bids import DerivativesDataSink
+from nipype.pipeline import engine as pe
+
+from ..phdiff import init_phdiff_wf, Workflow
+
+
+@pytest.mark.parametrize('dataset', [
+    'ds001600',
+    'testdata',
+])
+def test_workflow(bids_layouts, tmpdir, output_path, dataset, workdir):
+    """Test creation of the workflow."""
+    tmpdir.chdir()
+
+    data = bids_layouts[dataset]
+    wf = Workflow(name='phdiff_%s' % dataset)
+    phdiff_wf = init_phdiff_wf(omp_nthreads=2, fmap_bspline=False)
+    phdiff_wf.inputs.inputnode.magnitude = data.get(
+        suffix=['magnitude1', 'magnitude2'],
+        acq='v4',
+        return_type='file',
+        extension=['.nii', '.nii.gz'])
+
+    phdiff_file = data.get(suffix='phasediff', acq='v4',
+                           extension=['.nii', '.nii.gz'])[0]
+
+    phdiff_wf.inputs.inputnode.phasediff = phdiff_file.path
+    phdiff_wf.inputs.inputnode.metadata = phdiff_file.get_metadata()
+
+    if output_path:
+        from ...interfaces.reportlets import FieldmapReportlet
+        rep = pe.Node(FieldmapReportlet(), 'simple_report')
+
+        dsink = pe.Node(DerivativesDataSink(
+            base_directory=str(output_path), keep_dtype=True), name='dsink')
+        dsink.interface.out_path_base = 'sdcflows'
+        dsink.inputs.source_file = phdiff_file.path
+
+        wf.connect([
+            (phdiff_wf, rep, [
+                ('outputnode.fmap', 'fieldmap'),
+                ('outputnode.fmap_ref', 'reference'),
+                ('outputnode.fmap_mask', 'mask')]),
+            (rep, dsink, [('out_report', 'in_file')]),
+        ])
+    else:
+        wf.add_nodes([phdiff_wf])
+
+    if workdir:
+        wf.base_dir = str(workdir)
+
+    wf.run()
+
+
+def test_phases_workflow(bids_layouts, tmpdir, output_path):
+    """Test creation of the workflow."""
+    tmpdir.chdir()
+
+    data = bids_layouts['ds001600']
+    wf = Workflow(name='tstphasesworkflow')
+    phdiff_wf = init_phdiff_wf(omp_nthreads=1, fmap_bspline=False, create_phasediff=True)
+    phdiff_wf.inputs.inputnode.magnitude = data.get(
+        suffix=['magnitude1', 'magnitude2'],
+        acq='v2',
+        return_type='file',
+        extension=['.nii', '.nii.gz'])
+
+    phase1_file = data.get(suffix='phase1', acquisition='v2',
+                           extension=['.nii', '.nii.gz'])[0]
+    phase2_file = data.get(suffix='phase2', acquisition='v2',
+                           extension=['.nii', '.nii.gz'])[0]
+
+    phdiff_wf.inputs.inputnode.phase1 = phase1_file.path
+    phdiff_wf.inputs.inputnode.phase2 = phase2_file.path
+    phdiff_wf.inputs.inputnode.phase1_metadata = phase1_file.get_metadata()
+    phdiff_wf.inputs.inputnode.phase2_metadata = phase2_file.get_metadata()
+
+    if output_path:
+        from ...interfaces.reportlets import FieldmapReportlet
+        rep = pe.Node(FieldmapReportlet(), 'simple_report')
+
+        dsink = pe.Node(DerivativesDataSink(
+            base_directory=str(output_path), keep_dtype=True), name='dsink')
+        dsink.interface.out_path_base = 'sdcflows'
+        dsink.inputs.source_file = phase1_file.path
+
+        wf.connect([
+            (phdiff_wf, rep, [
+                ('outputnode.fmap', 'fieldmap'),
+                ('outputnode.fmap_ref', 'reference'),
+                ('outputnode.fmap_mask', 'mask')]),
+            (rep, dsink, [('out_report', 'in_file')]),
+        ])
+    else:
+        wf.add_nodes([phdiff_wf])
+
+    wf.run()
diff --git a/setup.cfg b/setup.cfg
index 5d46683cb0..0987e81a39
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,7 +6,7 @@ author_email = code@oscaresteban.es
 maintainer = Oscar Esteban
 maintainer_email = code@oscaresteban.es
 description = Susceptibility Distortion Correction (SDC) workflows for EPI MR schemes.
-long_description = file:long_description.rst
+long_description = file:README.rst
 long_description_content_type = text/x-rst; charset=UTF-8
 license = Apache-2.0
 classifiers =
@@ -24,8 +24,10 @@ setup_requires =
     setuptools >=40.8
 install_requires =
     nibabel >=2.2.1
-    nipype >=1.2.0
-    niworkflows ~=0.10
+    niflow-nipype1-workflows ~= 0.0.1
+    nipype ~= 1.3.0
+    # nipype @ git+https://github.com/nipy/nipype.git@5058ab7dfa6c32d4f3a16ee698a8fc4e3d28bee7
+    niworkflows ~= 1.0.0rc1
     numpy
     pybids ~=0.9.2
     templateflow ~= 0.4
@@ -41,19 +43,20 @@ include_package_data = True
 
 [options.extras_require]
 doc =
-    nbsphinx
-    packaging
-    pydot >=1.2.3
+    sphinx >= 2.1.2
+    pydot >= 1.2.3
     pydotplus
-    sphinx >=1.5.3
-    sphinx-argparse
     sphinx_rtd_theme
+    sphinxcontrib-apidoc ~= 0.3.0
+    sphinxcontrib-napoleon
+    sphinxcontrib-versioning
 docs =
     %(doc)s
 tests =
-    coverage
-    codecov
     pytest
+    pytest-xdist
+    pytest-cov == 2.5.1
+    coverage
 all =
     %(doc)s
     %(tests)s
@@ -79,9 +82,21 @@ parentdir_prefix =
 
 [flake8]
 max-line-length = 99
-doctests = True
-exclude=*build/
+doctests = False
+exclude =
+    *build/
+    docs/sphinxext/
+    docs/tools/
 putty-ignore =
     */__init__.py : +F401
     docs/conf.py : +E265
     /^\s*\.\. _.*?: http/ : +E501
+
+[tool:pytest]
+norecursedirs = .git
+addopts = -svx --doctest-modules
+doctest_optionflags = ALLOW_UNICODE NORMALIZE_WHITESPACE
+env =
+    PYTHONHASHSEED=0
+filterwarnings =
+    ignore::DeprecationWarning
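
Note on running the new test modules: they request pytest fixtures (``bids_layouts``, ``output_path``, ``workdir``) that are not defined in any hunk of this patch, so they are presumably provided by a ``conftest.py`` elsewhere in the package. The following is only a minimal sketch of what such a conftest could look like; the ``TEST_DATA_HOME``, ``WORKDIR`` and ``OUTPUT_PATH`` environment variables and the dataset folder names are illustrative assumptions, not part of this changeset.

    """Hypothetical conftest.py sketch for the fixtures used by the new tests."""
    import os
    from pathlib import Path

    import pytest
    from bids.layout import BIDSLayout

    # Assumed location of the cached test datasets (ds001600, testdata); not part of this patch.
    DATA_HOME = Path(os.getenv('TEST_DATA_HOME', str(Path.home() / '.cache' / 'sdcflows')))


    @pytest.fixture
    def bids_layouts():
        """Return one BIDSLayout per locally available test dataset."""
        return {
            name: BIDSLayout(str(DATA_HOME / name), validate=False)
            for name in ('ds001600', 'testdata')
            if (DATA_HOME / name).exists()
        }


    @pytest.fixture
    def workdir():
        """Optional scratch directory; ``None`` lets nipype create a temporary one."""
        path = os.getenv('WORKDIR')
        return Path(path) if path else None


    @pytest.fixture
    def output_path():
        """Optional derivatives directory; reportlets are only written when this is set."""
        path = os.getenv('OUTPUT_PATH')
        return Path(path) if path else None

With fixtures along these lines in place, the suite can be exercised using the dependencies added to the ``tests`` extra above, for example ``python -m pytest -n auto --cov sdcflows sdcflows/workflows/tests`` (``-n`` is provided by pytest-xdist, ``--cov`` by pytest-cov).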