From 0bb16d25c183ff2f4ef2d936326e040f2056da73 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 15 Nov 2024 17:58:38 -0500
Subject: [PATCH 01/13] sty: Disable black, configure ruff
---
pyproject.toml | 48 +++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 45 insertions(+), 3 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index 5624790eb3a..dbcea6fd6ac 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -109,11 +109,53 @@ raw-options = {version_scheme = "release-branch-semver" }
[tool.hatch.build.hooks.vcs]
version-file = "niworkflows/_version.py"
+# Disable black
[tool.black]
+exclude = ".*"
+
+[tool.ruff]
line-length = 99
-target-version = ['py37']
-skip-string-normalization = true
-extend-exclude = '_version.py'
+
+[tool.ruff.lint]
+extend-select = [
+ "F",
+ "E",
+ "W",
+ "I",
+ "UP",
+ "YTT",
+ "S",
+ "BLE",
+ "B",
+ "A",
+ # "CPY",
+ "C4",
+ "DTZ",
+ "T10",
+ # "EM",
+ "EXE",
+ "FA",
+ "ISC",
+ "ICN",
+ "PT",
+ "Q",
+]
+ignore = [
+ "S311", # We are not using random for cryptographic purposes
+ "ISC001",
+ "S603",
+]
+
+[tool.ruff.lint.flake8-quotes]
+inline-quotes = "single"
+
+[tool.ruff.lint.extend-per-file-ignores]
+"*/test_*.py" = ["S101"]
+"docs/conf.py" = ["A001"]
+"docs/sphinxext/github_link.py" = ["BLE001"]
+
+[tool.ruff.format]
+quote-style = "single"
[tool.pytest.ini_options]
minversion = "6"
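As a rough illustration of what the newly selected rule families enforce (a sketch
with a hypothetical module, not part of this patch): flake8-bugbear ("B") and
flake8-bandit ("S") are now on by default, with S101 exempted for test files via
the per-file ignores above.

    def collect(item, bucket=[]):  # B006: mutable default argument
        assert item is not None  # S101: allowed only under */test_*.py
        bucket.append(item)
        return bucket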
From 69d818a85eb1ce381dc6216e8d82176e50445f0e Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 15 Nov 2024 17:59:56 -0500
Subject: [PATCH 02/13] sty: ruff format [ignore-rev]
Follow-up with ruff check --select ISC001 --fix
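The follow-up pass is needed because ISC001 sits in the `ignore` list for routine
checks (it conflicts with the formatter), yet `ruff format` can join previously
wrapped string fragments onto one line without merging them. An illustrative
before/after, not taken verbatim from this diff:

    # after `ruff format`, an implicit concatenation may land on a single line:
    raise ValueError('Docstring contains both a Returns ' 'and Yields section.')
    # `ruff check --select ISC001 --fix` then merges the fragments:
    raise ValueError('Docstring contains both a Returns and Yields section.')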
---
docker/fetch_templates.py | 46 +-
docs/conf.py | 146 ++---
docs/sphinxext/docscrape.py | 348 ++++++-----
docs/sphinxext/docscrape_sphinx.py | 232 ++++----
docs/sphinxext/github.py | 27 +-
docs/sphinxext/math_dollar.py | 25 +-
docs/sphinxext/numpydoc.py | 204 +++----
niworkflows/__about__.py | 19 +-
niworkflows/__init__.py | 18 +-
niworkflows/anat/ants.py | 317 +++++-----
niworkflows/anat/coregistration.py | 93 ++-
niworkflows/anat/freesurfer.py | 34 +-
niworkflows/anat/skullstrip.py | 25 +-
niworkflows/cli/boldref.py | 45 +-
niworkflows/conftest.py | 49 +-
niworkflows/engine/__init__.py | 1 +
niworkflows/engine/plugin.py | 76 +--
niworkflows/engine/tests/test_plugin.py | 50 +-
niworkflows/engine/tests/test_workflows.py | 34 +-
niworkflows/engine/workflows.py | 3 +-
niworkflows/func/tests/test_util.py | 61 +-
niworkflows/func/util.py | 147 +++--
niworkflows/interfaces/bids.py | 429 +++++++-------
niworkflows/interfaces/bold.py | 30 +-
niworkflows/interfaces/cifti.py | 169 +++---
niworkflows/interfaces/confounds.py | 199 ++++---
niworkflows/interfaces/conftest.py | 8 +-
niworkflows/interfaces/fixes.py | 50 +-
niworkflows/interfaces/freesurfer.py | 168 +++---
niworkflows/interfaces/header.py | 179 +++---
niworkflows/interfaces/images.py | 299 +++++-----
niworkflows/interfaces/itk.py | 97 ++--
niworkflows/interfaces/morphology.py | 31 +-
niworkflows/interfaces/nibabel.py | 295 +++++-----
niworkflows/interfaces/nilearn.py | 89 ++-
niworkflows/interfaces/nitransforms.py | 38 +-
niworkflows/interfaces/norm.py | 209 +++----
niworkflows/interfaces/plotting.py | 144 +++--
niworkflows/interfaces/probmaps.py | 57 +-
niworkflows/interfaces/reportlets/__init__.py | 4 +-
niworkflows/interfaces/reportlets/base.py | 29 +-
niworkflows/interfaces/reportlets/masks.py | 72 +--
.../interfaces/reportlets/registration.py | 127 ++--
.../interfaces/reportlets/segmentation.py | 73 +--
niworkflows/interfaces/space.py | 17 +-
niworkflows/interfaces/surf.py | 260 ++++-----
niworkflows/interfaces/tests/data/__init__.py | 1 +
niworkflows/interfaces/tests/test_bids.py | 541 +++++++++---------
niworkflows/interfaces/tests/test_cifti.py | 10 +-
.../interfaces/tests/test_freesurfer.py | 16 +-
niworkflows/interfaces/tests/test_header.py | 58 +-
niworkflows/interfaces/tests/test_images.py | 78 +--
niworkflows/interfaces/tests/test_itk.py | 44 +-
.../interfaces/tests/test_morphology.py | 19 +-
niworkflows/interfaces/tests/test_nibabel.py | 84 ++-
niworkflows/interfaces/tests/test_plotting.py | 25 +-
niworkflows/interfaces/tests/test_utility.py | 3 +-
niworkflows/interfaces/utility.py | 131 ++---
niworkflows/interfaces/workbench.py | 399 ++++++-------
niworkflows/reports/__init__.py | 2 +-
niworkflows/reports/core.py | 129 ++---
niworkflows/reports/tests/test_core.py | 207 +++----
niworkflows/testing.py | 21 +-
niworkflows/tests/conftest.py | 11 +-
niworkflows/tests/data/__init__.py | 1 +
niworkflows/tests/generate_data.py | 22 +-
niworkflows/tests/test_confounds.py | 109 ++--
niworkflows/tests/test_registration.py | 111 ++--
niworkflows/tests/test_segmentation.py | 147 +++--
niworkflows/tests/test_utils.py | 30 +-
niworkflows/tests/test_viz.py | 129 +++--
niworkflows/utils/bids.py | 88 ++-
niworkflows/utils/connections.py | 5 +-
niworkflows/utils/debug.py | 2 +-
niworkflows/utils/images.py | 56 +-
niworkflows/utils/misc.py | 111 ++--
niworkflows/utils/spaces.py | 134 +++--
niworkflows/utils/testing.py | 31 +-
niworkflows/utils/tests/test_bids_skeleton.py | 176 +++---
niworkflows/utils/tests/test_images.py | 36 +-
niworkflows/utils/tests/test_misc.py | 27 +-
niworkflows/utils/tests/test_spaces.py | 59 +-
niworkflows/utils/tests/test_utils.py | 41 +-
niworkflows/utils/timeseries.py | 38 +-
niworkflows/viz/__init__.py | 2 +-
niworkflows/viz/notebook.py | 13 +-
niworkflows/viz/plots.py | 420 +++++++-------
niworkflows/viz/utils.py | 246 ++++----
niworkflows/workflows/epi/refmap.py | 50 +-
.../workflows/epi/tests/test_refmap.py | 5 +-
90 files changed, 4264 insertions(+), 4677 deletions(-)
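The bulk of this commit is the mechanical re-quoting implied by the quote-style
setting; the first hunk below shows the pattern:

    # black style (before)
    template = "MNI152NLin2009cAsym"
    tf.get(template, resolution=2, desc="brain", suffix="mask")

    # ruff format, quote-style = 'single' (after)
    template = 'MNI152NLin2009cAsym'
    tf.get(template, resolution=2, desc='brain', suffix='mask')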
diff --git a/docker/fetch_templates.py b/docker/fetch_templates.py
index bc81bf8e229..8991ee118cf 100755
--- a/docker/fetch_templates.py
+++ b/docker/fetch_templates.py
@@ -19,11 +19,11 @@ def fetch_MNI2009():
tpl-MNI152NLin2009cAsym/tpl-MNI152NLin2009cAsym_res-02_desc-fMRIPrep_boldref.nii.gz
tpl-MNI152NLin2009cAsym/tpl-MNI152NLin2009cAsym_res-01_label-brain_probseg.nii.gz
"""
- template = "MNI152NLin2009cAsym"
+ template = 'MNI152NLin2009cAsym'
- tf.get(template, resolution=2, desc="brain", suffix="mask")
- tf.get(template, resolution=2, desc="fMRIPrep", suffix="boldref")
- tf.get(template, resolution=1, label="brain", suffix="probseg")
+ tf.get(template, resolution=2, desc='brain', suffix='mask')
+ tf.get(template, resolution=2, desc='fMRIPrep', suffix='boldref')
+ tf.get(template, resolution=1, label='brain', suffix='probseg')
def fetch_MNI152Lin():
@@ -33,10 +33,10 @@ def fetch_MNI152Lin():
tpl-MNI152Lin/tpl-MNI152Lin_res-02_T1w.nii.gz
tpl-MNI152Lin/tpl-MNI152Lin_res-02_desc-brain_mask.nii.gz
"""
- template = "MNI152Lin"
+ template = 'MNI152Lin'
- tf.get(template, resolution=2, desc=None, suffix="T1w")
- tf.get(template, resolution=2, desc="brain", suffix="mask")
+ tf.get(template, resolution=2, desc=None, suffix='T1w')
+ tf.get(template, resolution=2, desc='brain', suffix='mask')
def fetch_OASIS():
@@ -50,13 +50,13 @@ def fetch_OASIS():
tpl-OASIS30ANTs/tpl-OASIS30ANTs_res-01_desc-brain_mask.nii.gz
tpl-OASIS30ANTs/tpl-OASIS30ANTs_res-01_desc-BrainCerebellumExtraction_mask.nii.gz
"""
- template = "OASIS30ANTs"
+ template = 'OASIS30ANTs'
- tf.get(template, resolution=1, desc="4", suffix="dseg")
- tf.get(template, resolution=1, desc=None, suffix="T1w")
- tf.get(template, resolution=1, label="brain", suffix="mask")
- tf.get(template, resolution=1, desc="BrainCerebellumExtraction", suffix="mask")
- tf.get(template, resolution=1, label="brain", suffix="probseg")
+ tf.get(template, resolution=1, desc='4', suffix='dseg')
+ tf.get(template, resolution=1, desc=None, suffix='T1w')
+ tf.get(template, resolution=1, label='brain', suffix='mask')
+ tf.get(template, resolution=1, desc='BrainCerebellumExtraction', suffix='mask')
+ tf.get(template, resolution=1, label='brain', suffix='probseg')
def fetch_fsaverage():
@@ -70,10 +70,10 @@ def fetch_fsaverage():
tpl-fsaverage/tpl-fsaverage_hemi-L_den-164k_midthickness.surf.gii
tpl-fsaverage/tpl-fsaverage_hemi-R_den-164k_midthickness.surf.gii
"""
- template = "fsaverage"
+ template = 'fsaverage'
- tf.get(template, density="164k", desc="std", suffix="sphere")
- tf.get(template, density="164k", suffix="midthickness")
+ tf.get(template, density='164k', desc='std', suffix='sphere')
+ tf.get(template, density='164k', suffix='midthickness')
def fetch_fsLR():
@@ -89,7 +89,7 @@ def fetch_fsLR():
tpl-fsLR/tpl-fsLR_space-fsaverage_hemi-L_den-32k_sphere.surf.gii
tpl-fsLR/tpl-fsLR_space-fsaverage_hemi-R_den-32k_sphere.surf.gii
"""
- tf.get("fsLR", density="32k")
+ tf.get('fsLR', density='32k')
def fetch_all():
@@ -100,21 +100,21 @@ def fetch_all():
fetch_fsLR()
-if __name__ == "__main__":
+if __name__ == '__main__':
parser = argparse.ArgumentParser(
- description="Helper script for pre-caching required templates to run fMRIPrep",
+ description='Helper script for pre-caching required templates to run fMRIPrep',
)
parser.add_argument(
- "--tf-dir",
+ '--tf-dir',
type=os.path.abspath,
- help="Directory to save templates in. If not provided, templates will be saved to"
- " `${HOME}/.cache/templateflow`.",
+ help='Directory to save templates in. If not provided, templates will be saved to'
+ ' `${HOME}/.cache/templateflow`.',
)
opts = parser.parse_args()
# set envvar (if necessary) prior to templateflow import
if opts.tf_dir is not None:
- os.environ["TEMPLATEFLOW_HOME"] = opts.tf_dir
+ os.environ['TEMPLATEFLOW_HOME'] = opts.tf_dir
import templateflow.api as tf
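The late `import templateflow.api as tf` at the bottom of this script is
deliberate: TEMPLATEFLOW_HOME must be set before the package is imported, since
templateflow resolves its cache location at import time. The same pattern in
isolation (hypothetical path):

    import os

    # must precede `import templateflow.api`, which reads the environment
    # at import time to locate its template cache
    os.environ['TEMPLATEFLOW_HOME'] = '/opt/templateflow'

    import templateflow.api as tf  # noqa: E402 -- import after env setup

    tf.get('MNI152Lin', resolution=2, desc='brain', suffix='mask')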
diff --git a/docs/conf.py b/docs/conf.py
index a8284a0fa80..30ed709a379 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -21,12 +21,12 @@
# Prevent etelemetry from loading at all
# Could set NO_ET environment variable, but why?
MOCKS = [
- "etelemetry",
- "matplotlib",
- "matplotlib.pyplot",
- "matplotlib.cm",
- "matplotlib.colors",
- "matplotlib.colorbar",
+ 'etelemetry',
+ 'matplotlib',
+ 'matplotlib.pyplot',
+ 'matplotlib.cm',
+ 'matplotlib.colors',
+ 'matplotlib.colorbar',
]
sys.modules.update({mod: mock.Mock() for mod in MOCKS})
@@ -38,12 +38,12 @@
from niworkflows import __version__, __copyright__, __packagename__
-sys.path.append(os.path.abspath("sphinxext"))
+sys.path.append(os.path.abspath('sphinxext'))
# -- Project information -----------------------------------------------------
project = __packagename__
copyright = __copyright__
-author = "The NiPreps Developers"
+author = 'The NiPreps Developers'
# The short X.Y version
version = Version(__version__).base_version
@@ -53,30 +53,30 @@
# -- General configuration ---------------------------------------------------
extensions = [
- "sphinx.ext.autodoc",
- "sphinx.ext.coverage",
- "sphinx.ext.doctest",
- "sphinx.ext.githubpages",
- "sphinx.ext.ifconfig",
- "sphinx.ext.intersphinx",
- "sphinx.ext.mathjax",
- "sphinx.ext.viewcode",
- "sphinxcontrib.apidoc",
- "nipype.sphinxext.apidoc",
- "nipype.sphinxext.plot_workflow",
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.doctest',
+ 'sphinx.ext.githubpages',
+ 'sphinx.ext.ifconfig',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.mathjax',
+ 'sphinx.ext.viewcode',
+ 'sphinxcontrib.apidoc',
+ 'nipype.sphinxext.apidoc',
+ 'nipype.sphinxext.plot_workflow',
]
autodoc_mock_imports = [
- "nilearn",
- "nitime",
- "numpy",
- "pandas",
- "seaborn",
- "skimage",
- "svgutils",
- "templateflow",
- "transforms3d",
- "yaml",
+ 'nilearn',
+ 'nitime',
+ 'numpy',
+ 'pandas',
+ 'seaborn',
+ 'skimage',
+ 'svgutils',
+ 'templateflow',
+ 'transforms3d',
+ 'yaml',
]
# Accept custom section names to be parsed for numpy-style docstrings
@@ -85,24 +85,24 @@
# https://github.com/sphinx-contrib/napoleon/pull/10 is merged.
napoleon_use_param = False
napoleon_custom_sections = [
- ("Inputs", "Parameters"),
- ("Outputs", "Parameters"),
- ("Attributes", "Parameters"),
- ("Mandatory Inputs", "Parameters"),
- ("Optional Inputs", "Parameters"),
+ ('Inputs', 'Parameters'),
+ ('Outputs', 'Parameters'),
+ ('Attributes', 'Parameters'),
+ ('Mandatory Inputs', 'Parameters'),
+ ('Optional Inputs', 'Parameters'),
]
# Add any paths that contain templates here, relative to this directory.
-templates_path = ["_templates"]
+templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
-source_suffix = ".rst"
+source_suffix = '.rst'
# The master toctree document.
-master_doc = "index"
+master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -115,11 +115,11 @@
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
- "_build",
- "Thumbs.db",
- ".DS_Store",
- "api/modules.rst",
- "api/niworkflows.rst",
+ '_build',
+ 'Thumbs.db',
+ '.DS_Store',
+ 'api/modules.rst',
+ 'api/niworkflows.rst',
]
# The name of the Pygments (syntax highlighting) style to use.
@@ -131,7 +131,7 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
-html_theme = "furo"
+html_theme = 'furo'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -142,12 +142,12 @@
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ["_static"]
+html_static_path = ['_static']
html_js_files = [
- "js/version-switch.js",
+ 'js/version-switch.js',
]
html_css_files = [
- "css/version-switch.css",
+ 'css/version-switch.css',
]
# Custom sidebar templates, must be a dictionary that maps document names
@@ -164,7 +164,7 @@
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
-htmlhelp_basename = "niworkflowsdoc"
+htmlhelp_basename = 'niworkflowsdoc'
# -- Options for LaTeX output ------------------------------------------------
@@ -190,10 +190,10 @@
latex_documents = [
(
master_doc,
- "niworkflows.tex",
- "NiWorkflows Documentation",
- "The NiPreps Developers",
- "manual",
+ 'niworkflows.tex',
+ 'NiWorkflows Documentation',
+ 'The NiPreps Developers',
+ 'manual',
),
]
@@ -202,7 +202,7 @@
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
-man_pages = [(master_doc, "niworkflows", "NiWorkflows Documentation", [author], 1)]
+man_pages = [(master_doc, 'niworkflows', 'NiWorkflows Documentation', [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
@@ -213,12 +213,12 @@
texinfo_documents = [
(
master_doc,
- "niworkflows",
- "NiWorkflows Documentation",
+ 'niworkflows',
+ 'NiWorkflows Documentation',
author,
- "NiWorkflows",
- "One line description of project.",
- "Miscellaneous",
+ 'NiWorkflows',
+ 'One line description of project.',
+ 'Miscellaneous',
),
]
@@ -238,32 +238,32 @@
# epub_uid = ''
# A list of files that should not be packed into the epub file.
-epub_exclude_files = ["search.html"]
+epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
-apidoc_module_dir = "../niworkflows"
-apidoc_output_dir = "api"
-apidoc_excluded_paths = ["conftest.py", "*/tests/*", "tests/*", "testing.py"]
+apidoc_module_dir = '../niworkflows'
+apidoc_output_dir = 'api'
+apidoc_excluded_paths = ['conftest.py', '*/tests/*', 'tests/*', 'testing.py']
apidoc_separate_modules = True
-apidoc_extra_args = ["--module-first", "-d 1", "-T"]
+apidoc_extra_args = ['--module-first', '-d 1', '-T']
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
- "bids": ("https://bids-standard.github.io/pybids/", None),
- "matplotlib": ("https://matplotlib.org/stable", None),
- "nibabel": ("https://nipy.org/nibabel/", None),
- "nipype": ("https://nipype.readthedocs.io/en/latest/", None),
- "numpy": ("https://numpy.org/doc/stable/", None),
- "pandas": ("https://pandas.pydata.org/pandas-docs/dev", None),
- "python": ("https://docs.python.org/3/", None),
- "scipy": ("https://docs.scipy.org/doc/scipy/", None),
- "smriprep": ("https://www.nipreps.org/smriprep/", None),
- "surfplot": ("https://surfplot.readthedocs.io/en/latest/", None),
- "templateflow": ("https://www.templateflow.org/python-client", None),
+ 'bids': ('https://bids-standard.github.io/pybids/', None),
+ 'matplotlib': ('https://matplotlib.org/stable', None),
+ 'nibabel': ('https://nipy.org/nibabel/', None),
+ 'nipype': ('https://nipype.readthedocs.io/en/latest/', None),
+ 'numpy': ('https://numpy.org/doc/stable/', None),
+ 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),
+ 'python': ('https://docs.python.org/3/', None),
+ 'scipy': ('https://docs.scipy.org/doc/scipy/', None),
+ 'smriprep': ('https://www.nipreps.org/smriprep/', None),
+ 'surfplot': ('https://surfplot.readthedocs.io/en/latest/', None),
+ 'templateflow': ('https://www.templateflow.org/python-client', None),
}
# -- Options for versioning extension ----------------------------------------
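For context on the MOCKS stanza at the top of this file: pre-seeding sys.modules
makes any later import of those names resolve to the stand-in instead of the real
(heavy or telemetry-triggering) package. The mechanism in isolation:

    import sys
    from unittest import mock

    # any subsequent `import matplotlib.pyplot` now resolves to a Mock
    sys.modules.update({mod: mock.Mock() for mod in ['matplotlib', 'matplotlib.pyplot']})

    import matplotlib.pyplot as plt  # no real import happens

    print(type(plt))  # a unittest.mock.Mock instance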
diff --git a/docs/sphinxext/docscrape.py b/docs/sphinxext/docscrape.py
index fb3a0b6347e..1c68502804b 100644
--- a/docs/sphinxext/docscrape.py
+++ b/docs/sphinxext/docscrape.py
@@ -1,6 +1,4 @@
-"""Extract reference documentation from the NumPy source tree.
-
-"""
+"""Extract reference documentation from the NumPy source tree."""
import inspect
import textwrap
@@ -38,7 +36,7 @@ def __init__(self, data):
if isinstance(data, list):
self._str = data
else:
- self._str = data.split("\n") # store string as list of lines
+ self._str = data.split('\n') # store string as list of lines
self.reset()
@@ -54,7 +52,7 @@ def read(self):
self._l += 1
return out
else:
- return ""
+ return ''
def seek_next_non_empty_line(self):
for l in self[self._l :]:
@@ -94,21 +92,21 @@ def peek(self, n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
- return ""
+ return ''
def is_empty(self):
- return not "".join(self._str).strip()
+ return not ''.join(self._str).strip()
class ParseError(Exception):
def __str__(self):
message = self.args[0]
- if hasattr(self, "docstring"):
- message = f"{message} in {self.docstring!r}"
+ if hasattr(self, 'docstring'):
+ message = f'{message} in {self.docstring!r}'
return message
-Parameter = namedtuple("Parameter", ["name", "type", "desc"])
+Parameter = namedtuple('Parameter', ['name', 'type', 'desc'])
class NumpyDocString(Mapping):
@@ -119,29 +117,29 @@ class NumpyDocString(Mapping):
"""
sections = {
- "Signature": "",
- "Summary": [""],
- "Extended Summary": [],
- "Parameters": [],
- "Returns": [],
- "Yields": [],
- "Receives": [],
- "Raises": [],
- "Warns": [],
- "Other Parameters": [],
- "Attributes": [],
- "Methods": [],
- "See Also": [],
- "Notes": [],
- "Warnings": [],
- "References": "",
- "Examples": "",
- "index": {},
+ 'Signature': '',
+ 'Summary': [''],
+ 'Extended Summary': [],
+ 'Parameters': [],
+ 'Returns': [],
+ 'Yields': [],
+ 'Receives': [],
+ 'Raises': [],
+ 'Warns': [],
+ 'Other Parameters': [],
+ 'Attributes': [],
+ 'Methods': [],
+ 'See Also': [],
+ 'Notes': [],
+ 'Warnings': [],
+ 'References': '',
+ 'Examples': '',
+ 'index': {},
}
def __init__(self, docstring, config=None):
orig_docstring = docstring
- docstring = textwrap.dedent(docstring).split("\n")
+ docstring = textwrap.dedent(docstring).split('\n')
self._doc = Reader(docstring)
self._parsed_data = copy.deepcopy(self.sections)
@@ -157,7 +155,7 @@ def __getitem__(self, key):
def __setitem__(self, key, val):
if key not in self._parsed_data:
- self._error_location(f"Unknown section {key}", error=False)
+ self._error_location(f'Unknown section {key}', error=False)
else:
self._parsed_data[key] = val
@@ -175,17 +173,17 @@ def _is_at_section(self):
l1 = self._doc.peek().strip() # e.g. Parameters
- if l1.startswith(".. index::"):
+ if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
- if len(l2) >= 3 and (set(l2) in ({"-"}, {"="})) and len(l2) != len(l1):
- snip = "\n".join(self._doc._str[:2]) + "..."
+ if len(l2) >= 3 and (set(l2) in ({'-'}, {'='})) and len(l2) != len(l1):
+ snip = '\n'.join(self._doc._str[:2]) + '...'
self._error_location(
- f"potentially wrong underline length... \n{l1} \n{l2} in \n{snip}",
+ f'potentially wrong underline length... \n{l1} \n{l2} in \n{snip}',
error=False,
)
- return l2.startswith("-" * len(l1)) or l2.startswith("=" * len(l1))
+ return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1))
def _strip(self, doc):
i = 0
@@ -205,7 +203,7 @@ def _read_to_next_section(self):
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
- section += [""]
+ section += ['']
section += self._doc.read_to_next_empty_line()
@@ -216,7 +214,7 @@ def _read_sections(self):
data = self._read_to_next_section()
name = data[0].strip()
- if name.startswith(".."): # index section
+ if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
@@ -229,18 +227,18 @@ def _parse_param_list(self, content, single_element_is_type=False):
params = []
while not r.eof():
header = r.read().strip()
- if " : " in header:
- arg_name, arg_type = header.split(" : ", maxsplit=1)
+ if ' : ' in header:
+ arg_name, arg_type = header.split(' : ', maxsplit=1)
else:
# NOTE: param line with single element should never have a
# a " :" before the description line, so this should probably
# warn.
- if header.endswith(" :"):
+ if header.endswith(' :'):
header = header[:-2]
if single_element_is_type:
- arg_name, arg_type = "", header
+ arg_name, arg_type = '', header
else:
- arg_name, arg_type = header, ""
+ arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
@@ -266,28 +264,28 @@ def _parse_param_list(self, content, single_element_is_type=False):
# Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`
# is a string describing the function.
- _role = r":(?P(py:)?\w+):"
- _funcbacktick = r"`(?P(?:~\w+\.)?[a-zA-Z0-9_\.-]+)`"
- _funcplain = r"(?P[a-zA-Z0-9_\.-]+)"
- _funcname = r"(" + _role + _funcbacktick + r"|" + _funcplain + r")"
- _funcnamenext = _funcname.replace("role", "rolenext")
- _funcnamenext = _funcnamenext.replace("name", "namenext")
- _description = r"(?P\s*:(\s+(?P\S+.*))?)?\s*$"
- _func_rgx = re.compile(r"^\s*" + _funcname + r"\s*")
+ _role = r':(?P(py:)?\w+):'
+ _funcbacktick = r'`(?P(?:~\w+\.)?[a-zA-Z0-9_\.-]+)`'
+ _funcplain = r'(?P[a-zA-Z0-9_\.-]+)'
+ _funcname = r'(' + _role + _funcbacktick + r'|' + _funcplain + r')'
+ _funcnamenext = _funcname.replace('role', 'rolenext')
+ _funcnamenext = _funcnamenext.replace('name', 'namenext')
+ _description = r'(?P\s*:(\s+(?P\S+.*))?)?\s*$'
+ _func_rgx = re.compile(r'^\s*' + _funcname + r'\s*')
_line_rgx = re.compile(
- r"^\s*"
- + r"(?P"
+ r'^\s*'
+ + r'(?P'
+ _funcname # group for all function names
- + r"(?P([,]\s+"
+ + r'(?P([,]\s+'
+ _funcnamenext
- + r")*)"
- + r")"
- + r"(?P[,\.])?" # end of "allfuncs"
+ + r')*)'
+ + r')'
+ + r'(?P[,\.])?' # end of "allfuncs"
+ _description # Some function lists have a trailing comma (or period) '\s*'
)
# Empty elements are replaced with '..'
- empty_description = ".."
+ empty_description = '..'
def _parse_see_also(self, content):
"""
@@ -306,9 +304,9 @@ def parse_item_name(text):
"""Match ':role:`name`' or 'name'."""
m = self._func_rgx.match(text)
if not m:
- self._error_location(f"Error parsing See Also entry {line!r}")
- role = m.group("role")
- name = m.group("name") if role else m.group("name2")
+ self._error_location(f'Error parsing See Also entry {line!r}')
+ role = m.group('role')
+ name = m.group('name') if role else m.group('name2')
return name, role, m.end()
rest = []
@@ -319,30 +317,30 @@ def parse_item_name(text):
line_match = self._line_rgx.match(line)
description = None
if line_match:
- description = line_match.group("desc")
- if line_match.group("trailing") and description:
+ description = line_match.group('desc')
+ if line_match.group('trailing') and description:
self._error_location(
- "Unexpected comma or period after function list at index %d of "
- 'line "%s"' % (line_match.end("trailing"), line),
+ 'Unexpected comma or period after function list at index %d of '
+ 'line "%s"' % (line_match.end('trailing'), line),
error=False,
)
- if not description and line.startswith(" "):
+ if not description and line.startswith(' '):
rest.append(line.strip())
elif line_match:
funcs = []
- text = line_match.group("allfuncs")
+ text = line_match.group('allfuncs')
while True:
if not text.strip():
break
name, role, match_end = parse_item_name(text)
funcs.append((name, role))
text = text[match_end:].strip()
- if text and text[0] == ",":
+ if text and text[0] == ',':
text = text[1:].strip()
rest = list(filter(None, [description]))
items.append((funcs, rest))
else:
- self._error_location(f"Error parsing See Also entry {line!r}")
+ self._error_location(f'Error parsing See Also entry {line!r}')
return items
def _parse_index(self, section, content):
@@ -356,13 +354,13 @@ def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
- section = section.split("::")
+ section = section.split('::')
if len(section) > 1:
- out["default"] = strip_each_in(section[1].split(","))[0]
+ out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
- line = line.split(":")
+ line = line.split(':')
if len(line) > 2:
- out[line[1]] = strip_each_in(line[2].split(","))
+ out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
@@ -373,19 +371,19 @@ def _parse_summary(self):
# If several signatures present, take the last one
while True:
summary = self._doc.read_to_next_empty_line()
- summary_str = " ".join([s.strip() for s in summary]).strip()
- compiled = re.compile(r"^([\w., ]+=)?\s*[\w\.]+\(.*\)$")
+ summary_str = ' '.join([s.strip() for s in summary]).strip()
+ compiled = re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$')
if compiled.match(summary_str):
- self["Signature"] = summary_str
+ self['Signature'] = summary_str
if not self._is_at_section():
continue
break
if summary is not None:
- self["Summary"] = summary
+ self['Summary'] = summary
if not self._is_at_section():
- self["Extended Summary"] = self._read_to_next_section()
+ self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
@@ -394,44 +392,42 @@ def _parse(self):
sections = list(self._read_sections())
section_names = {section for section, content in sections}
- has_returns = "Returns" in section_names
- has_yields = "Yields" in section_names
+ has_returns = 'Returns' in section_names
+ has_yields = 'Yields' in section_names
# We could do more tests, but we are not. Arbitrarily.
if has_returns and has_yields:
- msg = "Docstring contains both a Returns and Yields section."
+ msg = 'Docstring contains both a Returns and Yields section.'
raise ValueError(msg)
- if not has_yields and "Receives" in section_names:
- msg = "Docstring contains a Receives section but not Yields."
+ if not has_yields and 'Receives' in section_names:
+ msg = 'Docstring contains a Receives section but not Yields.'
raise ValueError(msg)
for section, content in sections:
- if not section.startswith(".."):
- section = (s.capitalize() for s in section.split(" "))
- section = " ".join(section)
+ if not section.startswith('..'):
+ section = (s.capitalize() for s in section.split(' '))
+ section = ' '.join(section)
if self.get(section):
self._error_location(
- "The section %s appears twice in %s"
- % (section, "\n".join(self._doc._str))
+ 'The section %s appears twice in %s'
+ % (section, '\n'.join(self._doc._str))
)
- if section in ("Parameters", "Other Parameters", "Attributes", "Methods"):
+ if section in ('Parameters', 'Other Parameters', 'Attributes', 'Methods'):
self[section] = self._parse_param_list(content)
- elif section in ("Returns", "Yields", "Raises", "Warns", "Receives"):
- self[section] = self._parse_param_list(
- content, single_element_is_type=True
- )
- elif section.startswith(".. index::"):
- self["index"] = self._parse_index(section, content)
- elif section == "See Also":
- self["See Also"] = self._parse_see_also(content)
+ elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):
+ self[section] = self._parse_param_list(content, single_element_is_type=True)
+ elif section.startswith('.. index::'):
+ self['index'] = self._parse_index(section, content)
+ elif section == 'See Also':
+ self['See Also'] = self._parse_see_also(content)
else:
self[section] = content
@property
def _obj(self):
- if hasattr(self, "_cls"):
+ if hasattr(self, '_cls'):
return self._cls
- elif hasattr(self, "_f"):
+ elif hasattr(self, '_f'):
return self._f
return None
@@ -444,12 +440,12 @@ def _error_location(self, msg, error=True):
filename = None
# Make UserWarning more descriptive via object introspection.
# Skip if introspection fails
- name = getattr(self._obj, "__name__", None)
+ name = getattr(self._obj, '__name__', None)
if name is None:
- name = getattr(getattr(self._obj, "__class__", None), "__name__", None)
+ name = getattr(getattr(self._obj, '__class__', None), '__name__', None)
if name is not None:
- msg += f" in the docstring of {name}"
- msg += f" in {filename}." if filename else ""
+ msg += f' in the docstring of {name}'
+ msg += f' in {filename}.' if filename else ''
if error:
raise ValueError(msg)
else:
@@ -457,25 +453,25 @@ def _error_location(self, msg, error=True):
# string conversion routines
- def _str_header(self, name, symbol="-"):
+ def _str_header(self, name, symbol='-'):
return [name, len(name) * symbol]
def _str_indent(self, doc, indent=4):
- return [" " * indent + line for line in doc]
+ return [' ' * indent + line for line in doc]
def _str_signature(self):
- if self["Signature"]:
- return [self["Signature"].replace("*", r"\*")] + [""]
- return [""]
+ if self['Signature']:
+ return [self['Signature'].replace('*', r'\*')] + ['']
+ return ['']
def _str_summary(self):
- if self["Summary"]:
- return self["Summary"] + [""]
+ if self['Summary']:
+ return self['Summary'] + ['']
return []
def _str_extended_summary(self):
- if self["Extended Summary"]:
- return self["Extended Summary"] + [""]
+ if self['Extended Summary']:
+ return self['Extended Summary'] + ['']
return []
def _str_param_list(self, name):
@@ -488,10 +484,10 @@ def _str_param_list(self, name):
parts.append(param.name)
if param.type:
parts.append(param.type)
- out += [" : ".join(parts)]
- if param.desc and "".join(param.desc).strip():
+ out += [' : '.join(parts)]
+ if param.desc and ''.join(param.desc).strip():
out += self._str_indent(param.desc)
- out += [""]
+ out += ['']
return out
def _str_section(self, name):
@@ -499,119 +495,119 @@ def _str_section(self, name):
if self[name]:
out += self._str_header(name)
out += self[name]
- out += [""]
+ out += ['']
return out
def _str_see_also(self, func_role):
- if not self["See Also"]:
+ if not self['See Also']:
return []
out = []
- out += self._str_header("See Also")
- out += [""]
+ out += self._str_header('See Also')
+ out += ['']
last_had_desc = True
- for funcs, desc in self["See Also"]:
+ for funcs, desc in self['See Also']:
assert isinstance(funcs, list)
links = []
for func, role in funcs:
if role:
- link = f":{role}:`{func}`"
+ link = f':{role}:`{func}`'
elif func_role:
- link = f":{func_role}:`{func}`"
+ link = f':{func_role}:`{func}`'
else:
- link = f"`{func}`_"
+ link = f'`{func}`_'
links.append(link)
- link = ", ".join(links)
+ link = ', '.join(links)
out += [link]
if desc:
- out += self._str_indent([" ".join(desc)])
+ out += self._str_indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
out += self._str_indent([self.empty_description])
if last_had_desc:
- out += [""]
- out += [""]
+ out += ['']
+ out += ['']
return out
def _str_index(self):
- idx = self["index"]
+ idx = self['index']
out = []
output_index = False
- default_index = idx.get("default", "")
+ default_index = idx.get('default', '')
if default_index:
output_index = True
- out += [f".. index:: {default_index}"]
+ out += [f'.. index:: {default_index}']
for section, references in idx.items():
- if section == "default":
+ if section == 'default':
continue
output_index = True
out += [f" :{section}: {', '.join(references)}"]
if output_index:
return out
- return ""
+ return ''
- def __str__(self, func_role=""):
+ def __str__(self, func_role=''):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in (
- "Parameters",
- "Returns",
- "Yields",
- "Receives",
- "Other Parameters",
- "Raises",
- "Warns",
+ 'Parameters',
+ 'Returns',
+ 'Yields',
+ 'Receives',
+ 'Other Parameters',
+ 'Raises',
+ 'Warns',
):
out += self._str_param_list(param_list)
- out += self._str_section("Warnings")
+ out += self._str_section('Warnings')
out += self._str_see_also(func_role)
- for s in ("Notes", "References", "Examples"):
+ for s in ('Notes', 'References', 'Examples'):
out += self._str_section(s)
- for param_list in ("Attributes", "Methods"):
+ for param_list in ('Attributes', 'Methods'):
out += self._str_param_list(param_list)
out += self._str_index()
- return "\n".join(out)
+ return '\n'.join(out)
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
- return textwrap.dedent("\n".join(lines)).split("\n")
+ return textwrap.dedent('\n'.join(lines)).split('\n')
class FunctionDoc(NumpyDocString):
- def __init__(self, func, role="func", doc=None, config=None):
+ def __init__(self, func, role='func', doc=None, config=None):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
- raise ValueError("No function or docstring given")
- doc = inspect.getdoc(func) or ""
+ raise ValueError('No function or docstring given')
+ doc = inspect.getdoc(func) or ''
if config is None:
config = {}
NumpyDocString.__init__(self, doc, config)
def get_func(self):
- func_name = getattr(self._f, "__name__", self.__class__.__name__)
+ func_name = getattr(self._f, '__name__', self.__class__.__name__)
if inspect.isclass(self._f):
- func = getattr(self._f, "__call__", self._f.__init__)
+ func = getattr(self._f, '__call__', self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
- out = ""
+ out = ''
func, func_name = self.get_func()
- roles = {"func": "function", "meth": "method"}
+ roles = {'func': 'function', 'meth': 'method'}
if self._role:
if self._role not in roles:
- print(f"Warning: invalid role {self._role}")
+ print(f'Warning: invalid role {self._role}')
out += f".. {roles.get(self._role, '')}:: {func_name}\n \n\n"
out += super().__str__(func_role=self._role)
@@ -627,39 +623,39 @@ def __init__(self, obj, doc=None, config=None):
class ClassDoc(NumpyDocString):
- extra_public_methods = ["__call__"]
+ extra_public_methods = ['__call__']
- def __init__(self, cls, doc=None, modulename="", func_doc=FunctionDoc, config=None):
+ def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, config=None):
if not inspect.isclass(cls) and cls is not None:
- raise ValueError(f"Expected a class or None, but got {cls!r}")
+ raise ValueError(f'Expected a class or None, but got {cls!r}')
self._cls = cls
- if "sphinx" in sys.modules:
+ if 'sphinx' in sys.modules:
from sphinx.ext.autodoc import ALL
else:
ALL = object()
if config is None:
config = {}
- self.show_inherited_members = config.get("show_inherited_class_members", True)
+ self.show_inherited_members = config.get('show_inherited_class_members', True)
- if modulename and not modulename.endswith("."):
- modulename += "."
+ if modulename and not modulename.endswith('.'):
+ modulename += '.'
self._mod = modulename
if doc is None:
if cls is None:
- raise ValueError("No class or documentation string given")
+ raise ValueError('No class or documentation string given')
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
- _members = config.get("members", [])
+ _members = config.get('members', [])
if _members is ALL:
_members = None
- _exclude = config.get("exclude-members", [])
+ _exclude = config.get('exclude-members', [])
- if config.get("show_class_members", True) and _exclude is not ALL:
+ if config.get('show_class_members', True) and _exclude is not ALL:
def splitlines_x(s):
if not s:
@@ -668,8 +664,8 @@ def splitlines_x(s):
return s.splitlines()
for field, items in [
- ("Methods", self.methods),
- ("Attributes", self.properties),
+ ('Methods', self.methods),
+ ('Attributes', self.properties),
]:
if not self[field]:
doc_list = []
@@ -678,7 +674,7 @@ def splitlines_x(s):
continue
try:
doc_item = pydoc.getdoc(getattr(self._cls, name))
- doc_list.append(Parameter(name, "", splitlines_x(doc_item)))
+ doc_list.append(Parameter(name, '', splitlines_x(doc_item)))
except AttributeError:
pass # method doesn't exist
self[field] = doc_list
@@ -691,7 +687,7 @@ def methods(self):
name
for name, func in inspect.getmembers(self._cls)
if (
- (not name.startswith("_") or name in self.extra_public_methods)
+ (not name.startswith('_') or name in self.extra_public_methods)
and isinstance(func, Callable)
and self._is_show_member(name)
)
@@ -705,7 +701,7 @@ def properties(self):
name
for name, func in inspect.getmembers(self._cls)
if (
- not name.startswith("_")
+ not name.startswith('_')
and not self._should_skip_member(name, self._cls)
and (
func is None
@@ -722,8 +718,8 @@ def _should_skip_member(name, klass):
# Namedtuples should skip everything in their ._fields as the
# docstrings for each of the members is: "Alias for field number X"
issubclass(klass, tuple)
- and hasattr(klass, "_asdict")
- and hasattr(klass, "_fields")
+ and hasattr(klass, '_asdict')
+ and hasattr(klass, '_fields')
and name in klass._fields
):
return True
@@ -748,19 +744,19 @@ def get_doc_object(
):
if what is None:
if inspect.isclass(obj):
- what = "class"
+ what = 'class'
elif inspect.ismodule(obj):
- what = "module"
+ what = 'module'
elif isinstance(obj, Callable):
- what = "function"
+ what = 'function'
else:
- what = "object"
+ what = 'object'
if config is None:
config = {}
- if what == "class":
+ if what == 'class':
return class_doc(obj, func_doc=func_doc, doc=doc, config=config)
- elif what in ("function", "method"):
+ elif what in ('function', 'method'):
return func_doc(obj, doc=doc, config=config)
else:
if doc is None:
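For orientation, the vendored parser above is driven roughly like this (a sketch;
assumes docs/sphinxext is on sys.path):

    from docscrape import NumpyDocString

    doc = NumpyDocString(
        """Fit a model.

        Parameters
        ----------
        x : array-like
            Input data.
        """
    )
    param = doc['Parameters'][0]   # Parameter namedtuple: (name, type, desc)
    print(param.name, param.type)  # -> x array-like
    print(param.desc)              # -> ['Input data.']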
diff --git a/docs/sphinxext/docscrape_sphinx.py b/docs/sphinxext/docscrape_sphinx.py
index 771c1ea445d..758ef866e83 100644
--- a/docs/sphinxext/docscrape_sphinx.py
+++ b/docs/sphinxext/docscrape_sphinx.py
@@ -15,7 +15,7 @@
from .xref import make_xref
-IMPORT_MATPLOTLIB_RE = r"\b(import +matplotlib|from +matplotlib +import)\b"
+IMPORT_MATPLOTLIB_RE = r'\b(import +matplotlib|from +matplotlib +import)\b'
class SphinxDocString(NumpyDocString):
@@ -26,73 +26,69 @@ def __init__(self, docstring, config=None):
self.load_config(config)
def load_config(self, config):
- self.use_plots = config.get("use_plots", False)
- self.class_members_toctree = config.get("class_members_toctree", True)
- self.attributes_as_param_list = config.get("attributes_as_param_list", True)
- self.xref_param_type = config.get("xref_param_type", False)
- self.xref_aliases = config.get("xref_aliases", dict())
- self.xref_ignore = config.get("xref_ignore", set())
- self.template = config.get("template", None)
+ self.use_plots = config.get('use_plots', False)
+ self.class_members_toctree = config.get('class_members_toctree', True)
+ self.attributes_as_param_list = config.get('attributes_as_param_list', True)
+ self.xref_param_type = config.get('xref_param_type', False)
+ self.xref_aliases = config.get('xref_aliases', dict())
+ self.xref_ignore = config.get('xref_ignore', set())
+ self.template = config.get('template', None)
if self.template is None:
- template_dirs = [os.path.join(os.path.dirname(__file__), "templates")]
+ template_dirs = [os.path.join(os.path.dirname(__file__), 'templates')]
template_loader = FileSystemLoader(template_dirs)
template_env = SandboxedEnvironment(loader=template_loader)
- self.template = template_env.get_template("numpydoc_docstring.rst")
+ self.template = template_env.get_template('numpydoc_docstring.rst')
# string conversion routines
- def _str_header(self, name, symbol="`"):
- return [".. rubric:: " + name, ""]
+ def _str_header(self, name, symbol='`'):
+ return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
- return [":" + name + ":"]
+ return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
- out += [" " * indent + line]
+ out += [' ' * indent + line]
return out
def _str_signature(self):
- return [""]
+ return ['']
def _str_summary(self):
- return self["Summary"] + [""]
+ return self['Summary'] + ['']
def _str_extended_summary(self):
- return self["Extended Summary"] + [""]
+ return self['Extended Summary'] + ['']
- def _str_returns(self, name="Returns"):
- named_fmt = "**%s** : %s"
- unnamed_fmt = "%s"
+ def _str_returns(self, name='Returns'):
+ named_fmt = '**%s** : %s'
+ unnamed_fmt = '%s'
out = []
if self[name]:
out += self._str_field_list(name)
- out += [""]
+ out += ['']
for param in self[name]:
param_type = param.type
if param_type and self.xref_param_type:
- param_type = make_xref(
- param_type, self.xref_aliases, self.xref_ignore
- )
+ param_type = make_xref(param_type, self.xref_aliases, self.xref_ignore)
if param.name:
- out += self._str_indent(
- [named_fmt % (param.name.strip(), param_type)]
- )
+ out += self._str_indent([named_fmt % (param.name.strip(), param_type)])
else:
out += self._str_indent([unnamed_fmt % param_type.strip()])
if not param.desc:
- out += self._str_indent([".."], 8)
+ out += self._str_indent(['..'], 8)
else:
out += self._str_indent(param.desc, 8)
- out += [""]
+ out += ['']
return out
def _escape_args_and_kwargs(self, name):
- if name[:2] == "**":
- return r"\*\*" + name[2:]
- elif name[:1] == "*":
- return r"\*" + name[1:]
+ if name[:2] == '**':
+ return r'\*\*' + name[2:]
+ elif name[:1] == '*':
+ return r'\*' + name[1:]
else:
return name
@@ -135,7 +131,7 @@ def _process_param(self, param, desc, fake_autosummary):
# XXX: If changing the following, please check the rendering when param
# ends with '_', e.g. 'word_'
# See https://github.com/numpy/numpydoc/pull/144
- display_param = f"**{param}**"
+ display_param = f'**{param}**'
if not fake_autosummary:
return display_param, desc
@@ -153,25 +149,25 @@ def _process_param(self, param, desc, fake_autosummary):
if not (param_obj and obj_doc):
return display_param, desc
- prefix = getattr(self, "_name", "")
+ prefix = getattr(self, '_name', '')
if prefix:
- link_prefix = f"{prefix}."
+ link_prefix = f'{prefix}.'
else:
- link_prefix = ""
+ link_prefix = ''
# Referenced object has a docstring
- display_param = f":obj:`{param} <{link_prefix}{param}>`"
+ display_param = f':obj:`{param} <{link_prefix}{param}>`'
if obj_doc:
# Overwrite desc. Take summary logic of autosummary
- desc = re.split(r"\n\s*\n", obj_doc.strip(), 1)[0]
+ desc = re.split(r'\n\s*\n', obj_doc.strip(), 1)[0]
# XXX: Should this have DOTALL?
# It does not in autosummary
- m = re.search(r"^([A-Z].*?\.)(?:\s|$)", " ".join(desc.split()))
+ m = re.search(r'^([A-Z].*?\.)(?:\s|$)', ' '.join(desc.split()))
if m:
desc = m.group(1).strip()
else:
- desc = desc.partition("\n")[0]
- desc = desc.split("\n")
+ desc = desc.partition('\n')[0]
+ desc = desc.split('\n')
return display_param, desc
def _str_param_list(self, name, fake_autosummary=False):
@@ -196,11 +192,9 @@ def _str_param_list(self, name, fake_autosummary=False):
out = []
if self[name]:
out += self._str_field_list(name)
- out += [""]
+ out += ['']
for param in self[name]:
- display_param, desc = self._process_param(
- param.name, param.desc, fake_autosummary
- )
+ display_param, desc = self._process_param(param.name, param.desc, fake_autosummary)
parts = []
if display_param:
parts.append(display_param)
@@ -208,17 +202,15 @@ def _str_param_list(self, name, fake_autosummary=False):
if param_type:
param_type = param.type
if self.xref_param_type:
- param_type = make_xref(
- param_type, self.xref_aliases, self.xref_ignore
- )
+ param_type = make_xref(param_type, self.xref_aliases, self.xref_ignore)
parts.append(param_type)
- out += self._str_indent([" : ".join(parts)])
+ out += self._str_indent([' : '.join(parts)])
if not desc:
# empty definition
- desc = [".."]
+ desc = ['..']
out += self._str_indent(desc, 8)
- out += [""]
+ out += ['']
return out
@@ -230,11 +222,11 @@ def _str_member_list(self, name):
"""
out = []
if self[name]:
- out += [f".. rubric:: {name}", ""]
- prefix = getattr(self, "_name", "")
+ out += [f'.. rubric:: {name}', '']
+ prefix = getattr(self, '_name', '')
if prefix:
- prefix = f"~{prefix}."
+ prefix = f'~{prefix}.'
autosum = []
others = []
@@ -252,138 +244,138 @@ def _str_member_list(self, name):
if param_obj and pydoc.getdoc(param_obj):
# Referenced object has a docstring
- autosum += [f" {prefix}{param.name}"]
+ autosum += [f' {prefix}{param.name}']
else:
others.append(param)
if autosum:
- out += [".. autosummary::"]
+ out += ['.. autosummary::']
if self.class_members_toctree:
- out += [" :toctree:"]
- out += [""] + autosum
+ out += [' :toctree:']
+ out += [''] + autosum
if others:
maxlen_0 = max(3, max(len(p.name) + 4 for p in others))
- hdr = "=" * maxlen_0 + " " + "=" * 10
- fmt = "%%%ds %%s " % (maxlen_0,)
- out += ["", "", hdr]
+ hdr = '=' * maxlen_0 + ' ' + '=' * 10
+ fmt = '%%%ds %%s ' % (maxlen_0,)
+ out += ['', '', hdr]
for param in others:
- name = "**" + param.name.strip() + "**"
- desc = " ".join(x.strip() for x in param.desc).strip()
+ name = '**' + param.name.strip() + '**'
+ desc = ' '.join(x.strip() for x in param.desc).strip()
if param.type:
- desc = f"({param.type}) {desc}"
+ desc = f'({param.type}) {desc}'
out += [fmt % (name, desc)]
out += [hdr]
- out += [""]
+ out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
- content = textwrap.dedent("\n".join(self[name])).split("\n")
+ content = textwrap.dedent('\n'.join(self[name])).split('\n')
out += content
- out += [""]
+ out += ['']
return out
def _str_see_also(self, func_role):
out = []
- if self["See Also"]:
+ if self['See Also']:
see_also = super()._str_see_also(func_role)
- out = [".. seealso::", ""]
+ out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
- if self["Warnings"]:
- out = [".. warning::", ""]
- out += self._str_indent(self["Warnings"])
- out += [""]
+ if self['Warnings']:
+ out = ['.. warning::', '']
+ out += self._str_indent(self['Warnings'])
+ out += ['']
return out
def _str_index(self):
- idx = self["index"]
+ idx = self['index']
out = []
if len(idx) == 0:
return out
out += [f".. index:: {idx.get('default', '')}"]
for section, references in idx.items():
- if section == "default":
+ if section == 'default':
continue
- elif section == "refguide":
+ elif section == 'refguide':
out += [f" single: {', '.join(references)}"]
else:
out += [f" {section}: {','.join(references)}"]
- out += [""]
+ out += ['']
return out
def _str_references(self):
out = []
- if self["References"]:
- out += self._str_header("References")
- if isinstance(self["References"], str):
- self["References"] = [self["References"]]
- out.extend(self["References"])
- out += [""]
+ if self['References']:
+ out += self._str_header('References')
+ if isinstance(self['References'], str):
+ self['References'] = [self['References']]
+ out.extend(self['References'])
+ out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
- out += [".. only:: latex", ""]
+ out += ['.. only:: latex', '']
items = []
- for line in self["References"]:
- m = re.match(r".. \[([a-z0-9._-]+)\]", line, re.I)
+ for line in self['References']:
+ m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
- out += [" " + ", ".join([f"[{item}]_" for item in items]), ""]
+ out += [' ' + ', '.join([f'[{item}]_' for item in items]), '']
return out
def _str_examples(self):
- examples_str = "\n".join(self["Examples"])
+ examples_str = '\n'.join(self['Examples'])
if (
self.use_plots
and re.search(IMPORT_MATPLOTLIB_RE, examples_str)
- and "plot::" not in examples_str
+ and 'plot::' not in examples_str
):
out = []
- out += self._str_header("Examples")
- out += [".. plot::", ""]
- out += self._str_indent(self["Examples"])
- out += [""]
+ out += self._str_header('Examples')
+ out += ['.. plot::', '']
+ out += self._str_indent(self['Examples'])
+ out += ['']
return out
else:
- return self._str_section("Examples")
+ return self._str_section('Examples')
- def __str__(self, indent=0, func_role="obj"):
+ def __str__(self, indent=0, func_role='obj'):
ns = {
- "signature": self._str_signature(),
- "index": self._str_index(),
- "summary": self._str_summary(),
- "extended_summary": self._str_extended_summary(),
- "parameters": self._str_param_list("Parameters"),
- "returns": self._str_returns("Returns"),
- "yields": self._str_returns("Yields"),
- "receives": self._str_returns("Receives"),
- "other_parameters": self._str_param_list("Other Parameters"),
- "raises": self._str_returns("Raises"),
- "warns": self._str_returns("Warns"),
- "warnings": self._str_warnings(),
- "see_also": self._str_see_also(func_role),
- "notes": self._str_section("Notes"),
- "references": self._str_references(),
- "examples": self._str_examples(),
- "attributes": (
- self._str_param_list("Attributes", fake_autosummary=True)
+ 'signature': self._str_signature(),
+ 'index': self._str_index(),
+ 'summary': self._str_summary(),
+ 'extended_summary': self._str_extended_summary(),
+ 'parameters': self._str_param_list('Parameters'),
+ 'returns': self._str_returns('Returns'),
+ 'yields': self._str_returns('Yields'),
+ 'receives': self._str_returns('Receives'),
+ 'other_parameters': self._str_param_list('Other Parameters'),
+ 'raises': self._str_returns('Raises'),
+ 'warns': self._str_returns('Warns'),
+ 'warnings': self._str_warnings(),
+ 'see_also': self._str_see_also(func_role),
+ 'notes': self._str_section('Notes'),
+ 'references': self._str_references(),
+ 'examples': self._str_examples(),
+ 'attributes': (
+ self._str_param_list('Attributes', fake_autosummary=True)
if self.attributes_as_param_list
- else self._str_member_list("Attributes")
+ else self._str_member_list('Attributes')
),
- "methods": self._str_member_list("Methods"),
+ 'methods': self._str_member_list('Methods'),
}
- ns = {k: "\n".join(v) for k, v in ns.items()}
+ ns = {k: '\n'.join(v) for k, v in ns.items()}
rendered = self.template.render(**ns)
- return "\n".join(self._str_indent(rendered.split("\n"), indent))
+ return '\n'.join(self._str_indent(rendered.split('\n'), indent))
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
@@ -414,14 +406,14 @@ def get_doc_object(obj, what=None, doc=None, config=None, builder=None):
if config is None:
config = {}
- template_dirs = [os.path.join(os.path.dirname(__file__), "templates")]
+ template_dirs = [os.path.join(os.path.dirname(__file__), 'templates')]
if builder is not None:
template_loader = BuiltinTemplateLoader()
template_loader.init(builder, dirs=template_dirs)
else:
template_loader = FileSystemLoader(template_dirs)
template_env = SandboxedEnvironment(loader=template_loader)
- config["template"] = template_env.get_template("numpydoc_docstring.rst")
+ config['template'] = template_env.get_template('numpydoc_docstring.rst')
return get_doc_object_orig(
obj,
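One detail worth calling out in this file: parameter names such as **kwargs must
be escaped before they reach RST, or the asterisks are parsed as emphasis. The
rule, restated standalone (mirrors _escape_args_and_kwargs above):

    def escape(name: str) -> str:
        if name[:2] == '**':
            return r'\*\*' + name[2:]
        elif name[:1] == '*':
            return r'\*' + name[1:]
        return name

    print(escape('**kwargs'))  # -> \*\*kwargs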
diff --git a/docs/sphinxext/github.py b/docs/sphinxext/github.py
index 4f74b64d648..34c3e4e68d4 100644
--- a/docs/sphinxext/github.py
+++ b/docs/sphinxext/github.py
@@ -20,6 +20,7 @@
from docutils import nodes, utils
from docutils.parsers.rst.roles import set_classes
+
def make_link_node(rawtext, app, type, slug, options):
"""Create a link to a github resource.
@@ -41,13 +42,13 @@ def make_link_node(rawtext, app, type, slug, options):
ref = base + type + '/' + slug + '/'
set_classes(options)
- prefix = "#"
+ prefix = '#'
if type == 'pull':
- prefix = "PR " + prefix
- node = nodes.reference(rawtext, prefix + utils.unescape(slug), refuri=ref,
- **options)
+ prefix = 'PR ' + prefix
+ node = nodes.reference(rawtext, prefix + utils.unescape(slug), refuri=ref, **options)
return node
+
def ghissue_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Link to a GitHub issue.
@@ -71,24 +72,27 @@ def ghissue_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
except ValueError:
msg = inliner.reporter.error(
'GitHub issue number must be a number greater than or equal to 1; '
- '"%s" is invalid.' % text, line=lineno)
+ '"%s" is invalid.' % text,
+ line=lineno,
+ )
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
app = inliner.document.settings.env.app
- #app.info('issue %r' % text)
+ # app.info('issue %r' % text)
if 'pull' in name.lower():
category = 'pull'
elif 'issue' in name.lower():
category = 'issues'
else:
msg = inliner.reporter.error(
- 'GitHub roles include "ghpull" and "ghissue", '
- '"%s" is invalid.' % name, line=lineno)
+ 'GitHub roles include "ghpull" and "ghissue", "%s" is invalid.' % name, line=lineno
+ )
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = make_link_node(rawtext, app, category, str(issue_num), options)
return [node], []
+
def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Link to a GitHub user.
@@ -105,11 +109,12 @@ def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
:param content: The directive content for customization.
"""
app = inliner.document.settings.env.app
- #app.info('user link %r' % text)
+ # app.info('user link %r' % text)
ref = 'https://www.github.com/' + text
node = nodes.reference(rawtext, text, refuri=ref, **options)
return [node], []
+
def ghcommit_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Link to a GitHub commit.
@@ -126,7 +131,7 @@ def ghcommit_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
:param content: The directive content for customization.
"""
app = inliner.document.settings.env.app
- #app.info('user link %r' % text)
+ # app.info('user link %r' % text)
try:
base = app.config.github_project_url
if not base:
@@ -143,7 +148,7 @@ def ghcommit_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
def setup(app):
"""Install the plugin.
-
+
:param app: Sphinx application context.
"""
app.info('Initializing GitHub plugin')
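These roles resolve against the consuming project's configuration; wiring them up
would look roughly like this in a conf.py (hypothetical values, and the extension
name depends on how sphinxext/ is put on the path):

    extensions = ['github']
    github_project_url = 'https://github.com/nipreps/niworkflows/'

    # then, in any .rst file:
    #   :ghissue:`101`      -> <project>/issues/101/, rendered as '#101'
    #   :ghpull:`202`       -> <project>/pull/202/,   rendered as 'PR #202'
    #   :ghuser:`some-user` -> https://www.github.com/some-user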
diff --git a/docs/sphinxext/math_dollar.py b/docs/sphinxext/math_dollar.py
index ad415deb905..d223c461493 100644
--- a/docs/sphinxext/math_dollar.py
+++ b/docs/sphinxext/math_dollar.py
@@ -1,5 +1,6 @@
import re
+
def dollars_to_math(source):
r"""
Replace dollar signs with backticks.
@@ -21,8 +22,8 @@ def dollars_to_math(source):
`f(n) = 0 \text{ if $n$ is prime}`
"""
- s = "\n".join(source)
- if s.find("$") == -1:
+ s = '\n'.join(source)
+ if s.find('$') == -1:
return
# This searches for "$blah$" inside a pair of curly braces --
# don't change these, since they're probably coming from a nested
@@ -30,25 +31,27 @@ def dollars_to_math(source):
# string, and later on we substitute the original back.
global _data
_data = {}
+
def repl(matchobj):
global _data
s = matchobj.group(0)
- t = "___XXX_REPL_%d___" % len(_data)
+ t = '___XXX_REPL_%d___' % len(_data)
_data[t] = s
return t
- s = re.sub(r"({[^{}$]*\$[^{}$]*\$[^{}]*})", repl, s)
+
+ s = re.sub(r'({[^{}$]*\$[^{}$]*\$[^{}]*})', repl, s)
# matches $...$
- dollars = re.compile(r"(?<!\$)(?<!\\)\$([^\$]+?)\$")
+ dollars = re.compile(r'(?<!\$)(?<!\\)\$([^\$]+?)\$')
diff --git a/niworkflows/anat/ants.py b/niworkflows/anat/ants.py
--- a/niworkflows/anat/ants.py
+++ b/niworkflows/anat/ants.py
- if norm.interface.version and parseversion(norm.interface.version) >= Version(
- "2.2.0"
- ):
- fixed_mask_trait += "s"
+ if norm.interface.version and parseversion(norm.interface.version) >= Version('2.2.0'):
+ fixed_mask_trait += 's'
map_brainmask = pe.Node(
- ApplyTransforms(interpolation="Gaussian"), name="map_brainmask", mem_gb=1,
+ ApplyTransforms(interpolation='Gaussian'),
+ name='map_brainmask',
+ mem_gb=1,
)
map_brainmask.inputs.input_image = str(tpl_mask_path)
@@ -329,7 +327,7 @@ def init_brain_extraction_wf(
outside_value=0,
copy_header=True,
),
- name="thr_brainmask",
+ name='thr_brainmask',
)
# Refine INU correction
@@ -344,20 +342,20 @@ def init_brain_extraction_wf(
bspline_fitting_distance=bspline_fitting_distance,
),
n_procs=omp_nthreads,
- name="inu_n4_final",
- iterfield=["input_image"],
+ name='inu_n4_final',
+ iterfield=['input_image'],
)
try:
inu_n4_final.inputs.rescale_intensities = True
except ValueError:
warn(
"N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 "
- f"({inu_n4_final.interface.version} found.) Please consider upgrading.",
+ f'({inu_n4_final.interface.version} found.) Please consider upgrading.',
UserWarning,
)
# Apply mask
- apply_mask = pe.MapNode(ApplyMask(), iterfield=["in_file"], name="apply_mask")
+ apply_mask = pe.MapNode(ApplyMask(), iterfield=['in_file'], name='apply_mask')
# fmt: off
wf.connect([
@@ -386,21 +384,18 @@ def init_brain_extraction_wf(
])
# fmt: on
- wm_tpm = (
- get_template(in_template, label="WM", suffix="probseg", **common_spec) or None
- )
+ wm_tpm = get_template(in_template, label='WM', suffix='probseg', **common_spec) or None
if wm_tpm:
map_wmmask = pe.Node(
- ApplyTransforms(interpolation="Gaussian"), name="map_wmmask", mem_gb=1,
+ ApplyTransforms(interpolation='Gaussian'),
+ name='map_wmmask',
+ mem_gb=1,
)
# Add the brain stem if it is found.
- bstem_tpm = (
- get_template(in_template, label="BS", suffix="probseg", **common_spec)
- or None
- )
+ bstem_tpm = get_template(in_template, label='BS', suffix='probseg', **common_spec) or None
if bstem_tpm:
- full_wm = pe.Node(niu.Function(function=_imsum), name="full_wm")
+ full_wm = pe.Node(niu.Function(function=_imsum), name='full_wm')
full_wm.inputs.op1 = str(wm_tpm)
full_wm.inputs.op2 = str(bstem_tpm)
# fmt: off
@@ -426,17 +421,17 @@ def init_brain_extraction_wf(
if use_laplacian:
lap_tmpl = pe.Node(
- ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True),
- name="lap_tmpl",
+ ImageMath(operation='Laplacian', op2='1.5 1', copy_header=True),
+ name='lap_tmpl',
)
lap_tmpl.inputs.op1 = tpl_target_path
lap_target = pe.Node(
- ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True),
- name="lap_target",
+ ImageMath(operation='Laplacian', op2='1.5 1', copy_header=True),
+ name='lap_target',
)
- mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")
+ mrg_tmpl = pe.Node(niu.Merge(2), name='mrg_tmpl')
mrg_tmpl.inputs.in1 = tpl_target_path
- mrg_target = pe.Node(niu.Merge(2), name="mrg_target")
+ mrg_target = pe.Node(niu.Merge(2), name='mrg_target')
# fmt: off
wf.connect([
(inu_n4, lap_target, [(("output_image", _pop), "op1")]),
@@ -498,12 +493,12 @@ def init_brain_extraction_wf(
def init_atropos_wf(
- name="atropos_wf",
+ name='atropos_wf',
use_random_seed=True,
omp_nthreads=None,
mem_gb=3.0,
padding=10,
- in_segmentation_model=tuple(ATROPOS_MODELS["T1w"].values()),
+ in_segmentation_model=tuple(ATROPOS_MODELS['T1w'].values()),
bspline_fitting_distance=200,
wm_prior=False,
):
@@ -589,30 +584,28 @@ def init_atropos_wf(
"""
wf = pe.Workflow(name)
- out_fields = ["bias_corrected", "bias_image", "out_mask", "out_segm", "out_tpms"]
+ out_fields = ['bias_corrected', 'bias_image', 'out_mask', 'out_segm', 'out_tpms']
inputnode = pe.Node(
- niu.IdentityInterface(
- fields=["in_files", "in_corrected", "in_mask", "wm_prior"]
- ),
- name="inputnode",
+ niu.IdentityInterface(fields=['in_files', 'in_corrected', 'in_mask', 'wm_prior']),
+ name='inputnode',
)
outputnode = pe.Node(
- niu.IdentityInterface(fields=["out_file"] + out_fields), name="outputnode"
+ niu.IdentityInterface(fields=['out_file'] + out_fields), name='outputnode'
)
copy_xform = pe.Node(
- CopyXForm(fields=out_fields), name="copy_xform", run_without_submitting=True
+ CopyXForm(fields=out_fields), name='copy_xform', run_without_submitting=True
)
# Morphological dilation, radius=2
dil_brainmask = pe.Node(
- ImageMath(operation="MD", op2="2", copy_header=True), name="dil_brainmask"
+ ImageMath(operation='MD', op2='2', copy_header=True), name='dil_brainmask'
)
# Get largest connected component
get_brainmask = pe.Node(
- ImageMath(operation="GetLargestComponent", copy_header=True),
- name="get_brainmask",
+ ImageMath(operation='GetLargestComponent', copy_header=True),
+ name='get_brainmask',
)
# Run atropos (core node)
@@ -620,8 +613,8 @@ def init_atropos_wf(
Atropos(
convergence_threshold=0.0,
dimension=3,
- initialization="KMeans",
- likelihood_model="Gaussian",
+ initialization='KMeans',
+ likelihood_model='Gaussian',
mrf_radius=[1, 1, 1],
mrf_smoothing_factor=0.1,
n_iterations=3,
@@ -629,42 +622,40 @@ def init_atropos_wf(
save_posteriors=True,
use_random_seed=use_random_seed,
),
- name="01_atropos",
+ name='01_atropos',
n_procs=omp_nthreads,
mem_gb=mem_gb,
)
# massage outputs
pad_segm = pe.Node(
- ImageMath(operation="PadImage", op2=f"{padding}", copy_header=False),
- name="02_pad_segm",
+ ImageMath(operation='PadImage', op2=f'{padding}', copy_header=False),
+ name='02_pad_segm',
)
pad_mask = pe.Node(
- ImageMath(operation="PadImage", op2=f"{padding}", copy_header=False),
- name="03_pad_mask",
+ ImageMath(operation='PadImage', op2=f'{padding}', copy_header=False),
+ name='03_pad_mask',
)
# Split segmentation in binary masks
sel_labels = pe.Node(
- niu.Function(
- function=_select_labels, output_names=["out_wm", "out_gm", "out_csf"]
- ),
- name="04_sel_labels",
+ niu.Function(function=_select_labels, output_names=['out_wm', 'out_gm', 'out_csf']),
+ name='04_sel_labels',
)
sel_labels.inputs.labels = list(reversed(in_segmentation_model[1:]))
# Select largest components (GM, WM)
# ImageMath ${DIMENSION} ${EXTRACTION_WM} GetLargestComponent ${EXTRACTION_WM}
- get_wm = pe.Node(ImageMath(operation="GetLargestComponent"), name="05_get_wm")
- get_gm = pe.Node(ImageMath(operation="GetLargestComponent"), name="06_get_gm")
+ get_wm = pe.Node(ImageMath(operation='GetLargestComponent'), name='05_get_wm')
+ get_gm = pe.Node(ImageMath(operation='GetLargestComponent'), name='06_get_gm')
# Fill holes and calculate intersection
# ImageMath ${DIMENSION} ${EXTRACTION_TMP} FillHoles ${EXTRACTION_GM} 2
# MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${EXTRACTION_TMP} ${EXTRACTION_GM}
- fill_gm = pe.Node(ImageMath(operation="FillHoles", op2="2"), name="07_fill_gm")
+ fill_gm = pe.Node(ImageMath(operation='FillHoles', op2='2'), name='07_fill_gm')
mult_gm = pe.Node(
- MultiplyImages(dimension=3, output_product_image="08_mult_gm.nii.gz"),
- name="08_mult_gm",
+ MultiplyImages(dimension=3, output_product_image='08_mult_gm.nii.gz'),
+ name='08_mult_gm',
)
# MultiplyImages ${DIMENSION} ${EXTRACTION_WM} ${ATROPOS_WM_CLASS_LABEL} ${EXTRACTION_WM}
@@ -673,78 +664,72 @@ def init_atropos_wf(
MultiplyImages(
dimension=3,
second_input=in_segmentation_model[-1],
- output_product_image="09_relabel_wm.nii.gz",
+ output_product_image='09_relabel_wm.nii.gz',
),
- name="09_relabel_wm",
+ name='09_relabel_wm',
)
- me_csf = pe.Node(ImageMath(operation="ME", op2="10"), name="10_me_csf")
+ me_csf = pe.Node(ImageMath(operation='ME', op2='10'), name='10_me_csf')
# ImageMath ${DIMENSION} ${EXTRACTION_GM} addtozero ${EXTRACTION_GM} ${EXTRACTION_TMP}
# MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${ATROPOS_GM_CLASS_LABEL} ${EXTRACTION_GM}
# ImageMath ${DIMENSION} ${EXTRACTION_SEGMENTATION} addtozero ${EXTRACTION_WM} ${EXTRACTION_GM}
- add_gm = pe.Node(ImageMath(operation="addtozero"), name="11_add_gm")
+ add_gm = pe.Node(ImageMath(operation='addtozero'), name='11_add_gm')
relabel_gm = pe.Node(
MultiplyImages(
dimension=3,
second_input=in_segmentation_model[-2],
- output_product_image="12_relabel_gm.nii.gz",
+ output_product_image='12_relabel_gm.nii.gz',
),
- name="12_relabel_gm",
+ name='12_relabel_gm',
)
- add_gm_wm = pe.Node(ImageMath(operation="addtozero"), name="13_add_gm_wm")
+ add_gm_wm = pe.Node(ImageMath(operation='addtozero'), name='13_add_gm_wm')
# Superstep 7
# Split segmentation in binary masks
sel_labels2 = pe.Node(
- niu.Function(function=_select_labels, output_names=["out_gm", "out_wm"]),
- name="14_sel_labels2",
+ niu.Function(function=_select_labels, output_names=['out_gm', 'out_wm']),
+ name='14_sel_labels2',
)
sel_labels2.inputs.labels = in_segmentation_model[2:]
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} addtozero ${EXTRACTION_MASK} ${EXTRACTION_TMP}
- add_7 = pe.Node(ImageMath(operation="addtozero"), name="15_add_7")
+ add_7 = pe.Node(ImageMath(operation='addtozero'), name='15_add_7')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} ME ${EXTRACTION_MASK} 2
- me_7 = pe.Node(ImageMath(operation="ME", op2="2"), name="16_me_7")
+ me_7 = pe.Node(ImageMath(operation='ME', op2='2'), name='16_me_7')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} GetLargestComponent ${EXTRACTION_MASK}
- comp_7 = pe.Node(ImageMath(operation="GetLargestComponent"), name="17_comp_7")
+ comp_7 = pe.Node(ImageMath(operation='GetLargestComponent'), name='17_comp_7')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} MD ${EXTRACTION_MASK} 4
- md_7 = pe.Node(ImageMath(operation="MD", op2="4"), name="18_md_7")
+ md_7 = pe.Node(ImageMath(operation='MD', op2='4'), name='18_md_7')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} FillHoles ${EXTRACTION_MASK} 2
- fill_7 = pe.Node(ImageMath(operation="FillHoles", op2="2"), name="19_fill_7")
+ fill_7 = pe.Node(ImageMath(operation='FillHoles', op2='2'), name='19_fill_7')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} addtozero ${EXTRACTION_MASK} \
# ${EXTRACTION_MASK_PRIOR_WARPED}
- add_7_2 = pe.Node(ImageMath(operation="addtozero"), name="20_add_7_2")
+ add_7_2 = pe.Node(ImageMath(operation='addtozero'), name='20_add_7_2')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} MD ${EXTRACTION_MASK} 5
- md_7_2 = pe.Node(ImageMath(operation="MD", op2="5"), name="21_md_7_2")
+ md_7_2 = pe.Node(ImageMath(operation='MD', op2='5'), name='21_md_7_2')
# ImageMath ${DIMENSION} ${EXTRACTION_MASK} ME ${EXTRACTION_MASK} 5
- me_7_2 = pe.Node(ImageMath(operation="ME", op2="5"), name="22_me_7_2")
+ me_7_2 = pe.Node(ImageMath(operation='ME', op2='5'), name='22_me_7_2')
# De-pad
depad_mask = pe.Node(
- ImageMath(operation="PadImage", op2="-%d" % padding), name="23_depad_mask"
+ ImageMath(operation='PadImage', op2='-%d' % padding), name='23_depad_mask'
)
depad_segm = pe.Node(
- ImageMath(operation="PadImage", op2="-%d" % padding), name="24_depad_segm"
- )
- depad_gm = pe.Node(
- ImageMath(operation="PadImage", op2="-%d" % padding), name="25_depad_gm"
- )
- depad_wm = pe.Node(
- ImageMath(operation="PadImage", op2="-%d" % padding), name="26_depad_wm"
- )
- depad_csf = pe.Node(
- ImageMath(operation="PadImage", op2="-%d" % padding), name="27_depad_csf"
+ ImageMath(operation='PadImage', op2='-%d' % padding), name='24_depad_segm'
)
+ depad_gm = pe.Node(ImageMath(operation='PadImage', op2='-%d' % padding), name='25_depad_gm')
+ depad_wm = pe.Node(ImageMath(operation='PadImage', op2='-%d' % padding), name='26_depad_wm')
+ depad_csf = pe.Node(ImageMath(operation='PadImage', op2='-%d' % padding), name='27_depad_csf')
- msk_conform = pe.Node(niu.Function(function=_conform_mask), name="msk_conform")
- merge_tpms = pe.Node(niu.Merge(in_segmentation_model[0]), name="merge_tpms")
+ msk_conform = pe.Node(niu.Function(function=_conform_mask), name='msk_conform')
+ merge_tpms = pe.Node(niu.Merge(in_segmentation_model[0]), name='merge_tpms')
- sel_wm = pe.Node(niu.Select(), name="sel_wm", run_without_submitting=True)
+ sel_wm = pe.Node(niu.Select(), name='sel_wm', run_without_submitting=True)
if not wm_prior:
sel_wm.inputs.index = in_segmentation_model[-1] - 1
copy_xform_wm = pe.Node(
- CopyXForm(fields=["wm_map"]), name="copy_xform_wm", run_without_submitting=True
+ CopyXForm(fields=['wm_map']), name='copy_xform_wm', run_without_submitting=True
)
# Refine INU correction
@@ -759,8 +744,8 @@ def init_atropos_wf(
bspline_fitting_distance=bspline_fitting_distance,
),
n_procs=omp_nthreads,
- name="inu_n4_final",
- iterfield=["input_image"],
+ name='inu_n4_final',
+ iterfield=['input_image'],
)
try:
@@ -768,12 +753,12 @@ def init_atropos_wf(
except ValueError:
warn(
"N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 "
- f"({inu_n4_final.interface.version} found.) Please consider upgrading.",
+ f'({inu_n4_final.interface.version} found.) Please consider upgrading.',
UserWarning,
)
# Apply mask
- apply_mask = pe.MapNode(ApplyMask(), iterfield=["in_file"], name="apply_mask")
+ apply_mask = pe.MapNode(ApplyMask(), iterfield=['in_file'], name='apply_mask')
# fmt: off
wf.connect([
@@ -850,12 +835,12 @@ def _argmax(in_dice):
match_wm = pe.Node(
niu.Function(function=_matchlen),
- name="match_wm",
+ name='match_wm',
run_without_submitting=True,
)
- overlap = pe.Node(FuzzyOverlap(), name="overlap", run_without_submitting=True)
+ overlap = pe.Node(FuzzyOverlap(), name='overlap', run_without_submitting=True)
- apply_wm_prior = pe.Node(niu.Function(function=_improd), name="apply_wm_prior")
+ apply_wm_prior = pe.Node(niu.Function(function=_improd), name='apply_wm_prior')
# fmt: off
wf.disconnect([
@@ -880,9 +865,9 @@ def init_n4_only_wf(
atropos_model=None,
atropos_refine=True,
atropos_use_random_seed=True,
- bids_suffix="T1w",
+ bids_suffix='T1w',
mem_gb=3.0,
- name="n4_only_wf",
+ name='n4_only_wf',
omp_nthreads=None,
):
"""
@@ -956,26 +941,24 @@ def init_n4_only_wf(
wf = pe.Workflow(name)
- inputnode = pe.Node(
- niu.IdentityInterface(fields=["in_files", "in_mask"]), name="inputnode"
- )
+ inputnode = pe.Node(niu.IdentityInterface(fields=['in_files', 'in_mask']), name='inputnode')
outputnode = pe.Node(
niu.IdentityInterface(
fields=[
- "out_file",
- "out_mask",
- "bias_corrected",
- "bias_image",
- "out_segm",
- "out_tpms",
+ 'out_file',
+ 'out_mask',
+ 'bias_corrected',
+ 'bias_image',
+ 'out_segm',
+ 'out_tpms',
]
),
- name="outputnode",
+ name='outputnode',
)
# Create brain mask
- thr_brainmask = pe.Node(Binarize(thresh_low=2), name="binarize")
+ thr_brainmask = pe.Node(Binarize(thresh_low=2), name='binarize')
# INU correction
inu_n4_final = pe.MapNode(
@@ -989,8 +972,8 @@ def init_n4_only_wf(
bspline_fitting_distance=200,
),
n_procs=omp_nthreads,
- name="inu_n4_final",
- iterfield=["input_image"],
+ name='inu_n4_final',
+ iterfield=['input_image'],
)
# Check ANTs version
@@ -999,7 +982,7 @@ def init_n4_only_wf(
except ValueError:
warn(
"N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 "
- f"({inu_n4_final.interface.version} found.) Please consider upgrading.",
+ f'({inu_n4_final.interface.version} found.) Please consider upgrading.',
UserWarning,
)
@@ -1057,11 +1040,11 @@ def _select_labels(in_segm, labels):
cwd = getcwd()
nii = nb.load(in_segm)
- label_data = np.asanyarray(nii.dataobj).astype("uint8")
+ label_data = np.asanyarray(nii.dataobj).astype('uint8')
for label in labels:
newnii = nii.__class__(np.uint8(label_data == label), nii.affine, nii.header)
- newnii.set_data_dtype("uint8")
- out_file = fname_presuffix(in_segm, suffix="_class-%02d" % label, newpath=cwd)
+ newnii.set_data_dtype('uint8')
+ out_file = fname_presuffix(in_segm, suffix='_class-%02d' % label, newpath=cwd)
newnii.to_filename(out_file)
out_files.append(out_file)
return out_files
@@ -1077,7 +1060,7 @@ def _conform_mask(in_mask, in_reference):
ref = nb.load(in_reference)
nii = nb.load(in_mask)
hdr = nii.header.copy()
- hdr.set_data_dtype("int16")
+ hdr.set_data_dtype('int16')
hdr.set_slope_inter(1, 0)
qform, qcode = ref.header.get_qform(coded=True)
@@ -1088,15 +1071,15 @@ def _conform_mask(in_mask, in_reference):
if scode is not None:
hdr.set_sform(sform, int(scode))
- if "_maths" in in_mask: # Cut the name at first _maths occurrence
- ext = "".join(Path(in_mask).suffixes)
+ if '_maths' in in_mask: # Cut the name at first _maths occurrence
+ ext = ''.join(Path(in_mask).suffixes)
basename = Path(in_mask).name
- in_mask = basename.split("_maths")[0] + ext
+ in_mask = basename.split('_maths')[0] + ext
- out_file = fname_presuffix(in_mask, suffix="_mask", newpath=str(Path()))
- nii.__class__(
- np.asanyarray(nii.dataobj).astype("int16"), ref.affine, hdr
- ).to_filename(out_file)
+ out_file = fname_presuffix(in_mask, suffix='_mask', newpath=str(Path()))
+ nii.__class__(np.asanyarray(nii.dataobj).astype('int16'), ref.affine, hdr).to_filename(
+ out_file
+ )
return out_file
@@ -1109,14 +1092,14 @@ def _imsum(op1, op2, out_file=None):
im1 = nb.load(op1)
- data = im1.get_fdata(dtype="float32") + nb.load(op2).get_fdata(dtype="float32")
+ data = im1.get_fdata(dtype='float32') + nb.load(op2).get_fdata(dtype='float32')
data /= data.max()
nii = nb.Nifti1Image(data, im1.affine, im1.header)
if out_file is None:
from pathlib import Path
- out_file = str((Path() / "summap.nii.gz").absolute())
+ out_file = str((Path() / 'summap.nii.gz').absolute())
nii.to_filename(out_file)
return out_file
@@ -1127,7 +1110,7 @@ def _improd(op1, op2, in_mask, out_file=None):
im1 = nb.load(op1)
- data = im1.get_fdata(dtype="float32") * nb.load(op2).get_fdata(dtype="float32")
+ data = im1.get_fdata(dtype='float32') * nb.load(op2).get_fdata(dtype='float32')
mskdata = nb.load(in_mask).get_fdata() > 0
data[~mskdata] = 0
data[data < 0] = 0
@@ -1138,7 +1121,7 @@ def _improd(op1, op2, in_mask, out_file=None):
if out_file is None:
from pathlib import Path
- out_file = str((Path() / "prodmap.nii.gz").absolute())
+ out_file = str((Path() / 'prodmap.nii.gz').absolute())
nii.to_filename(out_file)
return out_file
diff --git a/niworkflows/anat/coregistration.py b/niworkflows/anat/coregistration.py
index 557ec856a11..f3eabf18a33 100644
--- a/niworkflows/anat/coregistration.py
+++ b/niworkflows/anat/coregistration.py
@@ -21,20 +21,21 @@
# https://www.nipreps.org/community/licensing/
#
"""Workflow for the registration of EPI datasets to anatomical space via reconstructed surfaces."""
+
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from nipype import logging
-LOGGER = logging.getLogger("workflow")
+LOGGER = logging.getLogger('workflow')
def init_bbreg_wf(
*,
omp_nthreads,
debug=False,
- epi2t1w_init="register",
+ epi2t1w_init='register',
epi2t1w_dof=6,
- name="bbreg_wf",
+ name='bbreg_wf',
use_bbr=None,
):
"""
@@ -118,46 +119,44 @@ def init_bbreg_wf(
`bbregister` (FreeSurfer) which implements boundary-based registration [@bbr].
Co-registration was configured with {dof} degrees of freedom{reason}.
""".format(
- dof={6: "six", 9: "nine", 12: "twelve"}[epi2t1w_dof],
- reason=""
+ dof={6: 'six', 9: 'nine', 12: 'twelve'}[epi2t1w_dof],
+ reason=''
if epi2t1w_dof == 6
- else "to account for distortions remaining in the EPI reference",
+ else 'to account for distortions remaining in the EPI reference',
)
inputnode = pe.Node(
niu.IdentityInterface(
[
- "in_file",
- "fsnative2t1w_xfm",
- "subjects_dir",
- "subject_id", # BBRegister
- "t1w_dseg", # FLIRT BBR
- "t1w_brain", # FLIRT BBR
+ 'in_file',
+ 'fsnative2t1w_xfm',
+ 'subjects_dir',
+ 'subject_id', # BBRegister
+ 't1w_dseg', # FLIRT BBR
+ 't1w_brain', # FLIRT BBR
]
),
- name="inputnode",
+ name='inputnode',
)
outputnode = pe.Node(
- niu.IdentityInterface(
- ["itk_epi_to_t1w", "itk_t1w_to_epi", "out_report", "fallback"]
- ),
- name="outputnode",
+ niu.IdentityInterface(['itk_epi_to_t1w', 'itk_t1w_to_epi', 'out_report', 'fallback']),
+ name='outputnode',
)
- if epi2t1w_init not in ("register", "header"):
- raise ValueError(f"Unknown EPI-T1w initialization option: {epi2t1w_init}")
+ if epi2t1w_init not in ('register', 'header'):
+ raise ValueError(f'Unknown EPI-T1w initialization option: {epi2t1w_init}')
# For now make BBR unconditional - in the future, we can fall back to identity,
# but adding the flexibility without testing seems a bit dangerous
- if epi2t1w_init == "header":
+ if epi2t1w_init == 'header':
if use_bbr is False:
- raise ValueError("Cannot disable BBR and use header registration")
+ raise ValueError('Cannot disable BBR and use header registration')
if use_bbr is None:
- LOGGER.warning("Initializing BBR with header; affine fallback disabled")
+ LOGGER.warning('Initializing BBR with header; affine fallback disabled')
use_bbr = True
- merge_ltas = pe.Node(niu.Merge(2), name="merge_ltas", run_without_submitting=True)
- concat_xfm = pe.Node(ConcatenateXFMs(inverse=True), name="concat_xfm")
+ merge_ltas = pe.Node(niu.Merge(2), name='merge_ltas', run_without_submitting=True)
+ concat_xfm = pe.Node(ConcatenateXFMs(inverse=True), name='concat_xfm')
# fmt:off
workflow.connect([
@@ -172,10 +171,8 @@ def init_bbreg_wf(
if debug is True:
from ..interfaces.nibabel import RegridToZooms
- downsample = pe.Node(
- RegridToZooms(zooms=(4.0, 4.0, 4.0), smooth=True), name="downsample"
- )
- workflow.connect([(inputnode, downsample, [("in_file", "in_file")])])
+ downsample = pe.Node(RegridToZooms(zooms=(4.0, 4.0, 4.0), smooth=True), name='downsample')
+ workflow.connect([(inputnode, downsample, [('in_file', 'in_file')])])
mri_coreg = pe.Node(
MRICoregRPT(
@@ -185,13 +182,13 @@ def init_bbreg_wf(
linmintol=0.01,
generate_report=not use_bbr,
),
- name="mri_coreg",
+ name='mri_coreg',
n_procs=omp_nthreads,
mem_gb=5,
)
# Use mri_coreg
- if epi2t1w_init == "register":
+ if epi2t1w_init == 'register':
# fmt:off
workflow.connect([
(inputnode, mri_coreg, [("subjects_dir", "subjects_dir"),
@@ -200,9 +197,9 @@ def init_bbreg_wf(
# fmt:on
if not debug:
- workflow.connect(inputnode, "in_file", mri_coreg, "source_file")
+ workflow.connect(inputnode, 'in_file', mri_coreg, 'source_file')
else:
- workflow.connect(downsample, "out_file", mri_coreg, "source_file")
+ workflow.connect(downsample, 'out_file', mri_coreg, 'source_file')
# Short-circuit workflow building, use initial registration
if use_bbr is False:
@@ -219,12 +216,12 @@ def init_bbreg_wf(
bbregister = pe.Node(
BBRegisterRPT(
dof=epi2t1w_dof,
- contrast_type="t2",
+ contrast_type='t2',
registered_file=True,
out_lta_file=True,
generate_report=True,
),
- name="bbregister",
+ name='bbregister',
mem_gb=12,
)
@@ -236,14 +233,14 @@ def init_bbreg_wf(
# fmt:on
if not debug:
- workflow.connect(inputnode, "in_file", bbregister, "source_file")
+ workflow.connect(inputnode, 'in_file', bbregister, 'source_file')
else:
- workflow.connect(downsample, "out_file", bbregister, "source_file")
+ workflow.connect(downsample, 'out_file', bbregister, 'source_file')
- if epi2t1w_init == "header":
- bbregister.inputs.init = "header"
+ if epi2t1w_init == 'header':
+ bbregister.inputs.init = 'header'
else:
- workflow.connect([(mri_coreg, bbregister, [("out_lta_file", "init_reg_file")])])
+ workflow.connect([(mri_coreg, bbregister, [('out_lta_file', 'init_reg_file')])])
# Short-circuit workflow building, use boundary-based registration
if use_bbr is True:
@@ -258,22 +255,16 @@ def init_bbreg_wf(
return workflow
# Only reach this point if epi2t1w_init is "register" and use_bbr is None
- transforms = pe.Node(niu.Merge(2), run_without_submitting=True, name="transforms")
- reports = pe.Node(niu.Merge(2), run_without_submitting=True, name="reports")
+ transforms = pe.Node(niu.Merge(2), run_without_submitting=True, name='transforms')
+ reports = pe.Node(niu.Merge(2), run_without_submitting=True, name='reports')
lta_ras2ras = pe.MapNode(
- LTAConvert(out_lta=True), iterfield=["in_lta"], name="lta_ras2ras", mem_gb=2
- )
- compare_transforms = pe.Node(
- niu.Function(function=compare_xforms), name="compare_transforms"
+ LTAConvert(out_lta=True), iterfield=['in_lta'], name='lta_ras2ras', mem_gb=2
)
+ compare_transforms = pe.Node(niu.Function(function=compare_xforms), name='compare_transforms')
- select_transform = pe.Node(
- niu.Select(), run_without_submitting=True, name="select_transform"
- )
- select_report = pe.Node(
- niu.Select(), run_without_submitting=True, name="select_report"
- )
+ select_transform = pe.Node(niu.Select(), run_without_submitting=True, name='select_transform')
+ select_report = pe.Node(niu.Select(), run_without_submitting=True, name='select_report')
# fmt:off
workflow.connect([
diff --git a/niworkflows/anat/freesurfer.py b/niworkflows/anat/freesurfer.py
index aba48ede7e0..8109706a368 100644
--- a/niworkflows/anat/freesurfer.py
+++ b/niworkflows/anat/freesurfer.py
@@ -35,9 +35,7 @@
from ..interfaces.surf import NormalizeSurf
-def init_gifti_surface_wf(
- name="gifti_surface_wf", subjects_dir=getenv("SUBJECTS_DIR", None)
-):
+def init_gifti_surface_wf(name='gifti_surface_wf', subjects_dir=getenv('SUBJECTS_DIR', None)):
"""
Build a Nipype workflow to prepare GIFTI surfaces from FreeSurfer.
@@ -85,49 +83,45 @@ def init_gifti_surface_wf(
"""
if subjects_dir is None:
- raise RuntimeError("``$SUBJECTS_DIR`` must be set")
+ raise RuntimeError('``$SUBJECTS_DIR`` must be set')
workflow = pe.Workflow(name=name)
- inputnode = pe.Node(
- niu.IdentityInterface(["in_t1w", "subject_id"]), name="inputnode"
- )
+ inputnode = pe.Node(niu.IdentityInterface(['in_t1w', 'subject_id']), name='inputnode')
outputnode = pe.Node(
- niu.IdentityInterface(["surfaces", "surf_norm", "fsnative_to_t1w_xfm"]),
- name="outputnode",
+ niu.IdentityInterface(['surfaces', 'surf_norm', 'fsnative_to_t1w_xfm']),
+ name='outputnode',
)
fssource = pe.Node(
nio.FreeSurferSource(subjects_dir=subjects_dir),
- name="fssource",
+ name='fssource',
run_without_submitting=True,
)
fsnative_2_t1_xfm = pe.Node(
- RobustRegister(auto_sens=True, est_int_scale=True), name="fsnative_2_t1_xfm"
+ RobustRegister(auto_sens=True, est_int_scale=True), name='fsnative_2_t1_xfm'
)
midthickness = pe.MapNode(
- MakeMidthickness(thickness=True, distance=0.5, out_name="midthickness"),
- iterfield="in_file",
- name="midthickness",
+ MakeMidthickness(thickness=True, distance=0.5, out_name='midthickness'),
+ iterfield='in_file',
+ name='midthickness',
)
save_midthickness = pe.Node(
nio.DataSink(parameterization=False, base_directory=subjects_dir),
- name="save_midthickness",
+ name='save_midthickness',
run_without_submitting=True,
)
surface_list = pe.Node(
niu.Merge(4, ravel_inputs=True),
- name="surface_list",
+ name='surface_list',
run_without_submitting=True,
)
- fs_2_gii = pe.MapNode(
- fs.MRIsConvert(out_datatype="gii"), iterfield="in_file", name="fs_2_gii"
- )
- fix_surfs = pe.MapNode(NormalizeSurf(), iterfield="in_file", name="fix_surfs")
+ fs_2_gii = pe.MapNode(fs.MRIsConvert(out_datatype='gii'), iterfield='in_file', name='fs_2_gii')
+ fix_surfs = pe.MapNode(NormalizeSurf(), iterfield='in_file', name='fix_surfs')
# fmt: off
workflow.connect([
diff --git a/niworkflows/anat/skullstrip.py b/niworkflows/anat/skullstrip.py
index 7f2f54f39ea..88ea10acd58 100644
--- a/niworkflows/anat/skullstrip.py
+++ b/niworkflows/anat/skullstrip.py
@@ -21,13 +21,14 @@
# https://www.nipreps.org/community/licensing/
#
"""Brain extraction workflows."""
+
from nipype.interfaces import afni, utility as niu
from nipype.pipeline import engine as pe
from ..interfaces.nibabel import Binarize
from ..interfaces.fixes import FixN4BiasFieldCorrection as N4BiasFieldCorrection
-def afni_wf(name="AFNISkullStripWorkflow", unifize=False, n4_nthreads=1):
+def afni_wf(name='AFNISkullStripWorkflow', unifize=False, n4_nthreads=1):
"""
Create a skull-stripping workflow based on AFNI's tools.
@@ -72,12 +73,10 @@ def afni_wf(name="AFNISkullStripWorkflow", unifize=False, n4_nthreads=1):
"""
workflow = pe.Workflow(name=name)
- inputnode = pe.Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode")
+ inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']), name='inputnode')
outputnode = pe.Node(
- niu.IdentityInterface(
- fields=["bias_corrected", "out_file", "out_mask", "bias_image"]
- ),
- name="outputnode",
+ niu.IdentityInterface(fields=['bias_corrected', 'out_file', 'out_mask', 'bias_image']),
+ name='outputnode',
)
inu_n4 = pe.Node(
@@ -89,22 +88,20 @@ def afni_wf(name="AFNISkullStripWorkflow", unifize=False, n4_nthreads=1):
copy_header=True,
),
n_procs=n4_nthreads,
- name="inu_n4",
+ name='inu_n4',
)
- sstrip = pe.Node(afni.SkullStrip(outputtype="NIFTI_GZ"), name="skullstrip")
+ sstrip = pe.Node(afni.SkullStrip(outputtype='NIFTI_GZ'), name='skullstrip')
sstrip_orig_vol = pe.Node(
- afni.Calc(expr="a*step(b)", outputtype="NIFTI_GZ"), name="sstrip_orig_vol"
+ afni.Calc(expr='a*step(b)', outputtype='NIFTI_GZ'), name='sstrip_orig_vol'
)
- binarize = pe.Node(Binarize(thresh_low=0.0), name="binarize")
+ binarize = pe.Node(Binarize(thresh_low=0.0), name='binarize')
if unifize:
# Add two unifize steps, pre- and post- skullstripping.
- inu_uni_0 = pe.Node(
- afni.Unifize(outputtype="NIFTI_GZ"), name="unifize_pre_skullstrip"
- )
+ inu_uni_0 = pe.Node(afni.Unifize(outputtype='NIFTI_GZ'), name='unifize_pre_skullstrip')
inu_uni_1 = pe.Node(
- afni.Unifize(gm=True, outputtype="NIFTI_GZ"), name="unifize_post_skullstrip"
+ afni.Unifize(gm=True, outputtype='NIFTI_GZ'), name='unifize_post_skullstrip'
)
# fmt: off
workflow.connect([
diff --git a/niworkflows/cli/boldref.py b/niworkflows/cli/boldref.py
index c329680f00f..8dce5e20ca4 100644
--- a/niworkflows/cli/boldref.py
+++ b/niworkflows/cli/boldref.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Run the BOLD reference+mask workflow"""
+
import os
@@ -32,37 +33,37 @@ def get_parser():
parser = ArgumentParser(
description="""NiWorkflows Utilities""", formatter_class=RawTextHelpFormatter
)
- subparsers = parser.add_subparsers(dest="command")
+ subparsers = parser.add_subparsers(dest='command')
be_parser = subparsers.add_parser(
- "brain-extract",
+ 'brain-extract',
formatter_class=RawDescriptionHelpFormatter,
description="""Execute brain extraction and related operations (e.g., \
intensity nonuniformity correction, robust averaging, etc.)""",
)
- be_parser.add_argument("input_file", action="store", help="the input file")
- be_parser.add_argument("out_path", action="store", help="the output directory")
+ be_parser.add_argument('input_file', action='store', help='the input file')
+ be_parser.add_argument('out_path', action='store', help='the output directory')
be_parser.add_argument(
- "--modality",
- "-m",
- action="store",
- choices=("bold", "t1w"),
- default="bold",
- help="the input file",
+ '--modality',
+ '-m',
+ action='store',
+ choices=('bold', 't1w'),
+ default='bold',
+ help='the input file',
)
parser.add_argument(
- "--omp-nthreads",
- action="store",
+ '--omp-nthreads',
+ action='store',
type=int,
default=os.cpu_count(),
- help="Number of CPUs available to individual processes",
+ help='Number of CPUs available to individual processes',
)
parser.add_argument(
- "--nprocs",
- action="store",
+ '--nprocs',
+ action='store',
type=int,
default=os.cpu_count(),
- help="Number of processes that may run in parallel",
+ help='Number of processes that may run in parallel',
)
return parser
@@ -76,20 +77,22 @@ def main(args=None):
opts = get_parser().parse_args(args=args)
wf = init_bold_reference_wf(
- opts.omp_nthreads, gen_report=True, name=hash_infile(opts.input_file),
+ opts.omp_nthreads,
+ gen_report=True,
+ name=hash_infile(opts.input_file),
)
wf.inputs.inputnode.bold_file = opts.input_file
wf.base_dir = os.getcwd()
plugin = {
- "plugin": "MultiProc",
- "plugin_args": {"nprocs": opts.nprocs},
+ 'plugin': 'MultiProc',
+ 'plugin_args': {'nprocs': opts.nprocs},
}
if opts.nprocs < 2:
- plugin = {"plugin": "Linear"}
+ plugin = {'plugin': 'Linear'}
wf.run(**plugin)
-if __name__ == "__main__":
+if __name__ == '__main__':
from sys import argv
main(args=argv[1:])
diff --git a/niworkflows/conftest.py b/niworkflows/conftest.py
index 75a19fe26db..747ffb6f166 100644
--- a/niworkflows/conftest.py
+++ b/niworkflows/conftest.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""py.test configuration"""
+
import os
from sys import version_info
from pathlib import Path
@@ -44,16 +45,16 @@
def find_resource_or_skip(resource):
pathlike = load_resource(resource)
if not pathlike.exists():
- pytest.skip(f"Missing resource {resource}; run this test from a source repository")
+ pytest.skip(f'Missing resource {resource}; run this test from a source repository')
return pathlike
-@pytest.fixture(scope="session", autouse=True)
+@pytest.fixture(scope='session', autouse=True)
def legacy_printoptions():
from packaging.version import Version
- if Version(np.__version__) >= Version("1.22"):
- np.set_printoptions(legacy="1.21")
+ if Version(np.__version__) >= Version('1.22'):
+ np.set_printoptions(legacy='1.21')
@pytest.fixture(autouse=True)
@@ -61,30 +62,30 @@ def add_np(doctest_namespace):
from .utils.bids import collect_data
from .testing import data_dir, data_dir_canary
- doctest_namespace["PY_VERSION"] = version_info
- doctest_namespace["np"] = np
- doctest_namespace["nb"] = nb
- doctest_namespace["pd"] = pd
- doctest_namespace["os"] = os
- doctest_namespace["pytest"] = pytest
- doctest_namespace["importlib_resources"] = importlib_resources
- doctest_namespace["find_resource_or_skip"] = find_resource_or_skip
- doctest_namespace["Path"] = Path
- doctest_namespace["datadir"] = data_dir
- doctest_namespace["data_dir_canary"] = data_dir_canary
- doctest_namespace["bids_collect_data"] = collect_data
- doctest_namespace["test_data"] = load_resource('tests/data')
+ doctest_namespace['PY_VERSION'] = version_info
+ doctest_namespace['np'] = np
+ doctest_namespace['nb'] = nb
+ doctest_namespace['pd'] = pd
+ doctest_namespace['os'] = os
+ doctest_namespace['pytest'] = pytest
+ doctest_namespace['importlib_resources'] = importlib_resources
+ doctest_namespace['find_resource_or_skip'] = find_resource_or_skip
+ doctest_namespace['Path'] = Path
+ doctest_namespace['datadir'] = data_dir
+ doctest_namespace['data_dir_canary'] = data_dir_canary
+ doctest_namespace['bids_collect_data'] = collect_data
+ doctest_namespace['test_data'] = load_resource('tests/data')
tmpdir = tempfile.TemporaryDirectory()
- doctest_namespace["tmpdir"] = tmpdir.name
+ doctest_namespace['tmpdir'] = tmpdir.name
- nifti_fname = str(Path(tmpdir.name) / "test.nii.gz")
- nii = nb.Nifti1Image(np.random.random((5, 5)).astype("f4"), np.eye(4))
+ nifti_fname = str(Path(tmpdir.name) / 'test.nii.gz')
+ nii = nb.Nifti1Image(np.random.random((5, 5)).astype('f4'), np.eye(4))
nii.header.set_qform(np.diag([1, 1, 1, 1]), code=1)
nii.header.set_sform(np.diag([-1, 1, 1, 1]), code=1)
nii.to_filename(nifti_fname)
- doctest_namespace["nifti_fname"] = nifti_fname
+ doctest_namespace['nifti_fname'] = nifti_fname
cwd = os.getcwd()
os.chdir(tmpdir.name)
@@ -96,23 +97,27 @@ def add_np(doctest_namespace):
@pytest.fixture
def testdata_dir():
from .testing import data_dir
+
return data_dir
@pytest.fixture
def ds000030_dir():
from .testing import test_data_env, data_env_canary
+
data_env_canary()
- return Path(test_data_env) / "ds000030"
+ return Path(test_data_env) / 'ds000030'
@pytest.fixture
def workdir():
from .testing import test_workdir
+
return None if test_workdir is None else Path(test_workdir)
@pytest.fixture
def outdir():
from .testing import test_output_dir
+
return None if test_output_dir is None else Path(test_output_dir)
diff --git a/niworkflows/engine/__init__.py b/niworkflows/engine/__init__.py
index c8a53565712..c1d0e12fd85 100644
--- a/niworkflows/engine/__init__.py
+++ b/niworkflows/engine/__init__.py
@@ -4,4 +4,5 @@
"""
The fmriprep reporting engine for visual assessment
"""
+
from .workflows import LiterateWorkflow as Workflow
diff --git a/niworkflows/engine/plugin.py b/niworkflows/engine/plugin.py
index 14c2547f0c3..407c215476b 100644
--- a/niworkflows/engine/plugin.py
+++ b/niworkflows/engine/plugin.py
@@ -58,10 +58,10 @@ def run_node(node, updatehash, taskid):
# Try and execute the node via node.run()
try:
- result["result"] = node.run(updatehash=updatehash)
+ result['result'] = node.run(updatehash=updatehash)
except: # noqa: E722, intendedly catch all here
- result["traceback"] = format_exception(*sys.exc_info())
- result["result"] = node.result
+ result['traceback'] = format_exception(*sys.exc_info())
+ result['result'] = node.result
# Return the result dictionary
return result
@@ -76,7 +76,7 @@ def __init__(self, plugin_args=None):
plugin_args = {}
self.plugin_args = plugin_args
self._config = None
- self._status_callback = plugin_args.get("status_callback")
+ self._status_callback = plugin_args.get('status_callback')
def run(self, graph, config, updatehash=False):
"""
@@ -143,7 +143,7 @@ def __init__(self, plugin_args=None):
self.proc_done = None
self.proc_pending = None
self.pending_tasks = []
- self.max_jobs = self.plugin_args.get("max_jobs", None)
+ self.max_jobs = self.plugin_args.get('max_jobs', None)
def _prerun_check(self, graph):
"""Stub method to validate/massage graph and nodes before running."""
@@ -156,7 +156,7 @@ def run(self, graph, config, updatehash=False):
import numpy as np
self._config = config
- poll_sleep_secs = float(config["execution"]["poll_sleep_duration"])
+ poll_sleep_secs = float(config['execution']['poll_sleep_duration'])
self._prerun_check(graph)
# Generate appropriate structures for worker-manager model
@@ -180,9 +180,9 @@ def run(self, graph, config, updatehash=False):
errors.append(exc)
else:
if result:
- if result["traceback"]:
+ if result['traceback']:
notrun.append(self._clean_queue(jobid, graph, result=result))
- errors.append("".join(result["traceback"]))
+ errors.append(''.join(result['traceback']))
else:
self._task_finished_cb(jobid)
self._remove_node_dirs()
@@ -214,7 +214,7 @@ def run(self, graph, config, updatehash=False):
if len(errors) > 1:
error, cause = (
- RuntimeError(f"{len(errors)} raised. Re-raising first."),
+ RuntimeError(f'{len(errors)} raised. Re-raising first.'),
error,
)
@@ -231,8 +231,8 @@ def _report_crash(self, node, result=None):
tb = None
if result is not None:
- node._result = result["result"]
- tb = result["traceback"]
+ node._result = result['result']
+ tb = result['traceback']
node._traceback = tb
return report_crash(node, traceback=tb)
@@ -241,16 +241,16 @@ def _clear_task(self, taskid):
def _clean_queue(self, jobid, graph, result=None):
if self._status_callback:
- self._status_callback(self.procs[jobid], "exception")
+ self._status_callback(self.procs[jobid], 'exception')
if result is None:
result = {
- "result": None,
- "traceback": "\n".join(format_exception(*sys.exc_info())),
+ 'result': None,
+ 'traceback': '\n'.join(format_exception(*sys.exc_info())),
}
crashfile = self._report_crash(self.procs[jobid], result=result)
- if str2bool(self._config["execution"]["stop_on_first_crash"]):
- raise RuntimeError("".join(result["traceback"]))
+ if str2bool(self._config['execution']['stop_on_first_crash']):
+ raise RuntimeError(''.join(result['traceback']))
if jobid in self.mapnodesubids:
# remove current jobid
self.proc_pending[jobid] = False
@@ -279,11 +279,11 @@ def _submit_mapnode(self, jobid):
self.procs.extend(mapnodesubids)
self.depidx = ssp.vstack(
(self.depidx, ssp.lil_matrix(np.zeros((numnodes, self.depidx.shape[1])))),
- "lil",
+ 'lil',
)
self.depidx = ssp.hstack(
(self.depidx, ssp.lil_matrix(np.zeros((self.depidx.shape[0], numnodes)))),
- "lil",
+ 'lil',
)
self.depidx[-numnodes:, jobid] = 1
self.proc_done = np.concatenate((self.proc_done, np.zeros(numnodes, dtype=bool)))
@@ -291,7 +291,7 @@ def _submit_mapnode(self, jobid):
return False
def _local_hash_check(self, jobid, graph):
- if not str2bool(self.procs[jobid].config["execution"]["local_hash_check"]):
+ if not str2bool(self.procs[jobid].config['execution']['local_hash_check']):
return False
try:
@@ -319,7 +319,7 @@ def _task_finished_cb(self, jobid, cached=False):
This is called when a job is completed.
"""
if self._status_callback:
- self._status_callback(self.procs[jobid], "end")
+ self._status_callback(self.procs[jobid], 'end')
# Update job and worker queues
self.proc_pending[jobid] = False
# update the job dependency structure
@@ -342,7 +342,7 @@ def _generate_dependency_list(self, graph):
from networkx import to_scipy_sparse_matrix as to_scipy_sparse_array
self.procs, _ = topological_sort(graph)
- self.depidx = to_scipy_sparse_array(graph, nodelist=self.procs, format="lil")
+ self.depidx = to_scipy_sparse_array(graph, nodelist=self.procs, format='lil')
self.refidx = self.depidx.astype(int)
self.proc_done = np.zeros(len(self.procs), dtype=bool)
self.proc_pending = np.zeros(len(self.procs), dtype=bool)
@@ -366,7 +366,7 @@ def _remove_node_dirs(self):
import numpy as np
from shutil import rmtree
- if str2bool(self._config["execution"]["remove_node_directories"]):
+ if str2bool(self._config['execution']['remove_node_directories']):
indices = np.nonzero((self.refidx.sum(axis=1) == 0).__array__())[0]
for idx in indices:
if idx in self.mapnodesubids:
@@ -430,7 +430,7 @@ def __init__(self, pool=None, plugin_args=None):
# Retrieve a nipreps-style configuration object
try:
- config = plugin_args["app_config"]
+ config = plugin_args['app_config']
except (KeyError, TypeError):
from types import SimpleNamespace
from nipype.utils.profiler import get_system_total_memory_gb
@@ -447,15 +447,15 @@ def __init__(self, pool=None, plugin_args=None):
)
# Read in options or set defaults.
- self.processors = self.plugin_args.get("n_procs", mp.cpu_count())
+ self.processors = self.plugin_args.get('n_procs', mp.cpu_count())
self.memory_gb = self.plugin_args.get(
- "memory_gb", # Allocate 90% of system memory
+ 'memory_gb', # Allocate 90% of system memory
config.environment.total_memory * 0.9,
)
- self.raise_insufficient = self.plugin_args.get("raise_insufficient", False)
+ self.raise_insufficient = self.plugin_args.get('raise_insufficient', False)
# Instantiate different thread pools for non-daemon processes
- mp_context = mp.get_context(self.plugin_args.get("mp_context"))
+ mp_context = mp.get_context(self.plugin_args.get('mp_context'))
self.pool = pool or ProcessPoolExecutor(
max_workers=self.processors,
initializer=config._process_initializer,
@@ -467,7 +467,7 @@ def __init__(self, pool=None, plugin_args=None):
def _async_callback(self, args):
result = args.result()
- self._taskresult[result["taskid"]] = result
+ self._taskresult[result['taskid']] = result
def _get_result(self, taskid):
return self._taskresult.get(taskid)
@@ -479,8 +479,8 @@ def _submit_job(self, node, updatehash=False):
self._taskid += 1
# Don't allow streaming outputs
- if getattr(node.interface, "terminal_output", "") == "stream":
- node.interface.terminal_output = "allatonce"
+ if getattr(node.interface, 'terminal_output', '') == 'stream':
+ node.interface.terminal_output = 'allatonce'
result_future = self.pool.submit(run_node, node, updatehash, self._taskid)
result_future.add_done_callback(self._async_callback)
@@ -501,7 +501,7 @@ def _prerun_check(self, graph):
np.any(np.array(tasks_mem_gb) > self.memory_gb)
or np.any(np.array(tasks_num_th) > self.processors)
):
- raise RuntimeError("Insufficient resources available for job")
+ raise RuntimeError('Insufficient resources available for job')
def _postrun_check(self):
self.pool.shutdown()
@@ -545,7 +545,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
if len(jobids) + len(self.pending_tasks) == 0:
return
- jobids = self._sort_jobs(jobids, scheduler=self.plugin_args.get("scheduler"))
+ jobids = self._sort_jobs(jobids, scheduler=self.plugin_args.get('scheduler'))
# Run garbage collector before potentially submitting jobs
gc.collect()
@@ -553,13 +553,13 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
# Submit jobs
for jobid in jobids:
# First expand mapnodes
- if self.procs[jobid].__class__.__name__ == "MapNode":
+ if self.procs[jobid].__class__.__name__ == 'MapNode':
try:
num_subnodes = self.procs[jobid].num_subnodes()
except Exception:
traceback = format_exception(*sys.exc_info())
self._clean_queue(
- jobid, graph, result={"result": None, "traceback": traceback}
+ jobid, graph, result={'result': None, 'traceback': traceback}
)
self.proc_pending[jobid] = False
continue
@@ -593,7 +593,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
except Exception:
traceback = format_exception(*sys.exc_info())
self._clean_queue(
- jobid, graph, result={"result": None, "traceback": traceback}
+ jobid, graph, result={'result': None, 'traceback': traceback}
)
# Release resources
@@ -611,7 +611,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
# Task should be submitted to workers
# Send job to task manager and add to pending tasks
if self._status_callback:
- self._status_callback(self.procs[jobid], "start")
+ self._status_callback(self.procs[jobid], 'start')
tid = self._submit_job(deepcopy(self.procs[jobid]), updatehash=updatehash)
if tid is None:
self.proc_done[jobid] = False
@@ -621,8 +621,8 @@ def _send_procs_to_workers(self, updatehash=False, graph=None):
# Display stats next loop
self._stats = None
- def _sort_jobs(self, jobids, scheduler="tsort"):
- if scheduler == "mem_thread":
+ def _sort_jobs(self, jobids, scheduler='tsort'):
+ if scheduler == 'mem_thread':
return sorted(
jobids,
key=lambda item: (self.procs[item].mem_gb, self.procs[item].n_procs),
diff --git a/niworkflows/engine/tests/test_plugin.py b/niworkflows/engine/tests/test_plugin.py
index 6956ba19c6f..f8e2ad345b0 100644
--- a/niworkflows/engine/tests/test_plugin.py
+++ b/niworkflows/engine/tests/test_plugin.py
@@ -21,36 +21,36 @@ def addall(inlist):
@pytest.fixture
def workflow(tmp_path):
- workflow = pe.Workflow(name="test_wf", base_dir=tmp_path)
+ workflow = pe.Workflow(name='test_wf', base_dir=tmp_path)
- inputnode = pe.Node(niu.IdentityInterface(fields=["x", "y"]), name="inputnode")
- outputnode = pe.Node(niu.IdentityInterface(fields=["z"]), name="outputnode")
+ inputnode = pe.Node(niu.IdentityInterface(fields=['x', 'y']), name='inputnode')
+ outputnode = pe.Node(niu.IdentityInterface(fields=['z']), name='outputnode')
# Generate many nodes and claim a lot of memory
add_nd = pe.MapNode(
- niu.Function(function=add, input_names=["x", "y"], output_names=["z"]),
- name="add",
- iterfield=["x"],
+ niu.Function(function=add, input_names=['x', 'y'], output_names=['z']),
+ name='add',
+ iterfield=['x'],
mem_gb=0.8,
)
# Regular node
- sum_nd = pe.Node(niu.Function(function=addall, input_names=["inlist"]), name="sum")
+ sum_nd = pe.Node(niu.Function(function=addall, input_names=['inlist']), name='sum')
# Run without submitting is another code path
add_more_nd = pe.Node(
- niu.Function(function=add, input_names=["x", "y"], output_names=["z"]),
- name="add_more",
+ niu.Function(function=add, input_names=['x', 'y'], output_names=['z']),
+ name='add_more',
run_without_submitting=True,
)
workflow.connect(
[
- (inputnode, add_nd, [("x", "x"), ("y", "y")]),
- (add_nd, sum_nd, [("z", "inlist")]),
- (sum_nd, add_more_nd, [("out", "x")]),
- (inputnode, add_more_nd, [("y", "y")]),
- (add_more_nd, outputnode, [("z", "z")]),
+ (inputnode, add_nd, [('x', 'x'), ('y', 'y')]),
+ (add_nd, sum_nd, [('z', 'inlist')]),
+ (sum_nd, add_more_nd, [('out', 'x')]),
+ (inputnode, add_more_nd, [('y', 'y')]),
+ (add_more_nd, outputnode, [('z', 'z')]),
]
)
@@ -58,42 +58,40 @@ def workflow(tmp_path):
inputnode.inputs.y = 4
# Avoid unnecessary sleeps
- workflow.config["execution"]["poll_sleep_duration"] = 0
+ workflow.config['execution']['poll_sleep_duration'] = 0
return workflow
def test_plugin_defaults(workflow, caplog):
"""Test the plugin works without any arguments."""
- caplog.set_level(logging.CRITICAL, logger="nipype.workflow")
+ caplog.set_level(logging.CRITICAL, logger='nipype.workflow')
workflow.run(plugin=MultiProcPlugin())
def test_plugin_args_noconfig(workflow, caplog):
"""Test the plugin works with typical nipype arguments."""
- caplog.set_level(logging.CRITICAL, logger="nipype.workflow")
- workflow.run(plugin=MultiProcPlugin(plugin_args={"n_procs": 2, "memory_gb": 0.1}))
+ caplog.set_level(logging.CRITICAL, logger='nipype.workflow')
+ workflow.run(plugin=MultiProcPlugin(plugin_args={'n_procs': 2, 'memory_gb': 0.1}))
def touch_file(file_path: str) -> None:
"""Module-level functions play more nicely with multiprocessing."""
- with open(file_path, "w") as f:
- f.write("flag")
+ with open(file_path, 'w') as f:
+ f.write('flag')
def test_plugin_app_config(tmp_path, workflow, caplog):
"""Test the plugin works with a nipreps-style configuration."""
- init_flag = tmp_path / "init_flag.txt"
+ init_flag = tmp_path / 'init_flag.txt'
app_config = SimpleNamespace(
environment=SimpleNamespace(total_memory=1),
_process_initializer=touch_file,
file_path=str(init_flag),
)
- caplog.set_level(logging.INFO, logger="nipype.workflow")
- workflow.run(
- plugin=MultiProcPlugin(plugin_args={"n_procs": 2, "app_config": app_config})
- )
+ caplog.set_level(logging.INFO, logger='nipype.workflow')
+ workflow.run(plugin=MultiProcPlugin(plugin_args={'n_procs': 2, 'app_config': app_config}))
- assert init_flag.exists() and init_flag.read_text() == "flag"
+ assert init_flag.exists() and init_flag.read_text() == 'flag'
diff --git a/niworkflows/engine/tests/test_workflows.py b/niworkflows/engine/tests/test_workflows.py
index 5d6ff8404d3..5a56f73e5fe 100644
--- a/niworkflows/engine/tests/test_workflows.py
+++ b/niworkflows/engine/tests/test_workflows.py
@@ -21,26 +21,25 @@
# https://www.nipreps.org/community/licensing/
#
"""Test the LiterateWorkflow."""
+
from nipype.pipeline.engine import Node
from nipype.interfaces import afni, utility as niu
from ..workflows import LiterateWorkflow as Workflow
-def _reorient_wf(name="ReorientWorkflow"):
+def _reorient_wf(name='ReorientWorkflow'):
"""A workflow to reorient images to 'RPI' orientation."""
workflow = Workflow(name=name)
- workflow.__desc__ = "Inner workflow. "
- inputnode = Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode")
- outputnode = Node(niu.IdentityInterface(fields=["out_file"]), name="outputnode")
- deoblique = Node(afni.Refit(deoblique=True), name="deoblique")
- reorient = Node(
- afni.Resample(orientation="RPI", outputtype="NIFTI_GZ"), name="reorient"
- )
+ workflow.__desc__ = 'Inner workflow. '
+ inputnode = Node(niu.IdentityInterface(fields=['in_file']), name='inputnode')
+ outputnode = Node(niu.IdentityInterface(fields=['out_file']), name='outputnode')
+ deoblique = Node(afni.Refit(deoblique=True), name='deoblique')
+ reorient = Node(afni.Resample(orientation='RPI', outputtype='NIFTI_GZ'), name='reorient')
workflow.connect(
[
- (inputnode, deoblique, [("in_file", "in_file")]),
- (deoblique, reorient, [("out_file", "in_file")]),
- (reorient, outputnode, [("out_file", "out_file")]),
+ (inputnode, deoblique, [('in_file', 'in_file')]),
+ (deoblique, reorient, [('out_file', 'in_file')]),
+ (reorient, outputnode, [('out_file', 'out_file')]),
]
)
return workflow
@@ -48,11 +47,11 @@ def _reorient_wf(name="ReorientWorkflow"):
def test_boilerplate():
"""Check the boilerplate is generated."""
- workflow = Workflow(name="test")
- workflow.__desc__ = "Outer workflow. "
- workflow.__postdesc__ = "Outer workflow (postdesc)."
+ workflow = Workflow(name='test')
+ workflow.__desc__ = 'Outer workflow. '
+ workflow.__postdesc__ = 'Outer workflow (postdesc).'
- inputnode = Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode")
+ inputnode = Node(niu.IdentityInterface(fields=['in_file']), name='inputnode')
inner = _reorient_wf()
# fmt: off
@@ -61,7 +60,4 @@ def test_boilerplate():
])
# fmt: on
- assert (
- workflow.visit_desc()
- == "Outer workflow. Inner workflow. Outer workflow (postdesc)."
- )
+ assert workflow.visit_desc() == 'Outer workflow. Inner workflow. Outer workflow (postdesc).'
diff --git a/niworkflows/engine/workflows.py b/niworkflows/engine/workflows.py
index 9f02f3674d8..dda14ba8f6c 100644
--- a/niworkflows/engine/workflows.py
+++ b/niworkflows/engine/workflows.py
@@ -25,6 +25,7 @@
Add special features to the Nipype's vanilla workflows
"""
+
from nipype.pipeline import engine as pe
@@ -63,4 +64,4 @@ def visit_desc(self):
if self.__postdesc__:
desc += [self.__postdesc__]
- return "".join(desc)
+ return ''.join(desc)
diff --git a/niworkflows/func/tests/test_util.py b/niworkflows/func/tests/test_util.py
index 76fd652a3a1..7ca1bda167d 100755
--- a/niworkflows/func/tests/test_util.py
+++ b/niworkflows/func/tests/test_util.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Testing module for fmriprep.workflows.bold.util."""
+
import pytest
import os
from pathlib import Path
@@ -36,32 +37,26 @@
from ..util import init_enhance_and_skullstrip_bold_wf
-datapath = os.getenv("FMRIPREP_REGRESSION_SOURCE")
+datapath = os.getenv('FMRIPREP_REGRESSION_SOURCE')
parameters = []
if datapath:
datapath = Path(datapath)
bold_datasets = []
- for ds in datapath.glob("ds*/"):
- paths = [p for p in ds.glob("*_bold.nii.gz") if p.exists()]
- subjects = set([p.name.replace("sub-", "").split("_")[0] for p in paths])
+ for ds in datapath.glob('ds*/'):
+ paths = [p for p in ds.glob('*_bold.nii.gz') if p.exists()]
+ subjects = set([p.name.replace('sub-', '').split('_')[0] for p in paths])
for sub in subjects:
- subject_data = [p for p in paths if p.name.startswith(f"sub-{sub}")]
+ subject_data = [p for p in paths if p.name.startswith(f'sub-{sub}')]
se_epi = sorted(
- [
- str(p.relative_to(datapath))
- for p in subject_data
- if "echo-" not in p.name
- ]
+ [str(p.relative_to(datapath)) for p in subject_data if 'echo-' not in p.name]
)
if se_epi:
bold_datasets.append(se_epi)
- meecho = sorted(
- [str(p.relative_to(datapath)) for p in paths if "echo-" in p.name]
- )
+ meecho = sorted([str(p.relative_to(datapath)) for p in paths if 'echo-' in p.name])
if meecho:
bold_datasets.append([meecho[0]])
@@ -72,23 +67,21 @@
str(
(
datapath
- / "derivatives"
- / path.replace("_echo-1", "").replace("_bold.nii", "_bold_mask.nii")
+ / 'derivatives'
+ / path.replace('_echo-1', '').replace('_bold.nii', '_bold_mask.nii')
).absolute()
)
)
- bold_datasets = [
- [str((datapath / p).absolute()) for p in ds] for ds in bold_datasets
- ]
+ bold_datasets = [[str((datapath / p).absolute()) for p in ds] for ds in bold_datasets]
parameters = zip(bold_datasets, exp_masks)
if not bold_datasets:
raise RuntimeError(
- f"Data folder <{datapath}> was provided, but no images were found. "
- "Folder contents:\n{}".format(
- "\n".join([str(p) for p in datapath.glob("ds*/*.nii.gz")])
+ f'Data folder <{datapath}> was provided, but no images were found. '
+ 'Folder contents:\n{}'.format(
+ '\n'.join([str(p) for p in datapath.glob('ds*/*.nii.gz')])
)
)
@@ -105,10 +98,10 @@ def symmetric_overlap(img1, img2):
@pytest.mark.skipif(
not datapath,
- reason="FMRIPREP_REGRESSION_SOURCE env var not set, or no data is available",
+ reason='FMRIPREP_REGRESSION_SOURCE env var not set, or no data is available',
)
-@pytest.mark.skipif(not which("antsAI"), reason="antsAI executable not found")
-@pytest.mark.parametrize("input_fname,expected_fname", parameters)
+@pytest.mark.skipif(not which('antsAI'), reason='antsAI executable not found')
+@pytest.mark.parametrize('input_fname,expected_fname', parameters)
def test_masking(input_fname, expected_fname):
"""Check for regressions in masking."""
from nipype import config as ncfg
@@ -117,40 +110,36 @@ def test_masking(input_fname, expected_fname):
dsname = Path(expected_fname).parent.name
# Reconstruct base_fname from above
- reports_dir = Path(os.getenv("FMRIPREP_REGRESSION_REPORTS", ""))
+ reports_dir = Path(os.getenv('FMRIPREP_REGRESSION_REPORTS', ''))
newpath = reports_dir / dsname
newpath.mkdir(parents=True, exist_ok=True)
# Nipype config (logs and execution)
ncfg.update_config(
{
- "execution": {
- "crashdump_dir": str(newpath),
+ 'execution': {
+ 'crashdump_dir': str(newpath),
}
}
)
- wf = pe.Workflow(name=basename.replace("_bold.nii.gz", "").replace("-", "_"))
- base_dir = os.getenv("CACHED_WORK_DIRECTORY")
+ wf = pe.Workflow(name=basename.replace('_bold.nii.gz', '').replace('-', '_'))
+ base_dir = os.getenv('CACHED_WORK_DIRECTORY')
if base_dir:
base_dir = Path(base_dir) / dsname
base_dir.mkdir(parents=True, exist_ok=True)
wf.base_dir = str(base_dir)
- epi_reference_wf = init_epi_reference_wf(
- omp_nthreads=os.cpu_count(), auto_bold_nss=True
- )
+ epi_reference_wf = init_epi_reference_wf(omp_nthreads=os.cpu_count(), auto_bold_nss=True)
epi_reference_wf.inputs.inputnode.in_files = input_fname
enhance_and_skullstrip_bold_wf = init_enhance_and_skullstrip_bold_wf()
out_fname = fname_presuffix(
- Path(expected_fname).name, suffix=".svg", use_ext=False, newpath=str(newpath)
+ Path(expected_fname).name, suffix='.svg', use_ext=False, newpath=str(newpath)
)
- mask_diff_plot = pe.Node(
- ROIsPlot(colors=["limegreen"], levels=[0.5]), name="mask_diff_plot"
- )
+ mask_diff_plot = pe.Node(ROIsPlot(colors=['limegreen'], levels=[0.5]), name='mask_diff_plot')
mask_diff_plot.always_run = True
mask_diff_plot.inputs.in_mask = expected_fname
mask_diff_plot.inputs.out_report = out_fname
diff --git a/niworkflows/func/util.py b/niworkflows/func/util.py
index 19321b87704..f81ce38bb5e 100644
--- a/niworkflows/func/util.py
+++ b/niworkflows/func/util.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Utility workflows."""
+
from packaging.version import parse as parseversion, Version
from nipype.pipeline import engine as pe
@@ -51,7 +52,7 @@ def init_bold_reference_wf(
brainmask_thresh=0.85,
pre_mask=False,
multiecho=False,
- name="bold_reference_wf",
+ name='bold_reference_wf',
gen_report=False,
):
"""
@@ -152,27 +153,25 @@ def init_bold_reference_wf(
"""
inputnode = pe.Node(
- niu.IdentityInterface(
- fields=["bold_file", "bold_mask", "dummy_scans", "sbref_file"]
- ),
- name="inputnode",
+ niu.IdentityInterface(fields=['bold_file', 'bold_mask', 'dummy_scans', 'sbref_file']),
+ name='inputnode',
)
outputnode = pe.Node(
niu.IdentityInterface(
fields=[
- "bold_file",
- "all_bold_files",
- "raw_ref_image",
- "skip_vols",
- "algo_dummy_scans",
- "ref_image",
- "ref_image_brain",
- "bold_mask",
- "validation_report",
- "mask_report",
+ 'bold_file',
+ 'all_bold_files',
+ 'raw_ref_image',
+ 'skip_vols',
+ 'algo_dummy_scans',
+ 'ref_image',
+ 'ref_image_brain',
+ 'bold_mask',
+ 'validation_report',
+ 'mask_report',
]
),
- name="outputnode",
+ name='outputnode',
)
# Simplify manually setting input image
@@ -181,13 +180,13 @@ def init_bold_reference_wf(
val_bold = pe.MapNode(
ValidateImage(),
- name="val_bold",
+ name='val_bold',
mem_gb=DEFAULT_MEMORY_MIN_GB,
- iterfield=["in_file"],
+ iterfield=['in_file'],
)
- get_dummy = pe.Node(NonsteadyStatesDetector(), name="get_dummy")
- gen_avg = pe.Node(RobustAverage(), name="gen_avg", mem_gb=1)
+ get_dummy = pe.Node(NonsteadyStatesDetector(), name='get_dummy')
+ gen_avg = pe.Node(RobustAverage(), name='gen_avg', mem_gb=1)
enhance_and_skullstrip_bold_wf = init_enhance_and_skullstrip_bold_wf(
brainmask_thresh=brainmask_thresh,
@@ -196,8 +195,8 @@ def init_bold_reference_wf(
)
calc_dummy_scans = pe.Node(
- niu.Function(function=_pass_dummy_scans, output_names=["skip_vols_num"]),
- name="calc_dummy_scans",
+ niu.Function(function=_pass_dummy_scans, output_names=['skip_vols_num']),
+ name='calc_dummy_scans',
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
@@ -225,7 +224,7 @@ def init_bold_reference_wf(
# fmt: on
if gen_report:
- mask_reportlet = pe.Node(SimpleShowMaskRPT(), name="mask_reportlet")
+ mask_reportlet = pe.Node(SimpleShowMaskRPT(), name='mask_reportlet')
# fmt: off
workflow.connect([
(enhance_and_skullstrip_bold_wf, mask_reportlet, [
@@ -254,11 +253,11 @@ def init_bold_reference_wf(
val_sbref = pe.MapNode(
ValidateImage(),
- name="val_sbref",
+ name='val_sbref',
mem_gb=DEFAULT_MEMORY_MIN_GB,
- iterfield=["in_file"],
+ iterfield=['in_file'],
)
- merge_sbrefs = pe.Node(MergeSeries(), name="merge_sbrefs")
+ merge_sbrefs = pe.Node(MergeSeries(), name='merge_sbrefs')
# fmt: off
workflow.connect([
@@ -279,7 +278,7 @@ def init_bold_reference_wf(
def init_enhance_and_skullstrip_bold_wf(
brainmask_thresh=0.5,
- name="enhance_and_skullstrip_bold_wf",
+ name='enhance_and_skullstrip_bold_wf',
omp_nthreads=1,
pre_mask=False,
):
@@ -366,114 +365,104 @@ def init_enhance_and_skullstrip_bold_wf(
from niworkflows.interfaces.nibabel import ApplyMask, BinaryDilation
workflow = Workflow(name=name)
- inputnode = pe.Node(
- niu.IdentityInterface(fields=["in_file", "pre_mask"]), name="inputnode"
- )
+ inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'pre_mask']), name='inputnode')
outputnode = pe.Node(
- niu.IdentityInterface(
- fields=["mask_file", "skull_stripped_file", "bias_corrected_file"]
- ),
- name="outputnode",
+ niu.IdentityInterface(fields=['mask_file', 'skull_stripped_file', 'bias_corrected_file']),
+ name='outputnode',
)
# Run N4 normally, force num_threads=1 for stability (images are small, no need for >1)
n4_correct = pe.Node(
- N4BiasFieldCorrection(
- dimension=3, copy_header=True, bspline_fitting_distance=200
- ),
+ N4BiasFieldCorrection(dimension=3, copy_header=True, bspline_fitting_distance=200),
shrink_factor=2,
- name="n4_correct",
+ name='n4_correct',
n_procs=1,
)
n4_correct.inputs.rescale_intensities = True
# Create a generous BET mask out of the bias-corrected EPI
- skullstrip_first_pass = pe.Node(
- fsl.BET(frac=0.2, mask=True), name="skullstrip_first_pass"
- )
- first_dilate = pe.Node(BinaryDilation(radius=6), name="first_dilate")
- first_mask = pe.Node(ApplyMask(), name="first_mask")
+ skullstrip_first_pass = pe.Node(fsl.BET(frac=0.2, mask=True), name='skullstrip_first_pass')
+ first_dilate = pe.Node(BinaryDilation(radius=6), name='first_dilate')
+ first_mask = pe.Node(ApplyMask(), name='first_mask')
# Use AFNI's unifize for T2 contrast & fix header
unifize = pe.Node(
afni.Unifize(
t2=True,
- outputtype="NIFTI_GZ",
+ outputtype='NIFTI_GZ',
# Default -clfrac is 0.1, 0.4 was too conservative
# -rbt because I'm a Jedi AFNI Master (see 3dUnifize's documentation)
- args="-clfrac 0.2 -rbt 18.3 65.0 90.0",
- out_file="uni.nii.gz",
+ args='-clfrac 0.2 -rbt 18.3 65.0 90.0',
+ out_file='uni.nii.gz',
),
- name="unifize",
+ name='unifize',
)
- fixhdr_unifize = pe.Node(CopyXForm(), name="fixhdr_unifize", mem_gb=0.1)
+ fixhdr_unifize = pe.Node(CopyXForm(), name='fixhdr_unifize', mem_gb=0.1)
# Run AFNI's 3dAutomask to extract a refined brain mask
skullstrip_second_pass = pe.Node(
- afni.Automask(dilate=1, outputtype="NIFTI_GZ"), name="skullstrip_second_pass"
+ afni.Automask(dilate=1, outputtype='NIFTI_GZ'), name='skullstrip_second_pass'
)
- fixhdr_skullstrip2 = pe.Node(CopyXForm(), name="fixhdr_skullstrip2", mem_gb=0.1)
+ fixhdr_skullstrip2 = pe.Node(CopyXForm(), name='fixhdr_skullstrip2', mem_gb=0.1)
# Take intersection of both masks
- combine_masks = pe.Node(fsl.BinaryMaths(operation="mul"), name="combine_masks")
+ combine_masks = pe.Node(fsl.BinaryMaths(operation='mul'), name='combine_masks')
# Compute masked brain
- apply_mask = pe.Node(ApplyMask(), name="apply_mask")
+ apply_mask = pe.Node(ApplyMask(), name='apply_mask')
if not pre_mask:
from nipype.interfaces.ants.utils import AI
bold_template = get_template(
- "MNI152NLin2009cAsym", resolution=2, desc="fMRIPrep", suffix="boldref"
- )
- brain_mask = get_template(
- "MNI152NLin2009cAsym", resolution=2, desc="brain", suffix="mask"
+ 'MNI152NLin2009cAsym', resolution=2, desc='fMRIPrep', suffix='boldref'
)
+ brain_mask = get_template('MNI152NLin2009cAsym', resolution=2, desc='brain', suffix='mask')
# Initialize transforms with antsAI
init_aff = pe.Node(
AI(
fixed_image=str(bold_template),
fixed_image_mask=str(brain_mask),
- metric=("Mattes", 32, "Regular", 0.2),
- transform=("Affine", 0.1),
+ metric=('Mattes', 32, 'Regular', 0.2),
+ transform=('Affine', 0.1),
search_factor=(20, 0.12),
principal_axes=False,
convergence=(10, 1e-6, 10),
verbose=True,
),
- name="init_aff",
+ name='init_aff',
n_procs=omp_nthreads,
)
# Registration().version may be None
- if parseversion(Registration().version or "0.0.0") > Version("2.2.0"):
+ if parseversion(Registration().version or '0.0.0') > Version('2.2.0'):
init_aff.inputs.search_grid = (40, (0, 40, 40))
# Set up spatial normalization
norm = pe.Node(
- Registration(from_file=data.load("epi_atlasbased_brainmask.json")),
- name="norm",
+ Registration(from_file=data.load('epi_atlasbased_brainmask.json')),
+ name='norm',
n_procs=omp_nthreads,
)
norm.inputs.fixed_image = str(bold_template)
map_brainmask = pe.Node(
ApplyTransforms(
- interpolation="Linear",
+ interpolation='Linear',
# Use the higher resolution and probseg for numerical stability in rounding
input_image=str(
get_template(
- "MNI152NLin2009cAsym",
+ 'MNI152NLin2009cAsym',
resolution=1,
- label="brain",
- suffix="probseg",
+ label='brain',
+ suffix='probseg',
)
),
),
- name="map_brainmask",
+ name='map_brainmask',
)
# Ensure mask's header matches reference's
- fix_header = pe.Node(CopyHeader(), name="fix_header", run_without_submitting=True)
+ fix_header = pe.Node(CopyHeader(), name='fix_header', run_without_submitting=True)
# fmt: off
workflow.connect([
@@ -523,7 +512,7 @@ def init_enhance_and_skullstrip_bold_wf(
return workflow
-def init_skullstrip_bold_wf(name="skullstrip_bold_wf"):
+def init_skullstrip_bold_wf(name='skullstrip_bold_wf'):
"""
Apply skull-stripping to a BOLD image.
@@ -558,22 +547,18 @@ def init_skullstrip_bold_wf(name="skullstrip_bold_wf"):
from niworkflows.interfaces.nibabel import ApplyMask
workflow = Workflow(name=name)
- inputnode = pe.Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode")
+ inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']), name='inputnode')
outputnode = pe.Node(
- niu.IdentityInterface(
- fields=["mask_file", "skull_stripped_file", "out_report"]
- ),
- name="outputnode",
- )
- skullstrip_first_pass = pe.Node(
- fsl.BET(frac=0.2, mask=True), name="skullstrip_first_pass"
+ niu.IdentityInterface(fields=['mask_file', 'skull_stripped_file', 'out_report']),
+ name='outputnode',
)
+ skullstrip_first_pass = pe.Node(fsl.BET(frac=0.2, mask=True), name='skullstrip_first_pass')
skullstrip_second_pass = pe.Node(
- afni.Automask(dilate=1, outputtype="NIFTI_GZ"), name="skullstrip_second_pass"
+ afni.Automask(dilate=1, outputtype='NIFTI_GZ'), name='skullstrip_second_pass'
)
- combine_masks = pe.Node(fsl.BinaryMaths(operation="mul"), name="combine_masks")
- apply_mask = pe.Node(ApplyMask(), name="apply_mask")
- mask_reportlet = pe.Node(SimpleShowMaskRPT(), name="mask_reportlet")
+ combine_masks = pe.Node(fsl.BinaryMaths(operation='mul'), name='combine_masks')
+ apply_mask = pe.Node(ApplyMask(), name='apply_mask')
+ mask_reportlet = pe.Node(SimpleShowMaskRPT(), name='mask_reportlet')
# fmt: off
workflow.connect([
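
Note for reviewers: the '# fmt: off' / '# fmt: on' pairs around the workflow.connect lists are kept deliberately; ruff format, like black, honors these suppression comments and leaves the enclosed lines hand-aligned. The pattern, as a minimal sketch (the connections here are illustrative only, not the patch's):

    # fmt: off
    workflow.connect([
        (inputnode, val_bold, [('bold_file', 'in_file')]),
        (val_bold, gen_avg, [('out_file', 'in_file')]),
    ])
    # fmt: on
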
diff --git a/niworkflows/interfaces/bids.py b/niworkflows/interfaces/bids.py
index 6a50417d400..fd6d92bb733 100644
--- a/niworkflows/interfaces/bids.py
+++ b/niworkflows/interfaces/bids.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Interfaces for handling BIDS-like neuroimaging structures."""
+
from collections import defaultdict
from contextlib import suppress
from json import dumps, loads
@@ -56,13 +57,13 @@
from ..utils.images import set_consumables, unsafe_write_nifti_header_and_data
from ..utils.misc import _copy_any, unlink
-regz = re.compile(r"\.gz$")
-_pybids_spec = loads(data.load.readable("nipreps.json").read_text())
-BIDS_DERIV_ENTITIES = _pybids_spec["entities"]
-BIDS_DERIV_PATTERNS = tuple(_pybids_spec["default_path_patterns"])
+regz = re.compile(r'\.gz$')
+_pybids_spec = loads(data.load.readable('nipreps.json').read_text())
+BIDS_DERIV_ENTITIES = _pybids_spec['entities']
+BIDS_DERIV_PATTERNS = tuple(_pybids_spec['default_path_patterns'])
STANDARD_SPACES = tf.api.templates()
-LOGGER = logging.getLogger("nipype.interface")
+LOGGER = logging.getLogger('nipype.interface')
if sys.version_info < (3, 10): # PY39
@@ -70,7 +71,7 @@
def zip(*args, strict=False):
if strict and any(len(args[0]) != len(arg) for arg in args):
- raise ValueError("strict_zip() requires all arguments to have the same length")
+ raise ValueError('strict_zip() requires all arguments to have the same length')
return builtin_zip(*args)
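
Note: this PY39 shim only emulates strict=True for sized arguments, which is all this module needs; unlike the built-in on Python >= 3.10, it raises eagerly rather than on exhaustion. Behavioral sketch (not part of the patch):

    zip([1, 2, 3], ['a', 'b'], strict=True)      # ValueError: lengths differ
    list(zip([1, 2], ['a', 'b'], strict=True))   # [(1, 'a'), (2, 'b')]
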
@@ -82,24 +83,24 @@ def _none():
DEFAULT_DTYPES = defaultdict(
_none,
(
- ("mask", "uint8"),
- ("dseg", "int16"),
- ("probseg", "float32"),
- ("boldref", "float32"),
+ ('mask', 'uint8'),
+ ('dseg', 'int16'),
+ ('probseg', 'float32'),
+ ('boldref', 'float32'),
),
)
class _BIDSBaseInputSpec(BaseInterfaceInputSpec):
bids_dir = traits.Either(
- (None, Directory(exists=True)), usedefault=True, desc="optional bids directory"
+ (None, Directory(exists=True)), usedefault=True, desc='optional bids directory'
)
- bids_validate = traits.Bool(True, usedefault=True, desc="enable BIDS validator")
- index_db = Directory(exists=True, desc="a PyBIDS layout cache directory")
+ bids_validate = traits.Bool(True, usedefault=True, desc='enable BIDS validator')
+ index_db = Directory(exists=True, desc='a PyBIDS layout cache directory')
class _BIDSInfoInputSpec(_BIDSBaseInputSpec):
- in_file = File(mandatory=True, desc="input file, part of a BIDS tree")
+ in_file = File(mandatory=True, desc='input file, part of a BIDS tree')
class _BIDSInfoOutputSpec(DynamicTraitedSpec):
@@ -216,8 +217,7 @@ def _run_interface(self, runtime):
pass
params = parse_file_entities(in_file)
self._results = {
- key: params.get(key, Undefined)
- for key in _BIDSInfoOutputSpec().get().keys()
+ key: params.get(key, Undefined) for key in _BIDSInfoOutputSpec().get().keys()
}
return runtime
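
For context on the collapsed comprehension above: parse_file_entities returns a plain dict of BIDS entities, and any key absent from the path falls back to Undefined. A quick illustration (entity formatting may vary slightly across pybids versions):

    from bids.layout import parse_file_entities

    parse_file_entities('sub-01_ses-retest_task-rest_bold.nii.gz')
    # -> {'subject': '01', 'session': 'retest', 'task': 'rest',
    #     'suffix': 'bold', 'extension': '.nii.gz'}
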
@@ -228,17 +228,17 @@ class _BIDSDataGrabberInputSpec(BaseInterfaceInputSpec):
class _BIDSDataGrabberOutputSpec(TraitedSpec):
- out_dict = traits.Dict(desc="output data structure")
- fmap = OutputMultiObject(desc="output fieldmaps")
- bold = OutputMultiObject(desc="output functional images")
- sbref = OutputMultiObject(desc="output sbrefs")
- t1w = OutputMultiObject(desc="output T1w images")
- roi = OutputMultiObject(desc="output ROI images")
- t2w = OutputMultiObject(desc="output T2w images")
- flair = OutputMultiObject(desc="output FLAIR images")
- pet = OutputMultiObject(desc="output PET images")
- dwi = OutputMultiObject(desc="output DWI images")
- asl = OutputMultiObject(desc="output ASL images")
+ out_dict = traits.Dict(desc='output data structure')
+ fmap = OutputMultiObject(desc='output fieldmaps')
+ bold = OutputMultiObject(desc='output functional images')
+ sbref = OutputMultiObject(desc='output sbrefs')
+ t1w = OutputMultiObject(desc='output T1w images')
+ roi = OutputMultiObject(desc='output ROI images')
+ t2w = OutputMultiObject(desc='output T2w images')
+ flair = OutputMultiObject(desc='output FLAIR images')
+ pet = OutputMultiObject(desc='output PET images')
+ dwi = OutputMultiObject(desc='output DWI images')
+ asl = OutputMultiObject(desc='output ASL images')
class BIDSDataGrabber(SimpleInterface):
@@ -265,8 +265,8 @@ class BIDSDataGrabber(SimpleInterface):
_require_funcs = True
def __init__(self, *args, **kwargs):
- anat_only = kwargs.pop("anat_only")
- anat_derivatives = kwargs.pop("anat_derivatives", None)
+ anat_only = kwargs.pop('anat_only')
+ anat_derivatives = kwargs.pop('anat_derivatives', None)
super().__init__(*args, **kwargs)
if anat_only is not None:
self._require_funcs = not anat_only
@@ -275,59 +275,54 @@ def __init__(self, *args, **kwargs):
def _run_interface(self, runtime):
bids_dict = self.inputs.subject_data
- self._results["out_dict"] = bids_dict
+ self._results['out_dict'] = bids_dict
self._results.update(bids_dict)
if self._require_t1w and not bids_dict['t1w']:
raise FileNotFoundError(
- "No T1w images found for subject sub-{}".format(self.inputs.subject_id)
+ 'No T1w images found for subject sub-{}'.format(self.inputs.subject_id)
)
- if self._require_funcs and not bids_dict["bold"]:
+ if self._require_funcs and not bids_dict['bold']:
raise FileNotFoundError(
- "No functional images found for subject sub-{}".format(
- self.inputs.subject_id
- )
+ 'No functional images found for subject sub-{}'.format(self.inputs.subject_id)
)
- for imtype in ["bold", "t2w", "flair", "fmap", "sbref", "roi", "pet", "asl"]:
+ for imtype in ['bold', 't2w', 'flair', 'fmap', 'sbref', 'roi', 'pet', 'asl']:
if not bids_dict[imtype]:
- LOGGER.info(
- 'No "%s" images found for sub-%s', imtype, self.inputs.subject_id
- )
+ LOGGER.info('No "%s" images found for sub-%s', imtype, self.inputs.subject_id)
return runtime
class _PrepareDerivativeInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
- check_hdr = traits.Bool(True, usedefault=True, desc="fix headers of NIfTI outputs")
+ check_hdr = traits.Bool(True, usedefault=True, desc='fix headers of NIfTI outputs')
compress = InputMultiObject(
traits.Either(None, traits.Bool),
usedefault=True,
- desc="whether ``in_file`` should be compressed (True), uncompressed (False) "
- "or left unmodified (None, default).",
+ desc='whether ``in_file`` should be compressed (True), uncompressed (False) '
+ 'or left unmodified (None, default).',
)
data_dtype = Str(
- desc="NumPy datatype to coerce NIfTI data to, or `source` to match the input file dtype"
+ desc='NumPy datatype to coerce NIfTI data to, or `source` to match the input file dtype'
)
dismiss_entities = InputMultiObject(
traits.Either(None, Str),
usedefault=True,
- desc="a list entities that will not be propagated from the source file",
- )
- in_file = InputMultiObject(
- File(exists=True), mandatory=True, desc="the object to be saved"
+ desc='a list of entities that will not be propagated from the source file',
)
- meta_dict = traits.DictStrAny(desc="an input dictionary containing metadata")
+ in_file = InputMultiObject(File(exists=True), mandatory=True, desc='the object to be saved')
+ meta_dict = traits.DictStrAny(desc='an input dictionary containing metadata')
source_file = InputMultiObject(
- File(exists=False), mandatory=True, desc="the source file(s) to extract entities from")
+ File(exists=False), mandatory=True, desc='the source file(s) to extract entities from'
+ )
class _PrepareDerivativeOutputSpec(TraitedSpec):
- out_file = OutputMultiObject(File(exists=True), desc="derivative file path")
- out_meta = traits.DictStrAny(desc="derivative metadata")
- out_path = OutputMultiObject(Str, desc="relative path in target directory")
- fixed_hdr = traits.List(traits.Bool, desc="whether derivative header was fixed")
+ out_file = OutputMultiObject(File(exists=True), desc='derivative file path')
+ out_meta = traits.DictStrAny(desc='derivative metadata')
+ out_path = OutputMultiObject(Str, desc='relative path in target directory')
+ fixed_hdr = traits.List(traits.Bool, desc='whether derivative header was fixed')
class PrepareDerivative(SimpleInterface):
@@ -495,7 +490,7 @@ class PrepareDerivative(SimpleInterface):
input_spec = _PrepareDerivativeInputSpec
output_spec = _PrepareDerivativeOutputSpec
- _config_entities = frozenset({e["name"] for e in BIDS_DERIV_ENTITIES})
+ _config_entities = frozenset({e['name'] for e in BIDS_DERIV_ENTITIES})
_config_entities_dict = BIDS_DERIV_ENTITIES
_standard_spaces = STANDARD_SPACES
_file_patterns = BIDS_DERIV_PATTERNS
@@ -503,9 +498,7 @@ class PrepareDerivative(SimpleInterface):
def __init__(self, allowed_entities=None, **inputs):
"""Initialize the SimpleInterface and extend inputs with custom entities."""
- self._allowed_entities = set(allowed_entities or []).union(
- set(self._config_entities)
- )
+ self._allowed_entities = set(allowed_entities or []).union(set(self._config_entities))
self._metadata = {}
self._static_traits = self.input_spec.class_editable_traits() + sorted(
@@ -533,47 +526,52 @@ def _run_interface(self, runtime):
# Middle precedence: metadata passed to constructor
**self._metadata,
# Highest precedence: metadata set as inputs
- **({
- k: getattr(self.inputs, k)
- for k in self.inputs.copyable_trait_names()
- if k not in self._static_traits
- })
+ **(
+ {
+ k: getattr(self.inputs, k)
+ for k in self.inputs.copyable_trait_names()
+ if k not in self._static_traits
+ }
+ ),
}
in_file = listify(self.inputs.in_file)
# Initialize entities with those from the source file.
custom_config = Config(
- name="custom",
+ name='custom',
entities=self._config_entities_dict,
default_path_patterns=self._file_patterns,
)
in_entities = [
parse_file_entities(
str(relative_to_root(source_file)),
- config=["bids", "derivatives", custom_config],
+ config=['bids', 'derivatives', custom_config],
)
for source_file in self.inputs.source_file
]
- out_entities = {k: v for k, v in in_entities[0].items()
- if all(ent.get(k) == v for ent in in_entities[1:])}
+ out_entities = {
+ k: v
+ for k, v in in_entities[0].items()
+ if all(ent.get(k) == v for ent in in_entities[1:])
+ }
for drop_entity in listify(self.inputs.dismiss_entities or []):
out_entities.pop(drop_entity, None)
# Override extension with that of the input file(s)
- out_entities["extension"] = [
+ out_entities['extension'] = [
# _splitext does not accept .surf.gii (for instance)
- "".join(Path(orig_file).suffixes).lstrip(".")
+ ''.join(Path(orig_file).suffixes).lstrip('.')
for orig_file in in_file
]
compress = listify(self.inputs.compress) or [None]
if len(compress) == 1:
compress = compress * len(in_file)
- for i, ext in enumerate(out_entities["extension"]):
+ for i, ext in enumerate(out_entities['extension']):
if compress[i] is not None:
- ext = regz.sub("", ext)
- out_entities["extension"][i] = f"{ext}.gz" if compress[i] else ext
+ ext = regz.sub('', ext)
+ out_entities['extension'][i] = f'{ext}.gz' if compress[i] else ext
# Override entities with those set as inputs
for key in self._allowed_entities:
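
The reflowed extension handling above is behavior-preserving; what it computes for a single input, as a standalone sketch (illustrative filename):

    import re
    from pathlib import Path

    regz = re.compile(r'\.gz$')
    # joining Path.suffixes handles multi-part extensions like .surf.gii
    ext = ''.join(Path('sub-01_desc-preproc_bold.nii.gz').suffixes).lstrip('.')
    # ext == 'nii.gz'
    compress = False                          # compress=False -> uncompressed
    ext = regz.sub('', ext)                   # 'nii'
    ext = f'{ext}.gz' if compress else ext    # stays 'nii'
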
@@ -582,52 +580,51 @@ def _run_interface(self, runtime):
out_entities[key] = value
# Clean up native resolution with space
- if out_entities.get("resolution") == "native" and out_entities.get("space"):
- out_entities.pop("resolution", None)
+ if out_entities.get('resolution') == 'native' and out_entities.get('space'):
+ out_entities.pop('resolution', None)
# Expand templateflow resolutions
- resolution = out_entities.get("resolution")
- space = out_entities.get("space")
+ resolution = out_entities.get('resolution')
+ space = out_entities.get('space')
if resolution:
# Standard spaces
if space in self._standard_spaces:
res = _get_tf_resolution(space, resolution)
else: # TODO: Nonstandard?
- res = "Unknown"
+ res = 'Unknown'
metadata['Resolution'] = res
- if len(set(out_entities["extension"])) == 1:
- out_entities["extension"] = out_entities["extension"][0]
+ if len(set(out_entities['extension'])) == 1:
+ out_entities['extension'] = out_entities['extension'][0]
# Insert custom (non-BIDS) entities from allowed_entities.
custom_entities = set(out_entities) - set(self._config_entities)
patterns = self._file_patterns
if custom_entities:
# Example: f"{key}-{{{key}}}" -> "task-{task}"
- custom_pat = "_".join(f"{key}-{{{key}}}" for key in sorted(custom_entities))
+ custom_pat = '_'.join(f'{key}-{{{key}}}' for key in sorted(custom_entities))
patterns = [
- pat.replace("_{suffix", "_".join(("", custom_pat, "{suffix")))
- for pat in patterns
+ pat.replace('_{suffix', '_'.join(('', custom_pat, '{suffix'))) for pat in patterns
]
# Build the output path(s)
dest_files = build_path(out_entities, path_patterns=patterns)
if not dest_files:
- raise ValueError(f"Could not build path with entities {out_entities}.")
+ raise ValueError(f'Could not build path with entities {out_entities}.')
# Make sure the interpolated values are embedded in a list, and check
dest_files = listify(dest_files)
if len(in_file) != len(dest_files):
raise ValueError(
- f"Input files ({len(in_file)}) not matched "
- f"by interpolated patterns ({len(dest_files)})."
+ f'Input files ({len(in_file)}) not matched '
+ f'by interpolated patterns ({len(dest_files)}).'
)
# Prepare SimpleInterface outputs object
- self._results["out_file"] = []
- self._results["fixed_hdr"] = [False] * len(in_file)
- self._results["out_path"] = dest_files
- self._results["out_meta"] = metadata
+ self._results['out_file'] = []
+ self._results['fixed_hdr'] = [False] * len(in_file)
+ self._results['out_path'] = dest_files
+ self._results['out_meta'] = metadata
for i, (orig_file, dest_file) in enumerate(zip(in_file, dest_files)):
# Set data and header iff changes need to be made. If these are
@@ -640,9 +637,9 @@ def _run_interface(self, runtime):
new_compression = False
if is_nifti:
- new_compression = (
- os.fspath(orig_file).endswith(".gz") ^ os.fspath(dest_file).endswith(".gz")
- )
+ new_compression = os.fspath(orig_file).endswith('.gz') ^ os.fspath(
+ dest_file
+ ).endswith('.gz')
data_dtype = self.inputs.data_dtype or self._default_dtypes[self.inputs.suffix]
if is_nifti and any((self.inputs.check_hdr, data_dtype)):
@@ -651,39 +648,37 @@ def _run_interface(self, runtime):
if self.inputs.check_hdr:
hdr = nii.header
curr_units = tuple(
- [None if u == "unknown" else u for u in hdr.get_xyzt_units()]
+ [None if u == 'unknown' else u for u in hdr.get_xyzt_units()]
)
- curr_codes = (int(hdr["qform_code"]), int(hdr["sform_code"]))
+ curr_codes = (int(hdr['qform_code']), int(hdr['sform_code']))
# Default to mm, use sec if data type is bold
units = (
- curr_units[0] or "mm",
- "sec" if out_entities["suffix"] == "bold" else None,
+ curr_units[0] or 'mm',
+ 'sec' if out_entities['suffix'] == 'bold' else None,
)
xcodes = (1, 1) # Derivative in its original scanner space
if self.inputs.space:
- xcodes = (
- (4, 4) if self.inputs.space in self._standard_spaces else (2, 2)
- )
+ xcodes = (4, 4) if self.inputs.space in self._standard_spaces else (2, 2)
curr_zooms = zooms = hdr.get_zooms()
- if "RepetitionTime" in self.inputs.get():
+ if 'RepetitionTime' in self.inputs.get():
zooms = curr_zooms[:3] + (self.inputs.RepetitionTime,)
if (curr_codes, curr_units, curr_zooms) != (xcodes, units, zooms):
- self._results["fixed_hdr"][i] = True
+ self._results['fixed_hdr'][i] = True
new_header = hdr.copy()
new_header.set_qform(nii.affine, xcodes[0])
new_header.set_sform(nii.affine, xcodes[1])
new_header.set_xyzt_units(*units)
new_header.set_zooms(zooms)
- if data_dtype == "source": # match source dtype
+ if data_dtype == 'source': # match source dtype
try:
data_dtype = nb.load(self.inputs.source_file[0]).get_data_dtype()
except Exception:
LOGGER.warning(
- f"Could not get data type of file {self.inputs.source_file[0]}"
+ f'Could not get data type of file {self.inputs.source_file[0]}'
)
data_dtype = None
@@ -692,8 +687,8 @@ def _run_interface(self, runtime):
orig_dtype = nii.get_data_dtype()
if orig_dtype != data_dtype:
LOGGER.warning(
- f"Changing {Path(dest_file).name} dtype "
- f"from {orig_dtype} to {data_dtype}"
+ f'Changing {Path(dest_file).name} dtype '
+ f'from {orig_dtype} to {data_dtype}'
)
# coerce dataobj to new data dtype
if np.issubdtype(data_dtype, np.integer):
@@ -722,35 +717,31 @@ def _run_interface(self, runtime):
else:
# Without this, we would be writing nans
# This is our punishment for hacking around nibabel defaults
- new_header.set_slope_inter(slope=1., inter=0.)
+ new_header.set_slope_inter(slope=1.0, inter=0.0)
unsafe_write_nifti_header_and_data(
- fname=out_file,
- header=new_header,
- data=new_data
+ fname=out_file, header=new_header, data=new_data
)
del orig_img
- self._results["out_file"].append(str(out_file))
+ self._results['out_file'].append(str(out_file))
return runtime
class _SaveDerivativeInputSpec(TraitedSpec):
base_directory = Directory(
- exists=True, mandatory=True, desc="Path to the base directory for storing data."
- )
- in_file = InputMultiObject(
- File(exists=True), mandatory=True, desc="the object to be saved"
+ exists=True, mandatory=True, desc='Path to the base directory for storing data.'
)
- metadata = traits.DictStrAny(desc="metadata to be saved alongside the file")
+ in_file = InputMultiObject(File(exists=True), mandatory=True, desc='the object to be saved')
+ metadata = traits.DictStrAny(desc='metadata to be saved alongside the file')
relative_path = InputMultiObject(
- traits.Str, desc="path to the file relative to the base directory"
+ traits.Str, desc='path to the file relative to the base directory'
)
class _SaveDerivativeOutputSpec(TraitedSpec):
- out_file = OutputMultiObject(File, desc="written file path")
- out_meta = OutputMultiObject(File, desc="written JSON sidecar path")
+ out_file = OutputMultiObject(File, desc='written file path')
+ out_meta = OutputMultiObject(File, desc='written JSON sidecar path')
class SaveDerivative(SimpleInterface):
@@ -763,16 +754,18 @@ class SaveDerivative(SimpleInterface):
This ensures that changes to the output directory metadata (e.g., mtime) do not
trigger unnecessary recomputations in the workflow.
"""
+
input_spec = _SaveDerivativeInputSpec
output_spec = _SaveDerivativeOutputSpec
_always_run = True
def _run_interface(self, runtime):
- self._results["out_file"] = []
- self._results["out_meta"] = []
+ self._results['out_file'] = []
+ self._results['out_meta'] = []
for in_file, relative_path in zip(
- self.inputs.in_file, self.inputs.relative_path,
+ self.inputs.in_file,
+ self.inputs.relative_path,
strict=True,
):
out_file = Path(self.inputs.base_directory) / relative_path
@@ -785,49 +778,45 @@ def _run_interface(self, runtime):
sidecar = out_file.parent / f"{out_file.name.split('.', 1)[0]}.json"
sidecar.unlink(missing_ok=True)
sidecar.write_text(dumps(self.inputs.metadata, indent=2))
- self._results["out_meta"].append(str(sidecar))
- self._results["out_file"].append(str(out_file))
+ self._results['out_meta'].append(str(sidecar))
+ self._results['out_file'].append(str(out_file))
return runtime
class _DerivativesDataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
- base_directory = traits.Directory(
- desc="Path to the base directory for storing data."
- )
- check_hdr = traits.Bool(True, usedefault=True, desc="fix headers of NIfTI outputs")
+ base_directory = traits.Directory(desc='Path to the base directory for storing data.')
+ check_hdr = traits.Bool(True, usedefault=True, desc='fix headers of NIfTI outputs')
compress = InputMultiObject(
traits.Either(None, traits.Bool),
usedefault=True,
- desc="whether ``in_file`` should be compressed (True), uncompressed (False) "
- "or left unmodified (None, default).",
+ desc='whether ``in_file`` should be compressed (True), uncompressed (False) '
+ 'or left unmodified (None, default).',
)
data_dtype = Str(
- desc="NumPy datatype to coerce NIfTI data to, or `source` to"
- "match the input file dtype"
+ desc='NumPy datatype to coerce NIfTI data to, or `source` to match the input file dtype'
)
dismiss_entities = InputMultiObject(
traits.Either(None, Str),
usedefault=True,
- desc="a list entities that will not be propagated from the source file",
+ desc='a list of entities that will not be propagated from the source file',
)
- in_file = InputMultiObject(
- File(exists=True), mandatory=True, desc="the object to be saved"
- )
- meta_dict = traits.DictStrAny(desc="an input dictionary containing metadata")
+ in_file = InputMultiObject(File(exists=True), mandatory=True, desc='the object to be saved')
+ meta_dict = traits.DictStrAny(desc='an input dictionary containing metadata')
source_file = InputMultiObject(
- File(exists=False), mandatory=True, desc="the source file(s) to extract entities from")
+ File(exists=False), mandatory=True, desc='the source file(s) to extract entities from'
+ )
class _DerivativesDataSinkOutputSpec(TraitedSpec):
- out_file = OutputMultiObject(File(exists=True, desc="written file path"))
- out_meta = OutputMultiObject(File(exists=True, desc="written JSON sidecar path"))
+ out_file = OutputMultiObject(File(exists=True, desc='written file path'))
+ out_meta = OutputMultiObject(File(exists=True, desc='written JSON sidecar path'))
compression = OutputMultiObject(
traits.Either(None, traits.Bool),
- desc="whether ``in_file`` should be compressed (True), uncompressed (False) "
- "or left unmodified (None).",
+ desc='whether ``in_file`` should be compressed (True), uncompressed (False) '
+ 'or left unmodified (None).',
)
- fixed_hdr = traits.List(traits.Bool, desc="whether derivative header was fixed")
+ fixed_hdr = traits.List(traits.Bool, desc='whether derivative header was fixed')
class DerivativesDataSink(SimpleInterface):
@@ -1004,9 +993,9 @@ class DerivativesDataSink(SimpleInterface):
input_spec = _DerivativesDataSinkInputSpec
output_spec = _DerivativesDataSinkOutputSpec
- out_path_base = "niworkflows"
+ out_path_base = 'niworkflows'
_always_run = True
- _config_entities = frozenset({e["name"] for e in BIDS_DERIV_ENTITIES})
+ _config_entities = frozenset({e['name'] for e in BIDS_DERIV_ENTITIES})
_config_entities_dict = BIDS_DERIV_ENTITIES
_standard_spaces = STANDARD_SPACES
_file_patterns = BIDS_DERIV_PATTERNS
@@ -1014,9 +1003,7 @@ class DerivativesDataSink(SimpleInterface):
def __init__(self, allowed_entities=None, out_path_base=None, **inputs):
"""Initialize the SimpleInterface and extend inputs with custom entities."""
- self._allowed_entities = set(allowed_entities or []).union(
- set(self._config_entities)
- )
+ self._allowed_entities = set(allowed_entities or []).union(set(self._config_entities))
if out_path_base:
self.out_path_base = out_path_base
@@ -1059,36 +1046,39 @@ def _run_interface(self, runtime):
# Initialize entities with those from the source file.
custom_config = Config(
- name="custom",
+ name='custom',
entities=self._config_entities_dict,
default_path_patterns=self._file_patterns,
)
in_entities = [
parse_file_entities(
str(relative_to_root(source_file)),
- config=["bids", "derivatives", custom_config],
+ config=['bids', 'derivatives', custom_config],
)
for source_file in self.inputs.source_file
]
- out_entities = {k: v for k, v in in_entities[0].items()
- if all(ent.get(k) == v for ent in in_entities[1:])}
+ out_entities = {
+ k: v
+ for k, v in in_entities[0].items()
+ if all(ent.get(k) == v for ent in in_entities[1:])
+ }
for drop_entity in listify(self.inputs.dismiss_entities or []):
out_entities.pop(drop_entity, None)
# Override extension with that of the input file(s)
- out_entities["extension"] = [
+ out_entities['extension'] = [
# _splitext does not accept .surf.gii (for instance)
- "".join(Path(orig_file).suffixes).lstrip(".")
+ ''.join(Path(orig_file).suffixes).lstrip('.')
for orig_file in in_file
]
compress = listify(self.inputs.compress) or [None]
if len(compress) == 1:
compress = compress * len(in_file)
- for i, ext in enumerate(out_entities["extension"]):
+ for i, ext in enumerate(out_entities['extension']):
if compress[i] is not None:
- ext = regz.sub("", ext)
- out_entities["extension"][i] = f"{ext}.gz" if compress[i] else ext
+ ext = regz.sub('', ext)
+ out_entities['extension'][i] = f'{ext}.gz' if compress[i] else ext
# Override entities with those set as inputs
for key in self._allowed_entities:
@@ -1097,56 +1087,55 @@ def _run_interface(self, runtime):
out_entities[key] = value
# Clean up native resolution with space
- if out_entities.get("resolution") == "native" and out_entities.get("space"):
- out_entities.pop("resolution", None)
+ if out_entities.get('resolution') == 'native' and out_entities.get('space'):
+ out_entities.pop('resolution', None)
# Expand templateflow resolutions
- resolution = out_entities.get("resolution")
- space = out_entities.get("space")
+ resolution = out_entities.get('resolution')
+ space = out_entities.get('space')
if resolution:
# Standard spaces
if space in self._standard_spaces:
res = _get_tf_resolution(space, resolution)
else: # TODO: Nonstandard?
- res = "Unknown"
+ res = 'Unknown'
self._metadata['Resolution'] = res
- if len(set(out_entities["extension"])) == 1:
- out_entities["extension"] = out_entities["extension"][0]
+ if len(set(out_entities['extension'])) == 1:
+ out_entities['extension'] = out_entities['extension'][0]
# Insert custom (non-BIDS) entities from allowed_entities.
custom_entities = set(out_entities) - set(self._config_entities)
patterns = self._file_patterns
if custom_entities:
# Example: f"{key}-{{{key}}}" -> "task-{task}"
- custom_pat = "_".join(f"{key}-{{{key}}}" for key in sorted(custom_entities))
+ custom_pat = '_'.join(f'{key}-{{{key}}}' for key in sorted(custom_entities))
patterns = [
- pat.replace("_{suffix", "_".join(("", custom_pat, "{suffix")))
- for pat in patterns
+ pat.replace('_{suffix', '_'.join(('', custom_pat, '{suffix'))) for pat in patterns
]
# Prepare SimpleInterface outputs object
- self._results["out_file"] = []
- self._results["compression"] = []
- self._results["fixed_hdr"] = [False] * len(in_file)
+ self._results['out_file'] = []
+ self._results['compression'] = []
+ self._results['fixed_hdr'] = [False] * len(in_file)
dest_files = build_path(out_entities, path_patterns=patterns)
if not dest_files:
- raise ValueError(f"Could not build path with entities {out_entities}.")
+ raise ValueError(f'Could not build path with entities {out_entities}.')
# Make sure the interpolated values are embedded in a list, and check
dest_files = listify(dest_files)
if len(in_file) != len(dest_files):
raise ValueError(
- f"Input files ({len(in_file)}) not matched "
- f"by interpolated patterns ({len(dest_files)})."
+ f'Input files ({len(in_file)}) not matched '
+ f'by interpolated patterns ({len(dest_files)}).'
)
for i, (orig_file, dest_file) in enumerate(zip(in_file, dest_files)):
out_file = out_path / dest_file
out_file.parent.mkdir(exist_ok=True, parents=True)
- self._results["out_file"].append(str(out_file))
- self._results["compression"].append(str(dest_file).endswith(".gz"))
+ self._results['out_file'].append(str(out_file))
+ self._results['compression'].append(str(dest_file).endswith('.gz'))
# An odd but possible case is that an input file is in the location of
# the output and we have made no changes to it.
@@ -1176,39 +1165,37 @@ def _run_interface(self, runtime):
if self.inputs.check_hdr:
hdr = nii.header
curr_units = tuple(
- [None if u == "unknown" else u for u in hdr.get_xyzt_units()]
+ [None if u == 'unknown' else u for u in hdr.get_xyzt_units()]
)
- curr_codes = (int(hdr["qform_code"]), int(hdr["sform_code"]))
+ curr_codes = (int(hdr['qform_code']), int(hdr['sform_code']))
# Default to mm, use sec if data type is bold
units = (
- curr_units[0] or "mm",
- "sec" if out_entities["suffix"] == "bold" else None,
+ curr_units[0] or 'mm',
+ 'sec' if out_entities['suffix'] == 'bold' else None,
)
xcodes = (1, 1) # Derivative in its original scanner space
if self.inputs.space:
- xcodes = (
- (4, 4) if self.inputs.space in self._standard_spaces else (2, 2)
- )
+ xcodes = (4, 4) if self.inputs.space in self._standard_spaces else (2, 2)
curr_zooms = zooms = hdr.get_zooms()
- if "RepetitionTime" in self.inputs.get():
+ if 'RepetitionTime' in self.inputs.get():
zooms = curr_zooms[:3] + (self.inputs.RepetitionTime,)
if (curr_codes, curr_units, curr_zooms) != (xcodes, units, zooms):
- self._results["fixed_hdr"][i] = True
+ self._results['fixed_hdr'][i] = True
new_header = hdr.copy()
new_header.set_qform(nii.affine, xcodes[0])
new_header.set_sform(nii.affine, xcodes[1])
new_header.set_xyzt_units(*units)
new_header.set_zooms(zooms)
- if data_dtype == "source": # match source dtype
+ if data_dtype == 'source': # match source dtype
try:
data_dtype = nb.load(self.inputs.source_file[0]).get_data_dtype()
except Exception:
LOGGER.warning(
- f"Could not get data type of file {self.inputs.source_file[0]}"
+ f'Could not get data type of file {self.inputs.source_file[0]}'
)
data_dtype = None
@@ -1217,7 +1204,7 @@ def _run_interface(self, runtime):
orig_dtype = nii.get_data_dtype()
if orig_dtype != data_dtype:
LOGGER.warning(
- f"Changing {out_file} dtype from {orig_dtype} to {data_dtype}"
+ f'Changing {out_file} dtype from {orig_dtype} to {data_dtype}'
)
# coerce dataobj to new data dtype
if np.issubdtype(data_dtype, np.integer):
@@ -1241,33 +1228,27 @@ def _run_interface(self, runtime):
else:
# Without this, we would be writing nans
# This is our punishment for hacking around nibabel defaults
- new_header.set_slope_inter(slope=1., inter=0.)
+ new_header.set_slope_inter(slope=1.0, inter=0.0)
unsafe_write_nifti_header_and_data(
- fname=out_file,
- header=new_header,
- data=new_data
+ fname=out_file, header=new_header, data=new_data
)
del orig_img
- if len(self._results["out_file"]) == 1:
+ if len(self._results['out_file']) == 1:
meta_fields = self.inputs.copyable_trait_names()
self._metadata.update(
- {
- k: getattr(self.inputs, k)
- for k in meta_fields
- if k not in self._static_traits
- }
+ {k: getattr(self.inputs, k) for k in meta_fields if k not in self._static_traits}
)
if self._metadata:
sidecar = out_file.parent / f"{out_file.name.split('.', 1)[0]}.json"
unlink(sidecar, missing_ok=True)
sidecar.write_text(dumps(self._metadata, sort_keys=True, indent=2))
- self._results["out_meta"] = str(sidecar)
+ self._results['out_meta'] = str(sidecar)
return runtime
class _ReadSidecarJSONInputSpec(_BIDSBaseInputSpec):
- in_file = File(exists=True, mandatory=True, desc="the input nifti file")
+ in_file = File(exists=True, mandatory=True, desc='the input nifti file')
class _ReadSidecarJSONOutputSpec(_BIDSInfoOutputSpec):
@@ -1342,57 +1323,49 @@ def _run_interface(self, runtime):
self.inputs.in_file,
self.layout,
self.inputs.bids_validate,
- database_path=(
- self.inputs.index_db if isdefined(self.inputs.index_db)
- else None
- )
+ database_path=(self.inputs.index_db if isdefined(self.inputs.index_db) else None),
)
# Fill in BIDS entities of the output ("*_id")
output_keys = list(_BIDSInfoOutputSpec().get().keys())
params = self.layout.parse_file_entities(self.inputs.in_file)
- self._results = {
- key: params.get(key.split("_")[0], Undefined) for key in output_keys
- }
+ self._results = {key: params.get(key.split('_')[0], Undefined) for key in output_keys}
# Fill in metadata
metadata = self.layout.get_metadata(self.inputs.in_file)
- self._results["out_dict"] = metadata
+ self._results['out_dict'] = metadata
# Set dynamic outputs if fields input is present
for fname in self._fields:
if not self._undef_fields and fname not in metadata:
raise KeyError(
- 'Metadata field "%s" not found for file %s'
- % (fname, self.inputs.in_file)
+ 'Metadata field "%s" not found for file %s' % (fname, self.inputs.in_file)
)
self._results[fname] = metadata.get(fname, Undefined)
return runtime
class _BIDSFreeSurferDirInputSpec(BaseInterfaceInputSpec):
- derivatives = Directory(
- exists=True, mandatory=True, desc="BIDS derivatives directory"
- )
+ derivatives = Directory(exists=True, mandatory=True, desc='BIDS derivatives directory')
freesurfer_home = Directory(
- exists=True, mandatory=True, desc="FreeSurfer installation directory"
+ exists=True, mandatory=True, desc='FreeSurfer installation directory'
)
subjects_dir = traits.Either(
traits.Str(),
Directory(),
- default="freesurfer",
+ default='freesurfer',
usedefault=True,
- desc="Name of FreeSurfer subjects directory",
+ desc='Name of FreeSurfer subjects directory',
)
- spaces = traits.List(traits.Str, desc="Set of output spaces to prepare")
+ spaces = traits.List(traits.Str, desc='Set of output spaces to prepare')
overwrite_fsaverage = traits.Bool(
- False, usedefault=True, desc="Overwrite fsaverage directories, if present"
+ False, usedefault=True, desc='Overwrite fsaverage directories, if present'
)
- minimum_fs_version = traits.Enum("7.0.0", desc="Minimum FreeSurfer version for compatibility")
+ minimum_fs_version = traits.Enum('7.0.0', desc='Minimum FreeSurfer version for compatibility')
class _BIDSFreeSurferDirOutputSpec(TraitedSpec):
- subjects_dir = traits.Directory(exists=True, desc="FreeSurfer subjects directory")
+ subjects_dir = traits.Directory(exists=True, desc='FreeSurfer subjects directory')
class BIDSFreeSurferDir(SimpleInterface):
@@ -1425,9 +1398,9 @@ def _run_interface(self, runtime):
if not subjects_dir.is_absolute():
subjects_dir = Path(self.inputs.derivatives) / subjects_dir
subjects_dir.mkdir(parents=True, exist_ok=True)
- self._results["subjects_dir"] = str(subjects_dir)
+ self._results['subjects_dir'] = str(subjects_dir)
- orig_subjects_dir = Path(self.inputs.freesurfer_home) / "subjects"
+ orig_subjects_dir = Path(self.inputs.freesurfer_home) / 'subjects'
# Source is target, so just quit
if subjects_dir == orig_subjects_dir:
@@ -1435,12 +1408,12 @@ def _run_interface(self, runtime):
spaces = list(self.inputs.spaces)
# Always copy fsaverage, for proper recon-all functionality
- if "fsaverage" not in spaces:
- spaces.append("fsaverage")
+ if 'fsaverage' not in spaces:
+ spaces.append('fsaverage')
for space in spaces:
# Skip non-freesurfer spaces and fsnative
- if not space.startswith("fsaverage"):
+ if not space.startswith('fsaverage'):
continue
source = orig_subjects_dir / space
dest = subjects_dir / space
@@ -1455,7 +1428,7 @@ def _run_interface(self, runtime):
if (
space == 'fsaverage'
and dest.exists()
- and self.inputs.minimum_fs_version == "7.0.0"
+ and self.inputs.minimum_fs_version == '7.0.0'
):
label = dest / 'label' / 'rh.FG1.mpm.vpnl.label' # new in FS7
if not label.exists():
@@ -1475,8 +1448,8 @@ def _run_interface(self, runtime):
shutil.copytree(source, dest, copy_function=shutil.copy)
except FileExistsError:
LOGGER.warning(
- "%s exists; if multiple jobs are running in parallel"
- ", this can be safely ignored",
+ '%s exists; if multiple jobs are running in parallel'
+ ', this can be safely ignored',
dest,
)
@@ -1506,11 +1479,11 @@ def _get_tf_resolution(space: str, resolution: str) -> str:
if r in resolutions:
res_meta = resolutions[r]
if res_meta is None:
- return "Unknown"
+ return 'Unknown'
def _fmt_xyz(coords: list) -> str:
- xyz = "x".join([str(c) for c in coords])
- return f"{xyz} mm^3"
+ xyz = 'x'.join([str(c) for c in coords])
+ return f'{xyz} mm^3'
return (
f"Template {space} ({_fmt_xyz(res_meta['zooms'])}),"
diff --git a/niworkflows/interfaces/bold.py b/niworkflows/interfaces/bold.py
index ce373115c5a..a0a494320c3 100644
--- a/niworkflows/interfaces/bold.py
+++ b/niworkflows/interfaces/bold.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Utilities for BOLD fMRI imaging."""
+
import numpy as np
import nibabel as nb
from nipype import logging
@@ -32,34 +33,35 @@
File,
)
-LOGGER = logging.getLogger("nipype.interface")
+LOGGER = logging.getLogger('nipype.interface')
class _NonsteadyStatesDetectorInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="BOLD fMRI timeseries")
- nonnegative = traits.Bool(True, usedefault=True,
- desc="whether image voxels must be nonnegative")
+ in_file = File(exists=True, mandatory=True, desc='BOLD fMRI timeseries')
+ nonnegative = traits.Bool(
+ True, usedefault=True, desc='whether image voxels must be nonnegative'
+ )
n_volumes = traits.Range(
value=40,
low=10,
high=200,
usedefault=True,
- desc="drop volumes in 4D image beyond this timepoint",
+ desc='drop volumes in 4D image beyond this timepoint',
)
zero_dummy_masked = traits.Range(
value=20,
low=2,
high=40,
usedefault=True,
- desc="number of timepoints to average when the number of dummies is zero"
+ desc='number of timepoints to average when the number of dummies is zero',
)
class _NonsteadyStatesDetectorOutputSpec(TraitedSpec):
t_mask = traits.List(
- traits.Bool, desc="list of nonsteady-states (True) and stable (False) volumes"
+ traits.Bool, desc='list of nonsteady-states (True) and stable (False) volumes'
)
- n_dummy = traits.Int(desc="number of volumes identified as nonsteady states")
+ n_dummy = traits.Int(desc='number of volumes identified as nonsteady states')
class NonsteadyStatesDetector(SimpleInterface):
@@ -75,28 +77,28 @@ def _run_interface(self, runtime):
t_mask = np.zeros((ntotal,), dtype=bool)
if ntotal == 1:
- self._results["t_mask"] = [True]
- self._results["n_dummy"] = 1
+ self._results['t_mask'] = [True]
+ self._results['n_dummy'] = 1
return runtime
from nipype.algorithms.confounds import is_outlier
- data = img.get_fdata(dtype="float32")[..., :self.inputs.n_volumes]
+ data = img.get_fdata(dtype='float32')[..., : self.inputs.n_volumes]
# Data can come with outliers showing very high numbers - preemptively prune
data = np.clip(
data,
a_min=0.0 if self.inputs.nonnegative else np.percentile(data, 0.2),
a_max=np.percentile(data, 99.8),
)
- self._results["n_dummy"] = is_outlier(np.mean(data, axis=(0, 1, 2)))
+ self._results['n_dummy'] = is_outlier(np.mean(data, axis=(0, 1, 2)))
start = 0
- stop = self._results["n_dummy"]
+ stop = self._results['n_dummy']
if stop < 2:
stop = min(ntotal, self.inputs.n_volumes)
start = max(0, stop - self.inputs.zero_dummy_masked)
t_mask[start:stop] = True
- self._results["t_mask"] = t_mask.tolist()
+ self._results['t_mask'] = t_mask.tolist()
return runtime
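
The reformatting does not change the masking logic. A runnable sketch of the two branches, using the defaults shown above (n_volumes=40, zero_dummy_masked=20) on a hypothetical 200-volume series:

    import numpy as np

    ntotal, n_volumes, zero_dummy_masked = 200, 40, 20
    t_mask = np.zeros(ntotal, dtype=bool)
    n_dummy = 0                 # suppose is_outlier found no nonsteady volumes
    start, stop = 0, n_dummy
    if stop < 2:                # fall back to a late, stable window
        stop = min(ntotal, n_volumes)              # 40
        start = max(0, stop - zero_dummy_masked)   # 20
    t_mask[start:stop] = True   # volumes 20..39 get averaged
    # with n_dummy = 3 instead, t_mask[0:3] would mark the initial dummies
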
diff --git a/niworkflows/interfaces/cifti.py b/niworkflows/interfaces/cifti.py
index f7928029ccf..95ffd738f3a 100644
--- a/niworkflows/interfaces/cifti.py
+++ b/niworkflows/interfaces/cifti.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Handling connectivity: combines FreeSurfer surfaces with subcortical volumes."""
+
from pathlib import Path
import json
import typing
@@ -45,70 +46,68 @@
CIFTI_STRUCT_WITH_LABELS = { # CIFTI structures with corresponding labels
# SURFACES
- "CIFTI_STRUCTURE_CORTEX_LEFT": None,
- "CIFTI_STRUCTURE_CORTEX_RIGHT": None,
+ 'CIFTI_STRUCTURE_CORTEX_LEFT': None,
+ 'CIFTI_STRUCTURE_CORTEX_RIGHT': None,
# SUBCORTICAL
- "CIFTI_STRUCTURE_ACCUMBENS_LEFT": (26,),
- "CIFTI_STRUCTURE_ACCUMBENS_RIGHT": (58,),
- "CIFTI_STRUCTURE_AMYGDALA_LEFT": (18,),
- "CIFTI_STRUCTURE_AMYGDALA_RIGHT": (54,),
- "CIFTI_STRUCTURE_BRAIN_STEM": (16,),
- "CIFTI_STRUCTURE_CAUDATE_LEFT": (11,),
- "CIFTI_STRUCTURE_CAUDATE_RIGHT": (50,),
- "CIFTI_STRUCTURE_CEREBELLUM_LEFT": (8,), # HCP MNI152
- "CIFTI_STRUCTURE_CEREBELLUM_RIGHT": (47,), # HCP MNI152
- "CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_LEFT": (28,),
- "CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_RIGHT": (60,),
- "CIFTI_STRUCTURE_HIPPOCAMPUS_LEFT": (17,),
- "CIFTI_STRUCTURE_HIPPOCAMPUS_RIGHT": (53,),
- "CIFTI_STRUCTURE_PALLIDUM_LEFT": (13,),
- "CIFTI_STRUCTURE_PALLIDUM_RIGHT": (52,),
- "CIFTI_STRUCTURE_PUTAMEN_LEFT": (12,),
- "CIFTI_STRUCTURE_PUTAMEN_RIGHT": (51,),
- "CIFTI_STRUCTURE_THALAMUS_LEFT": (10,),
- "CIFTI_STRUCTURE_THALAMUS_RIGHT": (49,),
+ 'CIFTI_STRUCTURE_ACCUMBENS_LEFT': (26,),
+ 'CIFTI_STRUCTURE_ACCUMBENS_RIGHT': (58,),
+ 'CIFTI_STRUCTURE_AMYGDALA_LEFT': (18,),
+ 'CIFTI_STRUCTURE_AMYGDALA_RIGHT': (54,),
+ 'CIFTI_STRUCTURE_BRAIN_STEM': (16,),
+ 'CIFTI_STRUCTURE_CAUDATE_LEFT': (11,),
+ 'CIFTI_STRUCTURE_CAUDATE_RIGHT': (50,),
+ 'CIFTI_STRUCTURE_CEREBELLUM_LEFT': (8,), # HCP MNI152
+ 'CIFTI_STRUCTURE_CEREBELLUM_RIGHT': (47,), # HCP MNI152
+ 'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_LEFT': (28,),
+ 'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_RIGHT': (60,),
+ 'CIFTI_STRUCTURE_HIPPOCAMPUS_LEFT': (17,),
+ 'CIFTI_STRUCTURE_HIPPOCAMPUS_RIGHT': (53,),
+ 'CIFTI_STRUCTURE_PALLIDUM_LEFT': (13,),
+ 'CIFTI_STRUCTURE_PALLIDUM_RIGHT': (52,),
+ 'CIFTI_STRUCTURE_PUTAMEN_LEFT': (12,),
+ 'CIFTI_STRUCTURE_PUTAMEN_RIGHT': (51,),
+ 'CIFTI_STRUCTURE_THALAMUS_LEFT': (10,),
+ 'CIFTI_STRUCTURE_THALAMUS_RIGHT': (49,),
}
class _GenerateCiftiInputSpec(BaseInterfaceInputSpec):
- bold_file = File(mandatory=True, exists=True, desc="input BOLD file")
+ bold_file = File(mandatory=True, exists=True, desc='input BOLD file')
volume_target = traits.Enum(
- "MNI152NLin6Asym",
+ 'MNI152NLin6Asym',
usedefault=True,
- desc="CIFTI volumetric output space",
+ desc='CIFTI volumetric output space',
)
surface_target = traits.Enum(
- "fsLR",
+ 'fsLR',
usedefault=True,
- desc="CIFTI surface target space",
- )
- grayordinates = traits.Enum(
- "91k", "170k", usedefault=True, desc="Final CIFTI grayordinates"
+ desc='CIFTI surface target space',
)
- TR = traits.Float(mandatory=True, desc="Repetition time")
+ grayordinates = traits.Enum('91k', '170k', usedefault=True, desc='Final CIFTI grayordinates')
+ TR = traits.Float(mandatory=True, desc='Repetition time')
surface_bolds = traits.List(
File(exists=True),
mandatory=True,
- desc="list of surface BOLD GIFTI files (length 2 with order [L,R])",
+ desc='list of surface BOLD GIFTI files (length 2 with order [L,R])',
)
class _GenerateCiftiOutputSpec(TraitedSpec):
- out_file = File(desc="generated CIFTI file")
- out_metadata = File(desc="CIFTI metadata JSON")
+ out_file = File(desc='generated CIFTI file')
+ out_metadata = File(desc='CIFTI metadata JSON')
class GenerateCifti(SimpleInterface):
"""
Generate an HCP-style CIFTI image from a BOLD file in target spaces.
"""
+
input_spec = _GenerateCiftiInputSpec
output_spec = _GenerateCiftiOutputSpec
def _run_interface(self, runtime):
-
surface_labels, volume_labels, metadata = _prepare_cifti(self.inputs.grayordinates)
- self._results["out_file"] = _create_cifti_image(
+ self._results['out_file'] = _create_cifti_image(
self.inputs.bold_file,
volume_labels,
self.inputs.surface_bolds,
@@ -116,22 +115,22 @@ def _run_interface(self, runtime):
self.inputs.TR,
metadata,
)
- metadata_file = Path("bold.dtseries.json").absolute()
+ metadata_file = Path('bold.dtseries.json').absolute()
metadata_file.write_text(json.dumps(metadata, indent=2))
- self._results["out_metadata"] = str(metadata_file)
+ self._results['out_metadata'] = str(metadata_file)
return runtime
class _CiftiNameSourceInputSpec(BaseInterfaceInputSpec):
space = traits.Str(
mandatory=True,
- desc="the space identifier",
+ desc='the space identifier',
)
- density = traits.Str(desc="density label")
+ density = traits.Str(desc='density label')
class _CiftiNameSourceOutputSpec(TraitedSpec):
- out_name = traits.Str(desc="(partial) filename formatted according to template")
+ out_name = traits.Str(desc='(partial) filename formatted according to template')
class CiftiNameSource(SimpleInterface):
@@ -162,7 +161,7 @@ def _run_interface(self, runtime):
entities.append(('den', self.inputs.density))
out_name = '_'.join([f'{k}-{v}' for k, v in entities] + ['bold.dtseries'])
- self._results["out_name"] = out_name
+ self._results['out_name'] = out_name
return runtime
@@ -199,21 +198,11 @@ def _prepare_cifti(grayordinates: str) -> typing.Tuple[list, str, dict]:
"""
grayord_key = {
- "91k": {
- "surface-den": "32k",
- "tf-res": "02",
- "grayords": "91,282",
- "res-mm": "2mm"
- },
- "170k": {
- "surface-den": "59k",
- "tf-res": "06",
- "grayords": "170,494",
- "res-mm": "1.6mm"
- }
+ '91k': {'surface-den': '32k', 'tf-res': '02', 'grayords': '91,282', 'res-mm': '2mm'},
+ '170k': {'surface-den': '59k', 'tf-res': '06', 'grayords': '170,494', 'res-mm': '1.6mm'},
}
if grayordinates not in grayord_key:
- raise NotImplementedError("Grayordinates {grayordinates} is not supported.")
+ raise NotImplementedError(f'Grayordinates {grayordinates} is not supported.')
tf_vol_res = grayord_key[grayordinates]['tf-res']
total_grayords = grayord_key[grayordinates]['grayords']
@@ -223,41 +212,37 @@ def _prepare_cifti(grayordinates: str) -> typing.Tuple[list, str, dict]:
surface_labels = [
str(
tf.get(
- "fsLR",
+ 'fsLR',
density=surface_density,
hemi=hemi,
- desc="nomedialwall",
- suffix="dparc",
+ desc='nomedialwall',
+ suffix='dparc',
raise_empty=True,
)
)
- for hemi in ("L", "R")
+ for hemi in ('L', 'R')
]
volume_label = str(
tf.get(
- "MNI152NLin6Asym",
- suffix="dseg",
- atlas="HCP",
- resolution=tf_vol_res,
- raise_empty=True
+ 'MNI152NLin6Asym', suffix='dseg', atlas='HCP', resolution=tf_vol_res, raise_empty=True
)
)
- tf_url = "https://templateflow.s3.amazonaws.com"
- volume_url = f"{tf_url}/tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-{tf_vol_res}_T1w.nii.gz"
+ tf_url = 'https://templateflow.s3.amazonaws.com'
+ volume_url = f'{tf_url}/tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-{tf_vol_res}_T1w.nii.gz'
surfaces_url = ( # midthickness is the default, but varying levels of inflation are all valid
- f"{tf_url}/tpl-fsLR/tpl-fsLR_den-{surface_density}_hemi-%s_midthickness.surf.gii"
+ f'{tf_url}/tpl-fsLR/tpl-fsLR_den-{surface_density}_hemi-%s_midthickness.surf.gii'
)
metadata = {
- "Density": (
- f"{total_grayords} grayordinates corresponding to all of the grey matter sampled at a "
- f"{res_mm} average vertex spacing on the surface and as {res_mm} voxels subcortically"
+ 'Density': (
+ f'{total_grayords} grayordinates corresponding to all of the grey matter sampled at a '
+ f'{res_mm} average vertex spacing on the surface and as {res_mm} voxels subcortically'
),
- "SpatialReference": {
- "VolumeReference": volume_url,
- "CIFTI_STRUCTURE_LEFT_CORTEX": surfaces_url % "L",
- "CIFTI_STRUCTURE_RIGHT_CORTEX": surfaces_url % "R",
- }
+ 'SpatialReference': {
+ 'VolumeReference': volume_url,
+ 'CIFTI_STRUCTURE_LEFT_CORTEX': surfaces_url % 'L',
+ 'CIFTI_STRUCTURE_RIGHT_CORTEX': surfaces_url % 'R',
+ },
}
return surface_labels, volume_label, metadata
@@ -296,31 +281,31 @@ def _create_cifti_image(
bold_img = nb.load(bold_file)
label_img = nb.load(volume_label)
if label_img.shape != bold_img.shape[:3]:
- warnings.warn("Resampling bold volume to match label dimensions")
+ warnings.warn('Resampling bold volume to match label dimensions')
bold_img = resample_to_img(bold_img, label_img)
# ensure images match HCP orientation (LAS)
- bold_img = reorient_image(bold_img, target_ornt="LAS")
- label_img = reorient_image(label_img, target_ornt="LAS")
+ bold_img = reorient_image(bold_img, target_ornt='LAS')
+ label_img = reorient_image(label_img, target_ornt='LAS')
- bold_data = bold_img.get_fdata(dtype="float32")
+ bold_data = bold_img.get_fdata(dtype='float32')
timepoints = bold_img.shape[3]
- label_data = np.asanyarray(label_img.dataobj).astype("int16")
+ label_data = np.asanyarray(label_img.dataobj).astype('int16')
# Create brain models
idx_offset = 0
brainmodels = []
- bm_ts = np.empty((timepoints, 0), dtype="float32")
+ bm_ts = np.empty((timepoints, 0), dtype='float32')
for structure, labels in CIFTI_STRUCT_WITH_LABELS.items():
if labels is None: # surface model
- model_type = "CIFTI_MODEL_TYPE_SURFACE"
+ model_type = 'CIFTI_MODEL_TYPE_SURFACE'
# use the corresponding annotation
- hemi = structure.split("_")[-1]
+ hemi = structure.split('_')[-1]
# currently only supports L/R cortex
- surf_ts = nb.load(bold_surfs[hemi == "RIGHT"])
+ surf_ts = nb.load(bold_surfs[hemi == 'RIGHT'])
surf_verts = len(surf_ts.darrays[0].data)
- labels = nb.load(surface_labels[hemi == "RIGHT"])
+ labels = nb.load(surface_labels[hemi == 'RIGHT'])
medial = np.nonzero(labels.darrays[0].data)[0]
# extract values across volumes
ts = np.array([tsarr.data[medial] for tsarr in surf_ts.darrays])
@@ -337,7 +322,7 @@ def _create_cifti_image(
idx_offset += len(vert_idx)
bm_ts = np.column_stack((bm_ts, ts))
else:
- model_type = "CIFTI_MODEL_TYPE_VOXELS"
+ model_type = 'CIFTI_MODEL_TYPE_VOXELS'
vox = []
ts = []
for label in labels:
@@ -374,21 +359,21 @@ def _create_cifti_image(
# generate Matrix information
series_map = ci.Cifti2MatrixIndicesMap(
(0,),
- "CIFTI_INDEX_TYPE_SERIES",
+ 'CIFTI_INDEX_TYPE_SERIES',
number_of_series_points=timepoints,
series_exponent=0,
series_start=0.0,
series_step=tr,
- series_unit="SECOND",
+ series_unit='SECOND',
)
geometry_map = ci.Cifti2MatrixIndicesMap(
- (1,), "CIFTI_INDEX_TYPE_BRAIN_MODELS", maps=brainmodels
+ (1,), 'CIFTI_INDEX_TYPE_BRAIN_MODELS', maps=brainmodels
)
# provide some metadata to CIFTI matrix
if not metadata:
metadata = {
- "surface": "fsLR",
- "volume": "MNI152NLin6Asym",
+ 'surface': 'fsLR',
+ 'volume': 'MNI152NLin6Asym',
}
# generate and save CIFTI image
matrix = ci.Cifti2Matrix()
@@ -398,8 +383,8 @@ def _create_cifti_image(
hdr = ci.Cifti2Header(matrix)
img = ci.Cifti2Image(dataobj=bm_ts, header=hdr)
img.set_data_dtype(bold_img.get_data_dtype())
- img.nifti_header.set_intent("NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES")
+ img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES')
- out_file = "{}.dtseries.nii".format(split_filename(bold_file)[1])
+ out_file = '{}.dtseries.nii'.format(split_filename(bold_file)[1])
ci.save(img, out_file)
return Path.cwd() / out_file
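
A note on the CIFTI-2 plumbing reformatted above: a dense time series ('.dtseries.nii') header is a Cifti2Matrix that maps axis 0 to a SERIES index (time) and axis 1 to BRAIN_MODELS (grayordinates). A minimal sketch using only the nibabel.cifti2 calls visible in this diff; the empty `maps` list is a placeholder for the per-structure Cifti2BrainModel entries that _create_cifti_image assembles:

    import numpy as np
    import nibabel.cifti2 as ci

    timepoints, tr = 10, 2.0
    series_map = ci.Cifti2MatrixIndicesMap(
        (0,),
        'CIFTI_INDEX_TYPE_SERIES',
        number_of_series_points=timepoints,
        series_exponent=0,
        series_start=0.0,
        series_step=tr,
        series_unit='SECOND',
    )
    # Placeholder: the interface appends one Cifti2BrainModel per structure here.
    geometry_map = ci.Cifti2MatrixIndicesMap((1,), 'CIFTI_INDEX_TYPE_BRAIN_MODELS', maps=[])
    matrix = ci.Cifti2Matrix()
    matrix.append(series_map)
    matrix.append(geometry_map)
    img = ci.Cifti2Image(dataobj=np.zeros((timepoints, 0), dtype='float32'),
                         header=ci.Cifti2Header(matrix))
    img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES')
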
diff --git a/niworkflows/interfaces/confounds.py b/niworkflows/interfaces/confounds.py
index 0a7e7c1ac7b..32ab1529d76 100644
--- a/niworkflows/interfaces/confounds.py
+++ b/niworkflows/interfaces/confounds.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Select terms for a confound model, and compute any requisite expansions."""
+
import os
import re
import numpy as np
@@ -41,14 +42,12 @@
class _NormalizeMotionParamsInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="the input parameters file")
- format = traits.Enum(
- "FSL", "AFNI", "FSFAST", "NIPY", usedefault=True, desc="output format"
- )
+ in_file = File(exists=True, mandatory=True, desc='the input parameters file')
+ format = traits.Enum('FSL', 'AFNI', 'FSFAST', 'NIPY', usedefault=True, desc='output format')
class _NormalizeMotionParamsOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="written file path")
+ out_file = File(exists=True, desc='written file path')
class NormalizeMotionParams(SimpleInterface):
@@ -62,8 +61,8 @@ def _run_interface(self, runtime):
mpars = np.apply_along_axis(
func1d=normalize_mc_params, axis=1, arr=mpars, source=self.inputs.format
)
- self._results["out_file"] = os.path.join(runtime.cwd, "motion_params.txt")
- np.savetxt(self._results["out_file"], mpars)
+ self._results['out_file'] = os.path.join(runtime.cwd, 'motion_params.txt')
+ np.savetxt(self._results['out_file'], mpars)
return runtime
@@ -71,11 +70,11 @@ class _ExpandModelInputSpec(BaseInterfaceInputSpec):
confounds_file = File(
exists=True,
mandatory=True,
- desc="TSV containing confound time series for "
- "expansion according to the specified formula.",
+ desc='TSV containing confound time series for '
+ 'expansion according to the specified formula.',
)
model_formula = traits.Str(
- "(dd1(rps + wm + csf + gsr))^^2 + others",
+ '(dd1(rps + wm + csf + gsr))^^2 + others',
usedefault=True,
desc="""\
Formula for generating model expansions. By default, the
@@ -104,11 +103,11 @@ class _ExpandModelInputSpec(BaseInterfaceInputSpec):
""",
)
- output_file = File(desc="Output path")
+ output_file = File(desc='Output path')
class _ExpandModelOutputSpec(TraitedSpec):
- confounds_file = File(exists=True, desc="Output confounds file")
+ confounds_file = File(exists=True, desc='Output confounds file')
class ExpandModel(SimpleInterface):
@@ -123,19 +122,19 @@ def _run_interface(self, runtime):
else:
out_file = fname_presuffix(
self.inputs.confounds_file,
- suffix="_expansion.tsv",
+ suffix='_expansion.tsv',
newpath=runtime.cwd,
use_ext=False,
)
- confounds_data = pd.read_csv(self.inputs.confounds_file, sep="\t")
+ confounds_data = pd.read_csv(self.inputs.confounds_file, sep='\t')
_, confounds_data = parse_formula(
model_formula=self.inputs.model_formula,
parent_data=confounds_data,
unscramble=True,
)
- confounds_data.to_csv(out_file, sep="\t", index=False, na_rep="n/a")
- self._results["confounds_file"] = out_file
+ confounds_data.to_csv(out_file, sep='\t', index=False, na_rep='n/a')
+ self._results['confounds_file'] = out_file
return runtime
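
For context, the default formula string above is the mini-language implemented by the parser later in this file: `rps`, `wm`, and `gsr` are shorthands expanded against the column names, `dd1(...)` adds first temporal derivatives, `^^2` additionally squares every resulting column, and `others` sweeps in any columns the formula did not mention. A usage sketch with illustrative fMRIPrep-style column names (toy data, not fixtures from this repository):

    import numpy as np
    import pandas as pd
    from niworkflows.interfaces.confounds import parse_formula

    rng = np.random.default_rng(0)
    cols = ['trans_x', 'trans_y', 'trans_z', 'rot_x', 'rot_y', 'rot_z',
            'white_matter', 'csf', 'global_signal', 'framewise_displacement']
    confounds = pd.DataFrame(rng.normal(size=(20, len(cols))), columns=cols)
    variables, expanded = parse_formula(
        model_formula='(dd1(rps + wm + csf + gsr))^^2 + others',
        parent_data=confounds,
        unscramble=True,
    )
    # Nine base regressors expand to 36 columns (each term, its derivative,
    # and the squares of both); framewise_displacement rides along via `others`.
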
@@ -143,52 +142,50 @@ class _SpikeRegressorsInputSpec(BaseInterfaceInputSpec):
confounds_file = File(
exists=True,
mandatory=True,
- desc="TSV containing criterion time series (e.g., framewise "
- "displacement, DVARS) to be used for creating spike regressors.",
+ desc='TSV containing criterion time series (e.g., framewise '
+ 'displacement, DVARS) to be used for creating spike regressors.',
)
fd_thresh = traits.Float(
0.5,
usedefault=True,
- desc="Minimum framewise displacement threshold for flagging a frame "
- "as a spike.",
+ desc='Minimum framewise displacement threshold for flagging a frame as a spike.',
)
dvars_thresh = traits.Float(
1.5,
usedefault=True,
- desc="Minimum standardised DVARS threshold for flagging a frame as a spike.",
+ desc='Minimum standardised DVARS threshold for flagging a frame as a spike.',
)
header_prefix = traits.Str(
- "motion_outlier",
+ 'motion_outlier',
usedefault=True,
- desc="Prefix for spikes in the output TSV header",
+ desc='Prefix for spikes in the output TSV header',
)
lags = traits.List(
traits.Int,
value=[0],
usedefault=True,
- desc="Relative indices of lagging frames to flag for each flagged frame",
+ desc='Relative indices of lagging frames to flag for each flagged frame',
)
minimum_contiguous = traits.Either(
None,
traits.Int,
usedefault=True,
- desc="Minimum number of contiguous volumes required to avoid "
- "flagging as a spike",
+ desc='Minimum number of contiguous volumes required to avoid flagging as a spike',
)
concatenate = traits.Bool(
True,
usedefault=True,
- desc="Indicates whether to concatenate spikes to existing confounds "
- "or return spikes only",
+ desc='Indicates whether to concatenate spikes to existing confounds '
+ 'or return spikes only',
)
output_format = traits.Enum(
- "spikes", "mask", usedefault=True, desc="Format of output (spikes or mask)"
+ 'spikes', 'mask', usedefault=True, desc='Format of output (spikes or mask)'
)
- output_file = File(desc="Output path")
+ output_file = File(desc='Output path')
class _SpikeRegressorsOutputSpec(TraitedSpec):
- confounds_file = File(exists=True, desc="Output confounds file")
+ confounds_file = File(exists=True, desc='Output confounds file')
class SpikeRegressors(SimpleInterface):
@@ -203,17 +200,17 @@ def _run_interface(self, runtime):
else:
out_file = fname_presuffix(
self.inputs.confounds_file,
- suffix="_desc-motion_outliers.tsv",
+ suffix='_desc-motion_outliers.tsv',
newpath=runtime.cwd,
use_ext=False,
)
spike_criteria = {
- "framewise_displacement": (">", self.inputs.fd_thresh),
- "std_dvars": (">", self.inputs.dvars_thresh),
+ 'framewise_displacement': ('>', self.inputs.fd_thresh),
+ 'std_dvars': ('>', self.inputs.dvars_thresh),
}
- confounds_data = pd.read_csv(self.inputs.confounds_file, sep="\t")
+ confounds_data = pd.read_csv(self.inputs.confounds_file, sep='\t')
confounds_data = spike_regressors(
data=confounds_data,
criteria=spike_criteria,
@@ -223,19 +220,19 @@ def _run_interface(self, runtime):
concatenate=self.inputs.concatenate,
output=self.inputs.output_format,
)
- confounds_data.to_csv(out_file, sep="\t", index=False, na_rep="n/a")
- self._results["confounds_file"] = out_file
+ confounds_data.to_csv(out_file, sep='\t', index=False, na_rep='n/a')
+ self._results['confounds_file'] = out_file
return runtime
def spike_regressors(
data,
criteria=None,
- header_prefix="motion_outlier",
+ header_prefix='motion_outlier',
lags=None,
minimum_contiguous=None,
concatenate=True,
- output="spikes",
+ output='spikes',
):
"""
Add spike regressors to a confound/nuisance matrix.
@@ -288,13 +285,13 @@ def spike_regressors(
indices = range(data.shape[0])
lags = lags or [0]
criteria = criteria or {
- "framewise_displacement": (">", 0.5),
- "std_dvars": (">", 1.5),
+ 'framewise_displacement': ('>', 0.5),
+ 'std_dvars': ('>', 1.5),
}
for metric, (criterion, threshold) in criteria.items():
- if criterion == "<":
+ if criterion == '<':
mask[metric] = set(np.where(data[metric] < threshold)[0])
- elif criterion == ">":
+ elif criterion == '>':
mask[metric] = set(np.where(data[metric] > threshold)[0])
mask = reduce(operator.or_, mask.values())
@@ -311,7 +308,7 @@ def spike_regressors(
mask = mask | set(range(end - length, end))
mask = mask.intersection(indices)
- if output == "mask":
+ if output == 'mask':
spikes = np.zeros(data.shape[0])
spikes[list(mask)] = 1
spikes = pd.DataFrame(data=spikes, columns=[header_prefix])
@@ -319,7 +316,7 @@ def spike_regressors(
spikes = np.zeros((max(indices) + 1, len(mask)))
for i, m in enumerate(sorted(mask)):
spikes[m, i] = 1
- header = ["{:s}{:02d}".format(header_prefix, vol) for vol in range(len(mask))]
+ header = ['{:s}{:02d}'.format(header_prefix, vol) for vol in range(len(mask))]
spikes = pd.DataFrame(data=spikes, columns=header)
if concatenate:
return pd.concat((data, spikes), axis=1)
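
To make the behavior concrete, a self-contained sketch of spike_regressors with toy values (real inputs are fMRIPrep confounds TSVs). With the defaults above, a frame is flagged when FD > 0.5 or standardised DVARS > 1.5, and each flagged frame becomes a one-hot column:

    import pandas as pd
    from niworkflows.interfaces.confounds import spike_regressors

    data = pd.DataFrame({
        'framewise_displacement': [0.1, 0.9, 0.2, 0.1, 0.1],
        'std_dvars': [1.0, 2.1, 1.1, 0.9, 1.0],
    })
    out = spike_regressors(data, lags=[0, 1])
    # Frame 1 exceeds both thresholds; lags=[0, 1] also flags frame 2, so
    # motion_outlier00 and motion_outlier01 are appended to the input columns.
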
@@ -360,7 +357,7 @@ def temporal_derivatives(order, variables, data):
variables_deriv[0] = variables
order = set(order) - set([0])
for o in order:
- variables_deriv[o] = ["{}_derivative{}".format(v, o) for v in variables]
+ variables_deriv[o] = ['{}_derivative{}'.format(v, o) for v in variables]
data_deriv[o] = np.tile(np.nan, data[variables].shape)
data_deriv[o][o:, :] = np.diff(data[variables], n=o, axis=0)
variables_deriv = reduce(operator.add, variables_deriv.values())
@@ -403,7 +400,7 @@ def exponential_terms(order, variables, data):
variables_exp[1] = variables
order = set(order) - set([1])
for o in order:
- variables_exp[o] = ["{}_power{}".format(v, o) for v in variables]
+ variables_exp[o] = ['{}_power{}'.format(v, o) for v in variables]
data_exp[o] = data[variables] ** o
variables_exp = reduce(operator.add, variables_exp.values())
data_exp = pd.DataFrame(
@@ -416,7 +413,7 @@ def _order_as_range(order):
"""Convert a hyphenated string representing order for derivative or
exponential terms into a range object that can be passed as input to the
appropriate expansion function."""
- order = order.split("-")
+ order = order.split('-')
order = [int(o) for o in order]
if len(order) > 1:
order = range(order[0], (order[-1] + 1))
@@ -427,12 +424,12 @@ def _check_and_expand_exponential(expr, variables, data):
"""Check if the current operation specifies exponential expansion. ^^6
specifies all powers up to the 6th, ^5-6 the 5th and 6th powers, ^6 the
6th only."""
- if re.search(r"\^\^[0-9]+$", expr):
- order = re.compile(r"\^\^([0-9]+)$").findall(expr)
+ if re.search(r'\^\^[0-9]+$', expr):
+ order = re.compile(r'\^\^([0-9]+)$').findall(expr)
order = range(1, int(*order) + 1)
variables, data = exponential_terms(order, variables, data)
- elif re.search(r"\^[0-9]+[\-]?[0-9]*$", expr):
- order = re.compile(r"\^([0-9]+[\-]?[0-9]*)").findall(expr)
+ elif re.search(r'\^[0-9]+[\-]?[0-9]*$', expr):
+ order = re.compile(r'\^([0-9]+[\-]?[0-9]*)').findall(expr)
order = _order_as_range(*order)
variables, data = exponential_terms(order, variables, data)
return variables, data
@@ -442,12 +439,12 @@ def _check_and_expand_derivative(expr, variables, data):
"""Check if the current operation specifies a temporal derivative. dd6x
specifies all derivatives up to the 6th, d5-6x the 5th and 6th, d6x the
6th only."""
- if re.search(r"^dd[0-9]+", expr):
- order = re.compile(r"^dd([0-9]+)").findall(expr)
+ if re.search(r'^dd[0-9]+', expr):
+ order = re.compile(r'^dd([0-9]+)').findall(expr)
order = range(0, int(*order) + 1)
(variables, data) = temporal_derivatives(order, variables, data)
- elif re.search(r"^d[0-9]+[\-]?[0-9]*", expr):
- order = re.compile(r"^d([0-9]+[\-]?[0-9]*)").findall(expr)
+ elif re.search(r'^d[0-9]+[\-]?[0-9]*', expr):
+ order = re.compile(r'^d([0-9]+[\-]?[0-9]*)').findall(expr)
order = _order_as_range(*order)
(variables, data) = temporal_derivatives(order, variables, data)
return variables, data
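
The two _check_and_expand_* helpers implement the order grammar documented in their docstrings (`dd6x`: derivatives 0 through 6; `d5-6x`: 5 and 6 only; `x^^6`: powers 1 through 6; `x^5-6`: 5 and 6 only). The expansion functions they dispatch to can be exercised directly; a sketch on assumed toy data:

    import pandas as pd
    from niworkflows.interfaces.confounds import exponential_terms, temporal_derivatives

    data = pd.DataFrame({'x': [1.0, 2.0, 4.0, 7.0]})
    names, powers = exponential_terms(range(1, 3), ['x'], data)
    # names == ['x', 'x_power2']; powers['x_power2'] is x squared
    names, derivs = temporal_derivatives(range(0, 2), ['x'], data)
    # names == ['x', 'x_derivative1']; the derivative column is NaN at row 0
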
@@ -458,11 +455,11 @@ def _check_and_expand_subformula(expression, parent_data, variables, data):
where appropriate."""
grouping_depth = 0
for i, char in enumerate(expression):
- if char == "(":
+ if char == '(':
if grouping_depth == 0:
formula_delimiter = i + 1
grouping_depth += 1
- elif char == ")":
+ elif char == ')':
grouping_depth -= 1
if grouping_depth == 0:
expr = expression[formula_delimiter:i].strip()
@@ -492,9 +489,7 @@ def parse_expression(expression, parent_data):
"""
variables = None
data = None
- variables, data = _check_and_expand_subformula(
- expression, parent_data, variables, data
- )
+ variables, data = _check_and_expand_subformula(expression, parent_data, variables, data)
variables, data = _check_and_expand_exponential(expression, variables, data)
variables, data = _check_and_expand_derivative(expression, variables, data)
if variables is None:
@@ -506,53 +501,53 @@ def parse_expression(expression, parent_data):
def _get_matches_from_data(regex, variables):
matches = re.compile(regex)
- matches = " + ".join([v for v in variables if matches.match(v)])
+ matches = ' + '.join([v for v in variables if matches.match(v)])
return matches
def _get_variables_from_formula(model_formula):
symbols_to_clear = [
- " ",
- r"\(",
- r"\)",
- "dd[0-9]+",
- r"d[0-9]+[\-]?[0-9]*",
- r"\^\^[0-9]+",
- r"\^[0-9]+[\-]?[0-9]*",
+ ' ',
+ r'\(',
+ r'\)',
+ 'dd[0-9]+',
+ r'd[0-9]+[\-]?[0-9]*',
+ r'\^\^[0-9]+',
+ r'\^[0-9]+[\-]?[0-9]*',
]
for symbol in symbols_to_clear:
- model_formula = re.sub(symbol, "", model_formula)
- variables = model_formula.split("+")
+ model_formula = re.sub(symbol, '', model_formula)
+ variables = model_formula.split('+')
return variables
def _expand_shorthand(model_formula, variables):
"""Expand shorthand terms in the model formula."""
- wm = "white_matter"
- gsr = "global_signal"
- rps = "trans_x + trans_y + trans_z + rot_x + rot_y + rot_z"
- fd = "framewise_displacement"
- acc = _get_matches_from_data("a_comp_cor_[0-9]+", variables)
- tcc = _get_matches_from_data("t_comp_cor_[0-9]+", variables)
- dv = _get_matches_from_data("^std_dvars$", variables)
- dvall = _get_matches_from_data(".*dvars", variables)
- nss = _get_matches_from_data("non_steady_state_outlier[0-9]+", variables)
- spikes = _get_matches_from_data("motion_outlier[0-9]+", variables)
-
- model_formula = re.sub("wm", wm, model_formula)
- model_formula = re.sub("gsr", gsr, model_formula)
- model_formula = re.sub("rps", rps, model_formula)
- model_formula = re.sub("fd", fd, model_formula)
- model_formula = re.sub("acc", acc, model_formula)
- model_formula = re.sub("tcc", tcc, model_formula)
- model_formula = re.sub("dv", dv, model_formula)
- model_formula = re.sub("dvall", dvall, model_formula)
- model_formula = re.sub("nss", nss, model_formula)
- model_formula = re.sub("spikes", spikes, model_formula)
+ wm = 'white_matter'
+ gsr = 'global_signal'
+ rps = 'trans_x + trans_y + trans_z + rot_x + rot_y + rot_z'
+ fd = 'framewise_displacement'
+ acc = _get_matches_from_data('a_comp_cor_[0-9]+', variables)
+ tcc = _get_matches_from_data('t_comp_cor_[0-9]+', variables)
+ dv = _get_matches_from_data('^std_dvars$', variables)
+ dvall = _get_matches_from_data('.*dvars', variables)
+ nss = _get_matches_from_data('non_steady_state_outlier[0-9]+', variables)
+ spikes = _get_matches_from_data('motion_outlier[0-9]+', variables)
+
+ model_formula = re.sub('wm', wm, model_formula)
+ model_formula = re.sub('gsr', gsr, model_formula)
+ model_formula = re.sub('rps', rps, model_formula)
+ model_formula = re.sub('fd', fd, model_formula)
+ model_formula = re.sub('acc', acc, model_formula)
+ model_formula = re.sub('tcc', tcc, model_formula)
+ model_formula = re.sub('dv', dv, model_formula)
+ model_formula = re.sub('dvall', dvall, model_formula)
+ model_formula = re.sub('nss', nss, model_formula)
+ model_formula = re.sub('spikes', spikes, model_formula)
formula_variables = _get_variables_from_formula(model_formula)
- others = " + ".join(set(variables) - set(formula_variables))
- model_formula = re.sub("others", others, model_formula)
+ others = ' + '.join(set(variables) - set(formula_variables))
+ model_formula = re.sub('others', others, model_formula)
return model_formula
@@ -561,12 +556,12 @@ def _unscramble_regressor_columns(parent_data, data):
the same order as the input data with any expansion columns inserted
immediately after the originals.
"""
- matches = ["_power[0-9]+", "_derivative[0-9]+"]
+ matches = ['_power[0-9]+', '_derivative[0-9]+']
var = OrderedDict((c, deque()) for c in parent_data.columns)
for c in data.columns:
col = c
for m in matches:
- col = re.sub(m, "", col)
+ col = re.sub(m, '', col)
if col == c:
var[col].appendleft(c)
else:
@@ -629,11 +624,11 @@ def parse_formula(model_formula, parent_data, unscramble=False):
grouping_depth = 0
model_formula = _expand_shorthand(model_formula, parent_data.columns)
for i, char in enumerate(model_formula):
- if char == "(":
+ if char == '(':
grouping_depth += 1
- elif char == ")":
+ elif char == ')':
grouping_depth -= 1
- elif grouping_depth == 0 and char == "+":
+ elif grouping_depth == 0 and char == '+':
expression = model_formula[expr_delimiter:i].strip()
variables[expression] = None
data[expression] = None
@@ -642,14 +637,12 @@ def parse_formula(model_formula, parent_data, unscramble=False):
variables[expression] = None
data[expression] = None
for expression in list(variables):
- if expression[0] == "(" and expression[-1] == ")":
+ if expression[0] == '(' and expression[-1] == ')':
(variables[expression], data[expression]) = parse_formula(
expression[1:-1], parent_data
)
else:
- (variables[expression], data[expression]) = parse_expression(
- expression, parent_data
- )
+ (variables[expression], data[expression]) = parse_expression(expression, parent_data)
variables = list(set(reduce(operator.add, variables.values())))
data = pd.concat((data.values()), axis=1)
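
With unscramble=True, parse_formula hands its result to _unscramble_regressor_columns so every expansion column lands immediately after its parent rather than at the end. The effect on a toy frame:

    import pandas as pd
    from niworkflows.interfaces.confounds import parse_formula

    df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [2.0, 4.0, 8.0]})
    _, tidy = parse_formula('(a + b)^^2', df, unscramble=True)
    print(list(tidy.columns))  # ['a', 'a_power2', 'b', 'b_power2']
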
diff --git a/niworkflows/interfaces/conftest.py b/niworkflows/interfaces/conftest.py
index 9b42b4002a4..a8511d04bb2 100644
--- a/niworkflows/interfaces/conftest.py
+++ b/niworkflows/interfaces/conftest.py
@@ -19,17 +19,17 @@ def _chdir(path):
os.chdir(cwd)
-@pytest.fixture(scope="module")
+@pytest.fixture(scope='module')
def data_dir():
- return Path(__file__).parent / "tests" / "data"
+ return Path(__file__).parent / 'tests' / 'data'
@pytest.fixture(autouse=True)
def _docdir(request, tmp_path):
# Trigger ONLY for the doctests.
- doctest_plugin = request.config.pluginmanager.getplugin("doctest")
+ doctest_plugin = request.config.pluginmanager.getplugin('doctest')
if isinstance(request.node, doctest_plugin.DoctestItem):
- copytree(Path(__file__).parent / "tests" / "data", tmp_path, dirs_exist_ok=True)
+ copytree(Path(__file__).parent / 'tests' / 'data', tmp_path, dirs_exist_ok=True)
# Chdir only for the duration of the test.
with _chdir(tmp_path):
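
The _docdir fixture above copies the test data into a temporary directory and chdirs into it only while a doctest runs. The _chdir helper it leans on (defined just above this hunk) is the standard contextmanager pattern, roughly:

    import os
    from contextlib import contextmanager

    @contextmanager
    def _chdir(path):
        cwd = os.getcwd()
        os.chdir(path)
        try:
            yield
        finally:
            os.chdir(cwd)
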
diff --git a/niworkflows/interfaces/fixes.py b/niworkflows/interfaces/fixes.py
index f9aef937281..f6cc26b802a 100644
--- a/niworkflows/interfaces/fixes.py
+++ b/niworkflows/interfaces/fixes.py
@@ -43,10 +43,10 @@
class _FixTraitApplyTransformsInputSpec(ApplyTransformsInputSpec):
transforms = InputMultiObject(
traits.Either(File(exists=True), 'identity'),
- argstr="%s",
+ argstr='%s',
mandatory=True,
- desc="transform files: will be applied in reverse order. For "
- "example, the last specified transform will be applied first.",
+ desc='transform files: will be applied in reverse order. For '
+ 'example, the last specified transform will be applied first.',
)
@@ -61,14 +61,12 @@ class FixHeaderApplyTransforms(ApplyTransforms):
def _run_interface(self, runtime, correct_return_codes=(0,)):
# Run normally
- runtime = super()._run_interface(
- runtime, correct_return_codes
- )
+ runtime = super()._run_interface(runtime, correct_return_codes)
_copyxform(
self.inputs.reference_image,
- os.path.abspath(self._gen_filename("output_image")),
- message="%s (niworkflows v%s)" % (self.__class__.__name__, __version__),
+ os.path.abspath(self._gen_filename('output_image')),
+ message='%s (niworkflows v%s)' % (self.__class__.__name__, __version__),
)
return runtime
@@ -77,14 +75,14 @@ class _FixHeaderRegistrationInputSpec(_RegistrationInputSpec):
restrict_deformation = traits.List(
traits.List(traits.Range(low=0.0, high=1.0)),
desc=(
- "This option allows the user to restrict the optimization of "
- "the displacement field, translation, rigid or affine transform "
- "on a per-component basis. For example, if one wants to limit "
- "the deformation or rotation of 3-D volume to the first two "
- "dimensions, this is possible by specifying a weight vector of "
+ 'This option allows the user to restrict the optimization of '
+ 'the displacement field, translation, rigid or affine transform '
+ 'on a per-component basis. For example, if one wants to limit '
+ 'the deformation or rotation of a 3-D volume to the first two '
+ 'dimensions, this is possible by specifying a weight vector of '
"'1x1x0' for a deformation field or '1x1x0x1x1x0' for a rigid "
- "transformation. Low-dimensional restriction only works if "
- "there are no preceding transformations."
+ 'transformation. Low-dimensional restriction only works if '
+ 'there are no preceding transformations.'
),
)
@@ -100,9 +98,7 @@ class FixHeaderRegistration(Registration):
def _run_interface(self, runtime, correct_return_codes=(0,)):
# Run normally
- runtime = super()._run_interface(
- runtime, correct_return_codes
- )
+ runtime = super()._run_interface(runtime, correct_return_codes)
# Forward transform
out_file = self._get_outputfilenames(inverse=False)
@@ -110,7 +106,7 @@ def _run_interface(self, runtime, correct_return_codes=(0,)):
_copyxform(
self.inputs.fixed_image[0],
os.path.abspath(out_file),
- message="%s (niworkflows v%s)" % (self.__class__.__name__, __version__),
+ message='%s (niworkflows v%s)' % (self.__class__.__name__, __version__),
)
# Inverse transform
@@ -119,7 +115,7 @@ def _run_interface(self, runtime, correct_return_codes=(0,)):
_copyxform(
self.inputs.moving_image[0],
os.path.abspath(out_file),
- message="%s (niworkflows v%s)" % (self.__class__.__name__, __version__),
+ message='%s (niworkflows v%s)' % (self.__class__.__name__, __version__),
)
return runtime
@@ -129,8 +125,8 @@ class _FixN4BiasFieldCorrectionOutputSpec(VanillaN4OutputSpec):
negative_values = traits.Bool(
False,
usedefault=True,
- desc="Indicates whether the input was corrected for "
- "nonpositive values by adding a constant offset.",
+ desc='Indicates whether the input was corrected for '
+ 'nonpositive values by adding a constant offset.',
)
@@ -146,11 +142,9 @@ def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _format_arg(self, name, trait_spec, value):
- if name == "input_image":
+ if name == 'input_image':
return trait_spec.argstr % self._input_image
- return super()._format_arg(
- name, trait_spec, value
- )
+ return super()._format_arg(name, trait_spec, value)
def _parse_inputs(self, skip=None):
self._input_image = self.inputs.input_image
@@ -159,7 +153,7 @@ def _parse_inputs(self, skip=None):
datamin = input_nii.get_fdata().min()
if datamin < 0:
self._input_image = fname_presuffix(
- self.inputs.input_image, suffix="_scaled", newpath=os.getcwd()
+ self.inputs.input_image, suffix='_scaled', newpath=os.getcwd()
)
data = input_nii.get_fdata() - datamin
newnii = input_nii.__class__(data, input_nii.affine, input_nii.header)
@@ -170,5 +164,5 @@ def _parse_inputs(self, skip=None):
def _list_outputs(self):
outputs = super()._list_outputs()
- outputs["negative_values"] = self._negative_values
+ outputs['negative_values'] = self._negative_values
return outputs
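
Every Fix* wrapper in this file exists because ANTs outputs can drift from the reference's qform/sform by floating-point noise. The _copyxform helper they call (from niworkflows.utils.images) stamps the reference's transforms back onto the output; approximately (a simplified sketch, not the exact helper):

    import nibabel as nb

    def copy_xform(ref_file, out_file, message='(unknown)'):
        # Overwrite the output's spatial metadata with the reference's.
        ref = nb.load(ref_file)
        out = nb.load(out_file, mmap=False)
        qform, qform_code = ref.header.get_qform(coded=True)
        sform, sform_code = ref.header.get_sform(coded=True)
        header = out.header.copy()
        header.set_qform(qform, int(qform_code))
        header.set_sform(sform, int(sform_code))
        header['descrip'] = f'xform matrices modified by {message}.'.encode()
        out.__class__(out.dataobj, ref.affine, header).to_filename(out_file)
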
diff --git a/niworkflows/interfaces/freesurfer.py b/niworkflows/interfaces/freesurfer.py
index 5b31360387f..16776b4d8c2 100644
--- a/niworkflows/interfaces/freesurfer.py
+++ b/niworkflows/interfaces/freesurfer.py
@@ -79,18 +79,18 @@ def _num_vols(self):
@property
def cmdline(self):
if self._num_vols() == 1:
- return "echo Only one time point!"
+ return 'echo Only one time point!'
return super().cmdline
def _list_outputs(self):
outputs = super()._list_outputs()
if self._num_vols() == 1:
in_file = self.inputs.in_files[0]
- outputs["out_file"] = in_file
- if isdefined(outputs["transform_outputs"]):
- transform_file = outputs["transform_outputs"][0]
+ outputs['out_file'] = in_file
+ if isdefined(outputs['transform_outputs']):
+ transform_file = outputs['transform_outputs'][0]
fs.utils.LTAConvert(
- in_lta="identity.nofile",
+ in_lta='identity.nofile',
source_file=in_file,
target_file=in_file,
out_lta=transform_file,
@@ -99,7 +99,7 @@ def _list_outputs(self):
class _MakeMidthicknessInputSpec(fs.utils.MRIsExpandInputSpec):
- graymid = InputMultiPath(desc="Existing graymid/midthickness file")
+ graymid = InputMultiPath(desc='Existing graymid/midthickness file')
class MakeMidthickness(fs.MRIsExpand):
@@ -126,8 +126,8 @@ def cmdline(self):
# as input
source = None
in_base = Path(self.inputs.in_file).name
- mt = self._associated_file(in_base, "midthickness")
- gm = self._associated_file(in_base, "graymid")
+ mt = self._associated_file(in_base, 'midthickness')
+ gm = self._associated_file(in_base, 'graymid')
for surf in self.inputs.graymid:
if Path(surf).name == mt:
@@ -139,18 +139,18 @@ def cmdline(self):
if source is None:
return cmd
- return "cp {} {}".format(source, self._list_outputs()["out_file"])
+ return 'cp {} {}'.format(source, self._list_outputs()['out_file'])
class _FSInjectBrainExtractedInputSpec(BaseInterfaceInputSpec):
- subjects_dir = Directory(mandatory=True, desc="FreeSurfer SUBJECTS_DIR")
- subject_id = traits.Str(mandatory=True, desc="Subject ID")
- in_brain = File(mandatory=True, exists=True, desc="input file, part of a BIDS tree")
+ subjects_dir = Directory(mandatory=True, desc='FreeSurfer SUBJECTS_DIR')
+ subject_id = traits.Str(mandatory=True, desc='Subject ID')
+ in_brain = File(mandatory=True, exists=True, desc='input file, part of a BIDS tree')
class _FSInjectBrainExtractedOutputSpec(TraitedSpec):
- subjects_dir = Directory(desc="FreeSurfer SUBJECTS_DIR")
- subject_id = traits.Str(desc="Subject ID")
+ subjects_dir = Directory(desc='FreeSurfer SUBJECTS_DIR')
+ subject_id = traits.Str(desc='Subject ID')
class FSInjectBrainExtracted(SimpleInterface):
@@ -162,31 +162,27 @@ def _run_interface(self, runtime):
subjects_dir, subject_id = inject_skullstripped(
self.inputs.subjects_dir, self.inputs.subject_id, self.inputs.in_brain
)
- self._results["subjects_dir"] = subjects_dir
- self._results["subject_id"] = subject_id
+ self._results['subjects_dir'] = subjects_dir
+ self._results['subject_id'] = subject_id
return runtime
class _FSDetectInputsInputSpec(BaseInterfaceInputSpec):
t1w_list = InputMultiPath(
- File(exists=True), mandatory=True, desc="input file, part of a BIDS tree"
- )
- t2w_list = InputMultiPath(File(exists=True), desc="input file, part of a BIDS tree")
- flair_list = InputMultiPath(
- File(exists=True), desc="input file, part of a BIDS tree"
- )
- hires_enabled = traits.Bool(
- True, usedefault=True, desc="enable hi-resolution processing"
+ File(exists=True), mandatory=True, desc='input file, part of a BIDS tree'
)
+ t2w_list = InputMultiPath(File(exists=True), desc='input file, part of a BIDS tree')
+ flair_list = InputMultiPath(File(exists=True), desc='input file, part of a BIDS tree')
+ hires_enabled = traits.Bool(True, usedefault=True, desc='enable hi-resolution processing')
class _FSDetectInputsOutputSpec(TraitedSpec):
- t2w = File(desc="reference T2w image")
- use_t2w = traits.Bool(desc="enable use of T2w downstream computation")
- flair = File(desc="reference FLAIR image")
- use_flair = traits.Bool(desc="enable use of FLAIR downstream computation")
- hires = traits.Bool(desc="enable hi-res processing")
- mris_inflate = traits.Str(desc="mris_inflate argument")
+ t2w = File(desc='reference T2w image')
+ use_t2w = traits.Bool(desc='enable use of T2w downstream computation')
+ flair = File(desc='reference FLAIR image')
+ use_flair = traits.Bool(desc='enable use of FLAIR downstream computation')
+ hires = traits.Bool(desc='enable hi-res processing')
+ mris_inflate = traits.Str(desc='mris_inflate argument')
class FSDetectInputs(SimpleInterface):
@@ -194,25 +190,23 @@ class FSDetectInputs(SimpleInterface):
output_spec = _FSDetectInputsOutputSpec
def _run_interface(self, runtime):
- t2w, flair, self._results["hires"], mris_inflate = detect_inputs(
+ t2w, flair, self._results['hires'], mris_inflate = detect_inputs(
self.inputs.t1w_list,
t2w_list=self.inputs.t2w_list if isdefined(self.inputs.t2w_list) else None,
- flair_list=self.inputs.flair_list
- if isdefined(self.inputs.flair_list)
- else None,
+ flair_list=self.inputs.flair_list if isdefined(self.inputs.flair_list) else None,
hires_enabled=self.inputs.hires_enabled,
)
- self._results["use_t2w"] = t2w is not None
- if self._results["use_t2w"]:
- self._results["t2w"] = t2w
+ self._results['use_t2w'] = t2w is not None
+ if self._results['use_t2w']:
+ self._results['t2w'] = t2w
- self._results["use_flair"] = flair is not None
- if self._results["use_flair"]:
- self._results["flair"] = flair
+ self._results['use_flair'] = flair is not None
+ if self._results['use_flair']:
+ self._results['flair'] = flair
- if self._results["hires"]:
- self._results["mris_inflate"] = mris_inflate
+ if self._results['hires']:
+ self._results['mris_inflate'] = mris_inflate
return runtime
@@ -234,10 +228,9 @@ class TruncateLTA:
"""
# Use a tuple in case some object produces multiple transforms
- lta_outputs = ("out_lta_file",)
+ lta_outputs = ('out_lta_file',)
def _post_run_hook(self, runtime):
-
outputs = self._list_outputs()
for lta_name in self.lta_outputs:
@@ -264,7 +257,7 @@ class PatchedConcatenateLTA(TruncateLTA, ConcatenateLTA):
the fix is now done through mixin with TruncateLTA
"""
- lta_outputs = ["out_file"]
+ lta_outputs = ['out_file']
class PatchedLTAConvert(TruncateLTA, LTAConvert):
@@ -273,7 +266,7 @@ class PatchedLTAConvert(TruncateLTA, LTAConvert):
truncate filename through mixin TruncateLTA
"""
- lta_outputs = ("out_lta",)
+ lta_outputs = ('out_lta',)
class PatchedBBRegisterRPT(TruncateLTA, BBRegisterRPT):
@@ -285,25 +278,21 @@ class PatchedMRICoregRPT(TruncateLTA, MRICoregRPT):
class PatchedRobustRegister(TruncateLTA, RobustRegister):
- lta_outputs = ("out_reg_file", "half_source_xfm", "half_targ_xfm")
+ lta_outputs = ('out_reg_file', 'half_source_xfm', 'half_targ_xfm')
class _RefineBrainMaskInputSpec(BaseInterfaceInputSpec):
- in_anat = File(
- exists=True, mandatory=True, desc="input anatomical reference (INU corrected)"
- )
- in_aseg = File(
- exists=True, mandatory=True, desc="input ``aseg`` file, in NifTi format."
- )
+ in_anat = File(exists=True, mandatory=True, desc='input anatomical reference (INU corrected)')
+ in_aseg = File(exists=True, mandatory=True, desc='input ``aseg`` file, in NIfTI format.')
in_ants = File(
exists=True,
mandatory=True,
- desc="brain tissue segmentation generated with antsBrainExtraction.sh",
+ desc='brain tissue segmentation generated with antsBrainExtraction.sh',
)
class _RefineBrainMaskOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="new mask")
+ out_file = File(exists=True, desc='new mask')
class RefineBrainMask(SimpleInterface):
@@ -317,35 +306,34 @@ class RefineBrainMask(SimpleInterface):
output_spec = _RefineBrainMaskOutputSpec
def _run_interface(self, runtime):
-
- self._results["out_file"] = fname_presuffix(
- self.inputs.in_anat, suffix="_rbrainmask", newpath=runtime.cwd
+ self._results['out_file'] = fname_presuffix(
+ self.inputs.in_anat, suffix='_rbrainmask', newpath=runtime.cwd
)
anatnii = nb.load(self.inputs.in_anat)
msknii = nb.Nifti1Image(
grow_mask(
- anatnii.get_fdata(dtype="float32"),
- np.asanyarray(nb.load(self.inputs.in_aseg).dataobj).astype("int16"),
- np.asanyarray(nb.load(self.inputs.in_ants).dataobj).astype("int16"),
+ anatnii.get_fdata(dtype='float32'),
+ np.asanyarray(nb.load(self.inputs.in_aseg).dataobj).astype('int16'),
+ np.asanyarray(nb.load(self.inputs.in_ants).dataobj).astype('int16'),
),
anatnii.affine,
anatnii.header,
)
msknii.set_data_dtype(np.uint8)
- msknii.to_filename(self._results["out_file"])
+ msknii.to_filename(self._results['out_file'])
return runtime
class _MedialNaNsInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="input surface file")
- subjects_dir = Directory(mandatory=True, desc="FreeSurfer SUBJECTS_DIR")
- density = traits.Enum("32k", "59k", "164k", desc="Input file density (fsLR only)")
+ in_file = File(exists=True, mandatory=True, desc='input surface file')
+ subjects_dir = Directory(mandatory=True, desc='FreeSurfer SUBJECTS_DIR')
+ density = traits.Enum('32k', '59k', '164k', desc='Input file density (fsLR only)')
class _MedialNaNsOutputSpec(TraitedSpec):
- out_file = File(desc="the output surface file")
+ out_file = File(desc='the output surface file')
class MedialNaNs(SimpleInterface):
@@ -355,7 +343,7 @@ class MedialNaNs(SimpleInterface):
output_spec = _MedialNaNsOutputSpec
def _run_interface(self, runtime):
- self._results["out_file"] = medial_wall_to_nan(
+ self._results['out_file'] = medial_wall_to_nan(
self.inputs.in_file,
self.inputs.subjects_dir,
self.inputs.density,
@@ -401,33 +389,31 @@ def fix_lta_length(lta_file):
fixed = False
newfile = []
for line in lines:
- if line.startswith("filename = ") and len(line.strip("\n")) >= 255:
+ if line.startswith('filename = ') and len(line.strip('\n')) >= 255:
fixed = True
- newfile.append("filename = path_too_long\n")
+ newfile.append('filename = path_too_long\n')
else:
newfile.append(line)
if fixed:
- Path(lta_file).write_text("".join(newfile))
+ Path(lta_file).write_text(''.join(newfile))
return fixed
def inject_skullstripped(subjects_dir, subject_id, skullstripped):
from nilearn.image import resample_to_img, new_img_like
- mridir = op.join(subjects_dir, subject_id, "mri")
- t1 = op.join(mridir, "T1.mgz")
- bm_auto = op.join(mridir, "brainmask.auto.mgz")
- bm = op.join(mridir, "brainmask.mgz")
+ mridir = op.join(subjects_dir, subject_id, 'mri')
+ t1 = op.join(mridir, 'T1.mgz')
+ bm_auto = op.join(mridir, 'brainmask.auto.mgz')
+ bm = op.join(mridir, 'brainmask.mgz')
if not op.exists(bm_auto):
img = nb.load(t1)
mask = nb.load(skullstripped)
bmask = new_img_like(mask, np.asanyarray(mask.dataobj) > 0)
- resampled_mask = resample_to_img(bmask, img, "nearest")
- masked_image = new_img_like(
- img, np.asanyarray(img.dataobj) * resampled_mask.dataobj
- )
+ resampled_mask = resample_to_img(bmask, img, 'nearest')
+ masked_image = new_img_like(img, np.asanyarray(img.dataobj) * resampled_mask.dataobj)
masked_image.to_filename(bm_auto)
if not op.exists(bm):
@@ -455,7 +441,7 @@ def detect_inputs(t1w_list, t2w_list=None, flair_list=None, hires_enabled=True):
flair = flair_list[0]
# https://surfer.nmr.mgh.harvard.edu/fswiki/SubmillimeterRecon
- mris_inflate = "-n 50" if hires else None
+ mris_inflate = '-n 50' if hires else None
return (t2w, flair, hires, mris_inflate)
@@ -522,9 +508,9 @@ def grow_mask(anat, aseg, ants_segs=None, ww=7, zval=2.0, bw=4):
continue
window = gm[
- pixel[0] - ww:pixel[0] + ww,
- pixel[1] - ww:pixel[1] + ww,
- pixel[2] - ww:pixel[2] + ww,
+ pixel[0] - ww : pixel[0] + ww,
+ pixel[1] - ww : pixel[1] + ww,
+ pixel[2] - ww : pixel[2] + ww,
]
if np.any(window > 0):
mu = window[window > 0].mean()
@@ -544,22 +530,20 @@ def medial_wall_to_nan(in_file, subjects_dir, den=None, newpath=None):
import templateflow.api as tf
fn = os.path.basename(in_file)
- target_subject = in_file.split(".")[1]
- if not target_subject.startswith("fs"):
+ target_subject = in_file.split('.')[1]
+ if not target_subject.startswith('fs'):
return in_file
func = nb.load(in_file)
- if target_subject.startswith("fsaverage"):
+ if target_subject.startswith('fsaverage'):
cortex = nb.freesurfer.read_label(
- os.path.join(
- subjects_dir, target_subject, "label", "{}.cortex.label".format(fn[:2])
- )
+ os.path.join(subjects_dir, target_subject, 'label', '{}.cortex.label'.format(fn[:2]))
)
medial = np.delete(np.arange(len(func.darrays[0].data)), cortex)
- elif target_subject == "fslr" and den is not None:
+ elif target_subject == 'fslr' and den is not None:
hemi = fn[0].upper()
label_file = str(
- tf.get("fsLR", hemi=hemi, desc="nomedialwall", density=den, suffix="dparc")
+ tf.get('fsLR', hemi=hemi, desc='nomedialwall', density=den, suffix='dparc')
)
label = nb.load(label_file)
medial = np.invert(label.darrays[0].data.astype(bool))
@@ -578,9 +562,9 @@ def mri_info(fname, argument):
import subprocess as sp
import numpy as np
- cmd_info = "mri_info --%s %s" % (argument, fname)
+ cmd_info = 'mri_info --%s %s' % (argument, fname)
proc = sp.Popen(cmd_info, stdout=sp.PIPE, shell=True)
data = bytearray(proc.stdout.read())
- mstring = np.fromstring(data.decode("utf-8"), sep="\n")
+ mstring = np.fromstring(data.decode('utf-8'), sep='\n')
result = np.reshape(mstring, (4, -1))
return result
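
medial_wall_to_nan above covers two atlases: fsaverage subjects read `?h.cortex.label` from SUBJECTS_DIR, while fsLR fetches the `nomedialwall` dparc label from TemplateFlow. The fsaverage branch boils down to this sketch (file paths hypothetical):

    import numpy as np
    import nibabel as nb

    func = nb.load('lh.bold.fsaverage5.func.gii')          # hypothetical input
    cortex = nb.freesurfer.read_label('lh.cortex.label')   # vertex ids to keep
    medial = np.delete(np.arange(len(func.darrays[0].data)), cortex)
    for darray in func.darrays:
        darray.data[medial] = np.nan
    func.to_filename('lh.bold_nomedial.func.gii')
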
diff --git a/niworkflows/interfaces/header.py b/niworkflows/interfaces/header.py
index 18808ed6a5b..28557b184cb 100644
--- a/niworkflows/interfaces/header.py
+++ b/niworkflows/interfaces/header.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Handling NIfTI headers."""
+
import os
import shutil
from textwrap import indent
@@ -43,11 +44,11 @@
from .. import __version__
-LOGGER = logging.getLogger("nipype.interface")
+LOGGER = logging.getLogger('nipype.interface')
class _CopyXFormInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
- hdr_file = File(exists=True, mandatory=True, desc="the file we get the header from")
+ hdr_file = File(exists=True, mandatory=True, desc='the file we get the header from')
class CopyXForm(SimpleInterface):
@@ -63,7 +64,7 @@ class CopyXForm(SimpleInterface):
output_spec = DynamicTraitedSpec
def __init__(self, fields=None, **inputs):
- self._fields = fields or ["in_file"]
+ self._fields = fields or ['in_file']
if isinstance(self._fields, str):
self._fields = [self._fields]
@@ -77,10 +78,10 @@ def _outputs(self):
base = super()._outputs()
if self._fields:
fields = self._fields.copy()
- if "in_file" in fields:
- idx = fields.index("in_file")
+ if 'in_file' in fields:
+ idx = fields.index('in_file')
fields.pop(idx)
- fields.insert(idx, "out_file")
+ fields.insert(idx, 'out_file')
base = add_traits(base, fields)
return base
@@ -92,15 +93,13 @@ def _run_interface(self, runtime):
if isinstance(in_files, str):
in_files = [in_files]
for in_file in in_files:
- out_name = fname_presuffix(
- in_file, suffix="_xform", newpath=runtime.cwd
- )
+ out_name = fname_presuffix(in_file, suffix='_xform', newpath=runtime.cwd)
# Copy and replace header
shutil.copy(in_file, out_name)
_copyxform(
self.inputs.hdr_file,
out_name,
- message="CopyXForm (niworkflows v%s)" % __version__,
+ message='CopyXForm (niworkflows v%s)' % __version__,
)
self._results[f].append(out_name)
@@ -108,19 +107,19 @@ def _run_interface(self, runtime):
if len(self._results[f]) == 1:
self._results[f] = self._results[f][0]
- default = self._results.pop("in_file", None)
+ default = self._results.pop('in_file', None)
if default:
- self._results["out_file"] = default
+ self._results['out_file'] = default
return runtime
class _CopyHeaderInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="the file we get the data from")
- hdr_file = File(exists=True, mandatory=True, desc="the file we get the header from")
+ in_file = File(exists=True, mandatory=True, desc='the file we get the data from')
+ hdr_file = File(exists=True, mandatory=True, desc='the file we get the header from')
class _CopyHeaderOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="written file path")
+ out_file = File(exists=True, desc='written file path')
class CopyHeader(SimpleInterface):
@@ -138,19 +137,19 @@ def _run_interface(self, runtime):
new_img = out_img.__class__(out_img.dataobj, in_img.affine, in_img.header)
new_img.set_data_dtype(out_img.get_data_dtype())
- out_name = fname_presuffix(self.inputs.in_file, suffix="_fixhdr", newpath=".")
+ out_name = fname_presuffix(self.inputs.in_file, suffix='_fixhdr', newpath='.')
new_img.to_filename(out_name)
- self._results["out_file"] = out_name
+ self._results['out_file'] = out_name
return runtime
class _ValidateImageInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="input image")
+ in_file = File(exists=True, mandatory=True, desc='input image')
class _ValidateImageOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="validated image")
- out_report = File(exists=True, desc="HTML segment containing warning")
+ out_file = File(exists=True, desc='validated image')
+ out_report = File(exists=True, desc='HTML segment containing warning')
class ValidateImage(SimpleInterface):
@@ -207,11 +206,11 @@ class ValidateImage(SimpleInterface):
def _run_interface(self, runtime):
img = nb.load(self.inputs.in_file)
- out_report = os.path.join(runtime.cwd, "report.html")
+ out_report = os.path.join(runtime.cwd, 'report.html')
# Retrieve xform codes
- sform_code = int(img.header._structarr["sform_code"])
- qform_code = int(img.header._structarr["qform_code"])
+ sform_code = int(img.header._structarr['sform_code'])
+ qform_code = int(img.header._structarr['qform_code'])
# Check qform is valid
valid_qform = False
@@ -234,50 +233,44 @@ def _run_interface(self, runtime):
# Both match, qform valid (implicit with match), codes okay -> do nothing, empty report
if matching_affines and qform_code > 0 and sform_code > 0:
- self._results["out_file"] = self.inputs.in_file
- open(out_report, "w").close()
- self._results["out_report"] = out_report
+ self._results['out_file'] = self.inputs.in_file
+ open(out_report, 'w').close()
+ self._results['out_report'] = out_report
return runtime
# A new file will be written
- out_fname = fname_presuffix(
- self.inputs.in_file, suffix="_valid", newpath=runtime.cwd
- )
- self._results["out_file"] = out_fname
+ out_fname = fname_presuffix(self.inputs.in_file, suffix='_valid', newpath=runtime.cwd)
+ self._results['out_file'] = out_fname
# Row 2:
if valid_qform and qform_code > 0 and (sform_code == 0 or not valid_sform):
img.set_sform(qform, qform_code)
- warning_txt = "Note on orientation: sform matrix set"
+ warning_txt = 'Note on orientation: sform matrix set'
description = """\
The sform has been copied from qform.
"""
# Rows 3-4:
# Note: if qform is not valid, matching_affines is False
- elif (valid_sform and sform_code > 0) and (
- not matching_affines or qform_code == 0
- ):
+ elif (valid_sform and sform_code > 0) and (not matching_affines or qform_code == 0):
img.set_qform(sform, sform_code)
new_qform = img.get_qform()
if valid_qform:
# False alarm - the difference is due to precision loss of qform
if np.allclose(new_qform, qform) and qform_code > 0:
- self._results["out_file"] = self.inputs.in_file
- open(out_report, "w").close()
- self._results["out_report"] = out_report
+ self._results['out_file'] = self.inputs.in_file
+ open(out_report, 'w').close()
+ self._results['out_report'] = out_report
return runtime
# Replacing an existing, valid qform. Report magnitude of change.
diff = np.linalg.inv(qform) @ new_qform
trans, rot, _, _ = transforms3d.affines.decompose44(diff)
angle = transforms3d.axangles.mat2axangle(rot)[1]
xyz_unit = img.header.get_xyzt_units()[0]
- if xyz_unit == "unknown":
- xyz_unit = "mm"
+ if xyz_unit == 'unknown':
+ xyz_unit = 'mm'
- total_trans = np.sqrt(
- np.sum(trans * trans)
- ) # Add angle and total_trans to report
- warning_txt = "Note on orientation: qform matrix overwritten"
+ total_trans = np.sqrt(np.sum(trans * trans)) # Add angle and total_trans to report
+ warning_txt = 'Note on orientation: qform matrix overwritten'
description = f"""\
The qform has been copied from sform.
@@ -287,7 +280,7 @@ def _run_interface(self, runtime):
"""
elif qform_code > 0:
# qform code indicates the qform is supposed to be valid. Use more stridency.
- warning_txt = "WARNING - Invalid qform information"
+ warning_txt = 'WARNING - Invalid qform information'
description = """\
The qform matrix found in the file header is invalid.
@@ -298,16 +291,14 @@ def _run_interface(self, runtime):
"""
else: # qform_code == 0
# qform is not expected to be valid. Simple note.
- warning_txt = "Note on orientation: qform matrix overwritten"
- description = (
- '<p class="elem-desc">The qform has been copied from sform.</p>'
- )
+ warning_txt = 'Note on orientation: qform matrix overwritten'
+ description = '<p class="elem-desc">The qform has been copied from sform.</p>'
Orientation information could not be retrieved from the image header.
@@ -524,7 +505,7 @@ def _run_interface(self, runtime):
in_data = img.dataobj
img = nb.Nifti1Image(
- in_data[:, :, :, self.inputs.n_volumes_to_discard:],
+ in_data[:, :, :, self.inputs.n_volumes_to_discard :],
img.affine,
img.header,
)
@@ -536,10 +517,8 @@ def _run_interface(self, runtime):
# Store new file
if save_file:
- out_fname = fname_presuffix(
- self.inputs.in_file, suffix="_valid", newpath=runtime.cwd
- )
- self._results["out_file"] = out_fname
+ out_fname = fname_presuffix(self.inputs.in_file, suffix='_valid', newpath=runtime.cwd)
+ self._results['out_file'] = out_fname
img.to_filename(out_fname)
if warning_txt:
@@ -547,8 +526,8 @@ def _run_interface(self, runtime):
warning_txt,
description,
)
- with open(out_report, "w") as fobj:
- fobj.write(indent(snippet, "\t" * 3))
+ with open(out_report, 'w') as fobj:
+ fobj.write(indent(snippet, '\t' * 3))
- self._results["out_report"] = out_report
+ self._results['out_report'] = out_report
return runtime
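
The branches in ValidateImage hinge on two properties per matrix: whether nibabel can decode it at all and what its intent code claims. The qform probe reformatted above amounts to the following (hypothetical path; the private _structarr access mirrors the interface itself):

    import nibabel as nb

    img = nb.load('sub-01_T1w.nii.gz')  # hypothetical input
    sform_code = int(img.header._structarr['sform_code'])
    qform_code = int(img.header._structarr['qform_code'])
    try:
        qform = img.get_qform()
        valid_qform = True
    except ValueError:  # stored quaternion cannot be converted to an affine
        qform = None
        valid_qform = False
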
diff --git a/niworkflows/interfaces/images.py b/niworkflows/interfaces/images.py
index 916c200c7ab..4fcce72099b 100644
--- a/niworkflows/interfaces/images.py
+++ b/niworkflows/interfaces/images.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Image tools interfaces."""
+
import os
from functools import partial
import numpy as np
@@ -40,21 +41,19 @@
)
-LOGGER = logging.getLogger("nipype.interface")
+LOGGER = logging.getLogger('nipype.interface')
class _RegridToZoomsInputSpec(BaseInterfaceInputSpec):
- in_file = File(
- exists=True, mandatory=True, desc="a file whose resolution is to change"
- )
+ in_file = File(exists=True, mandatory=True, desc='a file whose resolution is to change')
zooms = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
mandatory=True,
- desc="the new resolution",
+ desc='the new resolution',
)
- order = traits.Int(3, usedefault=True, desc="order of interpolator")
+ order = traits.Int(3, usedefault=True, desc='order of interpolator')
clip = traits.Bool(
True,
usedefault=True,
@@ -65,12 +64,12 @@ class _RegridToZoomsInputSpec(BaseInterfaceInputSpec):
traits.Float(),
default=False,
usedefault=True,
- desc="apply gaussian smoothing before resampling",
+ desc='apply gaussian smoothing before resampling',
)
class _RegridToZoomsOutputSpec(TraitedSpec):
- out_file = File(exists=True, dec="the regridded file")
+ out_file = File(exists=True, desc='the regridded file')
class RegridToZooms(SimpleInterface):
@@ -82,8 +81,8 @@ class RegridToZooms(SimpleInterface):
def _run_interface(self, runtime):
from ..utils.images import resample_by_spacing
- self._results["out_file"] = fname_presuffix(
- self.inputs.in_file, suffix="_regrid", newpath=runtime.cwd
+ self._results['out_file'] = fname_presuffix(
+ self.inputs.in_file, suffix='_regrid', newpath=runtime.cwd
)
resample_by_spacing(
self.inputs.in_file,
@@ -91,13 +90,13 @@ def _run_interface(self, runtime):
order=self.inputs.order,
clip=self.inputs.clip,
smooth=self.inputs.smooth,
- ).to_filename(self._results["out_file"])
+ ).to_filename(self._results['out_file'])
return runtime
class _IntraModalMergeInputSpec(BaseInterfaceInputSpec):
- in_files = InputMultiObject(File(exists=True), mandatory=True, desc="input files")
- in_mask = File(exists=True, desc="input mask for grand mean scaling")
+ in_files = InputMultiObject(File(exists=True), mandatory=True, desc='input files')
+ in_mask = File(exists=True, desc='input mask for grand mean scaling')
hmc = traits.Bool(True, usedefault=True)
zero_based_avg = traits.Bool(True, usedefault=True)
to_ras = traits.Bool(True, usedefault=True)
@@ -105,10 +104,10 @@ class _IntraModalMergeInputSpec(BaseInterfaceInputSpec):
class _IntraModalMergeOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="merged image")
- out_avg = File(exists=True, desc="average image")
- out_mats = OutputMultiObject(File(exists=True), desc="output matrices")
- out_movpar = OutputMultiObject(File(exists=True), desc="output movement parameters")
+ out_file = File(exists=True, desc='merged image')
+ out_avg = File(exists=True, desc='average image')
+ out_mats = OutputMultiObject(File(exists=True), desc='output matrices')
+ out_movpar = OutputMultiObject(File(exists=True), desc='output movement parameters')
class IntraModalMerge(SimpleInterface):
@@ -139,7 +138,7 @@ def _run_interface(self, runtime):
filenii = nb.load(f)
filenii = nb.squeeze_image(filenii)
if len(filenii.shape) == 5:
- raise RuntimeError("Input image (%s) is 5D." % f)
+ raise RuntimeError('Input image (%s) is 5D.' % f)
if filenii.dataobj.ndim == 4:
nii_list += nb.four_to_three(filenii)
else:
@@ -151,11 +150,11 @@ def _run_interface(self, runtime):
filenii = nii_list[0]
merged_fname = fname_presuffix(
- self.inputs.in_files[0], suffix="_merged", newpath=runtime.cwd
+ self.inputs.in_files[0], suffix='_merged', newpath=runtime.cwd
)
filenii.to_filename(merged_fname)
- self._results["out_file"] = merged_fname
- self._results["out_avg"] = merged_fname
+ self._results['out_file'] = merged_fname
+ self._results['out_avg'] = merged_fname
if filenii.dataobj.ndim < 4:
# TODO: generate identity out_mats and zero-filled out_movpar
@@ -165,7 +164,7 @@ def _run_interface(self, runtime):
from nipype.interfaces.fsl import MCFLIRT
mcflirt = MCFLIRT(
- cost="normcorr",
+ cost='normcorr',
save_mats=True,
save_plots=True,
ref_vol=0,
@@ -173,23 +172,21 @@ def _run_interface(self, runtime):
)
mcres = mcflirt.run()
filenii = nb.load(mcres.outputs.out_file)
- self._results["out_file"] = mcres.outputs.out_file
- self._results["out_mats"] = mcres.outputs.mat_file
- self._results["out_movpar"] = mcres.outputs.par_file
+ self._results['out_file'] = mcres.outputs.out_file
+ self._results['out_mats'] = mcres.outputs.mat_file
+ self._results['out_movpar'] = mcres.outputs.par_file
- hmcdata = filenii.get_fdata(dtype="float32")
+ hmcdata = filenii.get_fdata(dtype='float32')
if self.inputs.grand_mean_scaling:
if not isdefined(self.inputs.in_mask):
mean = np.median(hmcdata, axis=-1)
thres = np.percentile(mean, 25)
mask = mean > thres
else:
- mask = nb.load(self.inputs.in_mask).get_fdata(dtype="float32") > 0.5
+ mask = nb.load(self.inputs.in_mask).get_fdata(dtype='float32') > 0.5
nimgs = hmcdata.shape[-1]
- means = np.median(
- hmcdata[mask[..., np.newaxis]].reshape((-1, nimgs)).T, axis=-1
- )
+ means = np.median(hmcdata[mask[..., np.newaxis]].reshape((-1, nimgs)).T, axis=-1)
max_mean = means.max()
for i in range(nimgs):
hmcdata[..., i] *= max_mean / means[i]
@@ -198,11 +195,11 @@ def _run_interface(self, runtime):
if self.inputs.zero_based_avg:
hmcdata -= hmcdata.min()
- self._results["out_avg"] = fname_presuffix(
- self.inputs.in_files[0], suffix="_avg", newpath=runtime.cwd
+ self._results['out_avg'] = fname_presuffix(
+ self.inputs.in_files[0], suffix='_avg', newpath=runtime.cwd
)
nb.Nifti1Image(hmcdata, filenii.affine, filenii.header).to_filename(
- self._results["out_avg"]
+ self._results['out_avg']
)
return runtime
@@ -212,33 +209,33 @@ class _RobustAverageInputSpec(BaseInterfaceInputSpec):
in_file = File(
exists=True,
mandatory=True,
- desc="Either a 3D reference or 4D file to average through the last axis"
+ desc='Either a 3D reference or 4D file to average through the last axis',
)
- t_mask = traits.List(traits.Bool, desc="List of selected timepoints to be averaged")
+ t_mask = traits.List(traits.Bool, desc='List of selected timepoints to be averaged')
mc_method = traits.Enum(
- "AFNI",
- "FSL",
+ 'AFNI',
+ 'FSL',
None,
usedefault=True,
- desc="Which software to use to perform motion correction",
+ desc='Which software to use to perform motion correction',
)
nonnegative = traits.Bool(
- True, usedefault=True, desc="whether the output should be clipped below zero"
+ True, usedefault=True, desc='whether the output should be clipped below zero'
)
- num_threads = traits.Int(desc="number of threads")
+ num_threads = traits.Int(desc='number of threads')
two_pass = traits.Bool(
- True, usedefault=True, desc="whether two passes of correction is necessary"
+ True, usedefault=True, desc='whether two passes of correction are necessary'
)
class _RobustAverageOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="the averaged image")
- out_volumes = File(exists=True, desc="the volumes selected that have been averaged")
+ out_file = File(exists=True, desc='the averaged image')
+ out_volumes = File(exists=True, desc='the volumes selected that have been averaged')
out_drift = traits.List(
- traits.Float, desc="the ratio to the grand mean or global signal drift"
+ traits.Float, desc='the ratio to the grand mean or global signal drift'
)
- out_hmc = OutputMultiObject(File(exists=True), desc="head-motion correction matrices")
- out_hmc_volumes = OutputMultiObject(File(exists=True), desc="head-motion correction volumes")
+ out_hmc = OutputMultiObject(File(exists=True), desc='head-motion correction matrices')
+ out_hmc_volumes = OutputMultiObject(File(exists=True), desc='head-motion correction volumes')
class RobustAverage(SimpleInterface):
@@ -252,9 +249,9 @@ def _run_interface(self, runtime):
# If reference is 3D, return it directly
if img.dataobj.ndim == 3:
- self._results["out_file"] = self.inputs.in_file
- self._results["out_volumes"] = self.inputs.in_file
- self._results["out_drift"] = [1.0]
+ self._results['out_file'] = self.inputs.in_file
+ self._results['out_volumes'] = self.inputs.in_file
+ self._results['out_drift'] = [1.0]
return runtime
fname = partial(fname_presuffix, self.inputs.in_file, newpath=runtime.cwd)
@@ -267,34 +264,30 @@ def _run_interface(self, runtime):
# If reference was 4D, but single-volume - write out squeezed and return.
if img.dataobj.ndim == 3:
- self._results["out_file"] = fname(suffix="_squeezed")
- img.to_filename(self._results["out_file"])
- self._results["out_volumes"] = self.inputs.in_file
- self._results["out_drift"] = [1.0]
+ self._results['out_file'] = fname(suffix='_squeezed')
+ img.to_filename(self._results['out_file'])
+ self._results['out_volumes'] = self.inputs.in_file
+ self._results['out_drift'] = [1.0]
return runtime
img_len = img.shape[3]
- t_mask = (
- self.inputs.t_mask if isdefined(self.inputs.t_mask) else [True] * img_len
- )
+ t_mask = self.inputs.t_mask if isdefined(self.inputs.t_mask) else [True] * img_len
if len(t_mask) != img_len:
raise ValueError(
- f"Image length ({img_len} timepoints) unmatched by mask ({len(t_mask)})"
+ f'Image length ({img_len} timepoints) unmatched by mask ({len(t_mask)})'
)
n_volumes = sum(t_mask)
if n_volumes < 1:
- raise ValueError("At least one volume should be selected for slicing")
+ raise ValueError('At least one volume should be selected for slicing')
- self._results["out_file"] = fname(suffix="_average")
- self._results["out_volumes"] = fname(suffix="_sliced")
+ self._results['out_file'] = fname(suffix='_average')
+ self._results['out_volumes'] = fname(suffix='_sliced')
- sliced = nb.concat_images(
- i for i, t in zip(nb.four_to_three(img), t_mask) if t
- )
+ sliced = nb.concat_images(i for i, t in zip(nb.four_to_three(img), t_mask) if t)
- data = sliced.get_fdata(dtype="float32")
+ data = sliced.get_fdata(dtype='float32')
# Data can come with outliers showing very high numbers - preemptively prune
data = np.clip(
data,
@@ -304,7 +297,7 @@ def _run_interface(self, runtime):
gs_drift = np.mean(data, axis=(0, 1, 2))
gs_drift /= gs_drift.max()
- self._results["out_drift"] = [float(i) for i in gs_drift]
+ self._results['out_drift'] = [float(i) for i in gs_drift]
data /= gs_drift
data = np.clip(
@@ -313,43 +306,43 @@ def _run_interface(self, runtime):
a_max=data.max(),
)
sliced.__class__(data, sliced.affine, sliced.header).to_filename(
- self._results["out_volumes"]
+ self._results['out_volumes']
)
if n_volumes == 1:
- nb.squeeze_image(sliced).to_filename(self._results["out_file"])
- self._results["out_drift"] = [1.0]
+ nb.squeeze_image(sliced).to_filename(self._results['out_file'])
+ self._results['out_drift'] = [1.0]
return runtime
- if self.inputs.mc_method == "AFNI":
+ if self.inputs.mc_method == 'AFNI':
from nipype.interfaces.afni import Volreg
volreg = Volreg(
- in_file=self._results["out_volumes"],
- interp="Fourier",
- args="-twopass" if self.inputs.two_pass else "",
+ in_file=self._results['out_volumes'],
+ interp='Fourier',
+ args='-twopass' if self.inputs.two_pass else '',
zpad=4,
- outputtype="NIFTI_GZ",
+ outputtype='NIFTI_GZ',
)
if isdefined(self.inputs.num_threads):
volreg.inputs.num_threads = self.inputs.num_threads
res = volreg.run()
- self._results["out_hmc"] = res.outputs.oned_matrix_save
+ self._results['out_hmc'] = res.outputs.oned_matrix_save
- elif self.inputs.mc_method == "FSL":
+ elif self.inputs.mc_method == 'FSL':
from nipype.interfaces.fsl import MCFLIRT
res = MCFLIRT(
- in_file=self._results["out_volumes"],
+ in_file=self._results['out_volumes'],
ref_vol=0,
- interpolation="sinc",
+ interpolation='sinc',
).run()
- self._results["out_hmc"] = res.outputs.mat_file
+ self._results['out_hmc'] = res.outputs.mat_file
if self.inputs.mc_method:
- self._results["out_hmc_volumes"] = res.outputs.out_file
- data = nb.load(res.outputs.out_file).get_fdata(dtype="float32")
+ self._results['out_hmc_volumes'] = res.outputs.out_file
+ data = nb.load(res.outputs.out_file).get_fdata(dtype='float32')
data = np.clip(
data,
@@ -357,9 +350,9 @@ def _run_interface(self, runtime):
a_max=data.max(),
)
- sliced.__class__(
- np.median(data, axis=3), sliced.affine, sliced.header
- ).to_filename(self._results["out_file"])
+ sliced.__class__(np.median(data, axis=3), sliced.affine, sliced.header).to_filename(
+ self._results['out_file']
+ )
return runtime
@@ -378,31 +371,31 @@ def _run_interface(self, runtime):
class _TemplateDimensionsInputSpec(BaseInterfaceInputSpec):
- anat_type = traits.Enum("T1w", "T2w", usedefault=True, desc="Anatomical image type")
+ anat_type = traits.Enum('T1w', 'T2w', usedefault=True, desc='Anatomical image type')
anat_list = InputMultiObject(
- File(exists=True), xor=["t1w_list"], desc="input anatomical images"
+ File(exists=True), xor=['t1w_list'], desc='input anatomical images'
)
t1w_list = InputMultiObject(
File(exists=True),
- xor=["anat_list"],
- deprecated="1.14.0",
- new_name="anat_list",
+ xor=['anat_list'],
+ deprecated='1.14.0',
+ new_name='anat_list',
)
max_scale = traits.Float(
- 3.0, usedefault=True, desc="Maximum scaling factor in images to accept"
+ 3.0, usedefault=True, desc='Maximum scaling factor in images to accept'
)
class _TemplateDimensionsOutputSpec(TraitedSpec):
- t1w_valid_list = OutputMultiObject(exists=True, desc="valid T1w images")
- anat_valid_list = OutputMultiObject(exists=True, desc="valid anatomical images")
+ t1w_valid_list = OutputMultiObject(exists=True, desc='valid T1w images')
+ anat_valid_list = OutputMultiObject(exists=True, desc='valid anatomical images')
target_zooms = traits.Tuple(
- traits.Float, traits.Float, traits.Float, desc="Target zoom information"
+ traits.Float, traits.Float, traits.Float, desc='Target zoom information'
)
target_shape = traits.Tuple(
- traits.Int, traits.Int, traits.Int, desc="Target shape information"
+ traits.Int, traits.Int, traits.Int, desc='Target shape information'
)
- out_report = File(exists=True, desc="conformation report")
+ out_report = File(exists=True, desc='conformation report')
class TemplateDimensions(SimpleInterface):
@@ -429,14 +422,12 @@ def _generate_segment(self, discards, dims, zooms):
DISCARD_TEMPLATE.format(path=path, basename=os.path.basename(path))
for path in discards
]
-        discard_list = (
-            "\n".join(["\t\t\t<ul>"] + items + ["\t\t\t</ul>"]) if items else ""
-        )
-        zoom_fmt = "{:.02g}mm x {:.02g}mm x {:.02g}mm".format(*zooms)
+        discard_list = '\n'.join(['\t\t\t<ul>'] + items + ['\t\t\t</ul>']) if items else ''
+ zoom_fmt = '{:.02g}mm x {:.02g}mm x {:.02g}mm'.format(*zooms)
return CONFORMATION_TEMPLATE.format(
anat=self.inputs.anat_type,
n_anat=len(self.inputs.anat_list),
- dims="x".join(map(str, dims)),
+ dims='x'.join(map(str, dims)),
zooms=zoom_fmt,
n_discards=len(discards),
discard_list=discard_list,
@@ -464,41 +455,41 @@ def _run_interface(self, runtime):
# Ignore dropped images
valid_fnames = np.atleast_1d(in_names[valid]).tolist()
- self._results["anat_valid_list"] = valid_fnames
- self._results["t1w_valid_list"] = valid_fnames # Deprecate: 1.14.0
+ self._results['anat_valid_list'] = valid_fnames
+ self._results['t1w_valid_list'] = valid_fnames # Deprecate: 1.14.0
# Set target shape information
target_zooms = all_zooms[valid].min(axis=0)
target_shape = all_shapes[valid].max(axis=0)
- self._results["target_zooms"] = tuple(target_zooms.tolist())
- self._results["target_shape"] = tuple(target_shape.tolist())
+ self._results['target_zooms'] = tuple(target_zooms.tolist())
+ self._results['target_shape'] = tuple(target_shape.tolist())
# Create report
dropped_images = in_names[~valid]
segment = self._generate_segment(dropped_images, target_shape, target_zooms)
- out_report = os.path.join(runtime.cwd, "report.html")
- with open(out_report, "w") as fobj:
+ out_report = os.path.join(runtime.cwd, 'report.html')
+ with open(out_report, 'w') as fobj:
fobj.write(segment)
- self._results["out_report"] = out_report
+ self._results['out_report'] = out_report
return runtime
class _ConformInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="Input image")
+ in_file = File(exists=True, mandatory=True, desc='Input image')
target_zooms = traits.Tuple(
- traits.Float, traits.Float, traits.Float, desc="Target zoom information"
+ traits.Float, traits.Float, traits.Float, desc='Target zoom information'
)
target_shape = traits.Tuple(
- traits.Int, traits.Int, traits.Int, desc="Target shape information"
+ traits.Int, traits.Int, traits.Int, desc='Target shape information'
)
class _ConformOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="Conformed image")
- transform = File(exists=True, desc="Conformation transform (voxel-to-voxel)")
+ out_file = File(exists=True, desc='Conformed image')
+ transform = File(exists=True, desc='Conformation transform (voxel-to-voxel)')
class Conform(SimpleInterface):
@@ -533,22 +524,20 @@ def _run_interface(self, runtime):
shape = np.array(reoriented.shape[:3])
# Reconstruct transform from orig to reoriented image
- ornt_xfm = nb.orientations.inv_ornt_aff(
- nb.io_orientation(orig_img.affine), orig_img.shape
- )
+ ornt_xfm = nb.orientations.inv_ornt_aff(nb.io_orientation(orig_img.affine), orig_img.shape)
# Identity unless proven otherwise
target_affine = reoriented.affine.copy()
conform_xfm = np.eye(4)
xyz_unit = reoriented.header.get_xyzt_units()[0]
- if xyz_unit == "unknown":
+ if xyz_unit == 'unknown':
# Common assumption; if we're wrong, unlikely to be the only thing that breaks
- xyz_unit = "mm"
+ xyz_unit = 'mm'
# Set a 0.05mm threshold to performing rescaling
- atol_gross = {"meter": 5e-5, "mm": 0.05, "micron": 50}[xyz_unit]
+ atol_gross = {'meter': 5e-5, 'mm': 0.05, 'micron': 50}[xyz_unit]
# if 0.01 > difference > 0.001mm, freesurfer won't be able to merge the images
- atol_fine = {"meter": 1e-6, "mm": 0.001, "micron": 1}[xyz_unit]
+ atol_fine = {'meter': 1e-6, 'mm': 0.001, 'micron': 1}[xyz_unit]
# Update zooms => Modify affine
# Rescale => Resample to resized voxels
@@ -561,18 +550,14 @@ def _run_interface(self, runtime):
# Use an affine with the corrected zooms, whether or not we resample
if update_zooms:
scale_factor = target_zooms / zooms
- target_affine[:3, :3] = reoriented.affine[:3, :3] @ np.diag(
- scale_factor
- )
+ target_affine[:3, :3] = reoriented.affine[:3, :3] @ np.diag(scale_factor)
if resize:
# The shift is applied after scaling.
# Use a proportional shift to maintain relative position in dataset
size_factor = target_span / (zooms * shape)
# Use integer shifts to avoid unnecessary interpolation
- offset = (
- reoriented.affine[:3, 3] * size_factor - reoriented.affine[:3, 3]
- )
+ offset = reoriented.affine[:3, 3] * size_factor - reoriented.affine[:3, 3]
target_affine[:3, 3] = reoriented.affine[:3, 3] + offset.astype(int)
conform_xfm = np.linalg.inv(reoriented.affine) @ target_affine
@@ -587,29 +572,27 @@ def _run_interface(self, runtime):
# Image may be reoriented, rescaled, and/or resized
if reoriented is not orig_img:
- out_name = fname_presuffix(fname, suffix="_ras", newpath=runtime.cwd)
+ out_name = fname_presuffix(fname, suffix='_ras', newpath=runtime.cwd)
reoriented.to_filename(out_name)
else:
out_name = fname
transform = ornt_xfm.dot(conform_xfm)
if not np.allclose(orig_img.affine.dot(transform), target_affine):
- raise ValueError("Original and target affines are not similar")
+ raise ValueError('Original and target affines are not similar')
- mat_name = fname_presuffix(
- fname, suffix=".mat", newpath=runtime.cwd, use_ext=False
- )
- np.savetxt(mat_name, transform, fmt="%.08f")
+ mat_name = fname_presuffix(fname, suffix='.mat', newpath=runtime.cwd, use_ext=False)
+ np.savetxt(mat_name, transform, fmt='%.08f')
- self._results["out_file"] = out_name
- self._results["transform"] = mat_name
+ self._results['out_file'] = out_name
+ self._results['transform'] = mat_name
return runtime
def reorient(in_file, newpath=None):
"""Reorient Nifti files to RAS."""
- out_file = fname_presuffix(in_file, suffix="_ras", newpath=newpath)
+ out_file = fname_presuffix(in_file, suffix='_ras', newpath=newpath)
nb.as_closest_canonical(nb.load(in_file)).to_filename(out_file)
return out_file
@@ -655,47 +638,46 @@ def normalize_xform(img):
class _SignalExtractionInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="4-D fMRI nii file")
+ in_file = File(exists=True, mandatory=True, desc='4-D fMRI nii file')
label_files = InputMultiObject(
File(exists=True),
mandatory=True,
- desc="a 3D label image, with 0 denoting "
- "background, or a list of 3D probability "
- "maps (one per label) or the equivalent 4D "
- "file.",
+ desc='a 3D label image, with 0 denoting '
+ 'background, or a list of 3D probability '
+ 'maps (one per label) or the equivalent 4D '
+ 'file.',
)
prob_thres = traits.Range(
low=0.0,
high=1.0,
value=0.5,
usedefault=True,
- desc="If label_files are probability masks, threshold "
- "at specified probability.",
+ desc='If label_files are probability masks, threshold at specified probability.',
)
class_labels = traits.List(
mandatory=True,
- desc="Human-readable labels for each segment "
- "in the label file, in order. The length of "
- "class_labels must be equal to the number of "
- "segments (background excluded). This list "
- "corresponds to the class labels in label_file "
- "in ascending order",
+ desc='Human-readable labels for each segment '
+ 'in the label file, in order. The length of '
+ 'class_labels must be equal to the number of '
+ 'segments (background excluded). This list '
+ 'corresponds to the class labels in label_file '
+ 'in ascending order',
)
out_file = File(
- "signals.tsv",
+ 'signals.tsv',
usedefault=True,
exists=False,
- desc="The name of the file to output to. signals.tsv by default",
+ desc='The name of the file to output to. signals.tsv by default',
)
class _SignalExtractionOutputSpec(TraitedSpec):
out_file = File(
exists=True,
- desc="tsv file containing the computed "
- "signals, with as many columns as there are labels and as "
- "many rows as there are timepoints in in_file, plus a "
- "header row with values from class_labels",
+ desc='tsv file containing the computed '
+ 'signals, with as many columns as there are labels and as '
+ 'many rows as there are timepoints in in_file, plus a '
+ 'header row with values from class_labels',
)
@@ -721,16 +703,15 @@ def _run_interface(self, runtime):
# This check assumes all input masks have same dimensions
if img.shape[:3] != mask_imgs[0].shape[:3]:
raise NotImplementedError(
- "Input image and mask should be of "
- "same dimensions before running SignalExtraction"
+ 'Input image and mask should be of '
+ 'same dimensions before running SignalExtraction'
)
# Load the mask.
# If mask is a list, each mask is treated as its own ROI/parcel
# If mask is a 3D, each integer is treated as its own ROI/parcel
if len(mask_imgs) > 1:
masks = [
- np.asanyarray(mask_img.dataobj) >= self.inputs.prob_thres
- for mask_img in mask_imgs
+ np.asanyarray(mask_img.dataobj) >= self.inputs.prob_thres for mask_img in mask_imgs
]
else:
labelsmap = np.asanyarray(mask_imgs[0].dataobj)
@@ -739,7 +720,7 @@ def _run_interface(self, runtime):
masks = [labelsmap == label for label in labels]
if len(masks) != len(self.inputs.class_labels):
- raise ValueError("Number of masks must match number of labels")
+ raise ValueError('Number of masks must match number of labels')
series = np.zeros((img.shape[3], len(masks)))
@@ -748,7 +729,7 @@ def _run_interface(self, runtime):
series[:, j] = data[mask, :].mean(axis=0)
output = np.vstack((self.inputs.class_labels, series.astype(str)))
- self._results["out_file"] = os.path.join(runtime.cwd, self.inputs.out_file)
- np.savetxt(self._results["out_file"], output, fmt="%s", delimiter="\t")
+ self._results['out_file'] = os.path.join(runtime.cwd, self.inputs.out_file)
+ np.savetxt(self._results['out_file'], output, fmt='%s', delimiter='\t')
return runtime
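
Note: the target grid that TemplateDimensions computes above combines the
finest zooms with the largest shape across the valid inputs. A minimal
standalone sketch of that selection (the input file names are hypothetical):

    import nibabel as nb
    import numpy as np

    in_files = ['sub-01_T1w.nii.gz', 'sub-02_T1w.nii.gz']  # hypothetical inputs
    imgs = [nb.load(f) for f in in_files]
    all_zooms = np.array([img.header.get_zooms()[:3] for img in imgs])
    all_shapes = np.array([img.shape[:3] for img in imgs])

    target_zooms = tuple(all_zooms.min(axis=0))   # finest resolution per axis
    target_shape = tuple(all_shapes.max(axis=0))  # largest extent per axis
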
diff --git a/niworkflows/interfaces/itk.py b/niworkflows/interfaces/itk.py
index ce9ad7fed22..efdb7f7b120 100644
--- a/niworkflows/interfaces/itk.py
+++ b/niworkflows/interfaces/itk.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""ITK files handling."""
+
import os
from mimetypes import guess_type
from tempfile import TemporaryDirectory
@@ -43,20 +44,20 @@
from .fixes import _FixTraitApplyTransformsInputSpec
-LOGGER = logging.getLogger("nipype.interface")
+LOGGER = logging.getLogger('nipype.interface')
class _MCFLIRT2ITKInputSpec(BaseInterfaceInputSpec):
in_files = InputMultiObject(
- File(exists=True), mandatory=True, desc="list of MAT files from MCFLIRT"
+ File(exists=True), mandatory=True, desc='list of MAT files from MCFLIRT'
)
- in_reference = File(exists=True, mandatory=True, desc="input image for spatial reference")
- in_source = File(exists=True, mandatory=True, desc="input image for spatial source")
- num_threads = traits.Int(nohash=True, desc="number of parallel processes")
+ in_reference = File(exists=True, mandatory=True, desc='input image for spatial reference')
+ in_source = File(exists=True, mandatory=True, desc='input image for spatial source')
+ num_threads = traits.Int(nohash=True, desc='number of parallel processes')
class _MCFLIRT2ITKOutputSpec(TraitedSpec):
- out_file = File(desc="the output ITKTransform file")
+ out_file = File(desc='the output ITKTransform file')
class MCFLIRT2ITK(SimpleInterface):
@@ -67,7 +68,7 @@ class MCFLIRT2ITK(SimpleInterface):
def _run_interface(self, runtime):
if isdefined(self.inputs.num_threads):
- LOGGER.warning("Multithreading is deprecated. Remove the num_threads input.")
+ LOGGER.warning('Multithreading is deprecated. Remove the num_threads input.')
source = nb.load(self.inputs.in_source)
reference = nb.load(self.inputs.in_reference)
@@ -80,8 +81,8 @@ def _run_interface(self, runtime):
np.stack([a.matrix for a in affines], axis=0),
)
- self._results["out_file"] = os.path.join(runtime.cwd, "mat2itk.txt")
- affarray.to_filename(self._results["out_file"])
+ self._results['out_file'] = os.path.join(runtime.cwd, 'mat2itk.txt')
+ affarray.to_filename(self._results['out_file'])
return runtime
@@ -90,23 +91,19 @@ class _MultiApplyTransformsInputSpec(_FixTraitApplyTransformsInputSpec):
input_image = InputMultiObject(
File(exists=True),
mandatory=True,
- desc="input time-series as a list of volumes after splitting"
- " through the fourth dimension",
- )
- num_threads = traits.Int(
- 1, usedefault=True, nohash=True, desc="number of parallel processes"
+ desc='input time-series as a list of volumes after splitting'
+ ' through the fourth dimension',
)
+ num_threads = traits.Int(1, usedefault=True, nohash=True, desc='number of parallel processes')
save_cmd = traits.Bool(
- True, usedefault=True, desc="write a log of command lines that were applied"
- )
- copy_dtype = traits.Bool(
- False, usedefault=True, desc="copy dtype from inputs to outputs"
+ True, usedefault=True, desc='write a log of command lines that were applied'
)
+ copy_dtype = traits.Bool(False, usedefault=True, desc='copy dtype from inputs to outputs')
class _MultiApplyTransformsOutputSpec(TraitedSpec):
- out_files = OutputMultiObject(File(), desc="the output ITKTransform file")
- log_cmdline = File(desc="a list of command lines used to apply transforms")
+ out_files = OutputMultiObject(File(), desc='the output ITKTransform file')
+ log_cmdline = File(desc='a list of command lines used to apply transforms')
class MultiApplyTransforms(SimpleInterface):
@@ -120,25 +117,23 @@ def _run_interface(self, runtime):
ifargs = self.inputs.get()
# Extract number of input images and transforms
- in_files = ifargs.pop("input_image")
+ in_files = ifargs.pop('input_image')
num_files = len(in_files)
- transforms = ifargs.pop("transforms")
+ transforms = ifargs.pop('transforms')
# Get number of parallel jobs
- num_threads = ifargs.pop("num_threads")
- save_cmd = ifargs.pop("save_cmd")
+ num_threads = ifargs.pop('num_threads')
+ save_cmd = ifargs.pop('save_cmd')
# Remove certain keys
- for key in ["environ", "ignore_exception", "terminal_output", "output_image"]:
+ for key in ['environ', 'ignore_exception', 'terminal_output', 'output_image']:
ifargs.pop(key, None)
# Get a temp folder ready
- tmp_folder = TemporaryDirectory(prefix="tmp-", dir=runtime.cwd)
+ tmp_folder = TemporaryDirectory(prefix='tmp-', dir=runtime.cwd)
xfms_list = _arrange_xfms(transforms, num_files, tmp_folder)
if len(xfms_list) != num_files:
- raise ValueError(
- "Number of files and entries in the transforms list do not match"
- )
+ raise ValueError('Number of files and entries in the transforms list do not match')
# Inputs are ready to run in parallel
if num_threads < 1:
@@ -158,21 +153,19 @@ def _run_interface(self, runtime):
_applytfms,
[
(in_file, in_xfm, ifargs, i, runtime.cwd)
- for i, (in_file, in_xfm) in enumerate(
- zip(in_files, xfms_list)
- )
+ for i, (in_file, in_xfm) in enumerate(zip(in_files, xfms_list))
],
)
)
tmp_folder.cleanup()
# Collect output file names, after sorting by index
- self._results["out_files"] = [el[0] for el in out_files]
+ self._results['out_files'] = [el[0] for el in out_files]
if save_cmd:
- self._results["log_cmdline"] = os.path.join(runtime.cwd, "command.txt")
- with open(self._results["log_cmdline"], "w") as cmdfile:
- print("\n-------\n".join([el[1] for el in out_files]), file=cmdfile)
+ self._results['log_cmdline'] = os.path.join(runtime.cwd, 'command.txt')
+ with open(self._results['log_cmdline'], 'w') as cmdfile:
+ print('\n-------\n'.join([el[1] for el in out_files]), file=cmdfile)
return runtime
@@ -188,14 +181,14 @@ def _applytfms(args):
in_file, in_xform, ifargs, index, newpath = args
out_file = fname_presuffix(
- in_file, suffix="_xform-%05d" % index, newpath=newpath, use_ext=True
+ in_file, suffix='_xform-%05d' % index, newpath=newpath, use_ext=True
)
- copy_dtype = ifargs.pop("copy_dtype", False)
+ copy_dtype = ifargs.pop('copy_dtype', False)
xfm = ApplyTransforms(
input_image=in_file, transforms=in_xform, output_image=out_file, **ifargs
)
- xfm.terminal_output = "allatonce"
+ xfm.terminal_output = 'allatonce'
xfm.resource_monitor = False
runtime = xfm.run().runtime
@@ -216,16 +209,16 @@ def _arrange_xfms(transforms, num_files, tmp_folder):
Convenience method to arrange the list of transforms that should be applied
to each input file
"""
- base_xform = ["#Insight Transform File V1.0", "#Transform 0"]
+ base_xform = ['#Insight Transform File V1.0', '#Transform 0']
# Initialize the transforms matrix
xfms_T = []
for i, tf_file in enumerate(transforms):
- if tf_file == "identity":
+ if tf_file == 'identity':
xfms_T.append([tf_file] * num_files)
continue
# If it is a deformation field, copy to the tfs_matrix directly
- if guess_type(tf_file)[0] != "text/plain":
+ if guess_type(tf_file)[0] != 'text/plain':
xfms_T.append([tf_file] * num_files)
continue
@@ -233,15 +226,15 @@ def _arrange_xfms(transforms, num_files, tmp_folder):
tfdata = tf_fh.read().strip()
# If it is not an ITK transform file, copy to the tfs_matrix directly
- if not tfdata.startswith("#Insight Transform File"):
+ if not tfdata.startswith('#Insight Transform File'):
xfms_T.append([tf_file] * num_files)
continue
# Count number of transforms in ITK transform file
- nxforms = tfdata.count("#Transform")
+ nxforms = tfdata.count('#Transform')
# Remove first line
- tfdata = tfdata.split("\n")[1:]
+ tfdata = tfdata.split('\n')[1:]
# If it is a ITK transform file with only 1 xform, copy to the tfs_matrix directly
if nxforms == 1:
@@ -250,23 +243,23 @@ def _arrange_xfms(transforms, num_files, tmp_folder):
if nxforms != num_files:
raise RuntimeError(
- "Number of transforms (%d) found in the ITK file does not match"
- " the number of input image files (%d)." % (nxforms, num_files)
+ 'Number of transforms (%d) found in the ITK file does not match'
+ ' the number of input image files (%d).' % (nxforms, num_files)
)
# At this point splitting transforms will be necessary, generate a base name
out_base = fname_presuffix(
- tf_file, suffix="_pos-%03d_xfm-{:05d}" % i, newpath=tmp_folder.name
+ tf_file, suffix='_pos-%03d_xfm-{:05d}' % i, newpath=tmp_folder.name
).format
# Split combined ITK transforms file
split_xfms = []
for xform_i in range(nxforms):
# Find start token to extract
- startidx = tfdata.index("#Transform %d" % xform_i)
- next_xform = base_xform + tfdata[startidx + 1:startidx + 4] + [""]
+ startidx = tfdata.index('#Transform %d' % xform_i)
+ next_xform = base_xform + tfdata[startidx + 1 : startidx + 4] + ['']
xfm_file = out_base(xform_i)
- with open(xfm_file, "w") as out_xfm:
- out_xfm.write("\n".join(next_xform))
+ with open(xfm_file, 'w') as out_xfm:
+ out_xfm.write('\n'.join(next_xform))
split_xfms.append(xfm_file)
xfms_T.append(split_xfms)
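
Note: the splitting above renumbers every extracted transform as
'#Transform 0' so that each fragment parses as a standalone ITK transform
file. A self-contained sketch of that bookkeeping, using a made-up
two-transform file:

    lines = [
        '#Insight Transform File V1.0',
        '#Transform 0',
        'Transform: MatrixOffsetTransformBase_double_3_3',
        'Parameters: 1 0 0 0 1 0 0 0 1 0 0 0',
        'FixedParameters: 0 0 0',
        '#Transform 1',
        'Transform: MatrixOffsetTransformBase_double_3_3',
        'Parameters: 1 0 0 0 1 0 0 0 1 1 2 3',
        'FixedParameters: 0 0 0',
    ]
    base_xform = ['#Insight Transform File V1.0', '#Transform 0']
    nxforms = sum(ln.startswith('#Transform') for ln in lines)
    body = lines[1:]  # drop the file header, as _arrange_xfms does
    for xform_i in range(nxforms):
        startidx = body.index('#Transform %d' % xform_i)
        # each fragment is renumbered as '#Transform 0'
        fragment = base_xform + body[startidx + 1 : startidx + 4] + ['']
        print('\n'.join(fragment))
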
diff --git a/niworkflows/interfaces/morphology.py b/niworkflows/interfaces/morphology.py
index 8591911f144..afb871465e5 100644
--- a/niworkflows/interfaces/morphology.py
+++ b/niworkflows/interfaces/morphology.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Mathematical morphology operations as nipype interfaces."""
+
from pathlib import Path
import numpy as np
import nibabel as nb
@@ -35,12 +36,12 @@
class _BinaryDilationInputSpec(BaseInterfaceInputSpec):
- in_mask = File(exists=True, mandatory=True, desc="input mask")
- radius = traits.Int(2, usedefault=True, desc="Radius of dilation")
+ in_mask = File(exists=True, mandatory=True, desc='input mask')
+ radius = traits.Int(2, usedefault=True, desc='Radius of dilation')
class _BinaryDilationOutputSpec(TraitedSpec):
- out_mask = File(exists=False, desc="dilated mask")
+ out_mask = File(exists=False, desc='dilated mask')
class BinaryDilation(SimpleInterface):
@@ -59,21 +60,21 @@ def _run_interface(self, runtime):
maskdata,
radius=self.inputs.radius,
)
- out_file = str((Path(runtime.cwd) / "dilated_mask.nii.gz").absolute())
+ out_file = str((Path(runtime.cwd) / 'dilated_mask.nii.gz').absolute())
out_img = mask_img.__class__(dilated, mask_img.affine, mask_img.header)
- out_img.set_data_dtype("uint8")
+ out_img.set_data_dtype('uint8')
out_img.to_filename(out_file)
- self._results["out_mask"] = out_file
+ self._results['out_mask'] = out_file
return runtime
class _BinarySubtractInputSpec(BaseInterfaceInputSpec):
- in_base = File(exists=True, mandatory=True, desc="input base mask")
- in_subtract = File(exists=True, mandatory=True, desc="input subtract mask")
+ in_base = File(exists=True, mandatory=True, desc='input base mask')
+ in_subtract = File(exists=True, mandatory=True, desc='input subtract mask')
class _BinarySubtractionOutputSpec(TraitedSpec):
- out_mask = File(exists=False, desc="subtracted mask")
+ out_mask = File(exists=False, desc='subtracted mask')
class BinarySubtraction(SimpleInterface):
@@ -88,15 +89,11 @@ def _run_interface(self, runtime):
data = np.bool_(base_img.dataobj)
data[np.bool_(nb.load(self.inputs.in_subtract).dataobj)] = False
- out_file = str((Path(runtime.cwd) / "subtracted_mask.nii.gz").absolute())
- out_img = base_img.__class__(
- data,
- base_img.affine,
- base_img.header
- )
- out_img.set_data_dtype("uint8")
+ out_file = str((Path(runtime.cwd) / 'subtracted_mask.nii.gz').absolute())
+ out_img = base_img.__class__(data, base_img.affine, base_img.header)
+ out_img.set_data_dtype('uint8')
out_img.to_filename(out_file)
- self._results["out_mask"] = out_file
+ self._results['out_mask'] = out_file
return runtime
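
Note: BinaryDilation above wraps a ball-structured dilation; roughly
equivalent behavior can be obtained with scipy directly (a sketch only;
the niworkflows helper may differ in details):

    import numpy as np
    from scipy import ndimage

    def ball(radius):
        """Boolean ball-shaped structuring element of the given radius."""
        grid = np.mgrid[-radius:radius + 1, -radius:radius + 1, -radius:radius + 1]
        return (grid ** 2).sum(0) <= radius ** 2

    mask = np.zeros((9, 9, 9), dtype=bool)
    mask[4, 4, 4] = True  # a single seed voxel
    dilated = ndimage.binary_dilation(mask, structure=ball(2))
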
diff --git a/niworkflows/interfaces/nibabel.py b/niworkflows/interfaces/nibabel.py
index 611870a8386..e259ed8d7a5 100644
--- a/niworkflows/interfaces/nibabel.py
+++ b/niworkflows/interfaces/nibabel.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Nibabel-based interfaces."""
+
from pathlib import Path
from warnings import warn
@@ -38,19 +39,19 @@
InputMultiObject,
)
-IFLOGGER = logging.getLogger("nipype.interface")
+IFLOGGER = logging.getLogger('nipype.interface')
class _ApplyMaskInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="an image")
- in_mask = File(exists=True, mandatory=True, desc="a mask")
+ in_file = File(exists=True, mandatory=True, desc='an image')
+ in_mask = File(exists=True, mandatory=True, desc='a mask')
threshold = traits.Float(
- 0.5, usedefault=True, desc="a threshold to the mask, if it is nonbinary"
+ 0.5, usedefault=True, desc='a threshold to the mask, if it is nonbinary'
)
class _ApplyMaskOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="masked file")
+ out_file = File(exists=True, desc='masked file')
class ApplyMask(SimpleInterface):
@@ -64,32 +65,32 @@ def _run_interface(self, runtime):
msknii = nb.load(self.inputs.in_mask)
msk = msknii.get_fdata() > self.inputs.threshold
- self._results["out_file"] = fname_presuffix(
- self.inputs.in_file, suffix="_masked", newpath=runtime.cwd
+ self._results['out_file'] = fname_presuffix(
+ self.inputs.in_file, suffix='_masked', newpath=runtime.cwd
)
if img.dataobj.shape[:3] != msk.shape:
- raise ValueError("Image and mask sizes do not match.")
+ raise ValueError('Image and mask sizes do not match.')
if not np.allclose(img.affine, msknii.affine):
- raise ValueError("Image and mask affines are not similar enough.")
+ raise ValueError('Image and mask affines are not similar enough.')
if img.dataobj.ndim == msk.ndim + 1:
msk = msk[..., np.newaxis]
masked = img.__class__(img.dataobj * msk, None, img.header)
- masked.to_filename(self._results["out_file"])
+ masked.to_filename(self._results['out_file'])
return runtime
class _BinarizeInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="input image")
- thresh_low = traits.Float(mandatory=True, desc="non-inclusive lower threshold")
+ in_file = File(exists=True, mandatory=True, desc='input image')
+ thresh_low = traits.Float(mandatory=True, desc='non-inclusive lower threshold')
class _BinarizeOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="masked file")
- out_mask = File(exists=True, desc="output mask")
+ out_file = File(exists=True, desc='masked file')
+ out_mask = File(exists=True, desc='output mask')
class Binarize(SimpleInterface):
@@ -101,38 +102,39 @@ class Binarize(SimpleInterface):
def _run_interface(self, runtime):
img = nb.load(self.inputs.in_file)
- self._results["out_file"] = fname_presuffix(
- self.inputs.in_file, suffix="_masked", newpath=runtime.cwd
+ self._results['out_file'] = fname_presuffix(
+ self.inputs.in_file, suffix='_masked', newpath=runtime.cwd
)
- self._results["out_mask"] = fname_presuffix(
- self.inputs.in_file, suffix="_mask", newpath=runtime.cwd
+ self._results['out_mask'] = fname_presuffix(
+ self.inputs.in_file, suffix='_mask', newpath=runtime.cwd
)
data = img.get_fdata()
mask = data > self.inputs.thresh_low
data[~mask] = 0.0
masked = img.__class__(data, img.affine, img.header)
- masked.to_filename(self._results["out_file"])
+ masked.to_filename(self._results['out_file'])
- img.header.set_data_dtype("uint8")
- maskimg = img.__class__(mask.astype("uint8"), img.affine, img.header)
- maskimg.to_filename(self._results["out_mask"])
+ img.header.set_data_dtype('uint8')
+ maskimg = img.__class__(mask.astype('uint8'), img.affine, img.header)
+ maskimg.to_filename(self._results['out_mask'])
return runtime
class _BinaryDilationInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="binary file to dilate")
- radius = traits.Float(3, usedefault=True, desc="structure element (ball) radius")
- iterations = traits.Range(low=0, value=1, usedefault=True, desc="repeat dilation")
+ in_file = File(exists=True, mandatory=True, desc='binary file to dilate')
+ radius = traits.Float(3, usedefault=True, desc='structure element (ball) radius')
+ iterations = traits.Range(low=0, value=1, usedefault=True, desc='repeat dilation')
class _BinaryDilationOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="the input file, after binary dilation")
+ out_file = File(exists=True, desc='the input file, after binary dilation')
class BinaryDilation(SimpleInterface):
"""Morphological binary dilation using Scipy."""
+
# DEPRECATED in 1.7.0
# To remove in 1.9.0
@@ -140,15 +142,19 @@ class BinaryDilation(SimpleInterface):
output_spec = _BinaryDilationOutputSpec
def __init__(self, from_file=None, resource_monitor=None, **inputs):
- warn("""\
+ warn(
+ """\
niworkflows.interfaces.nibabel.BinaryDilation is deprecated in favor of
niworkflows.interfaces.morphology.BinaryDilation. Please validate that
interface for your use case and switch.
-""", DeprecationWarning, stacklevel=2)
+""",
+ DeprecationWarning,
+ stacklevel=2,
+ )
super().__init__(from_file=from_file, resource_monitor=resource_monitor, **inputs)
def _run_interface(self, runtime):
- self._results["out_file"] = _dilate(
+ self._results['out_file'] = _dilate(
self.inputs.in_file,
radius=self.inputs.radius,
iterations=self.inputs.iterations,
@@ -158,11 +164,11 @@ def _run_interface(self, runtime):
class _SplitSeriesInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="input 4d image")
+ in_file = File(exists=True, mandatory=True, desc='input 4d image')
class _SplitSeriesOutputSpec(TraitedSpec):
- out_files = OutputMultiObject(File(exists=True), desc="output list of 3d images")
+ out_files = OutputMultiObject(File(exists=True), desc='output list of 3d images')
class SplitSeries(SimpleInterface):
@@ -181,29 +187,25 @@ def _run_interface(self, runtime):
img.dataobj.reshape(img.shape[:3] + extra_dims), img.affine, img.header
)
- self._results["out_files"] = []
+ self._results['out_files'] = []
for i, img_3d in enumerate(nb.four_to_three(img)):
- out_file = fname_presuffix(
- in_file, suffix=f"_idx-{i:03}", newpath=runtime.cwd
- )
+ out_file = fname_presuffix(in_file, suffix=f'_idx-{i:03}', newpath=runtime.cwd)
img_3d.to_filename(out_file)
- self._results["out_files"].append(out_file)
+ self._results['out_files'].append(out_file)
return runtime
class _MergeSeriesInputSpec(BaseInterfaceInputSpec):
- in_files = InputMultiObject(
- File(exists=True, mandatory=True, desc="input list of 3d images")
- )
+ in_files = InputMultiObject(File(exists=True, mandatory=True, desc='input list of 3d images'))
allow_4D = traits.Bool(
- True, usedefault=True, desc="whether 4D images are allowed to be concatenated"
+ True, usedefault=True, desc='whether 4D images are allowed to be concatenated'
)
- affine_tolerance = traits.Float(desc="Absolute tolerance allowed between image affines")
+ affine_tolerance = traits.Float(desc='Absolute tolerance allowed between image affines')
class _MergeSeriesOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="output 4d image")
+ out_file = File(exists=True, desc='output 4d image')
class MergeSeries(SimpleInterface):
@@ -222,8 +224,8 @@ def _run_interface(self, runtime):
aff0 = filenii.affine
elif not np.allclose(aff0, filenii.affine, atol=self.inputs.affine_tolerance):
raise ValueError(
- "Difference in affines greater than allowed tolerance "
- f"{self.inputs.affine_tolerance}"
+ 'Difference in affines greater than allowed tolerance '
+ f'{self.inputs.affine_tolerance}'
)
ndim = filenii.dataobj.ndim
if ndim == 3:
@@ -233,29 +235,22 @@ def _run_interface(self, runtime):
nii_list += nb.four_to_three(filenii)
continue
else:
- raise ValueError(
- f"Input image has an incorrect number of dimensions ({ndim})."
- )
+ raise ValueError(f'Input image has an incorrect number of dimensions ({ndim}).')
- img_4d = nb.concat_images(
- nii_list,
- check_affines=not bool(self.inputs.affine_tolerance)
- )
- out_file = fname_presuffix(
- self.inputs.in_files[0], suffix="_merged", newpath=runtime.cwd
- )
+ img_4d = nb.concat_images(nii_list, check_affines=not bool(self.inputs.affine_tolerance))
+ out_file = fname_presuffix(self.inputs.in_files[0], suffix='_merged', newpath=runtime.cwd)
img_4d.to_filename(out_file)
- self._results["out_file"] = out_file
+ self._results['out_file'] = out_file
return runtime
class _MergeROIsInputSpec(BaseInterfaceInputSpec):
- in_files = InputMultiObject(File(exists=True), desc="ROI files to be merged")
+ in_files = InputMultiObject(File(exists=True), desc='ROI files to be merged')
class _MergeROIsOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="NIfTI containing all ROIs")
+ out_file = File(exists=True, desc='NIfTI containing all ROIs')
class MergeROIs(SimpleInterface):
@@ -265,22 +260,20 @@ class MergeROIs(SimpleInterface):
output_spec = _MergeROIsOutputSpec
def _run_interface(self, runtime):
- self._results["out_file"] = _merge_rois(self.inputs.in_files, newpath=runtime.cwd)
+ self._results['out_file'] = _merge_rois(self.inputs.in_files, newpath=runtime.cwd)
return runtime
class _RegridToZoomsInputSpec(BaseInterfaceInputSpec):
- in_file = File(
- exists=True, mandatory=True, desc="a file whose resolution is to change"
- )
+ in_file = File(exists=True, mandatory=True, desc='a file whose resolution is to change')
zooms = traits.Tuple(
traits.Float,
traits.Float,
traits.Float,
mandatory=True,
- desc="the new resolution",
+ desc='the new resolution',
)
- order = traits.Int(3, usedefault=True, desc="order of interpolator")
+ order = traits.Int(3, usedefault=True, desc='order of interpolator')
clip = traits.Bool(
True,
usedefault=True,
@@ -291,12 +284,12 @@ class _RegridToZoomsInputSpec(BaseInterfaceInputSpec):
traits.Float(),
default=False,
usedefault=True,
- desc="apply gaussian smoothing before resampling",
+ desc='apply gaussian smoothing before resampling',
)
class _RegridToZoomsOutputSpec(TraitedSpec):
- out_file = File(exists=True, dec="the regridded file")
+    out_file = File(exists=True, desc='the regridded file')
class RegridToZooms(SimpleInterface):
@@ -308,8 +301,8 @@ class RegridToZooms(SimpleInterface):
def _run_interface(self, runtime):
from ..utils.images import resample_by_spacing
- self._results["out_file"] = fname_presuffix(
- self.inputs.in_file, suffix="_regrid", newpath=runtime.cwd
+ self._results['out_file'] = fname_presuffix(
+ self.inputs.in_file, suffix='_regrid', newpath=runtime.cwd
)
resample_by_spacing(
self.inputs.in_file,
@@ -317,20 +310,18 @@ def _run_interface(self, runtime):
order=self.inputs.order,
clip=self.inputs.clip,
smooth=self.inputs.smooth,
- ).to_filename(self._results["out_file"])
+ ).to_filename(self._results['out_file'])
return runtime
class _DemeanImageInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="image to be demeaned")
- in_mask = File(
- exists=True, mandatory=True, desc="mask where median will be calculated"
- )
- only_mask = traits.Bool(False, usedefault=True, desc="demean only within mask")
+ in_file = File(exists=True, mandatory=True, desc='image to be demeaned')
+ in_mask = File(exists=True, mandatory=True, desc='mask where median will be calculated')
+ only_mask = traits.Bool(False, usedefault=True, desc='demean only within mask')
class _DemeanImageOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="demeaned image")
+ out_file = File(exists=True, desc='demeaned image')
class DemeanImage(SimpleInterface):
@@ -340,7 +331,7 @@ class DemeanImage(SimpleInterface):
def _run_interface(self, runtime):
from ..utils.images import demean
- self._results["out_file"] = demean(
+ self._results['out_file'] = demean(
self.inputs.in_file,
self.inputs.in_mask,
only_mask=self.inputs.only_mask,
@@ -350,15 +341,13 @@ def _run_interface(self, runtime):
class _FilledImageLikeInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="image to be demeaned")
- fill_value = traits.Float(1.0, usedefault=True, desc="value to fill")
- dtype = traits.Enum(
- "float32", "uint8", usedefault=True, desc="force output data type"
- )
+    in_file = File(exists=True, mandatory=True, desc='image to use as reference')
+ fill_value = traits.Float(1.0, usedefault=True, desc='value to fill')
+ dtype = traits.Enum('float32', 'uint8', usedefault=True, desc='force output data type')
class _FilledImageLikeOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="demeaned image")
+    out_file = File(exists=True, desc='image filled with fill_value')
class FilledImageLike(SimpleInterface):
@@ -368,7 +357,7 @@ class FilledImageLike(SimpleInterface):
def _run_interface(self, runtime):
from ..utils.images import nii_ones_like
- self._results["out_file"] = nii_ones_like(
+ self._results['out_file'] = nii_ones_like(
self.inputs.in_file,
self.inputs.fill_value,
self.inputs.dtype,
@@ -378,28 +367,26 @@ def _run_interface(self, runtime):
class _GenerateSamplingReferenceInputSpec(BaseInterfaceInputSpec):
- fixed_image = File(
- exists=True, mandatory=True, desc="the reference file, defines the FoV"
- )
- moving_image = File(exists=True, mandatory=True, desc="the pixel size reference")
- xform_code = traits.Enum(None, 2, 4, usedefault=True, desc="force xform code")
+ fixed_image = File(exists=True, mandatory=True, desc='the reference file, defines the FoV')
+ moving_image = File(exists=True, mandatory=True, desc='the pixel size reference')
+ xform_code = traits.Enum(None, 2, 4, usedefault=True, desc='force xform code')
fov_mask = traits.Either(
None,
File(exists=True),
usedefault=True,
- desc="mask to clip field of view (in fixed_image space)",
+ desc='mask to clip field of view (in fixed_image space)',
)
keep_native = traits.Bool(
True,
usedefault=True,
- desc="calculate a grid with native resolution covering "
- "the volume extent given by fixed_image, fast forward "
- "fixed_image otherwise.",
+ desc='calculate a grid with native resolution covering '
+ 'the volume extent given by fixed_image, fast forward '
+ 'fixed_image otherwise.',
)
class _GenerateSamplingReferenceOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="one file with all inputs flattened")
+ out_file = File(exists=True, desc='one file with all inputs flattened')
class GenerateSamplingReference(SimpleInterface):
@@ -422,39 +409,35 @@ class GenerateSamplingReference(SimpleInterface):
def _run_interface(self, runtime):
if not self.inputs.keep_native:
- self._results["out_file"] = self.inputs.fixed_image
+ self._results['out_file'] = self.inputs.fixed_image
return runtime
from .. import __version__
- self._results["out_file"] = _gen_reference(
+ self._results['out_file'] = _gen_reference(
self.inputs.fixed_image,
self.inputs.moving_image,
fov_mask=self.inputs.fov_mask,
force_xform_code=self.inputs.xform_code,
- message="%s (niworkflows v%s)" % (self.__class__.__name__, __version__),
+ message='%s (niworkflows v%s)' % (self.__class__.__name__, __version__),
newpath=runtime.cwd,
)
return runtime
class _IntensityClipInputSpec(BaseInterfaceInputSpec):
- in_file = File(
- exists=True, mandatory=True, desc="3D file which intensity will be clipped"
- )
- p_min = traits.Float(35.0, usedefault=True, desc="percentile for the lower bound")
- p_max = traits.Float(99.98, usedefault=True, desc="percentile for the upper bound")
+    in_file = File(exists=True, mandatory=True, desc='3D file whose intensity will be clipped')
+ p_min = traits.Float(35.0, usedefault=True, desc='percentile for the lower bound')
+ p_max = traits.Float(99.98, usedefault=True, desc='percentile for the upper bound')
nonnegative = traits.Bool(
- True, usedefault=True, desc="whether input intensities must be positive"
- )
- dtype = traits.Enum(
- "int16", "float32", "uint8", usedefault=True, desc="output datatype"
+ True, usedefault=True, desc='whether input intensities must be positive'
)
- invert = traits.Bool(False, usedefault=True, desc="finalize by inverting contrast")
+ dtype = traits.Enum('int16', 'float32', 'uint8', usedefault=True, desc='output datatype')
+ invert = traits.Bool(False, usedefault=True, desc='finalize by inverting contrast')
class _IntensityClipOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="file after clipping")
+ out_file = File(exists=True, desc='file after clipping')
class IntensityClip(SimpleInterface):
@@ -464,7 +447,7 @@ class IntensityClip(SimpleInterface):
output_spec = _IntensityClipOutputSpec
def _run_interface(self, runtime):
- self._results["out_file"] = _advanced_clip(
+ self._results['out_file'] = _advanced_clip(
self.inputs.in_file,
p_min=self.inputs.p_min,
p_max=self.inputs.p_max,
@@ -477,18 +460,18 @@ def _run_interface(self, runtime):
class _MapLabelsInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, desc="Segmented NIfTI")
+ in_file = File(exists=True, desc='Segmented NIfTI')
mappings = traits.Dict(
- xor=["mappings_file"],
- desc="Dictionary of label / replacement label pairs",
+ xor=['mappings_file'],
+ desc='Dictionary of label / replacement label pairs',
)
mappings_file = File(
- exists=True, xor=["mappings"], help="JSON composed of label / replacement label pairs."
+        exists=True, xor=['mappings'], desc='JSON composed of label / replacement label pairs.'
)
class _MapLabelsOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="Labeled file")
+ out_file = File(exists=True, desc='Labeled file')
class MapLabels(SimpleInterface):
@@ -499,7 +482,7 @@ class MapLabels(SimpleInterface):
def _run_interface(self, runtime):
mapping = self.inputs.mappings or _load_int_json(self.inputs.mappings_file)
- self._results["out_file"] = _remap_labels(
+ self._results['out_file'] = _remap_labels(
self.inputs.in_file,
mapping,
newpath=runtime.cwd,
@@ -508,17 +491,17 @@ def _run_interface(self, runtime):
class ReorientImageInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="Moving file")
+ in_file = File(exists=True, mandatory=True, desc='Moving file')
target_file = File(
- exists=True, xor=["target_orientation"], desc="Reference file to reorient to"
+ exists=True, xor=['target_orientation'], desc='Reference file to reorient to'
)
target_orientation = traits.Str(
- xor=["target_file"], desc="Axis codes of coordinate system to reorient to"
+ xor=['target_file'], desc='Axis codes of coordinate system to reorient to'
)
class ReorientImageOutputSpec(TraitedSpec):
- out_file = File(desc="Reoriented file")
+ out_file = File(desc='Reoriented file')
class ReorientImage(SimpleInterface):
@@ -526,7 +509,7 @@ class ReorientImage(SimpleInterface):
output_spec = ReorientImageOutputSpec
def _run_interface(self, runtime):
- self._results["out_file"] = reorient_file(
+ self._results['out_file'] = reorient_file(
self.inputs.in_file,
target_file=self.inputs.target_file,
target_ornt=self.inputs.target_orientation,
@@ -535,7 +518,11 @@ def _run_interface(self, runtime):
def reorient_file(
- in_file: str, *, target_file: str = None, target_ornt: str = None, newpath: str = None,
+ in_file: str,
+ *,
+ target_file: str = None,
+ target_ornt: str = None,
+ newpath: str = None,
) -> str:
"""
Reorient an image.
@@ -553,7 +540,7 @@ def reorient_file(
img = nb.load(in_file)
if not target_file and not target_ornt:
- raise TypeError("No target orientation or file is specified.")
+ raise TypeError('No target orientation or file is specified.')
if target_file:
target_img = nb.load(target_file)
@@ -563,7 +550,7 @@ def reorient_file(
if newpath is None:
newpath = Path()
- out_file = str((Path(newpath) / "reoriented.nii.gz").absolute())
+ out_file = str((Path(newpath) / 'reoriented.nii.gz').absolute())
reoriented.to_filename(out_file)
return out_file
@@ -593,9 +580,7 @@ def _gen_reference(
import nilearn.image as nli
if out_file is None:
- out_file = fname_presuffix(
- fixed_image, suffix="_reference", newpath=newpath
- )
+ out_file = fname_presuffix(fixed_image, suffix='_reference', newpath=newpath)
# Moving images may not be RAS/LPS (more generally, transverse-longitudinal-axial)
reoriented_moving_img = nb.as_closest_canonical(nb.load(moving_image))
@@ -606,9 +591,7 @@ def _gen_reference(
# A positive diagonal affine is RAS, hence the need to reorient above.
new_affine = np.diag(np.round(new_zooms, 3))
- resampled = nli.resample_img(
- fixed_image, target_affine=new_affine, interpolation="nearest"
- )
+ resampled = nli.resample_img(fixed_image, target_affine=new_affine, interpolation='nearest')
if fov_mask is not None:
# If we have a mask, resample again dropping (empty) samples
@@ -617,15 +600,13 @@ def _gen_reference(
masknii = nb.load(fov_mask)
if np.all(masknii.shape[:3] != fixednii.shape[:3]):
- raise RuntimeError("Fixed image and mask do not have the same dimensions.")
+ raise RuntimeError('Fixed image and mask do not have the same dimensions.')
if not np.allclose(masknii.affine, fixednii.affine, atol=1e-5):
- raise RuntimeError("Fixed image and mask have different affines")
+ raise RuntimeError('Fixed image and mask have different affines')
# Get mask into reference space
- masknii = nli.resample_img(
- masknii, target_affine=new_affine, interpolation="nearest"
- )
+ masknii = nli.resample_img(masknii, target_affine=new_affine, interpolation='nearest')
res_shape = np.array(masknii.shape[:3])
# Calculate a bounding box for the input mask
@@ -644,7 +625,7 @@ def _gen_reference(
fixed_image,
target_affine=new_affine_4,
target_shape=new_shape.tolist(),
- interpolation="nearest",
+ interpolation='nearest',
)
xform = resampled.affine # nibabel will pick the best affine
@@ -661,15 +642,21 @@ def _gen_reference(
# Keep 0, 2, 3, 4 unchanged
resampled.header.set_qform(xform, int(xform_code))
resampled.header.set_sform(xform, int(xform_code))
- resampled.header["descrip"] = "reference image generated by %s." % (
- message or "(unknown software)"
+ resampled.header['descrip'] = 'reference image generated by %s.' % (
+ message or '(unknown software)'
)
resampled.to_filename(out_file)
return out_file
def _advanced_clip(
- in_file, p_min=35, p_max=99.98, nonnegative=True, dtype="int16", invert=False, newpath=None,
+ in_file,
+ p_min=35,
+ p_max=99.98,
+ nonnegative=True,
+ dtype='int16',
+ invert=False,
+ newpath=None,
):
"""
Remove outliers at both ends of the intensity distribution and fit into a given dtype.
@@ -691,25 +678,19 @@ def _advanced_clip(
from scipy import ndimage
from skimage.morphology import ball
- out_file = (Path(newpath or "") / "clipped.nii.gz").absolute()
+ out_file = (Path(newpath or '') / 'clipped.nii.gz').absolute()
# Load data
img = nb.squeeze_image(nb.load(in_file))
if len(img.shape) != 3:
- raise RuntimeError(f"<{in_file}> is not a 3D file.")
- data = img.get_fdata(dtype="float32")
+ raise RuntimeError(f'<{in_file}> is not a 3D file.')
+ data = img.get_fdata(dtype='float32')
# Calculate stats on denoised version, to preempt outliers from biasing
denoised = ndimage.median_filter(data, footprint=ball(3))
- a_min = np.percentile(
- denoised[denoised > 0] if nonnegative else denoised,
- p_min
- )
- a_max = np.percentile(
- denoised[denoised > 0] if nonnegative else denoised,
- p_max
- )
+ a_min = np.percentile(denoised[denoised > 0] if nonnegative else denoised, p_min)
+ a_max = np.percentile(denoised[denoised > 0] if nonnegative else denoised, p_max)
# Clip and cast
data = np.clip(data, a_min=a_min, a_max=a_max)
@@ -719,12 +700,12 @@ def _advanced_clip(
if invert:
data = 1.0 - data
- if dtype in ("uint8", "int16"):
+ if dtype in ('uint8', 'int16'):
data = np.round(255 * data).astype(dtype)
hdr = img.header.copy()
hdr.set_data_dtype(dtype)
- hdr["cal_max"] = data.max()
+ hdr['cal_max'] = data.max()
img.__class__(data, img.affine, hdr).to_filename(out_file)
@@ -748,9 +729,9 @@ def _dilate(in_file, radius=3, iterations=1, newpath=None):
)
hdr = mask.header.copy()
- hdr.set_data_dtype("uint8")
- out_file = fname_presuffix(in_file, suffix="_dil", newpath=newpath or Path.cwd())
- mask.__class__(newdata.astype("uint8"), mask.affine, hdr).to_filename(out_file)
+ hdr.set_data_dtype('uint8')
+ out_file = fname_presuffix(in_file, suffix='_dil', newpath=newpath or Path.cwd())
+ mask.__class__(newdata.astype('uint8'), mask.affine, hdr).to_filename(out_file)
return out_file
@@ -776,18 +757,18 @@ def _merge_rois(in_files, newpath=None):
nonzero = np.any(data, axis=3)
for roi in in_files[1:]:
img = nb.load(roi)
- assert img.shape == data.shape, "Mismatch in image shape"
- assert np.allclose(img.affine, affine), "Mismatch in affine"
+ assert img.shape == data.shape, 'Mismatch in image shape'
+ assert np.allclose(img.affine, affine), 'Mismatch in affine'
roi_data = np.asanyarray(img.dataobj)
roi_nonzero = np.any(roi_data, axis=3)
- assert not np.any(roi_nonzero & nonzero), "Overlapping ROIs"
+ assert not np.any(roi_nonzero & nonzero), 'Overlapping ROIs'
nonzero |= roi_nonzero
data += roi_data
del roi_data
if newpath is None:
newpath = Path()
- out_file = str((Path(newpath) / "combined.nii.gz").absolute())
+ out_file = str((Path(newpath) / 'combined.nii.gz').absolute())
img.__class__(data, affine, header).to_filename(out_file)
return out_file
@@ -814,7 +795,7 @@ def _relabel(label):
if newpath is None:
newpath = Path()
- out_file = str((Path(newpath) / "relabeled.nii.gz").absolute())
+ out_file = str((Path(newpath) / 'relabeled.nii.gz').absolute())
img.__class__(out, img.affine, header=img.header).to_filename(out_file)
return out_file
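
Note: the core of _advanced_clip above is percentile clipping followed by
rescaling and casting. A sketch of that sequence, minus the median-filter
denoising step, on random data:

    import numpy as np

    rng = np.random.default_rng(0)
    data = rng.gamma(2.0, 2.0, size=(16, 16, 16)).astype('float32')

    a_min = np.percentile(data[data > 0], 35)     # p_min default
    a_max = np.percentile(data[data > 0], 99.98)  # p_max default
    data = np.clip(data, a_min=a_min, a_max=a_max)
    data -= data.min()
    data /= data.max()                            # now in [0, 1]
    data = np.round(255 * data).astype('int16')   # fit into the output dtype
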
diff --git a/niworkflows/interfaces/nilearn.py b/niworkflows/interfaces/nilearn.py
index 74694dd74c5..270a8c0bb8a 100644
--- a/niworkflows/interfaces/nilearn.py
+++ b/niworkflows/interfaces/nilearn.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Utilities based on nilearn."""
+
import os
import nibabel as nb
import numpy as np
@@ -42,38 +43,30 @@
try:
from nilearn import __version__ as NILEARN_VERSION
except ImportError:
- NILEARN_VERSION = "unknown"
+ NILEARN_VERSION = 'unknown'
-LOGGER = logging.getLogger("nipype.interface")
-__all__ = ["NILEARN_VERSION", "MaskEPI", "Merge", "ComputeEPIMask"]
+LOGGER = logging.getLogger('nipype.interface')
+__all__ = ['NILEARN_VERSION', 'MaskEPI', 'Merge', 'ComputeEPIMask']
class _MaskEPIInputSpec(BaseInterfaceInputSpec):
- in_files = InputMultiPath(
- File(exists=True), mandatory=True, desc="input EPI or list of files"
- )
+ in_files = InputMultiPath(File(exists=True), mandatory=True, desc='input EPI or list of files')
lower_cutoff = traits.Float(0.2, usedefault=True)
upper_cutoff = traits.Float(0.85, usedefault=True)
connected = traits.Bool(True, usedefault=True)
- enhance_t2 = traits.Bool(
- False, usedefault=True, desc="enhance T2 contrast on image"
- )
+ enhance_t2 = traits.Bool(False, usedefault=True, desc='enhance T2 contrast on image')
opening = traits.Int(2, usedefault=True)
closing = traits.Bool(True, usedefault=True)
fill_holes = traits.Bool(True, usedefault=True)
exclude_zeros = traits.Bool(False, usedefault=True)
ensure_finite = traits.Bool(True, usedefault=True)
- target_affine = traits.Either(
- None, traits.File(exists=True), default=None, usedefault=True
- )
- target_shape = traits.Either(
- None, traits.File(exists=True), default=None, usedefault=True
- )
+ target_affine = traits.Either(None, traits.File(exists=True), default=None, usedefault=True)
+ target_shape = traits.Either(None, traits.File(exists=True), default=None, usedefault=True)
no_sanitize = traits.Bool(False, usedefault=True)
class _MaskEPIOutputSpec(TraitedSpec):
- out_mask = File(exists=True, desc="output mask")
+ out_mask = File(exists=True, desc='output mask')
class MaskEPI(SimpleInterface):
@@ -126,38 +119,34 @@ def _run_interface(self, runtime):
sform, code = nii.get_sform(coded=True)
masknii.set_sform(sform, int(code))
- self._results["out_mask"] = fname_presuffix(
- self.inputs.in_files[0], suffix="_mask", newpath=runtime.cwd
+ self._results['out_mask'] = fname_presuffix(
+ self.inputs.in_files[0], suffix='_mask', newpath=runtime.cwd
)
- masknii.to_filename(self._results["out_mask"])
+ masknii.to_filename(self._results['out_mask'])
return runtime
class _MergeInputSpec(BaseInterfaceInputSpec):
in_files = InputMultiPath(
- File(exists=True), mandatory=True, desc="input list of files to merge"
+ File(exists=True), mandatory=True, desc='input list of files to merge'
)
dtype = traits.Enum(
- "f4",
- "f8",
- "u1",
- "u2",
- "u4",
- "i2",
- "i4",
+ 'f4',
+ 'f8',
+ 'u1',
+ 'u2',
+ 'u4',
+ 'i2',
+ 'i4',
usedefault=True,
- desc="numpy dtype of output image",
- )
- header_source = File(
- exists=True, desc="a Nifti file from which the header should be copied"
- )
- compress = traits.Bool(
- True, usedefault=True, desc="Use gzip compression on .nii output"
+ desc='numpy dtype of output image',
)
+ header_source = File(exists=True, desc='a Nifti file from which the header should be copied')
+ compress = traits.Bool(True, usedefault=True, desc='Use gzip compression on .nii output')
class _MergeOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="output merged file")
+ out_file = File(exists=True, desc='output merged file')
class Merge(SimpleInterface):
@@ -169,10 +158,10 @@ class Merge(SimpleInterface):
def _run_interface(self, runtime):
from nilearn.image import concat_imgs
- ext = ".nii.gz" if self.inputs.compress else ".nii"
- self._results["out_file"] = fname_presuffix(
+ ext = '.nii.gz' if self.inputs.compress else '.nii'
+ self._results['out_file'] = fname_presuffix(
self.inputs.in_files[0],
- suffix="_merged" + ext,
+ suffix='_merged' + ext,
newpath=runtime.cwd,
use_ext=False,
)
@@ -185,18 +174,18 @@ def _run_interface(self, runtime):
list(new_nii.header.get_zooms()[:3]) + [src_hdr.get_zooms()[3]]
)
- new_nii.to_filename(self._results["out_file"])
+ new_nii.to_filename(self._results['out_file'])
return runtime
class _ComputeEPIMaskInputSpec(nrb._SVGReportCapableInputSpec, BaseInterfaceInputSpec):
- in_file = File(exists=True, desc="3D or 4D EPI file")
- dilation = traits.Int(desc="binary dilation on the nilearn output")
+ in_file = File(exists=True, desc='3D or 4D EPI file')
+ dilation = traits.Int(desc='binary dilation on the nilearn output')
class _ComputeEPIMaskOutputSpec(reporting.ReportCapableOutputSpec):
- mask_file = File(exists=True, desc="Binary brain mask")
+ mask_file = File(exists=True, desc='Binary brain mask')
class ComputeEPIMask(nrb.SegmentationRC):
@@ -211,9 +200,7 @@ def _run_interface(self, runtime):
in_file_data = orig_file_nii.get_fdata()
# pad the data to avoid the mask estimation running into edge effects
- in_file_data_padded = np.pad(
- in_file_data, (1, 1), "constant", constant_values=(0, 0)
- )
+ in_file_data_padded = np.pad(in_file_data, (1, 1), 'constant', constant_values=(0, 0))
padded_nii = nb.Nifti1Image(
in_file_data_padded, orig_file_nii.affine, orig_file_nii.header
@@ -232,20 +219,18 @@ def _run_interface(self, runtime):
mask_data[in_file_data == 0] = 0
mask_data[np.isnan(in_file_data)] = 0
- better_mask = nb.Nifti1Image(
- mask_data, orig_file_nii.affine, orig_file_nii.header
- )
+ better_mask = nb.Nifti1Image(mask_data, orig_file_nii.affine, orig_file_nii.header)
better_mask.set_data_dtype(np.uint8)
- better_mask.to_filename("mask_file.nii.gz")
+ better_mask.to_filename('mask_file.nii.gz')
- self._mask_file = os.path.join(runtime.cwd, "mask_file.nii.gz")
+ self._mask_file = os.path.join(runtime.cwd, 'mask_file.nii.gz')
runtime.returncode = 0
return super()._run_interface(runtime)
def _list_outputs(self):
outputs = super()._list_outputs()
- outputs["mask_file"] = self._mask_file
+ outputs['mask_file'] = self._mask_file
return outputs
def _post_run_hook(self, runtime):
@@ -272,7 +257,7 @@ def _enhance_t2_contrast(in_file, newpath=None, offset=0.5):
effectively splits brain and background and makes the
overall distribution more Gaussian.
"""
- out_file = fname_presuffix(in_file, suffix="_t1enh", newpath=newpath)
+ out_file = fname_presuffix(in_file, suffix='_t1enh', newpath=newpath)
nii = nb.load(in_file)
data = nii.get_fdata()
maxd = data.max()
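
Note: Merge above is a thin wrapper over nilearn's concat_imgs. Minimal
direct usage, with hypothetical file names and the input spec's default
dtype:

    from nilearn.image import concat_imgs

    merged = concat_imgs(['vol1.nii.gz', 'vol2.nii.gz'], dtype='f4')
    merged.to_filename('merged.nii.gz')
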
diff --git a/niworkflows/interfaces/nitransforms.py b/niworkflows/interfaces/nitransforms.py
index ea7dff18f41..09e85946617 100644
--- a/niworkflows/interfaces/nitransforms.py
+++ b/niworkflows/interfaces/nitransforms.py
@@ -34,32 +34,30 @@
)
XFM_FMT = {
- ".lta": "fs",
- ".txt": "itk",
- ".mat": "itk",
- ".tfm": "itk",
+ '.lta': 'fs',
+ '.txt': 'itk',
+ '.mat': 'itk',
+ '.tfm': 'itk',
}
class _ConcatenateXFMsInputSpec(BaseInterfaceInputSpec):
- in_xfms = InputMultiObject(File(exists=True), desc="input transform piles")
- inverse = traits.Bool(False, usedefault=True, desc="generate inverse")
- out_fmt = traits.Enum("itk", "fs", usedefault=True, desc="output format")
+ in_xfms = InputMultiObject(File(exists=True), desc='input transform piles')
+ inverse = traits.Bool(False, usedefault=True, desc='generate inverse')
+ out_fmt = traits.Enum('itk', 'fs', usedefault=True, desc='output format')
reference = File(
exists=True,
- desc="reference file (only for writing LTA format, if not "
- "concatenating another LTA).",
+ desc='reference file (only for writing LTA format, if not concatenating another LTA).',
)
moving = File(
exists=True,
- desc="moving file (only for writing LTA format, if not "
- "concatenating another LTA).",
+ desc='moving file (only for writing LTA format, if not concatenating another LTA).',
)
class _ConcatenateXFMsOutputSpec(TraitedSpec):
- out_xfm = File(exists=True, desc="output, combined transform")
- out_inv = File(desc="output, combined transform")
+ out_xfm = File(exists=True, desc='output, combined transform')
+    out_inv = File(desc='output, inverse of the combined transform')
class ConcatenateXFMs(SimpleInterface):
@@ -69,15 +67,15 @@ class ConcatenateXFMs(SimpleInterface):
output_spec = _ConcatenateXFMsOutputSpec
def _run_interface(self, runtime):
- out_ext = "lta" if self.inputs.out_fmt == "fs" else "tfm"
+ out_ext = 'lta' if self.inputs.out_fmt == 'fs' else 'tfm'
reference = self.inputs.reference if isdefined(self.inputs.reference) else None
moving = self.inputs.moving if isdefined(self.inputs.moving) else None
- out_file = Path(runtime.cwd) / f"out_fwd.{out_ext}"
- self._results["out_xfm"] = str(out_file)
+ out_file = Path(runtime.cwd) / f'out_fwd.{out_ext}'
+ self._results['out_xfm'] = str(out_file)
out_inv = None
if self.inputs.inverse:
- out_inv = Path(runtime.cwd) / f"out_inv.{out_ext}"
- self._results["out_inv"] = str(out_inv)
+ out_inv = Path(runtime.cwd) / f'out_inv.{out_ext}'
+ self._results['out_inv'] = str(out_inv)
concatenate_xfms(
self.inputs.in_xfms,
@@ -90,9 +88,7 @@ def _run_interface(self, runtime):
return runtime
-def concatenate_xfms(
- in_files, out_file, out_inv=None, reference=None, moving=None, fmt="itk"
-):
+def concatenate_xfms(in_files, out_file, out_inv=None, reference=None, moving=None, fmt='itk'):
"""Concatenate linear transforms."""
from nitransforms.manip import TransformChain
from nitransforms.linear import load as load_affine
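
For orientation: `XFM_FMT` maps a transform file's extension to the nitransforms loader key, and `ConcatenateXFMs` names its outputs `out_fwd.<ext>` / `out_inv.<ext>` accordingly. A minimal sketch of that lookup (file names hypothetical):

    from pathlib import Path

    XFM_FMT = {'.lta': 'fs', '.txt': 'itk', '.mat': 'itk', '.tfm': 'itk'}

    def guess_fmt(path):
        # Map a transform file to its nitransforms format key by extension.
        return XFM_FMT[Path(path).suffix]

    assert guess_fmt('out_fwd.tfm') == 'itk'
    assert guess_fmt('reg.lta') == 'fs'
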
diff --git a/niworkflows/interfaces/norm.py b/niworkflows/interfaces/norm.py
index 50c010afc19..3b5c47cc1a1 100644
--- a/niworkflows/interfaces/norm.py
+++ b/niworkflows/interfaces/norm.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""A robust ANTs T1-to-MNI registration workflow with fallback retry."""
+
from os import path as op
from multiprocessing import cpu_count
@@ -51,60 +52,58 @@ class _SpatialNormalizationInputSpec(BaseInterfaceInputSpec):
package_version = niworkflows_version
# Moving image.
- moving_image = File(
- exists=True, mandatory=True, desc="image to apply transformation to"
- )
+ moving_image = File(exists=True, mandatory=True, desc='image to apply transformation to')
# Reference image (optional).
- reference_image = File(exists=True, desc="override the reference image")
+ reference_image = File(exists=True, desc='override the reference image')
# Moving mask (optional).
- moving_mask = File(exists=True, desc="moving image mask")
+ moving_mask = File(exists=True, desc='moving image mask')
# Reference mask (optional).
- reference_mask = File(exists=True, desc="reference image mask")
+ reference_mask = File(exists=True, desc='reference image mask')
# Lesion mask (optional).
- lesion_mask = File(exists=True, desc="lesion mask image")
+ lesion_mask = File(exists=True, desc='lesion mask image')
# Number of threads to use for ANTs/ITK processes.
num_threads = traits.Int(
- cpu_count(), usedefault=True, nohash=True, desc="Number of ITK threads to use"
+ cpu_count(), usedefault=True, nohash=True, desc='Number of ITK threads to use'
)
# ANTs parameter set to use.
flavor = traits.Enum(
- "precise",
- "testing",
- "fast",
+ 'precise',
+ 'testing',
+ 'fast',
usedefault=True,
- desc="registration settings parameter set",
+ desc='registration settings parameter set',
)
# Template orientation.
orientation = traits.Enum(
- "RAS",
- "LAS",
+ 'RAS',
+ 'LAS',
mandatory=True,
usedefault=True,
- desc="modify template orientation (should match input image)",
+ desc='modify template orientation (should match input image)',
)
# Modality of the reference image.
reference = traits.Enum(
- "T1w",
- "T2w",
- "boldref",
- "PDw",
+ 'T1w',
+ 'T2w',
+ 'boldref',
+ 'PDw',
mandatory=True,
usedefault=True,
- desc="set the reference modality for registration",
+ desc='set the reference modality for registration',
)
# T1 or EPI registration?
moving = traits.Enum(
- "T1w", "boldref", usedefault=True, mandatory=True, desc="registration type"
+ 'T1w', 'boldref', usedefault=True, mandatory=True, desc='registration type'
)
# Template to use as the default reference image.
template = traits.Str(
- "MNI152NLin2009cAsym", usedefault=True, desc="define the template to be used"
+ 'MNI152NLin2009cAsym', usedefault=True, desc='define the template to be used'
)
# Load other settings from file.
- settings = traits.List(File(exists=True), desc="pass on the list of settings files")
+ settings = traits.List(File(exists=True), desc='pass on the list of settings files')
# Resolution of the default template.
- template_spec = traits.DictStrAny(desc="template specifications")
- template_resolution = traits.Enum(1, 2, None, desc="(DEPRECATED) template resolution")
+ template_spec = traits.DictStrAny(desc='template specifications')
+ template_resolution = traits.Enum(1, 2, None, desc='(DEPRECATED) template resolution')
# Use explicit masking?
explicit_masking = traits.Bool(
True,
@@ -115,16 +114,12 @@ class _SpatialNormalizationInputSpec(BaseInterfaceInputSpec):
See https://sourceforge.net/p/advants/discussion/840261/thread/27216e69/#c7ba\
""",
)
- initial_moving_transform = File(exists=True, desc="transform for initialization")
- float = traits.Bool(
- False, usedefault=True, desc="use single precision calculations"
- )
+ initial_moving_transform = File(exists=True, desc='transform for initialization')
+ float = traits.Bool(False, usedefault=True, desc='use single precision calculations')
class _SpatialNormalizationOutputSpec(RegistrationOutputSpec):
- reference_image = File(
- exists=True, desc="reference image used for registration target"
- )
+ reference_image = File(exists=True, desc='reference image used for registration target')
class SpatialNormalization(BaseInterface):
@@ -140,14 +135,14 @@ class SpatialNormalization(BaseInterface):
def _list_outputs(self):
outputs = self.norm._list_outputs()
- outputs["reference_image"] = self._reference_image
+ outputs['reference_image'] = self._reference_image
return outputs
def __init__(self, **inputs):
self.norm = None
self._reference_image = None
self.retry = 1
- self.terminal_output = "file"
+ self.terminal_output = 'file'
super().__init__(**inputs)
def _get_settings(self):
@@ -158,14 +153,19 @@ def _get_settings(self):
# If user-defined settings exist...
if isdefined(self.inputs.settings):
# Note this in the log and return those settings.
- NIWORKFLOWS_LOG.info("User-defined settings, overriding defaults")
+ NIWORKFLOWS_LOG.info('User-defined settings, overriding defaults')
return self.inputs.settings
data_dir = load_data()
# Get a list of settings files that match the flavor.
- return sorted([str(path) for path in data_dir.glob(
- f"{self.inputs.moving.lower()}-mni_registration_{self.inputs.flavor}_*.json"
- )])
+ return sorted(
+ [
+ str(path)
+ for path in data_dir.glob(
+ f'{self.inputs.moving.lower()}-mni_registration_{self.inputs.flavor}_*.json'
+ )
+ ]
+ )
def _run_interface(self, runtime):
# Get a list of settings files.
@@ -173,29 +173,28 @@ def _run_interface(self, runtime):
ants_args = self._get_ants_args()
if not isdefined(self.inputs.initial_moving_transform):
- NIWORKFLOWS_LOG.info("Estimating initial transform using AffineInitializer")
+ NIWORKFLOWS_LOG.info('Estimating initial transform using AffineInitializer')
init = AffineInitializer(
- fixed_image=ants_args["fixed_image"],
- moving_image=ants_args["moving_image"],
+ fixed_image=ants_args['fixed_image'],
+ moving_image=ants_args['moving_image'],
num_threads=self.inputs.num_threads,
)
init.resource_monitor = False
- init.terminal_output = "allatonce"
+ init.terminal_output = 'allatonce'
init_result = init.run()
# Save outputs (if available)
- init_out = _write_outputs(init_result.runtime, ".nipype-init")
+ init_out = _write_outputs(init_result.runtime, '.nipype-init')
if init_out:
NIWORKFLOWS_LOG.info(
- "Terminal outputs of initialization saved (%s).",
- ", ".join(init_out),
+ 'Terminal outputs of initialization saved (%s).',
+ ', '.join(init_out),
)
- ants_args["initial_moving_transform"] = init_result.outputs.out_file
+ ants_args['initial_moving_transform'] = init_result.outputs.out_file
# For each settings file...
for ants_settings in settings_files:
-
- NIWORKFLOWS_LOG.info("Loading settings from file %s.", ants_settings)
+ NIWORKFLOWS_LOG.info('Loading settings from file %s.', ants_settings)
# Configure an ANTs run based on these settings.
self.norm = Registration(from_file=ants_settings, **ants_args)
self.norm.resource_monitor = False
@@ -203,30 +202,24 @@ def _run_interface(self, runtime):
cmd = self.norm.cmdline
# Print the retry number and command line call to the log.
- NIWORKFLOWS_LOG.info("Retry #%d, commandline: \n%s", self.retry, cmd)
+ NIWORKFLOWS_LOG.info('Retry #%d, commandline: \n%s', self.retry, cmd)
self.norm.ignore_exception = True
- with open("command.txt", "w") as cmdfile:
- print(cmd + "\n", file=cmdfile)
+ with open('command.txt', 'w') as cmdfile:
+ print(cmd + '\n', file=cmdfile)
# Try running registration.
interface_result = self.norm.run()
if interface_result.runtime.returncode != 0:
- NIWORKFLOWS_LOG.warning("Retry #%d failed.", self.retry)
+ NIWORKFLOWS_LOG.warning('Retry #%d failed.', self.retry)
# Save outputs (if available)
- term_out = _write_outputs(
- interface_result.runtime, ".nipype-%04d" % self.retry
- )
+ term_out = _write_outputs(interface_result.runtime, '.nipype-%04d' % self.retry)
if term_out:
- NIWORKFLOWS_LOG.warning(
- "Log of failed retry saved (%s).", ", ".join(term_out)
- )
+ NIWORKFLOWS_LOG.warning('Log of failed retry saved (%s).', ', '.join(term_out))
else:
runtime.returncode = 0
# Note this in the log.
- NIWORKFLOWS_LOG.info(
- "Successful spatial normalization (retry #%d).", self.retry
- )
+ NIWORKFLOWS_LOG.info('Successful spatial normalization (retry #%d).', self.retry)
# Break out of the retry loop.
return runtime
@@ -234,17 +227,17 @@ def _run_interface(self, runtime):
# If all tries fail, raise an error.
raise RuntimeError(
- "Robust spatial normalization failed after %d retries." % (self.retry - 1)
+ 'Robust spatial normalization failed after %d retries.' % (self.retry - 1)
)
def _get_ants_args(self):
args = {
- "moving_image": self.inputs.moving_image,
- "num_threads": self.inputs.num_threads,
- "float": self.inputs.float,
- "terminal_output": "file",
- "write_composite_transform": True,
- "initial_moving_transform": self.inputs.initial_moving_transform,
+ 'moving_image': self.inputs.moving_image,
+ 'num_threads': self.inputs.num_threads,
+ 'float': self.inputs.float,
+ 'terminal_output': 'file',
+ 'write_composite_transform': True,
+ 'initial_moving_transform': self.inputs.initial_moving_transform,
}
"""
@@ -281,17 +274,17 @@ def _get_ants_args(self):
if self.inputs.explicit_masking:
# Mask the moving image.
# Do not use a moving mask during registration.
- args["moving_image"] = mask(
+ args['moving_image'] = mask(
self.inputs.moving_image,
self.inputs.moving_mask,
- "moving_masked.nii.gz",
+ 'moving_masked.nii.gz',
)
# If explicit masking is disabled...
else:
# Use the moving mask during registration.
# Do not mask the moving image.
- args["moving_image_masks"] = self.inputs.moving_mask
+ args['moving_image_masks'] = self.inputs.moving_mask
# If a lesion mask is also provided...
if isdefined(self.inputs.lesion_mask):
@@ -299,7 +292,7 @@ def _get_ants_args(self):
# [global mask - lesion mask] (if explicit masking is enabled)
# [moving mask - lesion mask] (if explicit masking is disabled)
# Use this as the moving mask.
- args["moving_image_masks"] = create_cfm(
+ args['moving_image_masks'] = create_cfm(
self.inputs.moving_mask,
lesion_mask=self.inputs.lesion_mask,
global_mask=self.inputs.explicit_masking,
@@ -310,7 +303,7 @@ def _get_ants_args(self):
elif isdefined(self.inputs.lesion_mask):
# Create a cost function mask with the form: [global mask - lesion mask]
# Use this as the moving mask.
- args["moving_image_masks"] = create_cfm(
+ args['moving_image_masks'] = create_cfm(
self.inputs.moving_image,
lesion_mask=self.inputs.lesion_mask,
global_mask=True,
@@ -347,7 +340,7 @@ def _get_ants_args(self):
# If a reference image is provided...
if isdefined(self.inputs.reference_image):
# Use the reference image as the fixed image.
- args["fixed_image"] = self.inputs.reference_image
+ args['fixed_image'] = self.inputs.reference_image
self._reference_image = self.inputs.reference_image
# If a reference mask is provided...
@@ -356,17 +349,17 @@ def _get_ants_args(self):
if self.inputs.explicit_masking:
# Mask the reference image.
# Do not use a fixed mask during registration.
- args["fixed_image"] = mask(
+ args['fixed_image'] = mask(
self.inputs.reference_image,
self.inputs.reference_mask,
- "fixed_masked.nii.gz",
+ 'fixed_masked.nii.gz',
)
# If a lesion mask is also provided...
if isdefined(self.inputs.lesion_mask):
# Create a cost function mask with the form: [global mask]
# Use this as the fixed mask.
- args["fixed_image_masks"] = create_cfm(
+ args['fixed_image_masks'] = create_cfm(
self.inputs.reference_mask,
lesion_mask=None,
global_mask=True,
@@ -377,14 +370,14 @@ def _get_ants_args(self):
else:
# Use the reference mask as the fixed mask during registration.
# Do not mask the fixed image.
- args["fixed_image_masks"] = self.inputs.reference_mask
+ args['fixed_image_masks'] = self.inputs.reference_mask
# If no reference mask is provided...
# But a lesion mask *IS* provided ...
elif isdefined(self.inputs.lesion_mask):
# Create a cost function mask with the form: [global mask]
# Use this as the fixed mask
- args["fixed_image_masks"] = create_cfm(
+ args['fixed_image_masks'] = create_cfm(
self.inputs.reference_image, lesion_mask=None, global_mask=True
)
@@ -393,28 +386,22 @@ def _get_ants_args(self):
from ..utils.misc import get_template_specs
# Raise an error if the user specifies an unsupported image orientation.
- if self.inputs.orientation == "LAS":
+ if self.inputs.orientation == 'LAS':
raise NotImplementedError
template_spec = (
- self.inputs.template_spec
- if isdefined(self.inputs.template_spec)
- else {}
+ self.inputs.template_spec if isdefined(self.inputs.template_spec) else {}
)
- default_resolution = {"precise": 1, "fast": 2, "testing": 2}[
- self.inputs.flavor
- ]
+ default_resolution = {'precise': 1, 'fast': 2, 'testing': 2}[self.inputs.flavor]
# Set the template resolution.
if isdefined(self.inputs.template_resolution):
- NIWORKFLOWS_LOG.warning(
- "The use of ``template_resolution`` is deprecated"
- )
- template_spec["res"] = self.inputs.template_resolution
+ NIWORKFLOWS_LOG.warning('The use of ``template_resolution`` is deprecated')
+ template_spec['res'] = self.inputs.template_resolution
- template_spec["suffix"] = self.inputs.reference
- template_spec["desc"] = None
+ template_spec['suffix'] = self.inputs.reference
+ template_spec['desc'] = None
ref_template, template_spec = get_template_specs(
self.inputs.template,
template_spec=template_spec,
@@ -434,28 +421,26 @@ def _get_ants_args(self):
# Get the template specified by the user.
ref_mask = get_template(
- self.inputs.template, desc="brain", suffix="mask", **template_spec
- ) or get_template(self.inputs.template, label="brain", suffix="mask", **template_spec)
+ self.inputs.template, desc='brain', suffix='mask', **template_spec
+ ) or get_template(self.inputs.template, label='brain', suffix='mask', **template_spec)
# Default is explicit masking disabled
- args["fixed_image"] = ref_template
+ args['fixed_image'] = ref_template
# Use the template mask as the fixed mask.
- args["fixed_image_masks"] = str(ref_mask)
+ args['fixed_image_masks'] = str(ref_mask)
# Overwrite defaults if explicit masking
if self.inputs.explicit_masking:
# Mask the template image with the template mask.
- args["fixed_image"] = mask(
- ref_template, str(ref_mask), "fixed_masked.nii.gz"
- )
+ args['fixed_image'] = mask(ref_template, str(ref_mask), 'fixed_masked.nii.gz')
# Do not use a fixed mask during registration.
- args.pop("fixed_image_masks", None)
+ args.pop('fixed_image_masks', None)
# If a lesion mask is provided...
if isdefined(self.inputs.lesion_mask):
# Create a cost function mask with the form: [global mask]
# Use this as the fixed mask.
- args["fixed_image_masks"] = create_cfm(
+ args['fixed_image_masks'] = create_cfm(
str(ref_mask), lesion_mask=None, global_mask=True
)
@@ -536,27 +521,23 @@ def create_cfm(in_file, lesion_mask=None, global_mask=True, out_path=None):
from nipype.utils.filemanip import fname_presuffix
if out_path is None:
- out_path = fname_presuffix(in_file, suffix="_cfm", newpath=os.getcwd())
+ out_path = fname_presuffix(in_file, suffix='_cfm', newpath=os.getcwd())
else:
out_path = os.path.abspath(out_path)
if not global_mask and not lesion_mask:
NIWORKFLOWS_LOG.warning(
- "No lesion mask was provided and global_mask not requested, "
- "therefore the original mask will not be modified."
+ 'No lesion mask was provided and global_mask not requested, '
+ 'therefore the original mask will not be modified.'
)
# Load the input image
in_img = nb.load(in_file)
# If we want a global mask, create one based on the input image.
- data = (
- np.ones(in_img.shape, dtype=np.uint8)
- if global_mask
- else np.asanyarray(in_img.dataobj)
- )
+ data = np.ones(in_img.shape, dtype=np.uint8) if global_mask else np.asanyarray(in_img.dataobj)
if set(np.unique(data)) - {0, 1}:
- raise ValueError("`global_mask` must be true if `in_file` is not a binary mask")
+ raise ValueError('`global_mask` must be true if `in_file` is not a binary mask')
# If a lesion mask was provided, combine it with the secondary mask.
if lesion_mask is not None:
@@ -579,14 +560,14 @@ def create_cfm(in_file, lesion_mask=None, global_mask=True, out_path=None):
def _write_outputs(runtime, out_fname=None):
if out_fname is None:
- out_fname = ".nipype"
+ out_fname = '.nipype'
out_files = []
- for name in ["stdout", "stderr", "merged"]:
- stream = getattr(runtime, name, "")
+ for name in ['stdout', 'stderr', 'merged']:
+ stream = getattr(runtime, name, '')
if stream:
out_file = op.join(runtime.cwd, name + out_fname)
- with open(out_file, "w") as outf:
+ with open(out_file, 'w') as outf:
print(stream, file=outf)
out_files.append(out_file)
return out_files
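
The control flow in `_run_interface` above amounts to a retry ladder: each settings file configures one Registration attempt, a zero return code breaks out, and exhausting the list raises. A distilled sketch of that pattern, where `run_once` is a hypothetical callable standing in for the ANTs invocation:

    def run_with_retries(settings_files, run_once):
        # run_once(settings) -> process return code; 0 means success.
        for retry, ants_settings in enumerate(settings_files, start=1):
            if run_once(ants_settings) == 0:
                return retry  # number of attempts it took
        raise RuntimeError(
            'Robust spatial normalization failed after %d retries.' % len(settings_files)
        )
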
diff --git a/niworkflows/interfaces/plotting.py b/niworkflows/interfaces/plotting.py
index 0961f3cb5fb..ac7ad9998ba 100644
--- a/niworkflows/interfaces/plotting.py
+++ b/niworkflows/interfaces/plotting.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Visualization tools."""
+
import numpy as np
import nibabel as nb
@@ -42,19 +43,19 @@
class _FMRISummaryInputSpec(BaseInterfaceInputSpec):
- in_func = File(exists=True, mandatory=True, desc="")
- in_spikes_bg = File(exists=True, desc="")
- fd = File(exists=True, desc="")
- dvars = File(exists=True, desc="")
- outliers = File(exists=True, desc="")
- in_segm = File(exists=True, desc="")
- tr = traits.Either(None, traits.Float, usedefault=True, desc="the TR")
- fd_thres = traits.Float(0.2, usedefault=True, desc="")
- drop_trs = traits.Int(0, usedefault=True, desc="dummy scans")
+ in_func = File(exists=True, mandatory=True, desc='')
+ in_spikes_bg = File(exists=True, desc='')
+ fd = File(exists=True, desc='')
+ dvars = File(exists=True, desc='')
+ outliers = File(exists=True, desc='')
+ in_segm = File(exists=True, desc='')
+ tr = traits.Either(None, traits.Float, usedefault=True, desc='the TR')
+ fd_thres = traits.Float(0.2, usedefault=True, desc='')
+ drop_trs = traits.Int(0, usedefault=True, desc='dummy scans')
class _FMRISummaryOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="written file path")
+ out_file = File(exists=True, desc='written file path')
class FMRISummary(SimpleInterface):
@@ -66,53 +67,54 @@ class FMRISummary(SimpleInterface):
def _run_interface(self, runtime):
import pandas as pd
- self._results["out_file"] = fname_presuffix(
+ self._results['out_file'] = fname_presuffix(
self.inputs.in_func,
- suffix="_fmriplot.svg",
+ suffix='_fmriplot.svg',
use_ext=False,
newpath=runtime.cwd,
)
- dataframe = pd.DataFrame({
- "outliers": np.loadtxt(self.inputs.outliers, usecols=[0]).tolist(),
- # Pick non-standardize dvars (col 1)
- # First timepoint is NaN (difference)
- "DVARS": [np.nan]
- + np.loadtxt(self.inputs.dvars, skiprows=1, usecols=[1]).tolist(),
- # First timepoint is zero (reference volume)
- "FD": [0.0]
- + np.loadtxt(self.inputs.fd, skiprows=1, usecols=[0]).tolist(),
- }) if (
- isdefined(self.inputs.outliers)
- and isdefined(self.inputs.dvars)
- and isdefined(self.inputs.fd)
- ) else None
+ dataframe = (
+ pd.DataFrame(
+ {
+ 'outliers': np.loadtxt(self.inputs.outliers, usecols=[0]).tolist(),
+ # Pick non-standardized DVARS (col 1)
+ # First timepoint is NaN (difference)
+ 'DVARS': [np.nan]
+ + np.loadtxt(self.inputs.dvars, skiprows=1, usecols=[1]).tolist(),
+ # First timepoint is zero (reference volume)
+ 'FD': [0.0] + np.loadtxt(self.inputs.fd, skiprows=1, usecols=[0]).tolist(),
+ }
+ )
+ if (
+ isdefined(self.inputs.outliers)
+ and isdefined(self.inputs.dvars)
+ and isdefined(self.inputs.fd)
+ )
+ else None
+ )
input_data = nb.load(self.inputs.in_func)
seg_file = self.inputs.in_segm if isdefined(self.inputs.in_segm) else None
dataset, segments = (
_cifti_timeseries(input_data)
- if isinstance(input_data, nb.Cifti2Image) else
- _nifti_timeseries(input_data, seg_file)
+ if isinstance(input_data, nb.Cifti2Image)
+ else _nifti_timeseries(input_data, seg_file)
)
fig = fMRIPlot(
dataset,
segments=segments,
spikes_files=(
- [self.inputs.in_spikes_bg]
- if isdefined(self.inputs.in_spikes_bg) else None
- ),
- tr=(
- self.inputs.tr if isdefined(self.inputs.tr) else
- _get_tr(input_data)
+ [self.inputs.in_spikes_bg] if isdefined(self.inputs.in_spikes_bg) else None
),
+ tr=(self.inputs.tr if isdefined(self.inputs.tr) else _get_tr(input_data)),
confounds=dataframe,
- units={"outliers": "%", "FD": "mm"},
- vlines={"FD": [self.inputs.fd_thres]},
+ units={'outliers': '%', 'FD': 'mm'},
+ vlines={'FD': [self.inputs.fd_thres]},
nskip=self.inputs.drop_trs,
).plot()
- fig.savefig(self._results["out_file"], bbox_inches="tight")
+ fig.savefig(self._results['out_file'], bbox_inches='tight')
return runtime
@@ -120,28 +122,26 @@ class _CompCorVariancePlotInputSpec(BaseInterfaceInputSpec):
metadata_files = traits.List(
File(exists=True),
mandatory=True,
- desc="List of files containing component metadata",
+ desc='List of files containing component metadata',
)
metadata_sources = traits.List(
traits.Str,
- desc="List of names of decompositions "
- "(e.g., aCompCor, tCompCor) yielding "
- "the arguments in `metadata_files`",
+ desc='List of names of decompositions '
+ '(e.g., aCompCor, tCompCor) yielding '
+ 'the arguments in `metadata_files`',
)
variance_thresholds = traits.Tuple(
traits.Float(0.5),
traits.Float(0.7),
traits.Float(0.9),
usedefault=True,
- desc="Levels of explained variance to include in plot",
- )
- out_file = traits.Either(
- None, File, value=None, usedefault=True, desc="Path to save plot"
+ desc='Levels of explained variance to include in plot',
)
+ out_file = traits.Either(None, File, value=None, usedefault=True, desc='Path to save plot')
class _CompCorVariancePlotOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="Path to saved plot")
+ out_file = File(exists=True, desc='Path to saved plot')
class CompCorVariancePlot(SimpleInterface):
@@ -152,59 +152,51 @@ class CompCorVariancePlot(SimpleInterface):
def _run_interface(self, runtime):
if self.inputs.out_file is None:
- self._results["out_file"] = fname_presuffix(
+ self._results['out_file'] = fname_presuffix(
self.inputs.metadata_files[0],
- suffix="_compcor.svg",
+ suffix='_compcor.svg',
use_ext=False,
newpath=runtime.cwd,
)
else:
- self._results["out_file"] = self.inputs.out_file
+ self._results['out_file'] = self.inputs.out_file
compcor_variance_plot(
metadata_files=self.inputs.metadata_files,
metadata_sources=self.inputs.metadata_sources,
- output_file=self._results["out_file"],
+ output_file=self._results['out_file'],
varexp_thresh=self.inputs.variance_thresholds,
)
return runtime
class _ConfoundsCorrelationPlotInputSpec(BaseInterfaceInputSpec):
- confounds_file = File(
- exists=True, mandatory=True, desc="File containing confound regressors"
- )
- out_file = traits.Either(
- None, File, value=None, usedefault=True, desc="Path to save plot"
- )
+ confounds_file = File(exists=True, mandatory=True, desc='File containing confound regressors')
+ out_file = traits.Either(None, File, value=None, usedefault=True, desc='Path to save plot')
reference_column = traits.Str(
- "global_signal",
+ 'global_signal',
usedefault=True,
- desc="Column in the confound file for "
- "which all correlation magnitudes "
- "should be ranked and plotted",
- )
- columns = traits.List(
- traits.Str,
- desc="Filter out all regressors not found in this list."
+ desc='Column in the confound file for '
+ 'which all correlation magnitudes '
+ 'should be ranked and plotted',
)
+ columns = traits.List(traits.Str, desc='Filter out all regressors not found in this list.')
max_dim = traits.Int(
20,
usedefault=True,
- desc="Maximum number of regressors to include in "
- "plot. Regressors with highest magnitude of "
- "correlation with `reference_column` will be "
- "selected.",
+ desc='Maximum number of regressors to include in '
+ 'plot. Regressors with highest magnitude of '
+ 'correlation with `reference_column` will be '
+ 'selected.',
)
ignore_initial_volumes = traits.Int(
0,
usedefault=True,
- desc="Number of non-steady-state volumes at the beginning of the scan "
- "to ignore.",
+ desc='Number of non-steady-state volumes at the beginning of the scan to ignore.',
)
class _ConfoundsCorrelationPlotOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="Path to saved plot")
+ out_file = File(exists=True, desc='Path to saved plot')
class ConfoundsCorrelationPlot(SimpleInterface):
@@ -215,19 +207,19 @@ class ConfoundsCorrelationPlot(SimpleInterface):
def _run_interface(self, runtime):
if self.inputs.out_file is None:
- self._results["out_file"] = fname_presuffix(
+ self._results['out_file'] = fname_presuffix(
self.inputs.confounds_file,
- suffix="_confoundCorrelation.svg",
+ suffix='_confoundCorrelation.svg',
use_ext=False,
newpath=runtime.cwd,
)
else:
- self._results["out_file"] = self.inputs.out_file
+ self._results['out_file'] = self.inputs.out_file
confounds_correlation_plot(
confounds_file=self.inputs.confounds_file,
columns=self.inputs.columns if isdefined(self.inputs.columns) else None,
max_dim=self.inputs.max_dim,
- output_file=self._results["out_file"],
+ output_file=self._results['out_file'],
reference=self.inputs.reference_column,
ignore_initial_volumes=self.inputs.ignore_initial_volumes,
)
@@ -253,4 +245,4 @@ def _get_tr(img):
return img.header.matrix.get_index_map(0).series_step
except AttributeError:
return img.header.get_zooms()[-1]
- raise RuntimeError("Could not extract TR - unknown data structure type")
+ raise RuntimeError('Could not extract TR - unknown data structure type')
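
`_get_tr` falls back from the CIFTI-2 series step to the last NIfTI zoom, which by convention stores the repetition time. A self-contained check of the NIfTI branch on a synthetic image:

    import numpy as np
    import nibabel as nb

    # Synthetic 4D series: 2 mm isotropic voxels, TR = 2.5 s
    img = nb.Nifti1Image(np.zeros((4, 4, 4, 10), dtype='f4'), np.eye(4))
    img.header.set_zooms((2.0, 2.0, 2.0, 2.5))
    assert img.header.get_zooms()[-1] == 2.5
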
diff --git a/niworkflows/interfaces/probmaps.py b/niworkflows/interfaces/probmaps.py
index e3ad2a71aad..4e3a675bdec 100644
--- a/niworkflows/interfaces/probmaps.py
+++ b/niworkflows/interfaces/probmaps.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Utilities."""
+
import numpy as np
import nibabel as nb
@@ -37,36 +38,28 @@
)
-LOG = logging.getLogger("nipype.interface")
+LOG = logging.getLogger('nipype.interface')
class _TPM2ROIInputSpec(BaseInterfaceInputSpec):
- in_tpm = File(
- exists=True, mandatory=True, desc="Tissue probability map file in T1 space"
- )
- in_mask = File(
- exists=True, mandatory=True, desc="Binary mask of skull-stripped T1w image"
- )
+ in_tpm = File(exists=True, mandatory=True, desc='Tissue probability map file in T1 space')
+ in_mask = File(exists=True, mandatory=True, desc='Binary mask of skull-stripped T1w image')
mask_erode_mm = traits.Float(
- xor=["mask_erode_prop"], desc="erode input mask (kernel width in mm)"
- )
- erode_mm = traits.Float(
- xor=["erode_prop"], desc="erode output mask (kernel width in mm)"
+ xor=['mask_erode_prop'], desc='erode input mask (kernel width in mm)'
)
+ erode_mm = traits.Float(xor=['erode_prop'], desc='erode output mask (kernel width in mm)')
mask_erode_prop = traits.Float(
- xor=["mask_erode_mm"], desc="erode input mask (target volume ratio)"
- )
- erode_prop = traits.Float(
- xor=["erode_mm"], desc="erode output mask (target volume ratio)"
+ xor=['mask_erode_mm'], desc='erode input mask (target volume ratio)'
)
+ erode_prop = traits.Float(xor=['erode_mm'], desc='erode output mask (target volume ratio)')
prob_thresh = traits.Float(
- 0.95, usedefault=True, desc="threshold for the tissue probability maps"
+ 0.95, usedefault=True, desc='threshold for the tissue probability maps'
)
class _TPM2ROIOutputSpec(TraitedSpec):
- roi_file = File(exists=True, desc="output ROI file")
- eroded_mask = File(exists=True, desc="resulting eroded mask")
+ roi_file = File(exists=True, desc='output ROI file')
+ eroded_mask = File(exists=True, desc='resulting eroded mask')
class TPM2ROI(SimpleInterface):
@@ -107,20 +100,18 @@ def _run_interface(self, runtime):
self.inputs.prob_thresh,
newpath=runtime.cwd,
)
- self._results["roi_file"] = roi_file
- self._results["eroded_mask"] = eroded_mask
+ self._results['roi_file'] = roi_file
+ self._results['eroded_mask'] = eroded_mask
return runtime
class _AddTPMsInputSpec(BaseInterfaceInputSpec):
- in_files = InputMultiPath(
- File(exists=True), mandatory=True, desc="input list of ROIs"
- )
- indices = traits.List(traits.Int, desc="select specific maps")
+ in_files = InputMultiPath(File(exists=True), mandatory=True, desc='input list of ROIs')
+ indices = traits.List(traits.Int, desc='select specific maps')
class _AddTPMsOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="union of binarized input files")
+ out_file = File(exists=True, desc='union of binarized input files')
class AddTPMs(SimpleInterface):
@@ -137,27 +128,27 @@ def _run_interface(self, runtime):
indices = self.inputs.indices
if len(self.inputs.in_files) < 2:
- self._results["out_file"] = in_files[0]
+ self._results['out_file'] = in_files[0]
return runtime
first_fname = in_files[indices[0]]
if len(indices) == 1:
- self._results["out_file"] = first_fname
+ self._results['out_file'] = first_fname
return runtime
im = nb.concat_images([in_files[i] for i in indices])
data = im.get_fdata().sum(axis=3)
data = np.clip(data, a_min=0.0, a_max=1.0)
- out_file = fname_presuffix(first_fname, suffix="_tpmsum", newpath=runtime.cwd)
+ out_file = fname_presuffix(first_fname, suffix='_tpmsum', newpath=runtime.cwd)
newnii = im.__class__(data, im.affine, im.header)
newnii.set_data_dtype(np.float32)
# Set visualization thresholds
- newnii.header["cal_max"] = 1.0
- newnii.header["cal_min"] = 0.0
+ newnii.header['cal_max'] = 1.0
+ newnii.header['cal_min'] = 0.0
newnii.to_filename(out_file)
- self._results["out_file"] = out_file
+ self._results['out_file'] = out_file
return runtime
@@ -185,7 +176,7 @@ def _tpm2roi(
mask_erosion_prop is not None and mask_erosion_prop < 1
)
if erode_in:
- eroded_mask_file = fname_presuffix(in_mask, suffix="_eroded", newpath=newpath)
+ eroded_mask_file = fname_presuffix(in_mask, suffix='_eroded', newpath=newpath)
mask_img = nb.load(in_mask)
mask_data = np.asanyarray(mask_img.dataobj).astype(np.uint8)
if mask_erosion_mm:
@@ -219,7 +210,7 @@ def _tpm2roi(
roi_mask = nd.binary_erosion(roi_mask, iterations=1)
# Create image to resample
- roi_fname = fname_presuffix(in_tpm, suffix="_roi", newpath=newpath)
+ roi_fname = fname_presuffix(in_tpm, suffix='_roi', newpath=newpath)
roi_img = nb.Nifti1Image(roi_mask, tpm_img.affine, tpm_img.header)
roi_img.set_data_dtype(np.uint8)
roi_img.to_filename(roi_fname)
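
`AddTPMs` above reduces a list of tissue probability maps to their voxelwise sum, clipped back into [0, 1] so the result remains a valid probability map. The core arithmetic, on two hypothetical maps:

    import numpy as np

    tpm_a = np.array([0.2, 0.6, 0.9])
    tpm_b = np.array([0.3, 0.7, 0.4])

    summed = np.clip(tpm_a + tpm_b, a_min=0.0, a_max=1.0)
    # array([0.5, 1. , 1. ]) -- sums above 1.0 are capped
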
diff --git a/niworkflows/interfaces/reportlets/__init__.py b/niworkflows/interfaces/reportlets/__init__.py
index e4a66e15d0b..60e7f2df6e7 100644
--- a/niworkflows/interfaces/reportlets/__init__.py
+++ b/niworkflows/interfaces/reportlets/__init__.py
@@ -1,7 +1,5 @@
import warnings
-msg = (
- 'Niworkflows will be deprecating reporting in favor of a standalone library "nireports".'
-)
+msg = 'Niworkflows will be deprecating reporting in favor of a standalone library "nireports".'
warnings.warn(msg, PendingDeprecationWarning)
diff --git a/niworkflows/interfaces/reportlets/base.py b/niworkflows/interfaces/reportlets/base.py
index 5915cc43789..baf28caf976 100644
--- a/niworkflows/interfaces/reportlets/base.py
+++ b/niworkflows/interfaces/reportlets/base.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""class mixin and utilities for enabling reports for nipype interfaces."""
+
from nipype.interfaces.base import File, traits
from nipype.interfaces.mixins import reporting
from ... import NIWORKFLOWS_LOG
@@ -28,17 +29,15 @@
class _SVGReportCapableInputSpec(reporting.ReportCapableInputSpec):
- out_report = File(
- "report.svg", usedefault=True, desc="filename for the visual report"
- )
+ out_report = File('report.svg', usedefault=True, desc='filename for the visual report')
compress_report = traits.Enum(
- "auto",
+ 'auto',
True,
False,
usedefault=True,
- desc="Compress the reportlet using SVGO or"
+ desc='Compress the reportlet using SVGO or '
"WEBP. 'auto' - compress if relevant "
- "software is installed, True = force,"
+ 'software is installed, True = force, '
"False - don't attempt to compress",
)
@@ -49,8 +48,8 @@ class RegistrationRC(reporting.ReportCapableInterface):
_fixed_image = None
_moving_image = None
_fixed_image_mask = None
- _fixed_image_label = "fixed"
- _moving_image_label = "moving"
+ _fixed_image_label = 'fixed'
+ _moving_image_label = 'moving'
_contour = None
_dismiss_affine = False
@@ -60,7 +59,7 @@ def _generate_report(self):
from nilearn.masking import apply_mask, unmask
from niworkflows.viz.utils import plot_registration
- NIWORKFLOWS_LOG.info("Generating visual report")
+ NIWORKFLOWS_LOG.info('Generating visual report')
fixed_image_nii = load_img(self._fixed_image)
moving_image_nii = load_img(self._moving_image)
@@ -91,7 +90,7 @@ def _generate_report(self):
compose_view(
plot_registration(
fixed_image_nii,
- "fixed-image",
+ 'fixed-image',
estimate_brightness=True,
cuts=cuts,
label=self._fixed_image_label,
@@ -101,7 +100,7 @@ def _generate_report(self):
),
plot_registration(
moving_image_nii,
- "moving-image",
+ 'moving-image',
estimate_brightness=True,
cuts=cuts,
label=self._moving_image_label,
@@ -146,7 +145,7 @@ def _generate_report(self):
from nilearn.masking import apply_mask, unmask
from niworkflows.viz.utils import plot_registration
- NIWORKFLOWS_LOG.info("Generating visual report")
+ NIWORKFLOWS_LOG.info('Generating visual report')
anat = load_img(self._anat_file)
contour_nii = load_img(self._contour) if self._contour is not None else None
@@ -167,7 +166,7 @@ def _generate_report(self):
compose_view(
plot_registration(
anat,
- "fixed-image",
+ 'fixed-image',
estimate_brightness=True,
cuts=cuts,
contour=contour_nii,
@@ -189,9 +188,7 @@ class ReportingInterface(reporting.ReportCapableInterface):
output_spec = reporting.ReportCapableOutputSpec
def __init__(self, generate_report=True, **kwargs):
- super().__init__(
- generate_report=generate_report, **kwargs
- )
+ super().__init__(generate_report=generate_report, **kwargs)
def _run_interface(self, runtime):
return runtime
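
`compress_report` accepts 'auto', True, or False: 'auto' compresses only when the relevant software is available, while the booleans force the decision either way. A sketch of how consuming code might branch on it (`should_compress` is a hypothetical helper, not part of the module):

    def should_compress(setting, svgo_available):
        # 'auto' defers to tool availability; True/False are explicit.
        if setting == 'auto':
            return svgo_available
        return bool(setting)
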
diff --git a/niworkflows/interfaces/reportlets/masks.py b/niworkflows/interfaces/reportlets/masks.py
index bac370fe280..0330bb77920 100644
--- a/niworkflows/interfaces/reportlets/masks.py
+++ b/niworkflows/interfaces/reportlets/masks.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""ReportCapableInterfaces for masks tools."""
+
import os
import numpy as np
import nibabel as nb
@@ -43,9 +44,7 @@ class _BETInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.preprocess.BETInputSp
pass
-class _BETOutputSpecRPT(
- reporting.ReportCapableOutputSpec, fsl.preprocess.BETOutputSpec
-):
+class _BETOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.preprocess.BETOutputSpec):
pass
@@ -94,16 +93,11 @@ class BrainExtractionRPT(nrb.SegmentationRC, ants.segmentation.BrainExtraction):
output_spec = _BrainExtractionOutputSpecRPT
def _post_run_hook(self, runtime):
- """ generates a report showing slices from each axis """
+ """generates a report showing slices from each axis"""
- brain_extraction_mask = self.aggregate_outputs(
- runtime=runtime
- ).BrainExtractionMask
+ brain_extraction_mask = self.aggregate_outputs(runtime=runtime).BrainExtractionMask
- if (
- isdefined(self.inputs.keep_temporary_files)
- and self.inputs.keep_temporary_files == 1
- ):
+ if isdefined(self.inputs.keep_temporary_files) and self.inputs.keep_temporary_files == 1:
self._anat_file = self.aggregate_outputs(runtime=runtime).N4Corrected0
else:
self._anat_file = self.inputs.anatomical_image
@@ -124,9 +118,7 @@ class _ACompCorInputSpecRPT(nrb._SVGReportCapableInputSpec, confounds.CompCorInp
pass
-class _ACompCorOutputSpecRPT(
- reporting.ReportCapableOutputSpec, confounds.CompCorOutputSpec
-):
+class _ACompCorOutputSpecRPT(reporting.ReportCapableOutputSpec, confounds.CompCorOutputSpec):
pass
@@ -135,12 +127,12 @@ class ACompCorRPT(nrb.SegmentationRC, confounds.ACompCor):
output_spec = _ACompCorOutputSpecRPT
def _post_run_hook(self, runtime):
- """ generates a report showing slices from each axis """
+ """generates a report showing slices from each axis"""
if len(self.inputs.mask_files) != 1:
raise ValueError(
- "ACompCorRPT only supports a single input mask. "
- "A list %s was found." % self.inputs.mask_files
+ 'ACompCorRPT only supports a single input mask. '
+ 'A list %s was found.' % self.inputs.mask_files
)
self._anat_file = self.inputs.realigned_file
self._mask_file = self.inputs.mask_files[0]
@@ -156,15 +148,11 @@ def _post_run_hook(self, runtime):
return super()._post_run_hook(runtime)
-class _TCompCorInputSpecRPT(
- nrb._SVGReportCapableInputSpec, confounds.TCompCorInputSpec
-):
+class _TCompCorInputSpecRPT(nrb._SVGReportCapableInputSpec, confounds.TCompCorInputSpec):
pass
-class _TCompCorOutputSpecRPT(
- reporting.ReportCapableOutputSpec, confounds.TCompCorOutputSpec
-):
+class _TCompCorOutputSpecRPT(reporting.ReportCapableOutputSpec, confounds.TCompCorOutputSpec):
pass
@@ -173,16 +161,14 @@ class TCompCorRPT(nrb.SegmentationRC, confounds.TCompCor):
output_spec = _TCompCorOutputSpecRPT
def _post_run_hook(self, runtime):
- """ generates a report showing slices from each axis """
+ """generates a report showing slices from each axis"""
- high_variance_masks = self.aggregate_outputs(
- runtime=runtime
- ).high_variance_masks
+ high_variance_masks = self.aggregate_outputs(runtime=runtime).high_variance_masks
if isinstance(high_variance_masks, list):
raise ValueError(
- "TCompCorRPT only supports a single output high variance mask. "
- "A list %s was found." % high_variance_masks
+ 'TCompCorRPT only supports a single output high variance mask. '
+ 'A list %s was found.' % high_variance_masks
)
self._anat_file = self.inputs.realigned_file
self._mask_file = high_variance_masks
@@ -199,8 +185,8 @@ def _post_run_hook(self, runtime):
class _SimpleShowMaskInputSpec(nrb._SVGReportCapableInputSpec):
- background_file = File(exists=True, mandatory=True, desc="file before")
- mask_file = File(exists=True, mandatory=True, desc="file before")
+ background_file = File(exists=True, mandatory=True, desc='background image')
+ mask_file = File(exists=True, mandatory=True, desc='mask to overlay')
class SimpleShowMaskRPT(nrb.SegmentationRC, nrb.ReportingInterface):
@@ -216,24 +202,22 @@ def _post_run_hook(self, runtime):
class _ROIsPlotInputSpecRPT(nrb._SVGReportCapableInputSpec):
- in_file = File(
- exists=True, mandatory=True, desc="the volume where ROIs are defined"
- )
+ in_file = File(exists=True, mandatory=True, desc='the volume where ROIs are defined')
in_rois = InputMultiPath(
- File(exists=True), mandatory=True, desc="a list of regions to be plotted"
+ File(exists=True), mandatory=True, desc='a list of regions to be plotted'
)
- in_mask = File(exists=True, desc="a special region, eg. the brain mask")
- masked = traits.Bool(False, usedefault=True, desc="mask in_file prior plotting")
+ in_mask = File(exists=True, desc='a special region, e.g., the brain mask')
+ masked = traits.Bool(False, usedefault=True, desc='mask in_file prior to plotting')
colors = traits.Either(
- None, traits.List(Str), usedefault=True, desc="use specific colors for contours"
+ None, traits.List(Str), usedefault=True, desc='use specific colors for contours'
)
levels = traits.Either(
None,
traits.List(traits.Float),
usedefault=True,
- desc="pass levels to nilearn.plotting",
+ desc='pass levels to nilearn.plotting',
)
- mask_color = Str("r", usedefault=True, desc="color for mask")
+ mask_color = Str('r', usedefault=True, desc='color for mask')
class ROIsPlot(nrb.ReportingInterface):
@@ -253,23 +237,21 @@ def _generate_report(self):
if len(seg_files) == 1: # in_rois is a segmentation
nsegs = len(levels)
if nsegs == 0:
- levels = np.unique(
- np.round(nb.load(seg_files[0]).get_fdata(dtype="float32"))
- )
+ levels = np.unique(np.round(nb.load(seg_files[0]).get_fdata(dtype='float32')))
levels = (levels[levels > 0] - 0.5).tolist()
nsegs = len(levels)
levels = [levels]
missing = nsegs - len(colors)
if missing > 0:
- colors = colors + color_palette("husl", missing)
+ colors = colors + color_palette('husl', missing)
colors = [colors]
else: # in_rois is a list of masks
nsegs = len(seg_files)
levels = [[0.5]] * nsegs
missing = nsegs - len(colors)
if missing > 0:
- colors = [[c] for c in colors + color_palette("husl", missing)]
+ colors = [[c] for c in colors + color_palette('husl', missing)]
if mask_file:
seg_files.insert(0, mask_file)
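
When `ROIsPlot` has more segmentations than user-supplied colors, it pads the list from seaborn's husl palette, as the hunk shows. The padding step in isolation (assumes seaborn is installed):

    from seaborn import color_palette

    colors = ['r', 'g']  # user-supplied contour colors
    nsegs = 5            # segmentations to draw
    missing = nsegs - len(colors)
    if missing > 0:
        colors = colors + color_palette('husl', missing)
    assert len(colors) == nsegs
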
diff --git a/niworkflows/interfaces/reportlets/registration.py b/niworkflows/interfaces/reportlets/registration.py
index dde3b6984ba..87a719d87b8 100644
--- a/niworkflows/interfaces/reportlets/registration.py
+++ b/niworkflows/interfaces/reportlets/registration.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""ReportCapableInterfaces for registration tools."""
+
import os
from looseversion import LooseVersion
@@ -66,15 +67,15 @@ class SpatialNormalizationRPT(nrb.RegistrationRC, SpatialNormalization):
def _post_run_hook(self, runtime):
# We need to dig into the internal ants.Registration interface
- self._fixed_image = self._get_ants_args()["fixed_image"]
+ self._fixed_image = self._get_ants_args()['fixed_image']
if isinstance(self._fixed_image, (list, tuple)):
self._fixed_image = self._fixed_image[0] # get first item if list
- if self._get_ants_args().get("fixed_image_mask") is not None:
- self._fixed_image_mask = self._get_ants_args().get("fixed_image_mask")
+ if self._get_ants_args().get('fixed_image_mask') is not None:
+ self._fixed_image_mask = self._get_ants_args().get('fixed_image_mask')
self._moving_image = self.aggregate_outputs(runtime=runtime).warped_image
NIWORKFLOWS_LOG.info(
- "Report - setting fixed (%s) and moving (%s) images",
+ 'Report - setting fixed (%s) and moving (%s) images',
self._fixed_image,
self._moving_image,
)
@@ -102,7 +103,7 @@ def _post_run_hook(self, runtime):
self._fixed_image = self.inputs.fixed_image[0]
self._moving_image = self.aggregate_outputs(runtime=runtime).warped_image
NIWORKFLOWS_LOG.info(
- "Report - setting fixed (%s) and moving (%s) images",
+ 'Report - setting fixed (%s) and moving (%s) images',
self._fixed_image,
self._moving_image,
)
@@ -130,7 +131,7 @@ def _post_run_hook(self, runtime):
self._fixed_image = self.inputs.reference_image
self._moving_image = self.aggregate_outputs(runtime=runtime).output_image
NIWORKFLOWS_LOG.info(
- "Report - setting fixed (%s) and moving (%s) images",
+ 'Report - setting fixed (%s) and moving (%s) images',
self._fixed_image,
self._moving_image,
)
@@ -138,15 +139,11 @@ def _post_run_hook(self, runtime):
return super()._post_run_hook(runtime)
-class _ApplyTOPUPInputSpecRPT(
- nrb._SVGReportCapableInputSpec, fsl.epi.ApplyTOPUPInputSpec
-):
- wm_seg = File(argstr="-wmseg %s", desc="reference white matter segmentation mask")
+class _ApplyTOPUPInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.epi.ApplyTOPUPInputSpec):
+ wm_seg = File(argstr='-wmseg %s', desc='reference white matter segmentation mask')
-class _ApplyTOPUPOutputSpecRPT(
- reporting.ReportCapableOutputSpec, fsl.epi.ApplyTOPUPOutputSpec
-):
+class _ApplyTOPUPOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.epi.ApplyTOPUPOutputSpec):
pass
@@ -157,15 +154,13 @@ class ApplyTOPUPRPT(nrb.RegistrationRC, fsl.ApplyTOPUP):
def _post_run_hook(self, runtime):
from nilearn.image import index_img
- self._fixed_image_label = "after"
- self._moving_image_label = "before"
- self._fixed_image = index_img(
- self.aggregate_outputs(runtime=runtime).out_corrected, 0
- )
+ self._fixed_image_label = 'after'
+ self._moving_image_label = 'before'
+ self._fixed_image = index_img(self.aggregate_outputs(runtime=runtime).out_corrected, 0)
self._moving_image = index_img(self.inputs.in_files[0], 0)
self._contour = self.inputs.wm_seg if isdefined(self.inputs.wm_seg) else None
NIWORKFLOWS_LOG.info(
- "Report - setting corrected (%s) and warped (%s) images",
+ 'Report - setting corrected (%s) and warped (%s) images',
self._fixed_image,
self._moving_image,
)
@@ -174,12 +169,10 @@ def _post_run_hook(self, runtime):
class _FUGUEInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.preprocess.FUGUEInputSpec):
- wm_seg = File(argstr="-wmseg %s", desc="reference white matter segmentation mask")
+ wm_seg = File(argstr='-wmseg %s', desc='reference white matter segmentation mask')
-class _FUGUEOutputSpecRPT(
- reporting.ReportCapableOutputSpec, fsl.preprocess.FUGUEOutputSpec
-):
+class _FUGUEOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.preprocess.FUGUEOutputSpec):
pass
@@ -188,13 +181,13 @@ class FUGUERPT(nrb.RegistrationRC, fsl.FUGUE):
output_spec = _FUGUEOutputSpecRPT
def _post_run_hook(self, runtime):
- self._fixed_image_label = "after"
- self._moving_image_label = "before"
+ self._fixed_image_label = 'after'
+ self._moving_image_label = 'before'
self._fixed_image = self.aggregate_outputs(runtime=runtime).unwarped_file
self._moving_image = self.inputs.in_file
self._contour = self.inputs.wm_seg if isdefined(self.inputs.wm_seg) else None
NIWORKFLOWS_LOG.info(
- "Report - setting corrected (%s) and warped (%s) images",
+ 'Report - setting corrected (%s) and warped (%s) images',
self._fixed_image,
self._moving_image,
)
@@ -206,9 +199,7 @@ class _FLIRTInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.preprocess.FLIRTInp
pass
-class _FLIRTOutputSpecRPT(
- reporting.ReportCapableOutputSpec, fsl.preprocess.FLIRTOutputSpec
-):
+class _FLIRTOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.preprocess.FLIRTOutputSpec):
pass
@@ -221,7 +212,7 @@ def _post_run_hook(self, runtime):
self._moving_image = self.aggregate_outputs(runtime=runtime).out_file
self._contour = self.inputs.wm_seg if isdefined(self.inputs.wm_seg) else None
NIWORKFLOWS_LOG.info(
- "Report - setting fixed (%s) and moving (%s) images",
+ 'Report - setting fixed (%s) and moving (%s) images',
self._fixed_image,
self._moving_image,
)
@@ -229,9 +220,7 @@ def _post_run_hook(self, runtime):
return super()._post_run_hook(runtime)
-class _ApplyXFMInputSpecRPT(
- nrb._SVGReportCapableInputSpec, fsl.preprocess.ApplyXFMInputSpec
-):
+class _ApplyXFMInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.preprocess.ApplyXFMInputSpec):
pass
@@ -240,7 +229,7 @@ class ApplyXFMRPT(FLIRTRPT, fsl.ApplyXFM):
output_spec = _FLIRTOutputSpecRPT
-if LooseVersion("0.0.0") < fs.Info.looseversion() < LooseVersion("6.0.0"):
+if LooseVersion('0.0.0') < fs.Info.looseversion() < LooseVersion('6.0.0'):
_BBRegisterInputSpec = fs.preprocess.BBRegisterInputSpec
else:
_BBRegisterInputSpec = fs.preprocess.BBRegisterInputSpec6
@@ -253,9 +242,9 @@ class _BBRegisterInputSpecRPT(nrb._SVGReportCapableInputSpec, _BBRegisterInputSp
File,
default=True,
usedefault=True,
- argstr="--lta %s",
- min_ver="5.2.0",
- desc="write the transformation matrix in LTA format",
+ argstr='--lta %s',
+ min_ver='5.2.0',
+ desc='write the transformation matrix in LTA format',
)
@@ -271,23 +260,23 @@ class BBRegisterRPT(nrb.RegistrationRC, fs.BBRegister):
def _post_run_hook(self, runtime):
outputs = self.aggregate_outputs(runtime=runtime)
- mri_dir = os.path.join(self.inputs.subjects_dir, self.inputs.subject_id, "mri")
- target_file = os.path.join(mri_dir, "brainmask.mgz")
+ mri_dir = os.path.join(self.inputs.subjects_dir, self.inputs.subject_id, 'mri')
+ target_file = os.path.join(mri_dir, 'brainmask.mgz')
# Apply transform for simplicity
mri_vol2vol = fs.ApplyVolTransform(
source_file=self.inputs.source_file,
target_file=target_file,
lta_file=outputs.out_lta_file,
- interp="nearest",
+ interp='nearest',
)
res = mri_vol2vol.run()
self._fixed_image = target_file
self._moving_image = res.outputs.transformed_file
- self._contour = os.path.join(mri_dir, "ribbon.mgz")
+ self._contour = os.path.join(mri_dir, 'ribbon.mgz')
NIWORKFLOWS_LOG.info(
- "Report - setting fixed (%s) and moving (%s) images",
+ 'Report - setting fixed (%s) and moving (%s) images',
self._fixed_image,
self._moving_image,
)
@@ -295,9 +284,7 @@ def _post_run_hook(self, runtime):
return super()._post_run_hook(runtime)
-class _MRICoregInputSpecRPT(
- nrb._SVGReportCapableInputSpec, fs.registration.MRICoregInputSpec
-):
+class _MRICoregInputSpecRPT(nrb._SVGReportCapableInputSpec, fs.registration.MRICoregInputSpec):
pass
@@ -315,30 +302,28 @@ def _post_run_hook(self, runtime):
outputs = self.aggregate_outputs(runtime=runtime)
mri_dir = None
if isdefined(self.inputs.subject_id):
- mri_dir = os.path.join(
- self.inputs.subjects_dir, self.inputs.subject_id, "mri"
- )
+ mri_dir = os.path.join(self.inputs.subjects_dir, self.inputs.subject_id, 'mri')
if isdefined(self.inputs.reference_file):
target_file = self.inputs.reference_file
else:
- target_file = os.path.join(mri_dir, "brainmask.mgz")
+ target_file = os.path.join(mri_dir, 'brainmask.mgz')
# Apply transform for simplicity
mri_vol2vol = fs.ApplyVolTransform(
source_file=self.inputs.source_file,
target_file=target_file,
lta_file=outputs.out_lta_file,
- interp="nearest",
+ interp='nearest',
)
res = mri_vol2vol.run()
self._fixed_image = target_file
self._moving_image = res.outputs.transformed_file
if mri_dir is not None:
- self._contour = os.path.join(mri_dir, "ribbon.mgz")
+ self._contour = os.path.join(mri_dir, 'ribbon.mgz')
NIWORKFLOWS_LOG.info(
- "Report - setting fixed (%s) and moving (%s) images",
+ 'Report - setting fixed (%s) and moving (%s) images',
self._fixed_image,
self._moving_image,
)
@@ -347,21 +332,19 @@ def _post_run_hook(self, runtime):
class _SimpleBeforeAfterInputSpecRPT(nrb._SVGReportCapableInputSpec):
- before = File(exists=True, mandatory=True, desc="file before")
- after = File(exists=True, mandatory=True, desc="file after")
- wm_seg = File(desc="reference white matter segmentation mask")
- before_label = traits.Str("before", usedefault=True)
- after_label = traits.Str("after", usedefault=True)
- dismiss_affine = traits.Bool(
- False, usedefault=True, desc="rotate image(s) to cardinal axes"
- )
+ before = File(exists=True, mandatory=True, desc='file before')
+ after = File(exists=True, mandatory=True, desc='file after')
+ wm_seg = File(desc='reference white matter segmentation mask')
+ before_label = traits.Str('before', usedefault=True)
+ after_label = traits.Str('after', usedefault=True)
+ dismiss_affine = traits.Bool(False, usedefault=True, desc='rotate image(s) to cardinal axes')
class SimpleBeforeAfterRPT(nrb.RegistrationRC, nrb.ReportingInterface):
input_spec = _SimpleBeforeAfterInputSpecRPT
def _post_run_hook(self, runtime):
- """ there is not inner interface to run """
+ """there is not inner interface to run"""
self._fixed_image_label = self.inputs.after_label
self._moving_image_label = self.inputs.before_label
self._fixed_image = self.inputs.after
@@ -369,7 +352,7 @@ def _post_run_hook(self, runtime):
self._contour = self.inputs.wm_seg if isdefined(self.inputs.wm_seg) else None
self._dismiss_affine = self.inputs.dismiss_affine
NIWORKFLOWS_LOG.info(
- "Report - setting before (%s) and after (%s) images",
+ 'Report - setting before (%s) and after (%s) images',
self._fixed_image,
self._moving_image,
)
@@ -378,7 +361,7 @@ def _post_run_hook(self, runtime):
class _ResampleBeforeAfterInputSpecRPT(_SimpleBeforeAfterInputSpecRPT):
- base = traits.Enum("before", "after", usedefault=True, mandatory=True)
+ base = traits.Enum('before', 'after', usedefault=True, mandatory=True)
class ResampleBeforeAfterRPT(SimpleBeforeAfterRPT):
@@ -389,31 +372,25 @@ def _post_run_hook(self, runtime):
self._fixed_image = self.inputs.after
self._moving_image = self.inputs.before
- if self.inputs.base == "before":
+ if self.inputs.base == 'before':
resampled_after = nli.resample_to_img(self._fixed_image, self._moving_image)
- fname = fname_presuffix(
- self._fixed_image, suffix="_resampled", newpath=runtime.cwd
- )
+ fname = fname_presuffix(self._fixed_image, suffix='_resampled', newpath=runtime.cwd)
resampled_after.to_filename(fname)
self._fixed_image = fname
else:
- resampled_before = nli.resample_to_img(
- self._moving_image, self._fixed_image
- )
- fname = fname_presuffix(
- self._moving_image, suffix="_resampled", newpath=runtime.cwd
- )
+ resampled_before = nli.resample_to_img(self._moving_image, self._fixed_image)
+ fname = fname_presuffix(self._moving_image, suffix='_resampled', newpath=runtime.cwd)
resampled_before.to_filename(fname)
self._moving_image = fname
self._contour = self.inputs.wm_seg if isdefined(self.inputs.wm_seg) else None
NIWORKFLOWS_LOG.info(
- "Report - setting before (%s) and after (%s) images",
+ 'Report - setting before (%s) and after (%s) images',
self._fixed_image,
self._moving_image,
)
runtime = super()._post_run_hook(runtime)
- NIWORKFLOWS_LOG.info("Successfully created report (%s)", self._out_report)
+ NIWORKFLOWS_LOG.info('Successfully created report (%s)', self._out_report)
os.unlink(fname)
return runtime
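
`ResampleBeforeAfterRPT` resamples whichever image is not the base onto the other's grid before plotting, then unlinks the temporary file. The resampling step on its own (file names hypothetical):

    from nilearn import image as nli
    from nipype.utils.filemanip import fname_presuffix

    resampled = nli.resample_to_img('after.nii.gz', 'before.nii.gz')
    fname = fname_presuffix('after.nii.gz', suffix='_resampled', newpath='.')
    resampled.to_filename(fname)
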
diff --git a/niworkflows/interfaces/reportlets/segmentation.py b/niworkflows/interfaces/reportlets/segmentation.py
index 918372771e0..757fee2fd62 100644
--- a/niworkflows/interfaces/reportlets/segmentation.py
+++ b/niworkflows/interfaces/reportlets/segmentation.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""ReportCapableInterfaces for segmentation tools."""
+
import os
from nipype.interfaces.base import File, isdefined
@@ -34,9 +35,7 @@ class _FASTInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.preprocess.FASTInput
pass
-class _FASTOutputSpecRPT(
- reporting.ReportCapableOutputSpec, fsl.preprocess.FASTOutputSpec
-):
+class _FASTOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.preprocess.FASTOutputSpec):
pass
@@ -63,8 +62,8 @@ def _post_run_hook(self, runtime):
self._masked = False
NIWORKFLOWS_LOG.info(
- "Generating report for FAST (in_files %s, "
- "segmentation %s, individual tissue classes %s).",
+ 'Generating report for FAST (in_files %s, '
+ 'segmentation %s, individual tissue classes %s).',
self.inputs.in_files,
outputs.tissue_class_map,
outputs.tissue_class_files,
@@ -95,35 +94,29 @@ def _post_run_hook(self, runtime):
overlaid"""
outputs = self.aggregate_outputs(runtime=runtime)
self._anat_file = os.path.join(
- outputs.subjects_dir, outputs.subject_id, "mri", "brain.mgz"
- )
- self._contour = os.path.join(
- outputs.subjects_dir, outputs.subject_id, "mri", "ribbon.mgz"
+ outputs.subjects_dir, outputs.subject_id, 'mri', 'brain.mgz'
)
+ self._contour = os.path.join(outputs.subjects_dir, outputs.subject_id, 'mri', 'ribbon.mgz')
self._masked = False
- NIWORKFLOWS_LOG.info(
- "Generating report for ReconAll (subject %s)", outputs.subject_id
- )
+ NIWORKFLOWS_LOG.info('Generating report for ReconAll (subject %s)', outputs.subject_id)
return super()._post_run_hook(runtime)
class _MELODICInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.model.MELODICInputSpec):
out_report = File(
- "melodic_reportlet.svg",
+ 'melodic_reportlet.svg',
usedefault=True,
- desc="Filename for the visual report generated by Nipype.",
+ desc='Filename for the visual report generated by Nipype.',
)
report_mask = File(
- desc="Mask used to draw the outline on the reportlet. "
- "If not set the mask will be derived from the data."
+ desc='Mask used to draw the outline on the reportlet. '
+ 'If not set the mask will be derived from the data.'
)
-class _MELODICOutputSpecRPT(
- reporting.ReportCapableOutputSpec, fsl.model.MELODICOutputSpec
-):
+class _MELODICOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.model.MELODICOutputSpec):
pass
@@ -143,7 +136,7 @@ def _post_run_hook(self, runtime):
if not self.generate_report:
return runtime
- NIWORKFLOWS_LOG.info("Generating report for MELODIC.")
+ NIWORKFLOWS_LOG.info('Generating report for MELODIC.')
_melodic_dir = runtime.cwd
if isdefined(self.inputs.out_dir):
_melodic_dir = self.inputs.out_dir
@@ -151,18 +144,14 @@ def _post_run_hook(self, runtime):
self._out_report = self.inputs.out_report
if not os.path.isabs(self._out_report):
- self._out_report = os.path.abspath(
- os.path.join(runtime.cwd, self._out_report)
- )
+ self._out_report = os.path.abspath(os.path.join(runtime.cwd, self._out_report))
- mix = os.path.join(self._melodic_dir, "melodic_mix")
+ mix = os.path.join(self._melodic_dir, 'melodic_mix')
if not os.path.exists(mix):
- NIWORKFLOWS_LOG.warning(
- "MELODIC outputs not found, assuming it didn't converge."
- )
- self._out_report = self._out_report.replace(".svg", ".html")
-            snippet = "<h1>MELODIC did not converge, no output</h1>"
- with open(self._out_report, "w") as fobj:
+ NIWORKFLOWS_LOG.warning("MELODIC outputs not found, assuming it didn't converge.")
+ self._out_report = self._out_report.replace('.svg', '.html')
+            snippet = '<h1>MELODIC did not converge, no output</h1>'
+ with open(self._out_report, 'w') as fobj:
fobj.write(snippet)
return runtime
@@ -175,7 +164,7 @@ def _list_outputs(self):
except NotImplementedError:
outputs = {}
if self._out_report is not None:
- outputs["out_report"] = self._out_report
+ outputs['out_report'] = self._out_report
return outputs
def _generate_report(self):
@@ -191,23 +180,19 @@ def _generate_report(self):
)
-class _ICA_AROMAInputSpecRPT(
- nrb._SVGReportCapableInputSpec, fsl.aroma.ICA_AROMAInputSpec
-):
+class _ICA_AROMAInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.aroma.ICA_AROMAInputSpec):
out_report = File(
- "ica_aroma_reportlet.svg",
+ 'ica_aroma_reportlet.svg',
usedefault=True,
- desc="Filename for the visual report generated by Nipype.",
+ desc='Filename for the visual report generated by Nipype.',
)
report_mask = File(
- desc="Mask used to draw the outline on the reportlet. "
- "If not set the mask will be derived from the data."
+ desc='Mask used to draw the outline on the reportlet. '
+ 'If not set the mask will be derived from the data.'
)
-class _ICA_AROMAOutputSpecRPT(
- reporting.ReportCapableOutputSpec, fsl.aroma.ICA_AROMAOutputSpec
-):
+class _ICA_AROMAOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.aroma.ICA_AROMAOutputSpec):
pass
@@ -229,10 +214,8 @@ def _generate_report(self):
def _post_run_hook(self, runtime):
outputs = self.aggregate_outputs(runtime=runtime)
- self._noise_components_file = os.path.join(
- outputs.out_dir, "classified_motion_ICs.txt"
- )
+ self._noise_components_file = os.path.join(outputs.out_dir, 'classified_motion_ICs.txt')
- NIWORKFLOWS_LOG.info("Generating report for ICA AROMA")
+ NIWORKFLOWS_LOG.info('Generating report for ICA AROMA')
return super()._post_run_hook(runtime)
diff --git a/niworkflows/interfaces/space.py b/niworkflows/interfaces/space.py
index 9ef19c53430..e650ac2ef89 100644
--- a/niworkflows/interfaces/space.py
+++ b/niworkflows/interfaces/space.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Interfaces for handling spaces."""
+
from nipype.interfaces.base import (
traits,
TraitedSpec,
@@ -30,17 +31,15 @@
class _SpaceDataSourceInputSpec(BaseInterfaceInputSpec):
- in_tuple = traits.Tuple(
- (traits.Str, traits.Dict), mandatory=True, desc="a space declaration"
- )
+ in_tuple = traits.Tuple((traits.Str, traits.Dict), mandatory=True, desc='a space declaration')
class _SpaceDataSourceOutputSpec(TraitedSpec):
- space = traits.Str(desc="the space identifier, after dropping the cohort modifier.")
- cohort = traits.Str(desc="a cohort specifier")
- resolution = traits.Str(desc="a resolution specifier")
- density = traits.Str(desc="a density specifier")
- uid = traits.Str(desc="a unique identifier combining space specifications")
+ space = traits.Str(desc='the space identifier, after dropping the cohort modifier.')
+ cohort = traits.Str(desc='a cohort specifier')
+ resolution = traits.Str(desc='a resolution specifier')
+ density = traits.Str(desc='a density specifier')
+ uid = traits.Str(desc='a unique identifier combining space specifications')
class SpaceDataSource(SimpleInterface):
@@ -78,5 +77,5 @@ def _run_interface(self, runtime):
from ..utils.spaces import format_reference, reference2dict
self._results = reference2dict(self.inputs.in_tuple)
- self._results["uid"] = format_reference(self.inputs.in_tuple)
+ self._results['uid'] = format_reference(self.inputs.in_tuple)
return runtime
diff --git a/niworkflows/interfaces/surf.py b/niworkflows/interfaces/surf.py
index 0606805bf0e..16284cae2cf 100644
--- a/niworkflows/interfaces/surf.py
+++ b/niworkflows/interfaces/surf.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Handling surfaces."""
+
import os
import re
from pathlib import Path
@@ -47,20 +48,20 @@
SECONDARY_ANAT_STRUC = {
- "smoothwm": "GrayWhite",
- "white": "GrayWhite",
- "pial": "Pial",
- "midthickness": "GrayMid",
+ 'smoothwm': 'GrayWhite',
+ 'white': 'GrayWhite',
+ 'pial': 'Pial',
+ 'midthickness': 'GrayMid',
}
class _NormalizeSurfInputSpec(BaseInterfaceInputSpec):
- in_file = File(mandatory=True, exists=True, desc="Freesurfer-generated GIFTI file")
- transform_file = File(exists=True, desc="FSL or LTA affine transform file")
+ in_file = File(mandatory=True, exists=True, desc='Freesurfer-generated GIFTI file')
+ transform_file = File(exists=True, desc='FSL or LTA affine transform file')
class _NormalizeSurfOutputSpec(TraitedSpec):
- out_file = File(desc="output file with re-centered GIFTI coordinates")
+ out_file = File(desc='output file with re-centered GIFTI coordinates')
class NormalizeSurf(SimpleInterface):
@@ -107,14 +108,14 @@ def _run_interface(self, runtime):
transform_file = self.inputs.transform_file
if not isdefined(transform_file):
transform_file = None
- self._results["out_file"] = normalize_surfs(
+ self._results['out_file'] = normalize_surfs(
self.inputs.in_file, transform_file, newpath=runtime.cwd
)
return runtime
class _Path2BIDSInputSpec(BaseInterfaceInputSpec):
- in_file = File(mandatory=True, desc="input GIFTI file")
+ in_file = File(mandatory=True, desc='input GIFTI file')
class _Path2BIDSOutputSpec(DynamicTraitedSpec):
@@ -166,10 +167,10 @@ class Path2BIDS(SimpleInterface):
input_spec = _Path2BIDSInputSpec
output_spec = _Path2BIDSOutputSpec
_pattern = re.compile(
- r"(?P[lr])h.(?P(white|smoothwm|pial|midthickness|"
- r"inflated|vinflated|sphere|flat|sulc|curv|thickness))[\w\d_-]*(?P\.\w+)?"
+        r'(?P<hemi>[lr])h.(?P<suffix>(white|smoothwm|pial|midthickness|'
+        r'inflated|vinflated|sphere|flat|sulc|curv|thickness))[\w\d_-]*(?P<extprefix>\.\w+)?'
)
- _excluded = ("extprefix",)
+ _excluded = ('extprefix',)
def __init__(self, pattern=None, **inputs):
"""Initialize the interface."""
@@ -191,26 +192,26 @@ def _outputs(self):
def _run_interface(self, runtime):
in_file = Path(self.inputs.in_file)
- extension = "".join(in_file.suffixes[-((in_file.suffixes[-1] == ".gz") + 1):])
+ extension = ''.join(in_file.suffixes[-((in_file.suffixes[-1] == '.gz') + 1) :])
info = self._pattern.match(in_file.name[: -len(extension)]).groupdict()
- self._results["extension"] = f"{info.pop('extprefix', None) or ''}{extension}"
+ self._results['extension'] = f"{info.pop('extprefix', None) or ''}{extension}"
self._results.update(info)
- if "hemi" in self._results:
- self._results["hemi"] = self._results["hemi"].upper()
+ if 'hemi' in self._results:
+ self._results['hemi'] = self._results['hemi'].upper()
return runtime
class _GiftiNameSourceInputSpec(BaseInterfaceInputSpec):
- in_file = File(mandatory=True, exists=True, desc="input GIFTI file")
+ in_file = File(mandatory=True, exists=True, desc='input GIFTI file')
pattern = traits.Str(
mandatory=True, desc='input file name pattern (must capture named group "LR")'
)
- template = traits.Str(mandatory=True, desc="output file name template")
- template_kwargs = traits.Dict(desc="additional template keyword value pairs")
+ template = traits.Str(mandatory=True, desc='output file name template')
+ template_kwargs = traits.Dict(desc='additional template keyword value pairs')
class _GiftiNameSourceOutputSpec(TraitedSpec):
- out_name = traits.Str(desc="(partial) filename formatted according to template")
+ out_name = traits.Str(desc='(partial) filename formatted according to template')
class GiftiNameSource(SimpleInterface):
@@ -267,6 +268,7 @@ class GiftiNameSource(SimpleInterface):
.. _GIFTI Standard: https://www.nitrc.org/frs/download.php/2871/GIFTI_Surface_Format.pdf
"""
+
input_spec = _GiftiNameSourceInputSpec
output_spec = _GiftiNameSourceOutputSpec
@@ -274,22 +276,20 @@ def _run_interface(self, runtime):
in_format = re.compile(self.inputs.pattern)
in_file = os.path.basename(self.inputs.in_file)
info = in_format.match(in_file).groupdict()
- info["LR"] = info["LR"].upper()
+ info['LR'] = info['LR'].upper()
if self.inputs.template_kwargs:
info.update(self.inputs.template_kwargs)
filefmt = self.inputs.template
- self._results["out_name"] = filefmt.format(**info)
+ self._results['out_name'] = filefmt.format(**info)
return runtime
class _GiftiSetAnatomicalStructureInputSpec(BaseInterfaceInputSpec):
- in_file = File(
- mandatory=True, exists=True, desc='GIFTI file beginning with "lh." or "rh."'
- )
+ in_file = File(mandatory=True, exists=True, desc='GIFTI file beginning with "lh." or "rh."')
class _GiftiSetAnatomicalStructureOutputSpec(TraitedSpec):
- out_file = File(desc="output file with updated AnatomicalStructurePrimary entry")
+ out_file = File(desc='output file with updated AnatomicalStructurePrimary entry')
class GiftiSetAnatomicalStructure(SimpleInterface):
@@ -313,32 +313,28 @@ class GiftiSetAnatomicalStructure(SimpleInterface):
def _run_interface(self, runtime):
img = nb.load(self.inputs.in_file)
- if any(nvpair.name == "AnatomicalStruturePrimary" for nvpair in img.meta.data):
+ if any(nvpair.name == 'AnatomicalStruturePrimary' for nvpair in img.meta.data):
out_file = self.inputs.in_file
else:
fname = os.path.basename(self.inputs.in_file)
- if fname[:3] in ("lh.", "rh."):
- asp = "CortexLeft" if fname[0] == "l" else "CortexRight"
+ if fname[:3] in ('lh.', 'rh.'):
+ asp = 'CortexLeft' if fname[0] == 'l' else 'CortexRight'
else:
- raise ValueError(
- "AnatomicalStructurePrimary cannot be derived from filename"
- )
- img.meta.data.insert(
- 0, nb.gifti.GiftiNVPairs("AnatomicalStructurePrimary", asp)
- )
+ raise ValueError('AnatomicalStructurePrimary cannot be derived from filename')
+ img.meta.data.insert(0, nb.gifti.GiftiNVPairs('AnatomicalStructurePrimary', asp))
out_file = os.path.join(runtime.cwd, fname)
img.to_filename(out_file)
- self._results["out_file"] = out_file
+ self._results['out_file'] = out_file
return runtime
class _GiftiToCSVInputSpec(BaseInterfaceInputSpec):
- in_file = File(mandatory=True, exists=True, desc="GIFTI file")
- itk_lps = traits.Bool(False, usedefault=True, desc="flip XY axes")
+ in_file = File(mandatory=True, exists=True, desc='GIFTI file')
+ itk_lps = traits.Bool(False, usedefault=True, desc='flip XY axes')
class _GiftiToCSVOutputSpec(TraitedSpec):
- out_file = File(desc="output csv file")
+ out_file = File(desc='output csv file')
class GiftiToCSV(SimpleInterface):
@@ -359,27 +355,27 @@ def _run_interface(self, runtime):
csvdata = np.hstack((data, np.zeros((data.shape[0], 3))))
out_file = fname_presuffix(
- self.inputs.in_file, newpath=runtime.cwd, use_ext=False, suffix="points.csv"
+ self.inputs.in_file, newpath=runtime.cwd, use_ext=False, suffix='points.csv'
)
np.savetxt(
out_file,
csvdata,
- delimiter=",",
- header="x,y,z,t,label,comment",
- fmt=["%.5f"] * 4 + ["%d"] * 2,
+ delimiter=',',
+ header='x,y,z,t,label,comment',
+ fmt=['%.5f'] * 4 + ['%d'] * 2,
)
- self._results["out_file"] = out_file
+ self._results['out_file'] = out_file
return runtime
class _CSVToGiftiInputSpec(BaseInterfaceInputSpec):
- in_file = File(mandatory=True, exists=True, desc="CSV file")
- gii_file = File(mandatory=True, exists=True, desc="reference GIfTI file")
- itk_lps = traits.Bool(False, usedefault=True, desc="flip XY axes")
+ in_file = File(mandatory=True, exists=True, desc='CSV file')
+ gii_file = File(mandatory=True, exists=True, desc='reference GIfTI file')
+ itk_lps = traits.Bool(False, usedefault=True, desc='flip XY axes')
class _CSVToGiftiOutputSpec(TraitedSpec):
- out_file = File(desc="output GIfTI file")
+ out_file = File(desc='output GIfTI file')
class CSVToGifti(SimpleInterface):
@@ -391,31 +387,27 @@ class CSVToGifti(SimpleInterface):
def _run_interface(self, runtime):
gii = nb.load(self.inputs.gii_file)
- data = np.loadtxt(
- self.inputs.in_file, delimiter=",", skiprows=1, usecols=(0, 1, 2)
- )
+ data = np.loadtxt(self.inputs.in_file, delimiter=',', skiprows=1, usecols=(0, 1, 2))
if self.inputs.itk_lps: # ITK: flip X and Y around 0
data[:, :2] *= -1
gii.darrays[0].data = data[:, :3].astype(gii.darrays[0].data.dtype)
out_file = fname_presuffix(
- self.inputs.gii_file, newpath=runtime.cwd, suffix=".transformed"
+ self.inputs.gii_file, newpath=runtime.cwd, suffix='.transformed'
)
gii.to_filename(out_file)
- self._results["out_file"] = out_file
+ self._results['out_file'] = out_file
return runtime
class _SurfacesToPointCloudInputSpec(BaseInterfaceInputSpec):
- in_files = InputMultiPath(
- File(exists=True), mandatory=True, desc="input GIfTI files"
- )
- out_file = File("pointcloud.ply", usedefault=True, desc="output file name")
+ in_files = InputMultiPath(File(exists=True), mandatory=True, desc='input GIfTI files')
+ out_file = File('pointcloud.ply', usedefault=True, desc='output file name')
class _SurfacesToPointCloudOutputSpec(TraitedSpec):
- out_file = File(desc="output pointcloud in PLY format")
+ out_file = File(desc='output pointcloud in PLY format')
class SurfacesToPointCloud(SimpleInterface):
@@ -430,12 +422,10 @@ def _run_interface(self, runtime):
giis = [nb.load(g) for g in self.inputs.in_files]
vertices = np.vstack([g.darrays[0].data for g in giis])
- norms = np.vstack(
- [vertex_normals(g.darrays[0].data, g.darrays[1].data) for g in giis]
- )
+ norms = np.vstack([vertex_normals(g.darrays[0].data, g.darrays[1].data) for g in giis])
out_file = Path(self.inputs.out_file).resolve()
pointcloud2ply(vertices, norms, out_file=out_file)
- self._results["out_file"] = str(out_file)
+ self._results['out_file'] = str(out_file)
return runtime
@@ -443,20 +433,20 @@ class _PoissonReconInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
mandatory=True,
- argstr="--in %s",
- desc="input PLY pointcloud (vertices + normals)",
+ argstr='--in %s',
+ desc='input PLY pointcloud (vertices + normals)',
)
out_file = File(
- argstr="--out %s",
+ argstr='--out %s',
keep_extension=True,
- name_source=["in_file"],
- name_template="%s_avg",
- desc="output PLY triangular mesh",
+ name_source=['in_file'],
+ name_template='%s_avg',
+ desc='output PLY triangular mesh',
)
class _PoissonReconOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="output PLY triangular mesh")
+ out_file = File(exists=True, desc='output PLY triangular mesh')
class PoissonRecon(CommandLine):
@@ -467,16 +457,16 @@ class PoissonRecon(CommandLine):
input_spec = _PoissonReconInputSpec
output_spec = _PoissonReconOutputSpec
- _cmd = "PoissonRecon"
+ _cmd = 'PoissonRecon'
class _PLYtoGiftiInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="input PLY file")
- surf_key = traits.Str(mandatory=True, desc="reference GIfTI file")
+ in_file = File(exists=True, mandatory=True, desc='input PLY file')
+ surf_key = traits.Str(mandatory=True, desc='reference GIfTI file')
class _PLYtoGiftiOutputSpec(TraitedSpec):
- out_file = File(desc="output GIfTI file")
+ out_file = File(desc='output GIfTI file')
class PLYtoGifti(SimpleInterface):
@@ -489,51 +479,51 @@ def _run_interface(self, runtime):
from pathlib import Path
meta = {
- "GeometricType": "Anatomical",
- "VolGeomWidth": "256",
- "VolGeomHeight": "256",
- "VolGeomDepth": "256",
- "VolGeomXsize": "1.0",
- "VolGeomYsize": "1.0",
- "VolGeomZsize": "1.0",
- "VolGeomX_R": "-1.0",
- "VolGeomX_A": "0.0",
- "VolGeomX_S": "0.0",
- "VolGeomY_R": "0.0",
- "VolGeomY_A": "0.0",
- "VolGeomY_S": "-1.0",
- "VolGeomZ_R": "0.0",
- "VolGeomZ_A": "1.0",
- "VolGeomZ_S": "0.0",
- "VolGeomC_R": "0.0",
- "VolGeomC_A": "0.0",
- "VolGeomC_S": "0.0",
+ 'GeometricType': 'Anatomical',
+ 'VolGeomWidth': '256',
+ 'VolGeomHeight': '256',
+ 'VolGeomDepth': '256',
+ 'VolGeomXsize': '1.0',
+ 'VolGeomYsize': '1.0',
+ 'VolGeomZsize': '1.0',
+ 'VolGeomX_R': '-1.0',
+ 'VolGeomX_A': '0.0',
+ 'VolGeomX_S': '0.0',
+ 'VolGeomY_R': '0.0',
+ 'VolGeomY_A': '0.0',
+ 'VolGeomY_S': '-1.0',
+ 'VolGeomZ_R': '0.0',
+ 'VolGeomZ_A': '1.0',
+ 'VolGeomZ_S': '0.0',
+ 'VolGeomC_R': '0.0',
+ 'VolGeomC_A': '0.0',
+ 'VolGeomC_S': '0.0',
}
- meta["AnatomicalStructurePrimary"] = "Cortex%s" % (
- "Left" if self.inputs.surf_key.startswith("lh") else "Right"
+ meta['AnatomicalStructurePrimary'] = 'Cortex%s' % (
+ 'Left' if self.inputs.surf_key.startswith('lh') else 'Right'
)
- meta["AnatomicalStructureSecondary"] = SECONDARY_ANAT_STRUC[
- self.inputs.surf_key.split(".")[-1]
+ meta['AnatomicalStructureSecondary'] = SECONDARY_ANAT_STRUC[
+ self.inputs.surf_key.split('.')[-1]
]
- meta["Name"] = "%s_average.gii" % self.inputs.surf_key
+ meta['Name'] = '%s_average.gii' % self.inputs.surf_key
- out_file = Path(runtime.cwd) / meta["Name"]
+ out_file = Path(runtime.cwd) / meta['Name']
out_file = ply2gii(self.inputs.in_file, meta, out_file=out_file)
- self._results["out_file"] = str(out_file)
+ self._results['out_file'] = str(out_file)
return runtime
class _UnzipJoinedSurfacesInputSpec(BaseInterfaceInputSpec):
in_files = traits.List(
- InputMultiPath(File(exists=True), mandatory=True, desc="input GIfTI files")
+ InputMultiPath(File(exists=True), mandatory=True, desc='input GIfTI files')
)
class _UnzipJoinedSurfacesOutputSpec(TraitedSpec):
out_files = traits.List(
- OutputMultiPath(File(exists=True), desc="output pointcloud in PLY format")
+ OutputMultiPath(File(exists=True), desc='output pointcloud in PLY format')
)
- surf_keys = traits.List(traits.Str, desc="surface identifier keys")
+ surf_keys = traits.List(traits.Str, desc='surface identifier keys')
class UnzipJoinedSurfaces(SimpleInterface):
@@ -550,10 +540,10 @@ def _run_interface(self, runtime):
for f in in_files:
bname = Path(f).name
- groups[bname.split("_")[0]].append(f)
+ groups[bname.split('_')[0]].append(f)
- self._results["out_files"] = [sorted(els) for els in groups.values()]
- self._results["surf_keys"] = list(groups.keys())
+ self._results['out_files'] = [sorted(els) for els in groups.values()]
+ self._results['surf_keys'] = list(groups.keys())
return runtime
@@ -561,8 +551,8 @@ def _run_interface(self, runtime):
class CreateSurfaceROIInputSpec(TraitedSpec):
subject_id = traits.Str(desc='subject ID')
hemisphere = traits.Enum(
- "L",
- "R",
+ 'L',
+ 'R',
mandatory=True,
desc='hemisphere',
)
@@ -590,11 +580,11 @@ def _run_interface(self, runtime):
subject = 'sub-XYZ'
img = nb.GiftiImage.from_filename(self.inputs.thickness_file)
# wb_command -set-structure (L282)
- img.meta["AnatomicalStructurePrimary"] = {'L': 'CortexLeft', 'R': 'CortexRight'}[hemi]
+ img.meta['AnatomicalStructurePrimary'] = {'L': 'CortexLeft', 'R': 'CortexRight'}[hemi]
darray = img.darrays[0]
# wb_command -set-map-names (L284)
meta = darray.meta
- meta['Name'] = f"{subject}_{hemi}_ROI"
+ meta['Name'] = f'{subject}_{hemi}_ROI'
# wb_command -metric-palette calls (L285, L289) have no effect on ROI files
# Compiling an odd sequence of math operations (L283, L288, L290) that work out to:
@@ -612,9 +602,9 @@ def _run_interface(self, runtime):
meta=meta,
)
- out_filename = os.path.join(runtime.cwd, f"{subject}.{hemi}.roi.native.shape.gii")
+ out_filename = os.path.join(runtime.cwd, f'{subject}.{hemi}.roi.native.shape.gii')
img.to_filename(out_filename)
- self._results["roi_file"] = out_filename
+ self._results['roi_file'] = out_filename
return runtime
@@ -632,23 +622,21 @@ def normalize_surfs(in_file, transform_file, newpath=None):
img = nb.load(in_file)
transform = load_transform(transform_file)
- pointset = img.get_arrays_from_intent("NIFTI_INTENT_POINTSET")[0]
+ pointset = img.get_arrays_from_intent('NIFTI_INTENT_POINTSET')[0]
coords = pointset.data.T
- c_ras_keys = ("VolGeomC_R", "VolGeomC_A", "VolGeomC_S")
+ c_ras_keys = ('VolGeomC_R', 'VolGeomC_A', 'VolGeomC_S')
ras = np.array([[float(pointset.metadata[key])] for key in c_ras_keys])
ones = np.ones((1, coords.shape[1]), dtype=coords.dtype)
# Apply C_RAS translation to coordinates, then transform
- pointset.data = transform.dot(np.vstack((coords + ras, ones)))[:3].T.astype(
- coords.dtype
- )
+ pointset.data = transform.dot(np.vstack((coords + ras, ones)))[:3].T.astype(coords.dtype)
- secondary = nb.gifti.GiftiNVPairs("AnatomicalStructureSecondary", "MidThickness")
- geom_type = nb.gifti.GiftiNVPairs("GeometricType", "Anatomical")
+ secondary = nb.gifti.GiftiNVPairs('AnatomicalStructureSecondary', 'MidThickness')
+ geom_type = nb.gifti.GiftiNVPairs('GeometricType', 'Anatomical')
has_ass = has_geo = False
for nvpair in pointset.meta.data:
# Remove C_RAS translation from metadata to avoid double-dipping in FreeSurfer
if nvpair.name in c_ras_keys:
- nvpair.value = "0.000000"
+ nvpair.value = '0.000000'
# Check for missing metadata
elif nvpair.name == secondary.name:
has_ass = True
@@ -656,7 +644,7 @@ def normalize_surfs(in_file, transform_file, newpath=None):
has_geo = True
fname = os.path.basename(in_file)
# Update metadata for MidThickness/graymid surfaces
- if "midthickness" in fname.lower() or "graymid" in fname.lower():
+ if 'midthickness' in fname.lower() or 'graymid' in fname.lower():
if not has_ass:
pointset.meta.data.insert(1, secondary)
if not has_geo:
@@ -685,24 +673,24 @@ def load_transform(fname):
if fname is None:
return np.eye(4)
- if fname.endswith(".mat"):
+ if fname.endswith('.mat'):
return np.loadtxt(fname)
- elif fname.endswith(".lta"):
- with open(fname, "rb") as fobj:
+ elif fname.endswith('.lta'):
+ with open(fname, 'rb') as fobj:
for line in fobj:
- if line.startswith(b"1 4 4"):
+ if line.startswith(b'1 4 4'):
break
lines = fobj.readlines()[:4]
return np.genfromtxt(lines)
- raise ValueError("Unknown transform type; pass FSL (.mat) or LTA (.lta)")
+ raise ValueError('Unknown transform type; pass FSL (.mat) or LTA (.lta)')
def vertex_normals(vertices, faces):
"""Calculates the normals of a triangular mesh"""
def normalize_v3(arr):
- """ Normalize a numpy array of 3 component vectors shape=(n,3) """
+ """Normalize a numpy array of 3 component vectors shape=(n,3)"""
lens = np.sqrt(arr[:, 0] ** 2 + arr[:, 1] ** 2 + arr[:, 2] ** 2)
arr /= lens[:, np.newaxis]
@@ -725,11 +713,11 @@ def pointcloud2ply(vertices, normals, out_file=None):
from pyntcloud import PyntCloud
df = pd.DataFrame(np.hstack((vertices, normals)))
- df.columns = ["x", "y", "z", "nx", "ny", "nz"]
+ df.columns = ['x', 'y', 'z', 'nx', 'ny', 'nz']
cloud = PyntCloud(df)
if out_file is None:
- out_file = Path("pointcloud.ply").resolve()
+ out_file = Path('pointcloud.ply').resolve()
cloud.to_file(str(out_file))
return out_file
@@ -753,24 +741,24 @@ def ply2gii(in_file, metadata, out_file=None):
# Update centroid metadata
metadata.update(
zip(
- ("SurfaceCenterX", "SurfaceCenterY", "SurfaceCenterZ"),
- ["%.4f" % c for c in surf.centroid],
+ ('SurfaceCenterX', 'SurfaceCenterY', 'SurfaceCenterZ'),
+ ['%.4f' % c for c in surf.centroid],
)
)
# Prepare data arrays
da = (
GiftiDataArray(
- data=surf.xyz.astype("float32"),
- datatype="NIFTI_TYPE_FLOAT32",
- intent="NIFTI_INTENT_POINTSET",
+ data=surf.xyz.astype('float32'),
+ datatype='NIFTI_TYPE_FLOAT32',
+ intent='NIFTI_INTENT_POINTSET',
meta=GiftiMetaData.from_dict(metadata),
coordsys=GiftiCoordSystem(xform=eye(4), xformspace=3),
),
GiftiDataArray(
data=surf.mesh.values,
- datatype="NIFTI_TYPE_INT32",
- intent="NIFTI_INTENT_TRIANGLE",
+ datatype='NIFTI_TYPE_INT32',
+ intent='NIFTI_INTENT_TRIANGLE',
coordsys=None,
),
)
@@ -778,7 +766,7 @@ def ply2gii(in_file, metadata, out_file=None):
if out_file is None:
out_file = fname_presuffix(
- in_file.name, suffix=".gii", use_ext=False, newpath=str(Path.cwd())
+ in_file.name, suffix='.gii', use_ext=False, newpath=str(Path.cwd())
)
surfgii.to_filename(str(out_file))
diff --git a/niworkflows/interfaces/tests/data/__init__.py b/niworkflows/interfaces/tests/data/__init__.py
index 0eb4f637ab0..f3a8363d212 100644
--- a/niworkflows/interfaces/tests/data/__init__.py
+++ b/niworkflows/interfaces/tests/data/__init__.py
@@ -2,6 +2,7 @@
.. autofunction:: load_test_data
"""
+
from acres import Loader
load_test_data = Loader(__package__)
diff --git a/niworkflows/interfaces/tests/test_bids.py b/niworkflows/interfaces/tests/test_bids.py
index c983ea0d769..91b2dff9928 100644
--- a/niworkflows/interfaces/tests/test_bids.py
+++ b/niworkflows/interfaces/tests/test_bids.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Tests on BIDS compliance."""
+
import sys
import os
from pathlib import Path
@@ -38,14 +39,14 @@
XFORM_CODES = {
- "MNI152Lin": 4,
- "T1w": 2,
- "boldref": 2,
+ 'MNI152Lin': 4,
+ 'T1w': 2,
+ 'boldref': 2,
None: 1,
}
-T1W_PATH = "ds054/sub-100185/anat/sub-100185_T1w.nii.gz"
-BOLD_PATH = "ds054/sub-100185/func/sub-100185_task-machinegame_run-01_bold.nii.gz"
+T1W_PATH = 'ds054/sub-100185/anat/sub-100185_T1w.nii.gz'
+BOLD_PATH = 'ds054/sub-100185/func/sub-100185_task-machinegame_run-01_bold.nii.gz'
def make_prep_and_save(
@@ -76,171 +77,169 @@ def connect_and_run_save(prep_result, save):
return save.run()
-@pytest.mark.parametrize("interface", [bintfs.DerivativesDataSink, bintfs.PrepareDerivative])
-@pytest.mark.parametrize("out_path_base", [None, "fmriprep"])
+@pytest.mark.parametrize('interface', [bintfs.DerivativesDataSink, bintfs.PrepareDerivative])
+@pytest.mark.parametrize('out_path_base', [None, 'fmriprep'])
@pytest.mark.parametrize(
- "source,input_files,entities,expectation,checksum",
+ 'source,input_files,entities,expectation,checksum',
[
(
T1W_PATH,
- ["anat.nii.gz"],
- {"desc": "preproc"},
- "sub-100185/anat/sub-100185_desc-preproc_T1w.nii.gz",
- "7c047921def32da260df4a985019b9f5231659fa",
+ ['anat.nii.gz'],
+ {'desc': 'preproc'},
+ 'sub-100185/anat/sub-100185_desc-preproc_T1w.nii.gz',
+ '7c047921def32da260df4a985019b9f5231659fa',
),
(
T1W_PATH,
- ["anat.nii.gz"],
- {"desc": "preproc", "space": "MNI"},
- "sub-100185/anat/sub-100185_space-MNI_desc-preproc_T1w.nii.gz",
- "b22399f50ce454049d5d074457a92ab13e7fdf8c",
+ ['anat.nii.gz'],
+ {'desc': 'preproc', 'space': 'MNI'},
+ 'sub-100185/anat/sub-100185_space-MNI_desc-preproc_T1w.nii.gz',
+ 'b22399f50ce454049d5d074457a92ab13e7fdf8c',
),
(
T1W_PATH,
- ["anat.nii.gz"],
- {"desc": "preproc", "space": "MNI", "resolution": "native"},
- "sub-100185/anat/sub-100185_space-MNI_desc-preproc_T1w.nii.gz",
- "b22399f50ce454049d5d074457a92ab13e7fdf8c",
+ ['anat.nii.gz'],
+ {'desc': 'preproc', 'space': 'MNI', 'resolution': 'native'},
+ 'sub-100185/anat/sub-100185_space-MNI_desc-preproc_T1w.nii.gz',
+ 'b22399f50ce454049d5d074457a92ab13e7fdf8c',
),
(
T1W_PATH,
- ["anat.nii.gz"],
- {"desc": "preproc", "space": "MNI", "resolution": "high"},
- "sub-100185/anat/sub-100185_space-MNI_res-high_desc-preproc_T1w.nii.gz",
- "b22399f50ce454049d5d074457a92ab13e7fdf8c",
+ ['anat.nii.gz'],
+ {'desc': 'preproc', 'space': 'MNI', 'resolution': 'high'},
+ 'sub-100185/anat/sub-100185_space-MNI_res-high_desc-preproc_T1w.nii.gz',
+ 'b22399f50ce454049d5d074457a92ab13e7fdf8c',
),
(
T1W_PATH,
- ["tfm.txt"],
- {"from": "fsnative", "to": "T1w", "suffix": "xfm"},
- "sub-100185/anat/sub-100185_from-fsnative_to-T1w_mode-image_xfm.txt",
- "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ ['tfm.txt'],
+ {'from': 'fsnative', 'to': 'T1w', 'suffix': 'xfm'},
+ 'sub-100185/anat/sub-100185_from-fsnative_to-T1w_mode-image_xfm.txt',
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
),
(
T1W_PATH,
- ["tfm.h5"],
- {"from": "MNI152NLin2009cAsym", "to": "T1w", "suffix": "xfm"},
- "sub-100185/anat/sub-100185_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5",
- "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ ['tfm.h5'],
+ {'from': 'MNI152NLin2009cAsym', 'to': 'T1w', 'suffix': 'xfm'},
+ 'sub-100185/anat/sub-100185_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5',
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
),
(
T1W_PATH,
- ["anat.nii.gz"],
- {"desc": "brain", "suffix": "mask"},
- "sub-100185/anat/sub-100185_desc-brain_mask.nii.gz",
- "7af86a1f6806a41078e4d2699d680dbe2b9f6ae2",
+ ['anat.nii.gz'],
+ {'desc': 'brain', 'suffix': 'mask'},
+ 'sub-100185/anat/sub-100185_desc-brain_mask.nii.gz',
+ '7af86a1f6806a41078e4d2699d680dbe2b9f6ae2',
),
(
T1W_PATH,
- ["anat.nii.gz"],
- {"desc": "brain", "suffix": "mask", "space": "MNI"},
- "sub-100185/anat/sub-100185_space-MNI_desc-brain_mask.nii.gz",
- "1591f90e0da2a624c972784dda6a01b5572add15",
+ ['anat.nii.gz'],
+ {'desc': 'brain', 'suffix': 'mask', 'space': 'MNI'},
+ 'sub-100185/anat/sub-100185_space-MNI_desc-brain_mask.nii.gz',
+ '1591f90e0da2a624c972784dda6a01b5572add15',
),
(
T1W_PATH,
- ["anat.surf.gii"],
- {"suffix": "pial", "hemi": "L"},
- "sub-100185/anat/sub-100185_hemi-L_pial.surf.gii",
- "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ ['anat.surf.gii'],
+ {'suffix': 'pial', 'hemi': 'L'},
+ 'sub-100185/anat/sub-100185_hemi-L_pial.surf.gii',
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
),
(
T1W_PATH,
- ["aseg.nii", "aparc.nii"],
- {"desc": ["aseg", "aparcaseg"], "suffix": "dseg"},
+ ['aseg.nii', 'aparc.nii'],
+ {'desc': ['aseg', 'aparcaseg'], 'suffix': 'dseg'},
+ [f'sub-100185/anat/sub-100185_desc-{s}_dseg.nii' for s in ('aseg', 'aparcaseg')],
[
- f"sub-100185/anat/sub-100185_desc-{s}_dseg.nii"
- for s in ("aseg", "aparcaseg")
+ '5543a788bf3383d7a2fc41f5cff4e0bbb8f5f282',
+ '5543a788bf3383d7a2fc41f5cff4e0bbb8f5f282',
],
- ["5543a788bf3383d7a2fc41f5cff4e0bbb8f5f282",
- "5543a788bf3383d7a2fc41f5cff4e0bbb8f5f282"],
),
(
T1W_PATH,
- ["anat.nii", "anat.json"],
- {"desc": "preproc"},
+ ['anat.nii', 'anat.json'],
+ {'desc': 'preproc'},
+ [f'sub-100185/anat/sub-100185_desc-preproc_T1w.{ext}' for ext in ('nii', 'json')],
[
- f"sub-100185/anat/sub-100185_desc-preproc_T1w.{ext}"
- for ext in ("nii", "json")
+ '25c107d4a3e6f98e48aa752c5bbd88ab8e8d069f',
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
],
- ["25c107d4a3e6f98e48aa752c5bbd88ab8e8d069f",
- "da39a3ee5e6b4b0d3255bfef95601890afd80709"],
),
(
T1W_PATH,
- ["anat.nii.gz"] * 3,
- {"label": ["GM", "WM", "CSF"], "suffix": "probseg"},
+ ['anat.nii.gz'] * 3,
+ {'label': ['GM', 'WM', 'CSF'], 'suffix': 'probseg'},
[
- f"sub-100185/anat/sub-100185_label-{lab}_probseg.nii.gz"
- for lab in ("GM", "WM", "CSF")
+ f'sub-100185/anat/sub-100185_label-{lab}_probseg.nii.gz'
+ for lab in ('GM', 'WM', 'CSF')
],
- ["7c047921def32da260df4a985019b9f5231659fa"] * 3,
+ ['7c047921def32da260df4a985019b9f5231659fa'] * 3,
),
# BOLD data
(
BOLD_PATH,
- ["aroma.csv"],
- {"suffix": "AROMAnoiseICs"},
- "sub-100185/func/sub-100185_task-machinegame_run-01_AROMAnoiseICs.csv",
- "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ ['aroma.csv'],
+ {'suffix': 'AROMAnoiseICs'},
+ 'sub-100185/func/sub-100185_task-machinegame_run-01_AROMAnoiseICs.csv',
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
),
(
BOLD_PATH,
- ["confounds.tsv"],
- {"suffix": "regressors", "desc": "confounds"},
- "sub-100185/func/sub-100185_task-machinegame_run-01_desc-confounds_regressors.tsv",
- "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ ['confounds.tsv'],
+ {'suffix': 'regressors', 'desc': 'confounds'},
+ 'sub-100185/func/sub-100185_task-machinegame_run-01_desc-confounds_regressors.tsv',
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
),
(
BOLD_PATH,
- ["mixing.tsv"],
- {"suffix": "mixing", "desc": "MELODIC"},
- "sub-100185/func/sub-100185_task-machinegame_run-01_desc-MELODIC_mixing.tsv",
- "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ ['mixing.tsv'],
+ {'suffix': 'mixing', 'desc': 'MELODIC'},
+ 'sub-100185/func/sub-100185_task-machinegame_run-01_desc-MELODIC_mixing.tsv',
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
),
(
BOLD_PATH,
- ["lh.func.gii"],
- {"space": "fsaverage", "density": "10k", "hemi": "L"},
- "sub-100185/func/sub-100185_task-machinegame_run-01_"
- "hemi-L_space-fsaverage_den-10k_bold.func.gii",
- "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ ['lh.func.gii'],
+ {'space': 'fsaverage', 'density': '10k', 'hemi': 'L'},
+ 'sub-100185/func/sub-100185_task-machinegame_run-01_'
+ 'hemi-L_space-fsaverage_den-10k_bold.func.gii',
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
),
(
BOLD_PATH,
- ["hcp.dtseries.nii"],
- {"space": "fsLR", "density": "91k"},
- "sub-100185/func/sub-100185_task-machinegame_run-01_"
- "space-fsLR_den-91k_bold.dtseries.nii",
- "335f1394ce90b58bbf27026b6eeec4d2124c11da",
+ ['hcp.dtseries.nii'],
+ {'space': 'fsLR', 'density': '91k'},
+ 'sub-100185/func/sub-100185_task-machinegame_run-01_'
+ 'space-fsLR_den-91k_bold.dtseries.nii',
+ '335f1394ce90b58bbf27026b6eeec4d2124c11da',
),
(
BOLD_PATH,
- ["ref.nii"],
- {"space": "MNI", "suffix": "boldref"},
- "sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_boldref.nii",
- "53d9b486d08fec5a952f68fcbcddb38a72818d4c",
+ ['ref.nii'],
+ {'space': 'MNI', 'suffix': 'boldref'},
+ 'sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_boldref.nii',
+ '53d9b486d08fec5a952f68fcbcddb38a72818d4c',
),
(
BOLD_PATH,
- ["dseg.nii"],
- {"space": "MNI", "suffix": "dseg", "desc": "aseg"},
- "sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_desc-aseg_dseg.nii",
- "ddadc9be8224eebe0177a65bf87300f275e17e96",
+ ['dseg.nii'],
+ {'space': 'MNI', 'suffix': 'dseg', 'desc': 'aseg'},
+ 'sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_desc-aseg_dseg.nii',
+ 'ddadc9be8224eebe0177a65bf87300f275e17e96',
),
(
BOLD_PATH,
- ["mask.nii"],
- {"space": "MNI", "suffix": "mask", "desc": "brain"},
- "sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_desc-brain_mask.nii",
- "f97a1877508139b42ea9fc476bdba367b001ab00",
+ ['mask.nii'],
+ {'space': 'MNI', 'suffix': 'mask', 'desc': 'brain'},
+ 'sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_desc-brain_mask.nii',
+ 'f97a1877508139b42ea9fc476bdba367b001ab00',
),
(
BOLD_PATH,
- ["bold.nii"],
- {"space": "MNI", "desc": "preproc"},
- "sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_desc-preproc_bold.nii",
- "aa1eed935e6a8dcca646b0c78ee57218e30e2974",
+ ['bold.nii'],
+ {'space': 'MNI', 'desc': 'preproc'},
+ 'sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_desc-preproc_bold.nii',
+ 'aa1eed935e6a8dcca646b0c78ee57218e30e2974',
),
# Nondeterministic order - do we really need this to work, or we can stay safe with
# MapNodes?
@@ -250,42 +249,42 @@ def connect_and_run_save(prep_result, save):
# for s in ("MNIa", "MNIb") for l in ("GM", "WM", "CSF")]),
(
T1W_PATH,
- ["anat.html"],
- {"desc": "conform", "datatype": "figures"},
- "sub-100185/figures/sub-100185_desc-conform_T1w.html",
- "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ ['anat.html'],
+ {'desc': 'conform', 'datatype': 'figures'},
+ 'sub-100185/figures/sub-100185_desc-conform_T1w.html',
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
),
(
BOLD_PATH,
- ["aroma.csv"],
- {"suffix": "AROMAnoiseICs", "extension": "h5"},
+ ['aroma.csv'],
+ {'suffix': 'AROMAnoiseICs', 'extension': 'h5'},
ValueError,
None,
),
(
T1W_PATH,
- ["anat.nii.gz"] * 3,
- {"desc": "preproc", "space": "MNI"},
+ ['anat.nii.gz'] * 3,
+ {'desc': 'preproc', 'space': 'MNI'},
ValueError,
None,
),
(
- "sub-07/ses-preop/anat/sub-07_ses-preop_T1w.nii.gz",
- ["tfm.h5"],
- {"from": "orig", "to": "target", "suffix": "xfm"},
- "sub-07/ses-preop/anat/sub-07_ses-preop_from-orig_to-target_mode-image_xfm.h5",
- "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ 'sub-07/ses-preop/anat/sub-07_ses-preop_T1w.nii.gz',
+ ['tfm.h5'],
+ {'from': 'orig', 'to': 'target', 'suffix': 'xfm'},
+ 'sub-07/ses-preop/anat/sub-07_ses-preop_from-orig_to-target_mode-image_xfm.h5',
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
),
(
- "sub-07/ses-preop/anat/sub-07_ses-preop_run-01_T1w.nii.gz",
- ["tfm.txt"],
- {"from": "orig", "to": "T1w", "suffix": "xfm"},
- "sub-07/ses-preop/anat/sub-07_ses-preop_run-01_from-orig_to-T1w_mode-image_xfm.txt",
- "da39a3ee5e6b4b0d3255bfef95601890afd80709",
+ 'sub-07/ses-preop/anat/sub-07_ses-preop_run-01_T1w.nii.gz',
+ ['tfm.txt'],
+ {'from': 'orig', 'to': 'T1w', 'suffix': 'xfm'},
+ 'sub-07/ses-preop/anat/sub-07_ses-preop_run-01_from-orig_to-T1w_mode-image_xfm.txt',
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
),
],
)
-@pytest.mark.parametrize("dismiss_entities", [None, ("run", "session")])
+@pytest.mark.parametrize('dismiss_entities', [None, ('run', 'session')])
def test_DerivativesDataSink_build_path(
tmp_path,
interface,
@@ -299,32 +298,35 @@ def test_DerivativesDataSink_build_path(
):
"""Check a few common derivatives generated by NiPreps."""
if interface is bintfs.PrepareDerivative and out_path_base is not None:
- pytest.skip("PrepareDerivative does not support out_path_base")
+ pytest.skip('PrepareDerivative does not support out_path_base')
ds_inputs = []
for input_file in input_files:
fname = tmp_path / input_file
- if fname.name.endswith(".dtseries.nii"):
- axes = (nb.cifti2.SeriesAxis(start=0, step=2, size=20),
- nb.cifti2.BrainModelAxis.from_mask(np.ones((5, 5, 5))))
+ if fname.name.endswith('.dtseries.nii'):
+ axes = (
+ nb.cifti2.SeriesAxis(start=0, step=2, size=20),
+ nb.cifti2.BrainModelAxis.from_mask(np.ones((5, 5, 5))),
+ )
hdr = nb.cifti2.cifti2_axes.to_header(axes)
- cifti = nb.Cifti2Image(np.zeros(hdr.matrix.get_data_shape(), dtype=np.float32),
- header=hdr)
- cifti.nifti_header.set_intent("ConnDenseSeries")
+ cifti = nb.Cifti2Image(
+ np.zeros(hdr.matrix.get_data_shape(), dtype=np.float32), header=hdr
+ )
+ cifti.nifti_header.set_intent('ConnDenseSeries')
cifti.to_filename(fname)
- elif fname.name.rstrip(".gz").endswith(".nii"):
+ elif fname.name.rstrip('.gz').endswith('.nii'):
hdr = nb.Nifti1Header()
hdr.set_qform(np.eye(4), code=2)
hdr.set_sform(np.eye(4), code=2)
- units = ("mm", "sec") if "bold" in input_file else ("mm",)
- size = (10, 10, 10, 10) if "bold" in input_file else (10, 10, 10)
+ units = ('mm', 'sec') if 'bold' in input_file else ('mm',)
+ size = (10, 10, 10, 10) if 'bold' in input_file else (10, 10, 10)
hdr.set_xyzt_units(*units)
nb.Nifti1Image(np.zeros(size), np.eye(4), hdr).to_filename(fname)
else:
- fname.write_text("")
+ fname.write_text('')
ds_inputs.append(str(fname))
- base_directory = tmp_path / "output"
+ base_directory = tmp_path / 'output'
base_directory.mkdir()
prep, save = make_prep_and_save(
@@ -351,67 +353,67 @@ def test_DerivativesDataSink_build_path(
checksum = [checksum]
if dismiss_entities:
- if "run" in dismiss_entities:
- expectation = [e.replace("_run-01", "") for e in expectation]
+ if 'run' in dismiss_entities:
+ expectation = [e.replace('_run-01', '') for e in expectation]
- if "session" in dismiss_entities:
+ if 'session' in dismiss_entities:
expectation = [
- e.replace("_ses-preop", "").replace("ses-preop/", "")
- for e in expectation
+ e.replace('_ses-preop', '').replace('ses-preop/', '') for e in expectation
]
- base = (out_path_base or "niworkflows") if interface == bintfs.DerivativesDataSink else ""
+ base = (out_path_base or 'niworkflows') if interface == bintfs.DerivativesDataSink else ''
for out, exp in zip(output, expectation):
assert Path(out).relative_to(base_directory) == Path(base) / exp
for out, exp in zip(output, expectation):
assert Path(out).relative_to(base_directory) == Path(base) / exp
# Regression - some images were given nan scale factors
- if out.endswith(".nii") or out.endswith(".nii.gz"):
+ if out.endswith('.nii') or out.endswith('.nii.gz'):
img = nb.load(out)
if isinstance(img, nb.Nifti1Image):
with nb.openers.ImageOpener(out) as fobj:
hdr = img.header.from_fileobj(fobj)
- assert not np.isnan(hdr["scl_slope"])
- assert not np.isnan(hdr["scl_inter"])
+ assert not np.isnan(hdr['scl_slope'])
+ assert not np.isnan(hdr['scl_inter'])
for out, chksum in zip(output, checksum):
- if chksum == "335f1394ce90b58bbf27026b6eeec4d2124c11da":
+ if chksum == '335f1394ce90b58bbf27026b6eeec4d2124c11da':
if sys.version_info < (3, 8):
# Python 3.8 began preserving insertion order of attributes in XML
# Therefore we get a different checksum before/after
- chksum = "a37ffb1188dd9a7b708de5b8daef46dac56ef8d4"
+ chksum = 'a37ffb1188dd9a7b708de5b8daef46dac56ef8d4'
elif Version(nb.__version__) < Version('5.3'):
# Nibabel 5.3 avoids unnecessary roundtrips for Cifti2Headers
# Older versions transformed a `SeriesStep="2"` into `SeriesStep="2.0"`
- chksum = "f7b8755c6ad0d8dcdb60676331b52a23ce288b61"
+ chksum = 'f7b8755c6ad0d8dcdb60676331b52a23ce288b61'
assert sha1(Path(out).read_bytes()).hexdigest() == chksum
-@pytest.mark.parametrize("interface", [bintfs.DerivativesDataSink, bintfs.PrepareDerivative])
+@pytest.mark.parametrize('interface', [bintfs.DerivativesDataSink, bintfs.PrepareDerivative])
def test_DerivativesDataSink_dtseries_json(tmp_path, interface):
- cifti_fname = str(tmp_path / "test.dtseries.nii")
+ cifti_fname = str(tmp_path / 'test.dtseries.nii')
- axes = (nb.cifti2.SeriesAxis(start=0, step=2, size=20),
- nb.cifti2.BrainModelAxis.from_mask(np.ones((5, 5, 5))))
+ axes = (
+ nb.cifti2.SeriesAxis(start=0, step=2, size=20),
+ nb.cifti2.BrainModelAxis.from_mask(np.ones((5, 5, 5))),
+ )
hdr = nb.cifti2.cifti2_axes.to_header(axes)
- cifti = nb.Cifti2Image(np.zeros(hdr.matrix.get_data_shape(), dtype=np.float32),
- header=hdr)
- cifti.nifti_header.set_intent("ConnDenseSeries")
+ cifti = nb.Cifti2Image(np.zeros(hdr.matrix.get_data_shape(), dtype=np.float32), header=hdr)
+ cifti.nifti_header.set_intent('ConnDenseSeries')
cifti.to_filename(cifti_fname)
- source_file = tmp_path / "bids" / "sub-01" / "func" / "sub-01_task-rest_bold.nii.gz"
+ source_file = tmp_path / 'bids' / 'sub-01' / 'func' / 'sub-01_task-rest_bold.nii.gz'
source_file.parent.mkdir(parents=True)
source_file.touch()
prep, save = make_prep_and_save(
interface,
base_directory=str(tmp_path),
- out_path_base="",
+ out_path_base='',
in_file=cifti_fname,
source_file=str(source_file),
compress=False,
- space="fsLR",
- grayordinates="91k",
+ space='fsLR',
+ grayordinates='91k',
RepetitionTime=2.0,
)
@@ -420,44 +422,44 @@ def test_DerivativesDataSink_dtseries_json(tmp_path, interface):
out_path = Path(save_result.outputs.out_file)
- assert out_path.name == "sub-01_task-rest_space-fsLR_bold.dtseries.nii"
- old_sidecar = out_path.with_name("sub-01_task-rest_space-fsLR_bold.dtseries.json")
- new_sidecar = out_path.with_name("sub-01_task-rest_space-fsLR_bold.json")
+ assert out_path.name == 'sub-01_task-rest_space-fsLR_bold.dtseries.nii'
+ old_sidecar = out_path.with_name('sub-01_task-rest_space-fsLR_bold.dtseries.json')
+ new_sidecar = out_path.with_name('sub-01_task-rest_space-fsLR_bold.json')
assert not old_sidecar.exists()
assert new_sidecar.exists()
- assert "RepetitionTime" in json.loads(new_sidecar.read_text())
+ assert 'RepetitionTime' in json.loads(new_sidecar.read_text())
-@pytest.mark.parametrize("interface", [bintfs.DerivativesDataSink, bintfs.PrepareDerivative])
+@pytest.mark.parametrize('interface', [bintfs.DerivativesDataSink, bintfs.PrepareDerivative])
@pytest.mark.parametrize(
- "space, size, units, xcodes, zipped, fixed, data_dtype",
+ 'space, size, units, xcodes, zipped, fixed, data_dtype',
[
- ("T1w", (30, 30, 30, 10), ("mm", "sec"), (2, 2), True, [False], None),
- ("T1w", (30, 30, 30, 10), ("mm", "sec"), (0, 2), True, [True], "float64"),
- ("T1w", (30, 30, 30, 10), ("mm", "sec"), (0, 0), True, [True], " 1:
data *= np.linspace(0.6, 1.0, num=10)[::-1]
t_mask = np.zeros(shape[3], dtype=bool)
t_mask[:3] = True
- fname = str(tmpdir.join("file1.nii.gz"))
+ fname = str(tmpdir.join('file1.nii.gz'))
nb.Nifti1Image(data, np.eye(4), None).to_filename(fname)
avg = im.RobustAverage(in_file=fname, t_mask=list(t_mask)).run()
@@ -194,24 +194,24 @@ def test_TemplateDimensions(tmp_path):
]
for i, (shape, zoom) in enumerate(zip(shapes, zooms)):
- img = nb.Nifti1Image(np.ones(shape, dtype="float32"), np.eye(4))
+ img = nb.Nifti1Image(np.ones(shape, dtype='float32'), np.eye(4))
img.header.set_zooms(zoom)
- img.to_filename(tmp_path / f"test{i}.nii")
+ img.to_filename(tmp_path / f'test{i}.nii')
- anat_list = [str(tmp_path / f"test{i}.nii") for i in range(2)]
+ anat_list = [str(tmp_path / f'test{i}.nii') for i in range(2)]
td = im.TemplateDimensions(anat_list=anat_list)
res = td.run()
report = Path(res.outputs.out_report).read_text()
- assert "Input T1w images: 2" in report
- assert "Output dimensions: 11x11x11" in report
- assert "Output voxel size: 0.9mm x 0.9mm x 0.9mm" in report
- assert "Discarded images: 0" in report
+ assert 'Input T1w images: 2' in report
+ assert 'Output dimensions: 11x11x11' in report
+ assert 'Output voxel size: 0.9mm x 0.9mm x 0.9mm' in report
+ assert 'Discarded images: 0' in report
assert res.outputs.t1w_valid_list == anat_list
assert res.outputs.anat_valid_list == anat_list
assert np.allclose(res.outputs.target_zooms, (0.9, 0.9, 0.9))
assert res.outputs.target_shape == (11, 11, 11)
- with pytest.warns(UserWarning, match="t1w_list .* is deprecated"):
+ with pytest.warns(UserWarning, match='t1w_list .* is deprecated'):
im.TemplateDimensions(t1w_list=anat_list)
diff --git a/niworkflows/interfaces/tests/test_itk.py b/niworkflows/interfaces/tests/test_itk.py
index 7fd015f5b73..ba4e6713189 100644
--- a/niworkflows/interfaces/tests/test_itk.py
+++ b/niworkflows/interfaces/tests/test_itk.py
@@ -33,26 +33,26 @@
from .data import load_test_data
-@pytest.mark.skipif(Info.version() is None, reason="Missing ANTs")
-@pytest.mark.parametrize("ext", (".nii", ".nii.gz"))
-@pytest.mark.parametrize("copy_dtype", (True, False))
-@pytest.mark.parametrize("in_dtype", ("i2", "f4"))
+@pytest.mark.skipif(Info.version() is None, reason='Missing ANTs')
+@pytest.mark.parametrize('ext', ('.nii', '.nii.gz'))
+@pytest.mark.parametrize('copy_dtype', (True, False))
+@pytest.mark.parametrize('in_dtype', ('i2', 'f4'))
def test_applytfms(tmpdir, ext, copy_dtype, in_dtype):
import nibabel as nb
import numpy as np
- in_file = str(tmpdir / ("src" + ext))
+ in_file = str(tmpdir / ('src' + ext))
nii = nb.Nifti1Image(np.zeros((5, 5, 5), dtype=np.float32), np.eye(4))
nii.set_data_dtype(in_dtype)
nii.to_filename(in_file)
- in_xform = data.load("itkIdentityTransform.txt")
+ in_xform = data.load('itkIdentityTransform.txt')
- ifargs = {"copy_dtype": copy_dtype, "reference_image": in_file}
+ ifargs = {'copy_dtype': copy_dtype, 'reference_image': in_file}
args = (in_file, in_xform, ifargs, 0, str(tmpdir))
out_file, cmdline = _applytfms(args)
- assert out_file == str(tmpdir / ("src_xform-%05d%s" % (0, ext)))
+ assert out_file == str(tmpdir / ('src_xform-%05d%s' % (0, ext)))
out_nii = nb.load(out_file)
assert np.allclose(nii.affine, out_nii.affine)
@@ -67,11 +67,11 @@ def test_MCFLIRT2ITK(tmp_path):
fsl2itk = pe.Node(
MCFLIRT2ITK(
- in_files=[str(test_data / "MAT_0098"), str(test_data / "MAT_0099")],
- in_reference=str(test_data / "boldref.nii"),
- in_source=str(test_data / "boldref.nii"),
+ in_files=[str(test_data / 'MAT_0098'), str(test_data / 'MAT_0099')],
+ in_reference=str(test_data / 'boldref.nii'),
+ in_source=str(test_data / 'boldref.nii'),
),
- name="fsl2itk",
+ name='fsl2itk',
base_dir=str(tmp_path),
)
@@ -82,22 +82,22 @@ def test_MCFLIRT2ITK(tmp_path):
lines = out_file.read_text().splitlines()
assert lines[:2] == [
- "#Insight Transform File V1.0",
- "#Transform 0",
+ '#Insight Transform File V1.0',
+ '#Transform 0',
]
assert re.match(
- r"Transform: (MatrixOffsetTransformBase|AffineTransform)_(float|double)_3_3",
+ r'Transform: (MatrixOffsetTransformBase|AffineTransform)_(float|double)_3_3',
lines[2],
)
- assert lines[3].startswith("Parameters: ")
- assert lines[4] == "FixedParameters: 0 0 0"
- offset = 1 if lines[5] == "" else 0
- assert lines[5 + offset] == "#Transform 1"
+ assert lines[3].startswith('Parameters: ')
+ assert lines[4] == 'FixedParameters: 0 0 0'
+ offset = 1 if lines[5] == '' else 0
+ assert lines[5 + offset] == '#Transform 1'
assert lines[6 + offset] == lines[2]
- assert lines[7 + offset].startswith("Parameters: ")
+ assert lines[7 + offset].startswith('Parameters: ')
- params0 = np.array([float(p) for p in lines[3].split(" ")[1:]])
- params1 = np.array([float(p) for p in lines[7 + offset].split(" ")[1:]])
+ params0 = np.array([float(p) for p in lines[3].split(' ')[1:]])
+ params1 = np.array([float(p) for p in lines[7 + offset].split(' ')[1:]])
# Empirically determined
assert np.allclose(
params0,
diff --git a/niworkflows/interfaces/tests/test_morphology.py b/niworkflows/interfaces/tests/test_morphology.py
index bb457d6f1b2..72ade3c953d 100644
--- a/niworkflows/interfaces/tests/test_morphology.py
+++ b/niworkflows/interfaces/tests/test_morphology.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Test morphology module."""
+
from pathlib import Path
import shutil
import numpy as np
@@ -35,40 +36,40 @@
def test_BinaryDilation_interface(tmpdir):
"""Check the dilation interface."""
- data = np.zeros((80, 80, 80), dtype="uint8")
+ data = np.zeros((80, 80, 80), dtype='uint8')
data[30:-30, 35:-35, 20:-20] = 1
- nb.Nifti1Image(data, np.eye(4), None).to_filename("mask.nii.gz")
+ nb.Nifti1Image(data, np.eye(4), None).to_filename('mask.nii.gz')
out1 = (
BinaryDilation(
- in_mask=str(Path("mask.nii.gz").absolute()),
+ in_mask=str(Path('mask.nii.gz').absolute()),
radius=4,
)
.run()
.outputs.out_mask
)
- shutil.move(out1, "large_radius.nii.gz")
+ shutil.move(out1, 'large_radius.nii.gz')
out2 = (
BinaryDilation(
- in_mask=str(Path("mask.nii.gz").absolute()),
+ in_mask=str(Path('mask.nii.gz').absolute()),
radius=1,
)
.run()
.outputs.out_mask
)
- shutil.move(out2, "small_radius.nii.gz")
+ shutil.move(out2, 'small_radius.nii.gz')
out_final = (
BinarySubtraction(
- in_base=str(Path("large_radius.nii.gz").absolute()),
- in_subtract=str(Path("small_radius.nii.gz").absolute()),
+ in_base=str(Path('large_radius.nii.gz').absolute()),
+ in_subtract=str(Path('small_radius.nii.gz').absolute()),
)
.run()
.outputs.out_mask
)
- out_data = np.asanyarray(nb.load(out_final).dataobj, dtype="uint8")
+ out_data = np.asanyarray(nb.load(out_final).dataobj, dtype='uint8')
assert np.all(out_data[data] == 0)
diff --git a/niworkflows/interfaces/tests/test_nibabel.py b/niworkflows/interfaces/tests/test_nibabel.py
index bd61ee22fae..fd8e1937e21 100644
--- a/niworkflows/interfaces/tests/test_nibabel.py
+++ b/niworkflows/interfaces/tests/test_nibabel.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""test nibabel interfaces."""
+
import json
import os
from pathlib import Path
@@ -30,7 +31,13 @@
import pytest
from ..nibabel import (
- Binarize, ApplyMask, SplitSeries, MergeSeries, MergeROIs, MapLabels, ReorientImage
+ Binarize,
+ ApplyMask,
+ SplitSeries,
+ MergeSeries,
+ MergeROIs,
+ MapLabels,
+ ReorientImage,
)
@@ -41,10 +48,11 @@ def create_roi(tmp_path):
def _create_roi(affine, img_data, roi_index):
img_data[tuple(roi_index)] = 1
nii = nb.Nifti1Image(img_data, affine)
- filename = tmp_path / f"{str(uuid.uuid4())}.nii.gz"
+ filename = tmp_path / f'{str(uuid.uuid4())}.nii.gz'
files.append(filename)
nii.to_filename(filename)
return filename
+
yield _create_roi
# cleanup files
for f in files:
@@ -62,7 +70,7 @@ def create_image(data, filename):
@pytest.mark.parametrize(
- "affine, data, roi_index, error, err_message",
+ 'affine, data, roi_index, error, err_message',
[
(np.eye(4), np.zeros((2, 2, 2, 2), dtype=np.uint16), [1, 0], None, None),
(
@@ -70,21 +78,21 @@ def create_image(data, filename):
np.zeros((2, 2, 3, 2), dtype=np.uint16),
[1, 0],
True,
- "Mismatch in image shape",
+ 'Mismatch in image shape',
),
(
bad_affine,
np.zeros((2, 2, 2, 2), dtype=np.uint16),
[1, 0],
True,
- "Mismatch in affine",
+ 'Mismatch in affine',
),
(
np.eye(4),
np.zeros((2, 2, 2, 2), dtype=np.uint16),
[0, 0, 0],
True,
- "Overlapping ROIs",
+ 'Overlapping ROIs',
),
],
)
@@ -111,10 +119,10 @@ def test_Binarize(tmp_path):
mask = np.zeros((20, 20, 20), dtype=bool)
mask[5:15, 5:15, 5:15] = bool
- data = np.zeros_like(mask, dtype="float32")
+ data = np.zeros_like(mask, dtype='float32')
data[mask] = np.random.gamma(2, size=mask.sum())
- in_file = tmp_path / "input.nii.gz"
+ in_file = tmp_path / 'input.nii.gz'
nb.Nifti1Image(data, np.eye(4), None).to_filename(str(in_file))
binif = Binarize(thresh_low=0.0, in_file=str(in_file)).run()
@@ -133,32 +141,28 @@ def test_ApplyMask(tmp_path):
mask[8:11, 8:11, 8:11] = 1.0
# Test the 3D
- in_file = tmp_path / "input3D.nii.gz"
+ in_file = tmp_path / 'input3D.nii.gz'
nb.Nifti1Image(data, np.eye(4), None).to_filename(str(in_file))
- in_mask = tmp_path / "mask.nii.gz"
+ in_mask = tmp_path / 'mask.nii.gz'
nb.Nifti1Image(mask, np.eye(4), None).to_filename(str(in_mask))
masked1 = ApplyMask(in_file=str(in_file), in_mask=str(in_mask), threshold=0.4).run()
- assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 5 ** 3
+ assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 5**3
masked1 = ApplyMask(in_file=str(in_file), in_mask=str(in_mask), threshold=0.6).run()
- assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 3 ** 3
+ assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 3**3
data4d = np.stack((data, 2 * data, 3 * data), axis=-1)
# Test the 4D case
- in_file4d = tmp_path / "input4D.nii.gz"
+ in_file4d = tmp_path / 'input4D.nii.gz'
nb.Nifti1Image(data4d, np.eye(4), None).to_filename(str(in_file4d))
- masked1 = ApplyMask(
- in_file=str(in_file4d), in_mask=str(in_mask), threshold=0.4
- ).run()
- assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 5 ** 3 * 6
+ masked1 = ApplyMask(in_file=str(in_file4d), in_mask=str(in_mask), threshold=0.4).run()
+ assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 5**3 * 6
- masked1 = ApplyMask(
- in_file=str(in_file4d), in_mask=str(in_mask), threshold=0.6
- ).run()
- assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 3 ** 3 * 6
+ masked1 = ApplyMask(in_file=str(in_file4d), in_mask=str(in_mask), threshold=0.6).run()
+ assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 3**3 * 6
# Test errors
nb.Nifti1Image(mask, 2 * np.eye(4), None).to_filename(str(in_mask))
@@ -173,7 +177,7 @@ def test_ApplyMask(tmp_path):
@pytest.mark.parametrize(
- "shape,exp_n",
+ 'shape,exp_n',
[
((20, 20, 20, 15), 15),
((20, 20, 20), 1),
@@ -193,15 +197,13 @@ def test_SplitSeries(tmp_path, shape, exp_n):
"""Test 4-to-3 NIfTI split interface."""
os.chdir(tmp_path)
- in_file = str(tmp_path / "input.nii.gz")
+ in_file = str(tmp_path / 'input.nii.gz')
nb.Nifti1Image(np.ones(shape, dtype=float), np.eye(4), None).to_filename(in_file)
_interface = SplitSeries(in_file=in_file)
if exp_n > 0:
split = _interface.run()
- n = int(isinstance(split.outputs.out_files, str)) or len(
- split.outputs.out_files
- )
+ n = int(isinstance(split.outputs.out_files, str)) or len(split.outputs.out_files)
assert n == exp_n
else:
with pytest.raises(ValueError):
@@ -212,18 +214,14 @@ def test_MergeSeries(tmp_path):
"""Test 3-to-4 NIfTI concatenation interface."""
os.chdir(str(tmp_path))
- in_file = tmp_path / "input3D.nii.gz"
- nb.Nifti1Image(np.ones((20, 20, 20), dtype=float), np.eye(4), None).to_filename(
- str(in_file)
- )
+ in_file = tmp_path / 'input3D.nii.gz'
+ nb.Nifti1Image(np.ones((20, 20, 20), dtype=float), np.eye(4), None).to_filename(str(in_file))
merge = MergeSeries(in_files=[str(in_file)] * 5).run()
assert nb.load(merge.outputs.out_file).dataobj.shape == (20, 20, 20, 5)
- in_4D = tmp_path / "input4D.nii.gz"
- nb.Nifti1Image(np.ones((20, 20, 20, 4), dtype=float), np.eye(4), None).to_filename(
- str(in_4D)
- )
+ in_4D = tmp_path / 'input4D.nii.gz'
+ nb.Nifti1Image(np.ones((20, 20, 20, 4), dtype=float), np.eye(4), None).to_filename(str(in_4D))
merge = MergeSeries(in_files=[str(in_file)] + [str(in_4D)]).run()
assert nb.load(merge.outputs.out_file).dataobj.shape == (20, 20, 20, 5)
@@ -256,7 +254,7 @@ def test_MergeSeries_affines(tmp_path):
@pytest.mark.parametrize(
- "data,mapping,tojson,expected",
+ 'data,mapping,tojson,expected',
[
(LABEL_INPUT, LABEL_MAPPINGS, False, LABEL_OUTPUT),
(LABEL_INPUT, LABEL_MAPPINGS, True, LABEL_OUTPUT),
@@ -264,7 +262,7 @@ def test_MergeSeries_affines(tmp_path):
)
def test_map_labels(tmpdir, data, mapping, tojson, expected):
tmpdir.chdir()
- in_file = create_image(data, Path("test.nii.gz"))
+ in_file = create_image(data, Path('test.nii.gz'))
maplbl = MapLabels(in_file=in_file)
if tojson:
map_file = Path('mapping.json')
@@ -288,7 +286,7 @@ def create_save_img(ornt: str):
data = np.random.rand(2, 2, 2)
img = nb.Nifti1Image(data, affine=np.eye(4))
# img will always be in RAS at the start
- ras = nb.orientations.axcodes2ornt("RAS")
+ ras = nb.orientations.axcodes2ornt('RAS')
if ornt != 'RAS':
new = nb.orientations.axcodes2ornt(ornt)
xfm = nb.orientations.ornt_transform(ras, new)
@@ -299,13 +297,13 @@ def create_save_img(ornt: str):
@pytest.mark.parametrize(
- "in_ornt,out_ornt",
+ 'in_ornt,out_ornt',
[
- ("RAS", "RAS"),
- ("RAS", "LAS"),
- ("LAS", "RAS"),
- ("RAS", "RPI"),
- ("LPI", "RAS"),
+ ('RAS', 'RAS'),
+ ('RAS', 'LAS'),
+ ('LAS', 'RAS'),
+ ('RAS', 'RPI'),
+ ('LPI', 'RAS'),
],
)
def test_reorient_image(tmpdir, in_ornt, out_ornt):
diff --git a/niworkflows/interfaces/tests/test_plotting.py b/niworkflows/interfaces/tests/test_plotting.py
index e6743393ddc..2c6ee8fc22e 100644
--- a/niworkflows/interfaces/tests/test_plotting.py
+++ b/niworkflows/interfaces/tests/test_plotting.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Tests plotting interfaces."""
+
import os
import nibabel as nb
from niworkflows import viz
@@ -31,10 +32,10 @@
def test_cifti_carpetplot():
"""Exercise extraction of timeseries from CIFTI2."""
- save_artifacts = os.getenv("SAVE_CIRCLE_ARTIFACTS", False)
+ save_artifacts = os.getenv('SAVE_CIRCLE_ARTIFACTS', False)
cifti_file = load_test_data(
- "sub-01_task-mixedgamblestask_run-02_space-fsLR_den-91k_bold.dtseries.nii"
+ 'sub-01_task-mixedgamblestask_run-02_space-fsLR_den-91k_bold.dtseries.nii'
)
data, segments = _cifti_timeseries(str(cifti_file))
viz.plot_carpet(
@@ -42,34 +43,26 @@ def test_cifti_carpetplot():
segments,
tr=_get_tr(nb.load(cifti_file)),
output_file=(
- os.path.join(
- save_artifacts, "carpetplot_cifti.svg"
- ) if save_artifacts else None
+ os.path.join(save_artifacts, 'carpetplot_cifti.svg') if save_artifacts else None
),
drop_trs=0,
- cmap="paired",
+ cmap='paired',
)
def test_nifti_carpetplot():
"""Exercise extraction of timeseries from CIFTI2."""
- save_artifacts = os.getenv("SAVE_CIRCLE_ARTIFACTS", False)
+ save_artifacts = os.getenv('SAVE_CIRCLE_ARTIFACTS', False)
- nifti_file = load_test_data(
- "sub-ds205s03_task-functionallocalizer_run-01_bold_volreg.nii.gz"
- )
- seg_file = load_test_data(
- "sub-ds205s03_task-functionallocalizer_run-01_bold_parc.nii.gz"
- )
+ nifti_file = load_test_data('sub-ds205s03_task-functionallocalizer_run-01_bold_volreg.nii.gz')
+ seg_file = load_test_data('sub-ds205s03_task-functionallocalizer_run-01_bold_parc.nii.gz')
data, segments = _nifti_timeseries(str(nifti_file), str(seg_file))
viz.plot_carpet(
data,
segments,
tr=_get_tr(nb.load(nifti_file)),
output_file=(
- os.path.join(
- save_artifacts, "carpetplot_nifti.svg"
- ) if save_artifacts else None
+ os.path.join(save_artifacts, 'carpetplot_nifti.svg') if save_artifacts else None
),
drop_trs=0,
)
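[Editor's note: both tests gate artifact saving on the same environment lookup. `os.getenv('SAVE_CIRCLE_ARTIFACTS', False)` yields either a directory path string or the falsy default, so it can drive `output_file` directly. A minimal sketch of the idiom; the filename is a placeholder.]

import os

def artifact_path(filename):
    # Returns a path under $SAVE_CIRCLE_ARTIFACTS when it is set, else None,
    # matching the `output_file=... if save_artifacts else None` pattern above
    save_artifacts = os.getenv('SAVE_CIRCLE_ARTIFACTS', False)
    return os.path.join(save_artifacts, filename) if save_artifacts else None

print(artifact_path('carpetplot_cifti.svg'))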
diff --git a/niworkflows/interfaces/tests/test_utility.py b/niworkflows/interfaces/tests/test_utility.py
index 0b18b55b2c2..6500913e065 100644
--- a/niworkflows/interfaces/tests/test_utility.py
+++ b/niworkflows/interfaces/tests/test_utility.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""KeySelect tests."""
+
from pathlib import Path
import pytest
from ..utility import KeySelect, _tsv2json
@@ -29,7 +30,7 @@
def test_KeySelect():
"""Test KeySelect."""
with pytest.raises(ValueError):
- KeySelect(fields="field1", keys=["a", "b", "c", "a"])
+ KeySelect(fields='field1', keys=['a', 'b', 'c', 'a'])
with pytest.raises(ValueError):
KeySelect(fields=[])
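[Editor's sketch of KeySelect as exercised here, with made-up field names and paths: it indexes several equal-length inputs by a single `key` drawn from `keys`, and duplicate keys are rejected at construction, which is what the first `pytest.raises` asserts.]

from niworkflows.interfaces.utility import KeySelect

ks = KeySelect(fields=['moving_image'], keys=['MNI152NLin6Asym', 'MNI152NLin2009cAsym'])
ks.inputs.moving_image = ['/data/a.nii.gz', '/data/b.nii.gz']  # one entry per key
ks.inputs.key = 'MNI152NLin2009cAsym'
result = ks.run()
assert result.outputs.moving_image == '/data/b.nii.gz'  # entry at keys.index(key)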
diff --git a/niworkflows/interfaces/utility.py b/niworkflows/interfaces/utility.py
index 2f0b1843f39..24d5a557d88 100644
--- a/niworkflows/interfaces/utility.py
+++ b/niworkflows/interfaces/utility.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Interfaces under evaluation before upstreaming to nipype.interfaces.utility."""
+
import numpy as np
import re
import json
@@ -43,12 +44,12 @@
class _KeySelectInputSpec(DynamicTraitedSpec):
- key = Str(mandatory=True, desc="selective key")
- keys = InputMultiObject(Str, mandatory=True, min=1, desc="index of keys")
+ key = Str(mandatory=True, desc='selective key')
+ keys = InputMultiObject(Str, mandatory=True, min=1, desc='index of keys')
class _KeySelectOutputSpec(DynamicTraitedSpec):
- key = Str(desc="propagates selected key")
+ key = Str(desc='propagates selected key')
class KeySelect(BaseInterface):
@@ -161,15 +162,14 @@ def __init__(self, keys=None, fields=None, **inputs):
# Handle and initiate fields
if not fields:
raise ValueError(
- "A list or multiplexed fields must be provided at "
- "instantiation time."
+ 'A list or multiplexed fields must be provided at instantiation time.'
)
if isinstance(fields, str):
fields = [fields]
_invalid = set(self.input_spec.class_editable_traits()).intersection(fields)
if _invalid:
- raise ValueError("Some fields are invalid (%s)." % ", ".join(_invalid))
+ raise ValueError('Some fields are invalid (%s).' % ', '.join(_invalid))
self._fields = fields
@@ -185,17 +185,15 @@ def __init__(self, keys=None, fields=None, **inputs):
setattr(self.inputs, in_field, inputs[in_field])
def _check_len(self, name, new):
- if name == "keys":
+ if name == 'keys':
nitems = len(new)
if len(set(new)) != nitems:
- raise ValueError(
- "Found duplicated entries in the index of ordered keys"
- )
+ raise ValueError('Found duplicated entries in the index of ordered keys')
if not isdefined(self.inputs.keys):
return
- if name == "key" and new not in self.inputs.keys:
+ if name == 'key' and new not in self.inputs.keys:
raise ValueError('Selected key "%s" not found in the index' % new)
if name in self._fields:
@@ -207,7 +205,7 @@ def _check_len(self, name, new):
if len(new) != len(self.inputs.keys):
raise ValueError(
'Length of value (%s) for input field "%s" does not match '
- "the length of the indexing list." % (new, name)
+ 'the length of the indexing list.' % (new, name)
)
def _run_interface(self, runtime):
@@ -218,7 +216,7 @@ def _list_outputs(self):
outputs = {k: getattr(self.inputs, k)[index] for k in self._fields}
- outputs["key"] = self.inputs.key
+ outputs['key'] = self.inputs.key
return outputs
def _outputs(self):
@@ -228,12 +226,12 @@ def _outputs(self):
class _AddTSVHeaderInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="input file")
- columns = traits.List(traits.Str, mandatory=True, desc="header for columns")
+ in_file = File(exists=True, mandatory=True, desc='input file')
+ columns = traits.List(traits.Str, mandatory=True, desc='header for columns')
class _AddTSVHeaderOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="output average file")
+ out_file = File(exists=True, desc='output average file')
class AddTSVHeader(SimpleInterface):
@@ -259,13 +257,14 @@ class AddTSVHeader(SimpleInterface):
True
"""
+
input_spec = _AddTSVHeaderInputSpec
output_spec = _AddTSVHeaderOutputSpec
def _run_interface(self, runtime):
out_file = fname_presuffix(
self.inputs.in_file,
- suffix="_motion.tsv",
+ suffix='_motion.tsv',
newpath=runtime.cwd,
use_ext=False,
)
@@ -273,24 +272,24 @@ def _run_interface(self, runtime):
np.savetxt(
out_file,
data,
- delimiter="\t",
- header="\t".join(self.inputs.columns),
- comments="",
+ delimiter='\t',
+ header='\t'.join(self.inputs.columns),
+ comments='',
)
- self._results["out_file"] = out_file
+ self._results['out_file'] = out_file
return runtime
class _JoinTSVColumnsInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="input file")
- join_file = File(exists=True, mandatory=True, desc="file to be adjoined")
- side = traits.Enum("right", "left", usedefault=True, desc="where to join")
- columns = traits.List(traits.Str, desc="header for columns")
+ in_file = File(exists=True, mandatory=True, desc='input file')
+ join_file = File(exists=True, mandatory=True, desc='file to be adjoined')
+ side = traits.Enum('right', 'left', usedefault=True, desc='where to join')
+ columns = traits.List(traits.Str, desc='header for columns')
class _JoinTSVColumnsOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="output TSV file")
+ out_file = File(exists=True, desc='output TSV file')
class JoinTSVColumns(SimpleInterface):
@@ -348,20 +347,21 @@ class JoinTSVColumns(SimpleInterface):
True
"""
+
input_spec = _JoinTSVColumnsInputSpec
output_spec = _JoinTSVColumnsOutputSpec
def _run_interface(self, runtime):
out_file = fname_presuffix(
self.inputs.in_file,
- suffix="_joined.tsv",
+ suffix='_joined.tsv',
newpath=runtime.cwd,
use_ext=False,
)
- header = ""
+ header = ''
if isdefined(self.inputs.columns) and self.inputs.columns:
- header = "\t".join(self.inputs.columns)
+ header = '\t'.join(self.inputs.columns)
with open(self.inputs.in_file) as ifh:
data = ifh.read().splitlines(keepends=False)
@@ -370,33 +370,33 @@ def _run_interface(self, runtime):
join = ifh.read().splitlines(keepends=False)
if len(data) != len(join):
- raise ValueError("Number of columns in datasets do not match")
+ raise ValueError('Number of columns in datasets do not match')
merged = []
for d, j in zip(data, join):
- line = "%s\t%s" % ((j, d) if self.inputs.side == "left" else (d, j))
+ line = '%s\t%s' % ((j, d) if self.inputs.side == 'left' else (d, j))
merged.append(line)
if header:
merged.insert(0, header)
- with open(out_file, "w") as ofh:
- ofh.write("\n".join(merged))
+ with open(out_file, 'w') as ofh:
+ ofh.write('\n'.join(merged))
- self._results["out_file"] = out_file
+ self._results['out_file'] = out_file
return runtime
class _DictMergeInputSpec(BaseInterfaceInputSpec):
in_dicts = traits.List(
traits.Either(traits.Dict, traits.Instance(OrderedDict)),
- desc="Dictionaries to be merged. In the event of a collision, values "
- "from dictionaries later in the list receive precedence.",
+ desc='Dictionaries to be merged. In the event of a collision, values '
+ 'from dictionaries later in the list receive precedence.',
)
class _DictMergeOutputSpec(TraitedSpec):
- out_dict = traits.Dict(desc="Merged dictionary")
+ out_dict = traits.Dict(desc='Merged dictionary')
class DictMerge(SimpleInterface):
@@ -409,45 +409,43 @@ def _run_interface(self, runtime):
out_dict = {}
for in_dict in self.inputs.in_dicts:
out_dict.update(in_dict)
- self._results["out_dict"] = out_dict
+ self._results['out_dict'] = out_dict
return runtime
class _TSV2JSONInputSpec(BaseInterfaceInputSpec):
- in_file = File(exists=True, mandatory=True, desc="Input TSV file")
+ in_file = File(exists=True, mandatory=True, desc='Input TSV file')
index_column = traits.Str(
mandatory=True,
- desc="Name of the column in the TSV to be used "
- "as the top-level key in the JSON. All "
- "remaining columns will be assigned as "
- "nested keys.",
+ desc='Name of the column in the TSV to be used '
+ 'as the top-level key in the JSON. All '
+ 'remaining columns will be assigned as '
+ 'nested keys.',
)
output = traits.Either(
None,
File,
- desc="Path where the output file is to be saved. "
- "If this is `None`, then a JSON-compatible "
- "dictionary is returned instead.",
+ desc='Path where the output file is to be saved. '
+ 'If this is `None`, then a JSON-compatible '
+ 'dictionary is returned instead.',
)
additional_metadata = traits.Either(
None,
traits.Dict,
traits.Instance(OrderedDict),
usedefault=True,
- desc="Any additional metadata that "
- "should be applied to all "
- "entries in the JSON.",
+ desc='Any additional metadata that should be applied to all entries in the JSON.',
)
drop_columns = traits.Either(
None,
traits.List(),
usedefault=True,
- desc="List of columns in the TSV to be dropped from the JSON.",
+ desc='List of columns in the TSV to be dropped from the JSON.',
)
enforce_case = traits.Bool(
True,
usedefault=True,
- desc="Enforce snake case for top-level keys and camel case for nested keys",
+ desc='Enforce snake case for top-level keys and camel case for nested keys',
)
@@ -456,7 +454,7 @@ class _TSV2JSONOutputSpec(TraitedSpec):
traits.Dict,
File(exists=True),
traits.Instance(OrderedDict),
- desc="Output dictionary or JSON file",
+ desc='Output dictionary or JSON file',
)
@@ -469,12 +467,12 @@ class TSV2JSON(SimpleInterface):
def _run_interface(self, runtime):
if not isdefined(self.inputs.output):
output = fname_presuffix(
- self.inputs.in_file, suffix=".json", newpath=runtime.cwd, use_ext=False
+ self.inputs.in_file, suffix='.json', newpath=runtime.cwd, use_ext=False
)
else:
output = self.inputs.output
- self._results["output"] = _tsv2json(
+ self._results['output'] = _tsv2json(
in_tsv=self.inputs.in_file,
out_json=output,
index_column=self.inputs.index_column,
@@ -525,45 +523,42 @@ def _tsv2json(
# Adapted from https://dev.to/rrampage/snake-case-to-camel-case-and- ...
# back-using-regular-expressions-and-python-m9j
-    re_to_camel = r"(.*?)_([a-zA-Z0-9])"
+    re_to_camel = r'(.*?)_([a-zA-Z0-9])'
[the rest of this hunk, including the re_to_snake pattern, and the header of the
niworkflows/interfaces/workbench.py diff were lost where angle-bracketed text was
stripped; the diff resumes inside MetricResampleInputSpec]
     new_sphere = File(
-        desc="A sphere surface that is in register with <current sphere> and"
-        " has the desired output mesh",
+        desc='A sphere surface that is in register with <current sphere> and'
+        ' has the desired output mesh',
)
method = traits.Enum(
- "ADAP_BARY_AREA",
- "BARYCENTRIC",
- argstr="%s",
+ 'ADAP_BARY_AREA',
+ 'BARYCENTRIC',
+ argstr='%s',
mandatory=True,
position=3,
- desc="The method name - ADAP_BARY_AREA method is recommended for"
- " ordinary metric data, because it should use all data while"
- " downsampling, unlike BARYCENTRIC. If ADAP_BARY_AREA is used,"
- " exactly one of area_surfs or area_metrics must be specified",
+ desc='The method name - ADAP_BARY_AREA method is recommended for'
+ ' ordinary metric data, because it should use all data while'
+ ' downsampling, unlike BARYCENTRIC. If ADAP_BARY_AREA is used,'
+ ' exactly one of area_surfs or area_metrics must be specified',
)
out_file = File(
- name_source=["new_sphere"],
- name_template="%s.out",
+ name_source=['new_sphere'],
+ name_template='%s.out',
keep_extension=True,
- argstr="%s",
+ argstr='%s',
position=4,
- desc="The output metric",
+ desc='The output metric',
)
area_surfs = traits.Bool(
position=5,
- argstr="-area-surfs",
- xor=["area_metrics"],
- desc="Specify surfaces to do vertex area correction based on",
+ argstr='-area-surfs',
+ xor=['area_metrics'],
+ desc='Specify surfaces to do vertex area correction based on',
)
area_metrics = traits.Bool(
position=5,
- argstr="-area-metrics",
- xor=["area_surfs"],
- desc="Specify vertex area metrics to do area correction based on",
+ argstr='-area-metrics',
+ xor=['area_surfs'],
+ desc='Specify vertex area metrics to do area correction based on',
)
current_area = File(
exists=True,
position=6,
- argstr="%s",
- desc="A relevant anatomical surface with mesh OR"
- " a metric file with vertex areas for mesh",
+ argstr='%s',
+ desc='A relevant anatomical surface with mesh OR'
+ ' a metric file with vertex areas for mesh',
)
new_area = File(
exists=True,
position=7,
- argstr="%s",
- desc="A relevant anatomical surface with mesh OR"
- " a metric file with vertex areas for mesh",
+ argstr='%s',
+ desc='A relevant anatomical surface with mesh OR'
+ ' a metric file with vertex areas for mesh',
)
roi_metric = File(
exists=True,
position=8,
- argstr="-current-roi %s",
- desc="Input roi on the current mesh used to exclude non-data vertices",
+ argstr='-current-roi %s',
+ desc='Input roi on the current mesh used to exclude non-data vertices',
)
valid_roi_out = traits.Bool(
position=9,
- argstr="-valid-roi-out",
- desc="Output the ROI of vertices that got data from valid source vertices",
+ argstr='-valid-roi-out',
+ desc='Output the ROI of vertices that got data from valid source vertices',
)
largest = traits.Bool(
position=10,
- argstr="-largest",
- desc="Use only the value of the vertex with the largest weight",
+ argstr='-largest',
+ desc='Use only the value of the vertex with the largest weight',
)
class MetricResampleOutputSpec(TraitedSpec):
- out_file = File(exists=True, desc="the output metric")
- roi_file = File(desc="ROI of vertices that got data from valid source vertices")
+ out_file = File(exists=True, desc='the output metric')
+ roi_file = File(desc='ROI of vertices that got data from valid source vertices')
class MetricResample(WBCommand, OpenMPCommandMixin):
@@ -276,184 +277,184 @@ class MetricResample(WBCommand, OpenMPCommandMixin):
input_spec = MetricResampleInputSpec
output_spec = MetricResampleOutputSpec
- _cmd = "wb_command -metric-resample"
+ _cmd = 'wb_command -metric-resample'
def _format_arg(self, opt, spec, val):
- if opt in ["current_area", "new_area"]:
+ if opt in ['current_area', 'new_area']:
if not self.inputs.area_surfs and not self.inputs.area_metrics:
raise ValueError(
- "{} was set but neither area_surfs or area_metrics were set".format(opt)
+ '{} was set but neither area_surfs or area_metrics were set'.format(opt)
)
- if opt == "method":
+ if opt == 'method':
if (
- val == "ADAP_BARY_AREA"
+ val == 'ADAP_BARY_AREA'
and not self.inputs.area_surfs
and not self.inputs.area_metrics
):
- raise ValueError("Exactly one of area_surfs or area_metrics must be specified")
- if opt == "valid_roi_out" and val:
+ raise ValueError('Exactly one of area_surfs or area_metrics must be specified')
+ if opt == 'valid_roi_out' and val:
# generate a filename and add it to argstr
- roi_out = self._gen_filename(self.inputs.in_file, suffix="_roi")
- iflogger.info("Setting roi output file as", roi_out)
- spec.argstr += " " + roi_out
+ roi_out = self._gen_filename(self.inputs.in_file, suffix='_roi')
+ iflogger.info('Setting roi output file as', roi_out)
+ spec.argstr += ' ' + roi_out
return super()._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = super()._list_outputs()
if self.inputs.valid_roi_out:
- roi_file = self._gen_filename(self.inputs.in_file, suffix="_roi")
- outputs["roi_file"] = os.path.abspath(roi_file)
+ roi_file = self._gen_filename(self.inputs.in_file, suffix='_roi')
+ outputs['roi_file'] = os.path.abspath(roi_file)
return outputs
class VolumeToSurfaceMappingInputSpec(OpenMPTraitedSpec):
volume_file = File(
exists=True,
- argstr="%s",
+ argstr='%s',
mandatory=True,
position=1,
- desc="the volume to map data from",
+ desc='the volume to map data from',
)
surface_file = File(
exists=True,
- argstr="%s",
+ argstr='%s',
mandatory=True,
position=2,
- desc="the surface to map the data onto",
+ desc='the surface to map the data onto',
)
out_file = File(
- name_source=["surface_file"],
- name_template="%s_mapped.func.gii",
+ name_source=['surface_file'],
+ name_template='%s_mapped.func.gii',
keep_extension=False,
- argstr="%s",
+ argstr='%s',
position=3,
- desc="the output metric file",
+ desc='the output metric file',
)
method = traits.Enum(
- "trilinear",
- "enclosing",
- "cubic",
- "ribbon-constrained",
- "myelin-style",
- argstr="-%s",
+ 'trilinear',
+ 'enclosing',
+ 'cubic',
+ 'ribbon-constrained',
+ 'myelin-style',
+ argstr='-%s',
position=4,
- desc="the interpolation method to use",
+ desc='the interpolation method to use',
)
_ribbon_constrained = [
- "inner_surface",
- "outer_surface",
- "volume_roi",
- "weighted",
- "voxel_subdiv",
- "gaussian",
- "interpolate",
- "bad_vertices_out",
- "output_weights",
- "output_weights_text",
+ 'inner_surface',
+ 'outer_surface',
+ 'volume_roi',
+ 'weighted',
+ 'voxel_subdiv',
+ 'gaussian',
+ 'interpolate',
+ 'bad_vertices_out',
+ 'output_weights',
+ 'output_weights_text',
]
_myelin_style = [
- "ribbon_roi",
- "thickness",
- "sigma",
- "legacy_bug",
+ 'ribbon_roi',
+ 'thickness',
+ 'sigma',
+ 'legacy_bug',
]
inner_surface = File(
exists=True,
- argstr="%s",
+ argstr='%s',
position=5,
- desc="the inner surface of the ribbon [-ribbon-constrained]",
+ desc='the inner surface of the ribbon [-ribbon-constrained]',
xor=_myelin_style,
)
outer_surface = File(
exists=True,
- argstr="%s",
+ argstr='%s',
position=6,
- desc="the outer surface of the ribbon [-ribbon-constrained]",
+ desc='the outer surface of the ribbon [-ribbon-constrained]',
xor=_myelin_style,
)
volume_roi = File(
exists=True,
- argstr="-volume-roi %s",
+ argstr='-volume-roi %s',
position=7,
- desc="use a volume roi [-ribbon-constrained]",
+ desc='use a volume roi [-ribbon-constrained]',
xor=_myelin_style,
)
weighted = traits.Bool(
- argstr="-weighted",
+ argstr='-weighted',
position=8,
- desc="treat the roi values as weightings rather than binary [-ribbon-constrained]",
- requires=["volume_roi"],
+ desc='treat the roi values as weightings rather than binary [-ribbon-constrained]',
+ requires=['volume_roi'],
xor=_myelin_style,
)
voxel_subdiv = traits.Int(
default_value=3,
- argstr="-voxel-subdiv %d",
- desc="voxel divisions while estimating voxel weights [-ribbon-constrained]",
+ argstr='-voxel-subdiv %d',
+ desc='voxel divisions while estimating voxel weights [-ribbon-constrained]',
xor=_myelin_style,
)
thin_columns = traits.Bool(
- argstr="-thin-columns",
- desc="use non-overlapping polyhedra [-ribbon-constrained]",
+ argstr='-thin-columns',
+ desc='use non-overlapping polyhedra [-ribbon-constrained]',
xor=_myelin_style,
)
gaussian = traits.Float(
- argstr="-gaussian %g",
+ argstr='-gaussian %g',
desc="reduce weight to voxels that aren't near [-ribbon-constrained]",
xor=_myelin_style,
)
interpolate = traits.Enum(
- "CUBIC",
- "TRILINEAR",
- "ENCLOSING_VOXEL",
- argstr="-interpolate %s",
- desc="instead of a weighted average of voxels, "
- "interpolate at subpoints inside the ribbon [-ribbon-constrained]",
+ 'CUBIC',
+ 'TRILINEAR',
+ 'ENCLOSING_VOXEL',
+ argstr='-interpolate %s',
+ desc='instead of a weighted average of voxels, '
+ 'interpolate at subpoints inside the ribbon [-ribbon-constrained]',
xor=_myelin_style,
)
bad_vertices_out = File(
- argstr="-bad-vertices-out %s",
+ argstr='-bad-vertices-out %s',
desc="output an ROI of which vertices didn't intersect any valid voxels",
xor=_myelin_style,
)
output_weights = traits.Int(
- argstr="-output-weights %(0)d output_weights.nii.gz",
- desc="write the voxel weights for a vertex to a volume file",
+ argstr='-output-weights %(0)d output_weights.nii.gz',
+ desc='write the voxel weights for a vertex to a volume file',
xor=_myelin_style,
)
output_weights_text = traits.File(
- argstr="-output-weights-text %s",
- desc="write the voxel weights for all vertices to a text file",
+ argstr='-output-weights-text %s',
+ desc='write the voxel weights for all vertices to a text file',
xor=_myelin_style,
)
ribbon_roi = File(
exists=True,
- argstr="%s",
+ argstr='%s',
position=5,
- desc="an roi volume of the cortical ribbon for this hemisphere [-myelin-style]",
+ desc='an roi volume of the cortical ribbon for this hemisphere [-myelin-style]',
xor=_ribbon_constrained,
)
thickness = File(
exists=True,
- argstr="%s",
+ argstr='%s',
position=6,
- desc="the thickness metric file for this hemisphere [-myelin-style]",
+ desc='the thickness metric file for this hemisphere [-myelin-style]',
xor=_ribbon_constrained,
)
sigma = traits.Float(
- argstr="%g",
+ argstr='%g',
position=7,
- desc="gaussian kernel in mm for weighting voxels within range [-myelin-style]",
+ desc='gaussian kernel in mm for weighting voxels within range [-myelin-style]',
xor=_ribbon_constrained,
)
legacy_bug = traits.Bool(
- argstr="-legacy-bug",
+ argstr='-legacy-bug',
position=8,
- desc="use the old bug in the myelin-style algorithm [-myelin-style]",
+ desc='use the old bug in the myelin-style algorithm [-myelin-style]',
xor=_ribbon_constrained,
)
subvol_select = traits.Int(
- argstr="-subvol-select %d",
- desc="select a single subvolume to map",
+ argstr='-subvol-select %d',
+ desc='select a single subvolume to map',
)
"""\
@@ -521,10 +522,10 @@ class VolumeToSurfaceMappingInputSpec(OpenMPTraitedSpec):
class VolumeToSurfaceMappingOutputSpec(TraitedSpec):
- out_file = File(desc="the output metric file")
- bad_vertices_file = File(desc="the output metric file of vertices that have no data")
- weights_file = File(desc="volume to write the weights to")
- weights_text_file = File(desc="the output text filename")
+ out_file = File(desc='the output metric file')
+ bad_vertices_file = File(desc='the output metric file of vertices that have no data')
+ weights_file = File(desc='volume to write the weights to')
+ weights_text_file = File(desc='the output text filename')
class VolumeToSurfaceMapping(WBCommand, OpenMPCommandMixin):
@@ -585,25 +586,25 @@ class VolumeToSurfaceMapping(WBCommand, OpenMPCommandMixin):
input_spec = VolumeToSurfaceMappingInputSpec
output_spec = VolumeToSurfaceMappingOutputSpec
- _cmd = "wb_command -volume-to-surface-mapping"
+ _cmd = 'wb_command -volume-to-surface-mapping'
def _format_arg(self, opt, spec, val):
if opt in self.input_spec._ribbon_constrained:
- if self.inputs.method != "ribbon-constrained":
- return ""
+ if self.inputs.method != 'ribbon-constrained':
+ return ''
elif opt in self.input_spec._myelin_style:
- if self.inputs.method != "myelin-style":
- return ""
+ if self.inputs.method != 'myelin-style':
+ return ''
return super()._format_arg(opt, spec, val)
def _list_outputs(self):
outputs = super()._list_outputs()
if isdefined(self.inputs.bad_vertices_out):
- outputs["bad_vertices_file"] = os.path.abspath(self.inputs.bad_vertices_out)
+ outputs['bad_vertices_file'] = os.path.abspath(self.inputs.bad_vertices_out)
if isdefined(self.inputs.output_weights):
- outputs["weights_file"] = os.path.abspath(self.inputs.output_weights)
+ outputs['weights_file'] = os.path.abspath(self.inputs.output_weights)
if isdefined(self.inputs.output_weights_text):
- outputs["weights_text_file"] = os.path.abspath(self.inputs.output_weights_text)
+ outputs['weights_text_file'] = os.path.abspath(self.inputs.output_weights_text)
return outputs
@@ -624,36 +625,36 @@ class MetricMaskInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
- argstr="%s",
+ argstr='%s',
position=1,
mandatory=True,
- desc="input metric file",
+ desc='input metric file',
)
mask = File(
exists=True,
- argstr="%s",
+ argstr='%s',
position=2,
mandatory=True,
- desc="mask metric file",
+ desc='mask metric file',
)
out_file = File(
- name_template="%s_masked.func.gii",
- name_source=["in_file"],
+ name_template='%s_masked.func.gii',
+ name_source=['in_file'],
keep_extension=False,
- argstr="%s",
+ argstr='%s',
position=3,
- desc="output metric file",
+ desc='output metric file',
)
column = traits.Either(
traits.Int,
traits.String,
- argstr="-column %s",
- desc="select a single column by number or name",
+ argstr='-column %s',
+ desc='select a single column by number or name',
)
class MetricMaskOutputSpec(TraitedSpec):
- out_file = File(desc="output metric file")
+ out_file = File(desc='output metric file')
class MetricMask(WBCommand):
@@ -671,7 +672,7 @@ class MetricMask(WBCommand):
input_spec = MetricMaskInputSpec
output_spec = MetricMaskOutputSpec
- _cmd = "wb_command -metric-mask"
+ _cmd = 'wb_command -metric-mask'
class MetricFillHolesInputSpec(TraitedSpec):
@@ -692,34 +693,34 @@ class MetricFillHolesInputSpec(TraitedSpec):
surface_file = File(
mandatory=True,
exists=True,
- argstr="%s",
+ argstr='%s',
position=1,
- desc="surface to use for neighbor information",
+ desc='surface to use for neighbor information',
)
metric_file = File(
mandatory=True,
exists=True,
- argstr="%s",
+ argstr='%s',
position=2,
- desc="input ROI metric",
+ desc='input ROI metric',
)
out_file = File(
- name_template="%s_filled.shape.gii",
- name_source="metric_file",
+ name_template='%s_filled.shape.gii',
+ name_source='metric_file',
keep_extension=False,
- argstr="%s",
+ argstr='%s',
position=3,
- desc="output ROI metric",
+ desc='output ROI metric',
)
corrected_areas = File(
exists=True,
- argstr="-corrected-areas %s",
- desc="vertex areas to use instead of computing them from the surface",
+ argstr='-corrected-areas %s',
+ desc='vertex areas to use instead of computing them from the surface',
)
class MetricFillHolesOutputSpec(TraitedSpec):
- out_file = File(desc="output ROI metric")
+ out_file = File(desc='output ROI metric')
class MetricFillHoles(WBCommand):
@@ -738,7 +739,7 @@ class MetricFillHoles(WBCommand):
input_spec = MetricFillHolesInputSpec
output_spec = MetricFillHolesOutputSpec
- _cmd = "wb_command -metric-fill-holes"
+ _cmd = 'wb_command -metric-fill-holes'
class MetricRemoveIslandsInputSpec(TraitedSpec):
@@ -759,34 +760,34 @@ class MetricRemoveIslandsInputSpec(TraitedSpec):
surface_file = File(
mandatory=True,
exists=True,
- argstr="%s",
+ argstr='%s',
position=1,
- desc="surface to use for neighbor information",
+ desc='surface to use for neighbor information',
)
metric_file = File(
mandatory=True,
exists=True,
- argstr="%s",
+ argstr='%s',
position=2,
- desc="input ROI metric",
+ desc='input ROI metric',
)
out_file = File(
- name_template="%s_noislands.shape.gii",
- name_source="metric_file",
+ name_template='%s_noislands.shape.gii',
+ name_source='metric_file',
keep_extension=False,
- argstr="%s",
+ argstr='%s',
position=3,
- desc="output ROI metric",
+ desc='output ROI metric',
)
corrected_areas = File(
exists=True,
- argstr="-corrected-areas %s",
- desc="vertex areas to use instead of computing them from the surface",
+ argstr='-corrected-areas %s',
+ desc='vertex areas to use instead of computing them from the surface',
)
class MetricRemoveIslandsOutputSpec(TraitedSpec):
- out_file = File(desc="output ROI metric")
+ out_file = File(desc='output ROI metric')
class MetricRemoveIslands(WBCommand):
@@ -805,4 +806,4 @@ class MetricRemoveIslands(WBCommand):
input_spec = MetricRemoveIslandsInputSpec
output_spec = MetricRemoveIslandsOutputSpec
- _cmd = "wb_command -metric-remove-islands"
+ _cmd = 'wb_command -metric-remove-islands'
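[Editor's usage sketch for the wb_command wrappers above. The GIFTI file names are
placeholders that must exist locally, since the input traits validate exists=True,
and wb_command must be on $PATH for .run() to execute.]

from niworkflows.interfaces.workbench import MetricRemoveIslands

isl = MetricRemoveIslands(
    surface_file='lh.midthickness.surf.gii',
    metric_file='lh.mask.shape.gii',
)
# Renders roughly: wb_command -metric-remove-islands lh.midthickness.surf.gii \
#   lh.mask.shape.gii lh.mask_noislands.shape.gii
print(isl.cmdline)
# result = isl.run()  # executes wb_command; result.outputs.out_file is the cleaned ROI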
diff --git a/niworkflows/reports/__init__.py b/niworkflows/reports/__init__.py
index 961d5ab1a8b..107316216bc 100644
--- a/niworkflows/reports/__init__.py
+++ b/niworkflows/reports/__init__.py
@@ -3,4 +3,4 @@
from .core import generate_reports
-__all__ = ["generate_reports"]
+__all__ = ['generate_reports']
diff --git a/niworkflows/reports/core.py b/niworkflows/reports/core.py
index 81c021bada7..5a76e8865a1 100644
--- a/niworkflows/reports/core.py
+++ b/niworkflows/reports/core.py
@@ -26,6 +26,7 @@
Generalizes report generation across BIDS-Apps
"""
+
from pathlib import Path
import re
from itertools import compress
@@ -43,7 +44,7 @@
if "Configuration 'figures' already exists" != str(e):
raise
-PLURAL_SUFFIX = defaultdict(str("s").format, [("echo", "es")])
+PLURAL_SUFFIX = defaultdict(str('s').format, [('echo', 'es')])
SVG_SNIPPET = [
"""\
"""
- snippet = '
%s
\n%s\n' % (warning_txt, description)
+ snippet = f'
{warning_txt}
\n{description}\n'
# Store new file and report
img.to_filename(out_fname)
with open(out_report, 'w') as fobj:
@@ -522,10 +522,7 @@ def _run_interface(self, runtime):
img.to_filename(out_fname)
if warning_txt:
- snippet = '