diff --git a/.circleci/config.yml b/.circleci/config.yml
index c00016612..86d21638d 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -394,6 +394,12 @@ jobs:
/tmp/data/ds005 /tmp/ds005/derivatives participant \
--sloppy --write-graph --mem_mb 4096 \
--nthreads 2 --anat-only -vv
+ - run:
+ name: Clean-up after anatomical run
+ command: |
+ rm -rf /tmp/ds005/work/fmriprep_wf/fsdir*
+ rm -rf /tmp/ds005/work/reportlets
+ rm -rf /tmp/ds005/derivatives/fmriprep
- save_cache:
key: ds005-anat-v14-{{ .Branch }}-{{ epoch }}
paths:
@@ -525,6 +531,12 @@ jobs:
/tmp/data/ds054 /tmp/ds054/derivatives participant \
--fs-no-reconall --sloppy --write-graph \
--mem_mb 4096 --nthreads 2 --anat-only -vv
+ - run:
+ name: Clean-up after anatomical run
+ command: |
+ rm -rf /tmp/ds054/work/fmriprep_wf/fsdir*
+ rm -rf /tmp/ds054/work/reportlets
+ rm -rf /tmp/ds054/derivatives/fmriprep
- save_cache:
key: ds054-anat-v12-{{ .Branch }}-{{ epoch }}
paths:
@@ -643,6 +655,12 @@ jobs:
/tmp/data/ds210 /tmp/ds210/derivatives participant \
--fs-no-reconall --sloppy --write-graph \
--mem_mb 4096 --nthreads 2 --anat-only -vv
+ - run:
+ name: Clean-up after anatomical run
+ command: |
+ rm -rf /tmp/ds210/work/fmriprep_wf/fsdir*
+ rm -rf /tmp/ds210/work/reportlets
+ rm -rf /tmp/ds210/derivatives/fmriprep
- save_cache:
key: ds210-anat-v10-{{ .Branch }}-{{ epoch }}
paths:
diff --git a/docs/workflows.rst b/docs/workflows.rst
index 358b558ff..13b89eb6e 100644
--- a/docs/workflows.rst
+++ b/docs/workflows.rst
@@ -70,17 +70,15 @@ T1w/T2w preprocessing
bids_root='.',
debug=False,
freesurfer=True,
- fs_spaces=['T1w', 'fsnative',
- 'template', 'fsaverage5'],
hires=True,
longitudinal=False,
num_t1w=1,
omp_nthreads=1,
output_dir='.',
+ output_spaces={'MNI152NLin2009cAsym': {'res': 2}},
reportlets_dir='.',
skull_strip_template='MNI152NLin2009cAsym',
skull_strip_fixed_seed=False,
- template='MNI152NLin2009cAsym',
)
The anatomical sub-workflow begins by constructing an average image by
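As the updated signature above shows, the separate ``fs_spaces`` and ``template``
arguments are folded into a single ``output_spaces`` mapping from template
identifiers to modifier dictionaries (e.g. an output resolution). The following is
only an illustrative sketch of how CLI-style space specifiers could be normalized
into that shape; ``parse_output_spaces`` is a hypothetical helper for illustration,
not the parser actually shipped with smriprep/niworkflows::

    def parse_output_spaces(specs):
        """Normalize specifiers like 'MNI152NLin2009cAsym:res-2' (sketch only)."""
        spaces = {}
        for spec in specs:
            name, _, mods = spec.partition(':')
            opts = {}
            for mod in filter(None, mods.split(':')):
                key, _, value = mod.partition('-')
                opts[key] = int(value) if value.isdigit() else value
            spaces[name] = opts
        return spaces

    assert parse_output_spaces(['MNI152NLin2009cAsym:res-2']) == \
        {'MNI152NLin2009cAsym': {'res': 2}}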
diff --git a/fmriprep/__about__.py b/fmriprep/__about__.py
index fd3d2f9f7..445bb4095 100644
--- a/fmriprep/__about__.py
+++ b/fmriprep/__about__.py
@@ -105,9 +105,9 @@
LINKS_REQUIRES = [
'git+https://github.com/poldracklab/niworkflows.git@'
- 'b7d111c8fd36a099c74be5e7671677eedb175533#egg=niworkflows',
+ '076aed98962b10d107c83110c05e42466a89bbc4#egg=niworkflows',
'git+https://github.com/poldracklab/smriprep.git@'
- '423bcc43ab7300177eb3b98da62817b2cad8eb87#egg=smriprep-0.1.0',
+ 'f1cfc37bcdc346549dbf1d037cdade3a3b32d5de#egg=smriprep-0.1.0',
]
TESTS_REQUIRES = [
diff --git a/fmriprep/cli/run.py b/fmriprep/cli/run.py
index 822675282..45f2068e3 100755
--- a/fmriprep/cli/run.py
+++ b/fmriprep/cli/run.py
@@ -260,7 +260,6 @@ def main():
"""Entry point"""
from nipype import logging as nlogging
from multiprocessing import set_start_method, Process, Manager
- from ..viz.reports import generate_reports
from ..utils.bids import write_derivative_description
set_start_method('forkserver')
@@ -384,8 +383,6 @@ def before_send(event, hints):
nlogging.getLogger('nipype.interface').setLevel(log_level)
nlogging.getLogger('nipype.utils').setLevel(log_level)
- errno = 0
-
# Call build_workflow(opts, retval)
with Manager() as mgr:
retval = mgr.dict()
@@ -395,9 +392,9 @@ def before_send(event, hints):
retcode = p.exitcode or retval.get('return_code', 0)
- bids_dir = retval.get('bids_dir')
- output_dir = retval.get('output_dir')
- work_dir = retval.get('work_dir')
+ bids_dir = Path(retval.get('bids_dir'))
+ output_dir = Path(retval.get('output_dir'))
+ work_dir = Path(retval.get('work_dir'))
plugin_settings = retval.get('plugin_settings', None)
subject_list = retval.get('subject_list', None)
fmriprep_wf = retval.get('workflow', None)
@@ -436,32 +433,48 @@ def before_send(event, hints):
sentry_sdk.add_breadcrumb(message='fMRIPrep started', level='info')
sentry_sdk.capture_message('fMRIPrep started', level='info')
+ errno = 1 # Default is error exit unless otherwise set
try:
fmriprep_wf.run(**plugin_settings)
- except RuntimeError as e:
- errno = 1
- if "Workflow did not execute cleanly" not in str(e):
- sentry_sdk.capture_exception(e)
- raise
+ except Exception as e:
+ if not opts.notrack:
+ from ..utils.sentry import process_crashfile
+ crashfolders = [output_dir / 'fmriprep' / 'sub-{}'.format(s) / 'log' / run_uuid
+ for s in subject_list]
+ for crashfolder in crashfolders:
+ for crashfile in crashfolder.glob('crash*.*'):
+ process_crashfile(crashfile)
+
+ if "Workflow did not execute cleanly" not in str(e):
+ sentry_sdk.capture_exception(e)
+ logger.critical('fMRIPrep failed: %s', e)
+ raise
else:
if opts.run_reconall:
from templateflow import api
from niworkflows.utils.misc import _copy_any
dseg_tsv = str(api.get('fsaverage', suffix='dseg', extensions=['.tsv']))
_copy_any(dseg_tsv,
- str(Path(output_dir) / 'fmriprep' / 'desc-aseg_dseg.tsv'))
+ str(output_dir / 'fmriprep' / 'desc-aseg_dseg.tsv'))
_copy_any(dseg_tsv,
- str(Path(output_dir) / 'fmriprep' / 'desc-aparcaseg_dseg.tsv'))
+ str(output_dir / 'fmriprep' / 'desc-aparcaseg_dseg.tsv'))
+ errno = 0
logger.log(25, 'fMRIPrep finished without errors')
+ if not opts.notrack:
+ sentry_sdk.capture_message('fMRIPrep finished without errors',
+ level='info')
finally:
+ from niworkflows.reports import generate_reports
# Generate reports phase
- errno += generate_reports(subject_list, output_dir, work_dir, run_uuid,
- sentry_sdk=sentry_sdk)
- write_derivative_description(bids_dir, str(Path(output_dir) / 'fmriprep'))
+ failed_reports = generate_reports(
+ subject_list, output_dir, work_dir, run_uuid, packagename='fmriprep')
+ write_derivative_description(bids_dir, output_dir / 'fmriprep')
- if not opts.notrack and errno == 0:
- sentry_sdk.capture_message('fMRIPrep finished without errors', level='info')
- sys.exit(int(errno > 0))
+ if failed_reports and not opts.notrack:
+ sentry_sdk.capture_message(
+ 'Report generation failed for %d subjects' % failed_reports,
+ level='error')
+ sys.exit(int((errno + failed_reports) > 0))
def validate_input_dir(exec_env, bids_dir, participant_label):
@@ -574,9 +587,9 @@ def build_workflow(opts, retval):
from nipype import logging, config as ncfg
from niworkflows.utils.bids import collect_participants
+ from niworkflows.reports import generate_reports
from ..__about__ import __version__
from ..workflows.base import init_fmriprep_wf
- from ..viz.reports import generate_reports
logger = logging.getLogger('nipype.workflow')
@@ -734,7 +747,8 @@ def build_workflow(opts, retval):
run_uuid = opts.run_uuid
retval['run_uuid'] = run_uuid
retval['return_code'] = generate_reports(
- subject_list, str(output_dir), str(work_dir), run_uuid)
+ subject_list, output_dir, work_dir, run_uuid,
+ packagename='fmriprep')
return retval
# Build main workflow
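The reporting entry point now comes from niworkflows rather than fmriprep.viz, and
is told which package it is reporting on. A minimal sketch of the assumed call
pattern follows (paths and subject labels are placeholders; the signature and the
use of the return value are taken from the call sites in the hunks above):

    from pathlib import Path
    from niworkflows.reports import generate_reports

    # Subject labels are given without the "sub-" prefix, matching subject_list above.
    failed_reports = generate_reports(
        ['01', '02'],
        Path('/out'),        # derivatives root containing the fmriprep/ tree
        Path('/work'),       # working directory holding the reportlets
        'example-run-uuid',  # run identifier used for per-run log folders
        packagename='fmriprep')
    # The return value counts subjects whose report could not be generated.
    print('Report generation failed for %d subject(s)' % failed_reports)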
diff --git a/fmriprep/utils/sentry.py b/fmriprep/utils/sentry.py
new file mode 100644
index 000000000..6a496c0e2
--- /dev/null
+++ b/fmriprep/utils/sentry.py
@@ -0,0 +1,115 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""Stripped out routines for Sentry"""
+import re
+from niworkflows.utils.misc import read_crashfile
+import sentry_sdk
+
+CHUNK_SIZE = 16384
+# Group common events with pre-specified fingerprints
+KNOWN_ERRORS = {
+ 'permission-denied': [
+ "PermissionError: [Errno 13] Permission denied"
+ ],
+ 'memory-error': [
+ "MemoryError",
+ "Cannot allocate memory",
+ "Return code: 134",
+ ],
+ 'reconall-already-running': [
+ "ERROR: it appears that recon-all is already running"
+ ],
+ 'no-disk-space': [
+ "[Errno 28] No space left on device",
+ "[Errno 122] Disk quota exceeded"
+ ],
+ 'segfault': [
+ "Segmentation Fault",
+ "Segfault",
+ "Return code: 139",
+ ],
+ 'potential-race-condition': [
+ "[Errno 39] Directory not empty",
+ "_unfinished.json",
+ ],
+ 'keyboard-interrupt': [
+ "KeyboardInterrupt",
+ ],
+}
+
+
+def process_crashfile(crashfile):
+ """Parse the contents of a crashfile and submit sentry messages"""
+ crash_info = read_crashfile(str(crashfile))
+ with sentry_sdk.push_scope() as scope:
+ scope.level = 'fatal'
+
+ # Extract node name
+ node_name = crash_info.pop('node').split('.')[-1]
+ scope.set_tag("node_name", node_name)
+
+ # Massage the traceback, extract the gist
+ traceback = crash_info.pop('traceback')
+ # last line is probably most informative summary
+ gist = traceback.splitlines()[-1]
+ exception_text_start = 1
+ for line in traceback.splitlines()[1:]:
+ if not line[0].isspace():
+ break
+ exception_text_start += 1
+
+ exception_text = '\n'.join(
+ traceback.splitlines()[exception_text_start:])
+
+ # Extract inputs, if present
+ inputs = crash_info.pop('inputs', None)
+ if inputs:
+ scope.set_extra('inputs', dict(inputs))
+
+ # Extract any other possible metadata in the crash file
+ for k, v in crash_info.items():
+ strv = list(_chunks(str(v)))
+ if len(strv) == 1:
+ scope.set_extra(k, strv[0])
+ else:
+ for i, chunk in enumerate(strv):
+ scope.set_extra('%s_%02d' % (k, i), chunk)
+
+ fingerprint = ''
+ issue_title = '{}: {}'.format(node_name, gist)
+ for new_fingerprint, error_snippets in KNOWN_ERRORS.items():
+ for error_snippet in error_snippets:
+ if error_snippet in traceback:
+ fingerprint = new_fingerprint
+ issue_title = new_fingerprint
+ break
+ if fingerprint:
+ break
+
+ message = issue_title + '\n\n'
+ message += exception_text[-(8192 - len(message)):]
+ if fingerprint:
+ sentry_sdk.add_breadcrumb(message=fingerprint, level='fatal')
+ else:
+ # remove file paths
+ fingerprint = re.sub(r"(/[^/ ]*)+/?", '', message)
+ # remove words containing numbers
+ fingerprint = re.sub(r"([a-zA-Z]*[0-9]+[a-zA-Z]*)+", '', fingerprint)
+ # adding the return code if it exists
+ for line in message.splitlines():
+ if line.startswith("Return code"):
+ fingerprint += line
+ break
+
+ scope.fingerprint = [fingerprint]
+ sentry_sdk.capture_message(message, 'fatal')
+
+
+def _chunks(string, length=CHUNK_SIZE):
+ """
+ Splits a string into smaller chunks
+ >>> list(_chunks('some longer string.', length=3))
+ ['som', 'e l', 'ong', 'er ', 'str', 'ing', '.']
+ """
+ return (string[i:i + length]
+ for i in range(0, len(string), length))
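A usage sketch for the new module, mirroring the exception handler added to
fmriprep/cli/run.py above (paths are placeholders, and sentry_sdk is assumed to
have been initialized by the CLI before these calls):

    from pathlib import Path
    from fmriprep.utils.sentry import process_crashfile

    # After a failed run, nipype crash files live under the per-subject log folder.
    log_dir = Path('/out/fmriprep/sub-01/log/example-run-uuid')
    for crashfile in log_dir.glob('crash*.*'):
        process_crashfile(crashfile)  # parses the crash file and submits a Sentry message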
diff --git a/fmriprep/viz/__init__.py b/fmriprep/viz/__init__.py
deleted file mode 100644
index 719c96812..000000000
--- a/fmriprep/viz/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
-# vi: set ft=python sts=4 ts=4 sw=4 et:
-"""
-The fmriprep reporting engine for visual assessment
-"""
-from .reports import run_reports, generate_reports
-
-__all__ = ['run_reports', 'generate_reports']
diff --git a/fmriprep/viz/config.json b/fmriprep/viz/config.json
deleted file mode 100644
index 0e3f18511..000000000
--- a/fmriprep/viz/config.json
+++ /dev/null
@@ -1,151 +0,0 @@
-{
-"sections":
-[
- {
- "name": "Summary",
- "reportlets":
- [
- {
- "name": "anat/summary",
- "file_pattern": "anat/.*_summary",
- "raw": true
- }
- ]
- },
- {
- "name": "Anatomical",
- "reportlets":
- [
- {
- "name": "anat/conform",
- "file_pattern": "anat/.*_conform",
- "raw": true
- },
- {
- "name": "anat_preproc/t1_brain_seg",
- "file_pattern": "anat/.*seg_brainmask",
- "title": "Brain mask and brain tissue segmentation of the T1w",
- "description": "This panel shows the template T1-weighted image (if several T1w images were found), with contours delineating the detected brain mask and brain tissue segmentations."
- },
- {
- "name": "anat_preproc/t1_2_mni",
- "file_pattern": "anat/.*t1_2_mni",
- "title": "T1 to MNI registration",
- "description": "Nonlinear mapping of the T1w image into MNI space. Hover on the panel with the mouse to transition between both spaces."
- },
- {
- "name": "anat_preproc/reconall",
- "file_pattern": "anat/.*reconall",
- "title": "Surface reconstruction",
- "description": "Surfaces (white and pial) reconstructed with FreeSurfer (recon-all
) overlaid on the participant's T1w template."
- }
-
- ]
- },
- {
- "name": "Fieldmaps",
- "reportlets":
- [
- {
- "name": "fmap/magnitude_mask",
- "file_pattern": "fmap/.*fmap_mask",
- "title": "Skull stripped magnitude image",
- "description": "Brain extraction of the magnitude image from the fieldmap"
- }
- ]
- },
- {
- "name": "Functional",
- "reportlets":
- [
- {
- "name": "epi/summary",
- "file_pattern": "func/.*_summary",
- "raw": true
- },
- {
- "name": "epi/validation",
- "file_pattern": "func/.*_validation\\.",
- "raw": true
- },
- {
- "name": "epi/fmpa_reg",
- "file_pattern": "func/.*fmap_reg\\.",
- "title": "Fieldmap to EPI registration",
- "description": "Results of affine coregistration between the magnitude image of the fieldmap and the reference EPI image"
- },
- {
- "name": "epi/fmpa_reg",
- "file_pattern": "func/.*fmap_reg_vsm\\.",
- "title": "Fieldmap",
- "description": "Overlaid on the reference EPI image"
- },
- {
- "name": "epi/unwarp",
- "file_pattern": "func/.*_sdc_.*\\.",
- "title": "Susceptibility distortion correction",
- "description": "Results of performing susceptibility distortion correction (SDC) on the EPI"
- },
- {
- "name": "epi/sdc_syn",
- "file_pattern": "func/.*_forcedsyn",
- "title": "Experimental fieldmap-less susceptibility distortion correction",
- "description": "The dataset contained some fieldmap information, but the argument --force-syn
was used. The higher-priority SDC method was used. Here, we show the results of performing SyN-based SDC on the EPI for comparison."
- },
- {
- "name": "epi/rois",
- "file_pattern": "func/.*_rois",
- "title": "ROIs in BOLD space",
- "description": "Brain mask calculated on the BOLD signal (red contour), along with the masks used for a/tCompCor.
The aCompCor mask (magenta contour) is a conservative CSF and white-matter mask for extracting physiological and movement confounds.
The fCompCor mask (blue contour) contains the top 5% most variable voxels within a heavily-eroded brain-mask."
- },
- {
- "name": "epi_mean_t1_registration/flirt",
- "file_pattern": "func/.*_flirtnobbr",
- "title": "EPI to T1 registration",
- "description": "FSL flirt
was used to generate transformations from EPI space to T1 Space - BBR refinement rejected. Note that Nearest Neighbor interpolation is used in the reportlets in order to highlight potential spin-history and other artifacts, whereas final images are resampled using Lanczos interpolation."
- },
- {
- "name": "epi_mean_t1_registration/coreg",
- "file_pattern": "func/.*_coreg",
- "title": "EPI to T1 registration",
- "description": "mri_coreg
(FreeSurfer) was used to generate transformations from EPI space to T1 Space - bbregister
refinement rejected. Note that Nearest Neighbor interpolation is used in the reportlets in order to highlight potential spin-history and other artifacts, whereas final images are resampled using Lanczos interpolation."
- },
- {
- "name": "epi_mean_t1_registration/flirtbbr",
- "file_pattern": "func/.*_flirtbbr",
- "title": "EPI to T1 registration",
- "description": "FSL flirt
was used to generate transformations from EPI-space to T1w-space - The white matter mask calculated with FSL fast
(brain tissue segmentation) was used for BBR. Note that Nearest Neighbor interpolation is used in the reportlets in order to highlight potential spin-history and other artifacts, whereas final images are resampled using Lanczos interpolation."
- },
- {
- "name": "epi_mean_t1_registration/bbregister",
- "file_pattern": "func/.*_bbregister",
- "title": "EPI to T1 registration",
- "description": "bbregister
was used to generate transformations from EPI-space to T1w-space. Note that Nearest Neighbor interpolation is used in the reportlets in order to highlight potential spin-history and other artifacts, whereas final images are resampled using Lanczos interpolation."
- },
- {
- "name": "epi/carpetplot",
- "file_pattern": "func/.*carpetplot",
- "title": "BOLD Summary",
- "description": "Summary statistics are plotted, which may reveal trends or artifacts in the BOLD data. Global signals calculated within the whole-brain (GS), within the white-matter (WM) and within cerebro-spinal fluid (CSF) show the mean BOLD signal in their corresponding masks. DVARS and FD show the standardized DVARS and framewise-displacement measures for each time point.
A carpet plot shows the time series for all voxels within the brain mask. Voxels are grouped into cortical (blue), and subcortical (orange) gray matter, cerebellum (green) and white matter and CSF (red), indicated by the color map on the left-hand side."
- },
- {
- "name": "ica_aroma",
- "file_pattern": "func/.*ica_aroma",
- "title": "ICA AROMA",
- "description": "Maps created with maximum intensity projection (glass brain) with a black brain outline. Right hand side of each map: time series (top in seconds), frequency spectrum (bottom in Hertz). Components classified as signal are plotted in green; noise components in red."
- }
- ]
- },
- {
- "name": "About",
- "reportlets":
- [
- {
- "name": "anat/about",
- "file_pattern": "anat/.*_about",
- "raw": true
- }
- ]
- }
-]
-}
diff --git a/fmriprep/viz/report.tpl b/fmriprep/viz/report.tpl
deleted file mode 100644
index 0272dd10d..000000000
--- a/fmriprep/viz/report.tpl
+++ /dev/null
@@ -1,186 +0,0 @@
[contents of the deleted fmriprep/viz/report.tpl Jinja2 template (186 lines) and of the likewise-deleted fmriprep/viz/reports.py reporting module (the Reportlet/SubReport report classes and the run_reports, generate_reports, order_by_run and generate_name_title helpers) omitted]