
Commit ba0d685

Merge pull request nipreps#474 from oesteban/maint/drop-python2
MAINT: Pacify security patterns found by Codacy
2 parents 53eed17 + f2bf058 commit ba0d685

7 files changed (+81, -92 lines)

niworkflows/interfaces/images.py

Lines changed: 2 additions & 1 deletion
@@ -307,7 +307,8 @@ def _run_interface(self, runtime):
             out_name = fname

         transform = ornt_xfm.dot(conform_xfm)
-        assert np.allclose(orig_img.affine.dot(transform), target_affine)
+        if not np.allclose(orig_img.affine.dot(transform), target_affine):
+            raise ValueError("Original and target affines are not similar")

         mat_name = fname_presuffix(fname, suffix='.mat', newpath=runtime.cwd, use_ext=False)
         np.savetxt(mat_name, transform, fmt='%.08f')
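
A recurring change in this commit is swapping `assert` statements for explicit exceptions. Assertions are stripped when the interpreter runs with optimizations enabled (`python -O`), so they cannot be relied on for runtime validation, which is presumably what Codacy's security patterns flag. A minimal sketch of the pattern, using a hypothetical `validate_transform` helper that is not part of the commit:

import numpy as np

def validate_transform(orig_affine, transform, target_affine):
    """Illustrative only: prefer an explicit exception over `assert`."""
    # assert np.allclose(orig_affine.dot(transform), target_affine)  # silently skipped under `python -O`
    if not np.allclose(orig_affine.dot(transform), target_affine):
        raise ValueError("Original and target affines are not similar")

validate_transform(np.eye(4), np.eye(4), np.eye(4))  # passes silently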

niworkflows/interfaces/itk.py

Lines changed: 4 additions & 1 deletion
@@ -109,7 +109,10 @@ def _run_interface(self, runtime):
         tmp_folder = TemporaryDirectory(prefix='tmp-', dir=runtime.cwd)

         xfms_list = _arrange_xfms(transforms, num_files, tmp_folder)
-        assert len(xfms_list) == num_files
+        if len(xfms_list) != num_files:
+            raise ValueError(
+                "Number of files and entries in the transforms list do not match"
+            )

         # Inputs are ready to run in parallel
         if num_threads < 1:

niworkflows/interfaces/masks.py

Lines changed: 9 additions & 6 deletions
@@ -172,9 +172,10 @@ class ACompCorRPT(nrc.SegmentationRC, confounds.ACompCor):
     def _post_run_hook(self, runtime):
         ''' generates a report showing slices from each axis '''

-        assert len(self.inputs.mask_files) == 1, \
-            "ACompCorRPT only supports a single input mask. " \
-            "A list %s was found." % self.inputs.mask_files
+        if len(self.inputs.mask_files) != 1:
+            raise ValueError(
+                "ACompCorRPT only supports a single input mask. "
+                "A list %s was found." % self.inputs.mask_files)
         self._anat_file = self.inputs.realigned_file
         self._mask_file = self.inputs.mask_files[0]
         self._seg_files = self.inputs.mask_files

@@ -205,9 +206,11 @@ def _post_run_hook(self, runtime):

         high_variance_masks = self.aggregate_outputs(runtime=runtime).high_variance_masks

-        assert not isinstance(high_variance_masks, list), \
-            "TCompCorRPT only supports a single output high variance mask. " \
-            "A list %s was found." % str(high_variance_masks)
+        if isinstance(high_variance_masks, list):
+            raise ValueError(
+                "TCompCorRPT only supports a single output high variance mask. "
+                "A list %s was found." % high_variance_masks
+            )
         self._anat_file = self.inputs.realigned_file
         self._mask_file = high_variance_masks
         self._seg_files = [high_variance_masks]

niworkflows/interfaces/utils.py

Lines changed: 2 additions & 1 deletion
@@ -724,7 +724,8 @@ def _run_interface(self, runtime):
         with open(self.inputs.join_file) as ifh:
             join = ifh.read().splitlines(keepends=False)

-        assert len(data) == len(join)
+        if len(data) != len(join):
+            raise ValueError("Number of columns in datasets do not match")

         merged = []
         for d, j in zip(data, join):

niworkflows/reports/core.py

Lines changed: 1 addition & 1 deletion
@@ -378,7 +378,7 @@ def generate_report(self):

         env = jinja2.Environment(
             loader=jinja2.FileSystemLoader(searchpath=str(self.template_path.parent)),
-            trim_blocks=True, lstrip_blocks=True
+            trim_blocks=True, lstrip_blocks=True, autoescape=False
         )
         report_tpl = env.get_template(self.template_path.name)
         report_render = report_tpl.render(sections=self.sections, errors=self.errors,
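
Here the security pattern concerns Jinja2's autoescaping. Rendering behaviour does not change, because `Environment` already defaults to `autoescape=False`; the setting is just made explicit, presumably because the report embeds pre-generated HTML/SVG reportlets that must not be escaped. A minimal, self-contained sketch under that assumption (a `DictLoader` and made-up template stand in for the `FileSystemLoader` used in the diff):

import jinja2

env = jinja2.Environment(
    loader=jinja2.DictLoader({"report.tpl": "<h1>{{ title }}</h1>{{ body }}"}),  # placeholder template
    trim_blocks=True,       # drop the first newline after a block tag
    lstrip_blocks=True,     # strip leading whitespace before a block tag
    autoescape=False,       # explicit: pre-generated HTML passes through unescaped
)
html = env.get_template("report.tpl").render(title="Summary", body="<p>unescaped markup</p>")
print(html)  # <h1>Summary</h1><p>unescaped markup</p>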

niworkflows/utils/images.py

Lines changed: 24 additions & 17 deletions
@@ -1,3 +1,4 @@
+"""Utilities to manipulate images."""
 import nibabel as nb
 import numpy as np

@@ -72,23 +73,29 @@ def overwrite_header(img, fname):

     ondisk = nb.load(fname, mmap=False)

-    try:
-        assert isinstance(ondisk.header, img.header_class)
-        # Check that the data block should be the same size
-        assert ondisk.get_data_dtype() == img.get_data_dtype()
-        assert img.header.get_data_shape() == ondisk.shape
-        # At the same offset from the start of the file
-        assert img.header['vox_offset'] == ondisk.dataobj.offset
-        # With the same scale factors
-        assert np.allclose(img.header['scl_slope'], ondisk.dataobj.slope, equal_nan=True)
-        assert np.allclose(img.header['scl_inter'], ondisk.dataobj.inter, equal_nan=True)
-    except AssertionError as e:
-        raise ValueError("Cannot write header without compromising data") from e
-    else:
-        data = np.asarray(dataobj.get_unscaled())
-        img._dataobj = data  # Allow old dataobj to be garbage collected
-        del ondisk, img, dataobj  # Drop everything we don't need, to be safe
-        unsafe_write_nifti_header_and_data(fname, header, data)
+    errmsg = "Cannot overwrite header (reason: {}).".format
+    if not isinstance(ondisk.header, img.header_class):
+        raise ValueError(errmsg("inconsistent header objects"))
+
+    if (
+        ondisk.get_data_dtype() != img.get_data_dtype()
+        or img.header.get_data_shape() != ondisk.shape
+    ):
+        raise ValueError(errmsg("data blocks are not the same size"))
+
+    if img.header['vox_offset'] != ondisk.dataobj.offset:
+        raise ValueError(errmsg("change in offset from start of file"))
+
+    if (
+        not np.allclose(img.header['scl_slope'], ondisk.dataobj.slope, equal_nan=True)
+        or not np.allclose(img.header['scl_inter'], ondisk.dataobj.inter, equal_nan=True)
+    ):
+        raise ValueError(errmsg("change in scale factors"))
+
+    data = np.asarray(dataobj.get_unscaled())
+    img._dataobj = data  # Allow old dataobj to be garbage collected
+    del ondisk, img, dataobj  # Drop everything we don't need, to be safe
+    unsafe_write_nifti_header_and_data(fname, header, data)


 def update_header_fields(fname, **kwargs):
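
A small idiom in this hunk: binding `str.format` on a message template yields a callable that fills in the failure reason at each raise site. A standalone sketch of the pattern (the check and names below are illustrative, not taken from the module):

# Binding .format on a template string gives a tiny message factory.
errmsg = "Cannot overwrite header (reason: {}).".format

def check_shape(expected, found):
    # Hypothetical check reusing the bound template at the raise site.
    if expected != found:
        raise ValueError(errmsg("data blocks are not the same size"))

check_shape((64, 64, 32), (64, 64, 32))  # passes
print(errmsg("inconsistent header objects"))
# Cannot overwrite header (reason: inconsistent header objects).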

niworkflows/viz/utils.py

Lines changed: 39 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -1,30 +1,27 @@
11
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
22
# vi: set ft=python sts=4 ts=4 sw=4 et:
33
"""Helper tools for visualization purposes"""
4-
import os.path as op
4+
from pathlib import Path
55
from shutil import which
66
import subprocess
77
import base64
88
import re
9-
from sys import version_info
109
from uuid import uuid4
1110
from io import StringIO
1211

1312
import numpy as np
1413
import nibabel as nb
1514

16-
from lxml import etree
1715
from nilearn import image as nlimage
1816
from nilearn.plotting import plot_anat
19-
from svgutils.transform import SVGFigure
17+
from svgutils.transform import fromstring, SVGFigure, GroupElement
2018
from seaborn import color_palette
2119

2220
from nipype.utils import filemanip
2321
from .. import NIWORKFLOWS_LOG
2422

2523

2624
SVGNS = "http://www.w3.org/2000/svg"
27-
PY3 = version_info[0] > 2
2825

2926

3027
def robust_set_limits(data, plot_params, percentiles=(15, 99.8)):
@@ -229,19 +226,9 @@ def plot_segs(image_nii, seg_niis, out_file, bbox_nii=None, masked=False,
         plot_params['cut_coords'] = cuts[d]
         svg = _plot_anat_with_contours(image_nii, segs=seg_niis, compress=compress,
                                        **plot_params)
-
         # Find and replace the figure_1 id.
-        try:
-            xml_data = etree.fromstring(svg)
-        except etree.XMLSyntaxError as e:
-            NIWORKFLOWS_LOG.info(e)
-            return
-        find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS)
-        find_text(xml_data)[0].set('id', 'segmentation-%s-%s' % (d, uuid4()))
-
-        svg_fig = SVGFigure()
-        svg_fig.root = xml_data
-        out_files.append(svg_fig)
+        svg = svg.replace('figure_1', 'segmentation-%s-%s' % (d, uuid4()), 1)
+        out_files.append(fromstring(svg))

     return out_files
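
The lxml round trip for renaming the `figure_1` group id is replaced by a plain string substitution followed by `svgutils.transform.fromstring`, which parses the SVG once and returns the same `SVGFigure` type as before. A minimal sketch of the new approach on a toy SVG string (the markup below is made up for illustration):

from uuid import uuid4
from svgutils.transform import fromstring

svg = '<svg xmlns="http://www.w3.org/2000/svg"><g id="figure_1"><rect width="10" height="10"/></g></svg>'
# Rename only the first occurrence, mirroring the count=1 argument used in the diff.
svg = svg.replace('figure_1', 'segmentation-z-%s' % uuid4(), 1)
svg_fig = fromstring(svg)  # an svgutils SVGFigure, as before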

@@ -333,17 +320,8 @@ def plot_registration(anat_nii, div_id, plot_params=None,
         display.close()

         # Find and replace the figure_1 id.
-        try:
-            xml_data = etree.fromstring(svg)
-        except etree.XMLSyntaxError as e:
-            NIWORKFLOWS_LOG.info(e)
-            return
-        find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS)
-        find_text(xml_data)[0].set('id', '%s-%s-%s' % (div_id, mode, uuid4()))
-
-        svg_fig = SVGFigure()
-        svg_fig.root = xml_data
-        out_files.append(svg_fig)
+        svg = svg.replace('figure_1', '%s-%s-%s' % (div_id, mode, uuid4()), 1)
+        out_files.append(fromstring(svg))

     return out_files

@@ -353,7 +331,6 @@ def compose_view(bg_svgs, fg_svgs, ref=0, out_file='report.svg'):
     Composes the input svgs into one standalone svg and inserts
     the CSS code for the flickering animation
     """
-    import svgutils.transform as svgt

     if fg_svgs is None:
         fg_svgs = []

@@ -380,7 +357,7 @@ def compose_view(bg_svgs, fg_svgs, ref=0, out_file='report.svg'):

     # Compose the views panel: total size is the width of
     # any element (used the first here) and the sum of heights
-    fig = svgt.SVGFigure(width, heights[:nsvgs].sum())
+    fig = SVGFigure(width, heights[:nsvgs].sum())

     yoffset = 0
     for i, r in enumerate(roots):

@@ -393,21 +370,20 @@ def compose_view(bg_svgs, fg_svgs, ref=0, out_file='report.svg'):
     # Group background and foreground panels in two groups
     if fg_svgs:
         newroots = [
-            svgt.GroupElement(roots[:nsvgs], {'class': 'background-svg'}),
-            svgt.GroupElement(roots[nsvgs:], {'class': 'foreground-svg'})
+            GroupElement(roots[:nsvgs], {'class': 'background-svg'}),
+            GroupElement(roots[nsvgs:], {'class': 'foreground-svg'})
         ]
     else:
         newroots = roots
     fig.append(newroots)
     fig.root.attrib.pop("width")
     fig.root.attrib.pop("height")
     fig.root.set("preserveAspectRatio", "xMidYMid meet")
-    out_file = op.abspath(out_file)
-    fig.save(out_file)
+    out_file = Path(out_file).absolute()
+    fig.save(str(out_file))

     # Post processing
-    with open(out_file, 'r' if PY3 else 'rb') as f:
-        svg = f.read().split('\n')
+    svg = out_file.read_text().splitlines()

     # Remove <?xml... line
     if svg[0].startswith("<?xml"):

@@ -422,9 +398,8 @@ def compose_view(bg_svgs, fg_svgs, ref=0, out_file='report.svg'):
     .foreground-svg:hover { animation-play-state: running;}
     </style>""" % tuple([uuid4()] * 2))

-    with open(out_file, 'w' if PY3 else 'wb') as f:
-        f.write('\n'.join(svg))
-    return out_file
+    out_file.write_text("\n".join(svg))
+    return str(out_file)


 def transform_to_2d(data, max_axis):
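
With Python 2 gone, the `PY3` text/bytes branching around file I/O is no longer needed, and `pathlib` covers both the absolute-path handling and the read/write round trip. A minimal sketch of the new flow (the file name is a placeholder):

from pathlib import Path

out_file = Path("report.svg").absolute()          # placeholder name
out_file.write_text("\n".join(["<svg/>", ""]))    # text mode by default; no mode flags
svg = out_file.read_text().splitlines()           # read the lines back
print(svg[0])                                     # -> <svg/>
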
@@ -459,32 +434,31 @@ def plot_melodic_components(melodic_dir, in_file, tr=None,
     from functional MRI data.

     Parameters
-
-    melodic_dir : str
-        Path pointing to the outputs of MELODIC
-    in_file : str
-        Path pointing to the reference fMRI dataset. This file
-        will be used to extract the TR value, if the ``tr`` argument
-        is not set. This file will be used to calculate a mask
-        if ``report_mask`` is not provided.
-    tr : float
-        Repetition time in seconds
-    out_file : str
-        Path where the resulting SVG file will be stored
-    compress : ``'auto'`` or bool
-        Whether SVG should be compressed. If ``'auto'``, compression
-        will be executed if dependencies are installed (SVGO)
-    report_mask : str
-        Path to a brain mask corresponding to ``in_file``
-    noise_components_file : str
-        A CSV file listing the indexes of components classified as noise
-        by some manual or automated (e.g. ICA-AROMA) procedure. If a
-        ``noise_components_file`` is provided, then components will be
-        plotted with red/green colors (correspondingly to whether they
-        are in the file -noise components, red-, or not -signal, green-).
-        When all or none of the components are in the file, a warning
-        is printed at the top.
-
+    ----------
+    melodic_dir : str
+        Path pointing to the outputs of MELODIC
+    in_file : str
+        Path pointing to the reference fMRI dataset. This file
+        will be used to extract the TR value, if the ``tr`` argument
+        is not set. This file will be used to calculate a mask
+        if ``report_mask`` is not provided.
+    tr : float
+        Repetition time in seconds
+    out_file : str
+        Path where the resulting SVG file will be stored
+    compress : ``'auto'`` or bool
+        Whether SVG should be compressed. If ``'auto'``, compression
+        will be executed if dependencies are installed (SVGO)
+    report_mask : str
+        Path to a brain mask corresponding to ``in_file``
+    noise_components_file : str
+        A CSV file listing the indexes of components classified as noise
+        by some manual or automated (e.g. ICA-AROMA) procedure. If a
+        ``noise_components_file`` is provided, then components will be
+        plotted with red/green colors (correspondingly to whether they
+        are in the file -noise components, red-, or not -signal, green-).
+        When all or none of the components are in the file, a warning
+        is printed at the top.

     """
     from nilearn.image import index_img, iter_img
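
The docstring change restores the dashed underline that numpydoc-style tools expect beneath a section heading; without it, the Parameters block is not recognized as a section. A minimal illustration on a made-up function:

def plot_example(in_file, tr=None):
    """Render a toy report.

    Parameters
    ----------
    in_file : str
        Path to the input dataset (illustrative only).
    tr : float
        Repetition time in seconds.
    """
    return in_file, tr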
