Skip to content

Commit a078fbe

Browse files
committed
Merge remote-tracking branch 'upstream/master' into enh/MigrateToSetuptools
2 parents 608dc5c + 80c5984 commit a078fbe

19 files changed

+1351
-785
lines changed

codecov.yml

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,17 @@
11
coverage:
22
range: "0...100"
33
status:
4+
patch:
5+
default:
6+
target: 0
7+
threshold: 100
48
project:
59
default:
610
target: 0
711
threshold: 100
8-
patch: false
12+
patch:
13+
target: 0
14+
threshold: 100
915
unittests:
1016
target: 0
1117
threshold: 100

doc/devel/testing_nipype.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ dependencies. Installing software dependencies and downloading the necessary dat
3939
will reduce the number of skip tests.
4040

4141
Some tests in Nipype make use of some images distributed within the `FSL course data
42-
<http://fsl.fmrib.ox.ac.uk/fslcourse/>`_. This reduced version of the package can be downloaded `here
42+
<http://fsl.fmrib.ox.ac.uk/fslcourse/>`_. This reduced version of the package can be downloaded `here
4343
<https://3552243d5be815c1b09152da6525cb8fe7b900a6.googledrive.com/host/0BxI12kyv2olZVUswazA3NkFvOXM/nipype-fsl_course_data.tar.gz>`_.
4444
To enable the tests depending on these data, just unpack the targz file and set the :code:`FSL_COURSE_DATA` environment
4545
variable to point to that folder.

doc/users/grabbing_and_sinking.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -235,7 +235,7 @@ when using multiple nested iterables.
235235
Substitutions
236236
-------------
237237

238-
The `substitutions` and `substitutions_regexp` inputs allow users to modify the
238+
The ``substitutions`` and ``regexp_substitutions`` inputs allow users to modify the
239239
output destination path and name of a file. Substitutions are a list of 2-tuples
240240
and are carried out in the order in which they were entered. Assuming that the
241241
output path of a file is:
Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
2+
from ...testing import assert_equal
3+
from ..confounds import CompCor
4+
5+
6+
def test_CompCor_inputs():
    # Auto-generated spec check: each CompCor input trait must carry
    # exactly the metadata recorded here (yields one assertion per key).
    expected = dict(
        components_file=dict(mandatory=False, usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        mask_file=dict(mandatory=False),
        num_components=dict(usedefault=True),
        realigned_file=dict(mandatory=True),
        regress_poly_degree=dict(usedefault=True),
        use_regress_poly=dict(usedefault=True),
    )
    spec = CompCor.input_spec()

    for trait_name, metadata in expected.items():
        trait = spec.traits()[trait_name]
        for meta_name, meta_value in metadata.items():
            yield assert_equal, getattr(trait, meta_name), meta_value
29+
30+
31+
def test_CompCor_outputs():
    # Auto-generated spec check for CompCor output traits.
    expected = dict(components_file=dict())
    spec = CompCor.output_spec()

    for trait_name, metadata in expected.items():
        trait = spec.traits()[trait_name]
        for meta_name, meta_value in metadata.items():
            yield assert_equal, getattr(trait, meta_name), meta_value
Lines changed: 40 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,40 @@
1+
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
2+
from ...testing import assert_equal
3+
from ..confounds import TCompCor
4+
5+
6+
def test_TCompCor_inputs():
    # Auto-generated spec check: each TCompCor input trait must carry
    # exactly the metadata recorded here (yields one assertion per key).
    expected = dict(
        components_file=dict(mandatory=False, usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        mask_file=dict(mandatory=False),
        num_components=dict(usedefault=True),
        percentile_threshold=dict(usedefault=True),
        realigned_file=dict(mandatory=True),
        regress_poly_degree=dict(usedefault=True),
        use_regress_poly=dict(usedefault=True),
    )
    spec = TCompCor.input_spec()

    for trait_name, metadata in expected.items():
        trait = spec.traits()[trait_name]
        for meta_name, meta_value in metadata.items():
            yield assert_equal, getattr(trait, meta_name), meta_value
31+
32+
33+
def test_TCompCor_outputs():
    # Auto-generated spec check for TCompCor output traits.
    expected = dict(components_file=dict())
    spec = TCompCor.output_spec()

    for trait_name, metadata in expected.items():
        trait = spec.traits()[trait_name]
        for meta_name, meta_value in metadata.items():
            yield assert_equal, getattr(trait, meta_name), meta_value
Lines changed: 13 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,14 @@
11
#!/usr/bin/env python
22
# -*- coding: utf-8 -*-
33
import os
4-
from nipype.testing import (assert_equal, example_data, skipif)
5-
from nipype.algorithms.confounds import FramewiseDisplacement, ComputeDVARS
6-
import numpy as np
74
from tempfile import mkdtemp
85
from shutil import rmtree
96

7+
from nipype.testing import (assert_equal, example_data, skipif, assert_true)
8+
from nipype.algorithms.confounds import FramewiseDisplacement, ComputeDVARS
9+
import numpy as np
10+
11+
1012
nonitime = True
1113
try:
1214
import nitime
@@ -18,11 +20,12 @@
1820
def test_fd():
1921
tempdir = mkdtemp()
2022
ground_truth = np.loadtxt(example_data('fsl_motion_outliers_fd.txt'))
21-
fd = FramewiseDisplacement(in_plots=example_data('fsl_mcflirt_movpar.txt'),
22-
out_file=tempdir + '/fd.txt')
23-
res = fd.run()
24-
yield assert_equal, np.allclose(ground_truth, np.loadtxt(res.outputs.out_file)), True
25-
yield assert_equal, np.abs(ground_truth.mean() - res.outputs.fd_average) < 1e-4, True
23+
fdisplacement = FramewiseDisplacement(in_plots=example_data('fsl_mcflirt_movpar.txt'),
24+
out_file=tempdir + '/fd.txt')
25+
res = fdisplacement.run()
26+
27+
yield assert_true, np.allclose(ground_truth, np.loadtxt(res.outputs.out_file), atol=.16)
28+
yield assert_true, np.abs(ground_truth.mean() - res.outputs.fd_average) < 1e-2
2629
rmtree(tempdir)
2730

2831
@skipif(nonitime)
@@ -31,9 +34,9 @@ def test_dvars():
3134
ground_truth = np.loadtxt(example_data('ds003_sub-01_mc.DVARS'))
3235
dvars = ComputeDVARS(in_file=example_data('ds003_sub-01_mc.nii.gz'),
3336
in_mask=example_data('ds003_sub-01_mc_brainmask.nii.gz'),
34-
save_all = True)
37+
save_all=True)
3538
os.chdir(tempdir)
3639
res = dvars.run()
3740

3841
dv1 = np.loadtxt(res.outputs.out_std)
39-
yield assert_equal, (np.abs(dv1 - ground_truth).sum()/ len(dv1)) < 0.05, True
42+
yield assert_equal, (np.abs(dv1 - ground_truth).sum()/ len(dv1)) < 0.05, True

nipype/interfaces/dipy/reconstruction.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -211,6 +211,7 @@ def _run_interface(self, runtime):
211211
from dipy.reconst.csdeconv import recursive_response, auto_response
212212

213213
img = nb.load(self.inputs.in_file)
214+
imref = nb.four_to_three(img)[0]
214215
affine = img.get_affine()
215216

216217
if isdefined(self.inputs.in_mask):

nipype/interfaces/fsl/base.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
2323
Examples
2424
--------
25-
See the docstrings of the individual classes for examples.
25+
See the docstrings of the individual classes for examples.
2626
2727
"""
2828
from __future__ import print_function, division, unicode_literals, absolute_import

nipype/interfaces/nilearn.py

Lines changed: 141 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,141 @@
1+
# -*- coding: utf-8 -*-
2+
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
3+
# vi: set ft=python sts=4 ts=4 sw=4 et:
4+
'''
5+
Algorithms to compute statistics on :abbr:`fMRI (functional MRI)`
6+
7+
Change directory to provide relative paths for doctests
8+
>>> import os
9+
>>> filepath = os.path.dirname(os.path.realpath(__file__))
10+
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
11+
>>> os.chdir(datadir)
12+
13+
'''
14+
from __future__ import (print_function, division, unicode_literals,
15+
absolute_import)
16+
17+
import numpy as np
18+
import nibabel as nb
19+
20+
from .. import logging
21+
from ..interfaces.base import (traits, TraitedSpec, BaseInterface,
22+
BaseInterfaceInputSpec, File, InputMultiPath)
23+
IFLOG = logging.getLogger('interface')
24+
25+
class SignalExtractionInputSpec(BaseInterfaceInputSpec):
    # Input contract for SignalExtraction: a 4-D functional image plus one
    # or more label images defining the regions whose signals are extracted.
    in_file = File(exists=True, mandatory=True, desc='4-D fMRI nii file')
    label_files = InputMultiPath(File(exists=True), mandatory=True,
                                 desc='a 3-D label image, with 0 denoting '
                                 'background, or a list of 3-D probability '
                                 'maps (one per label) or the equivalent 4D '
                                 'file.')
    class_labels = traits.List(mandatory=True,
                               desc='Human-readable labels for each segment '
                               'in the label file, in order. The length of '
                               'class_labels must be equal to the number of '
                               'segments (background excluded). This list '
                               'corresponds to the class labels in label_file '
                               'in ascending order')
    # Default output filename; written relative to the working directory.
    out_file = File('signals.tsv', usedefault=True, exists=False,
                    mandatory=False, desc='The name of the file to output to. '
                    'signals.tsv by default')
    # NOTE(review): the desc below has an unbalanced parenthesis
    # ("(e.g., for connectivity." never closes) — runtime string left as-is.
    incl_shared_variance = traits.Bool(True, usedefault=True, mandatory=False, desc='By default '
                                       '(True), returns simple time series calculated from each '
                                       'region independently (e.g., for noise regression). If '
                                       'False, returns unique signals for each region, discarding '
                                       'shared variance (e.g., for connectivity. Only has effect '
                                       'with 4D probability maps.')
    include_global = traits.Bool(False, usedefault=True, mandatory=False,
                                 desc='If True, include an extra column '
                                 'labeled "global", with values calculated from the entire brain '
                                 '(instead of just regions).')
    detrend = traits.Bool(False, usedefault=True, mandatory=False,
                          desc='If True, perform detrending using nilearn.')
54+
55+
class SignalExtractionOutputSpec(TraitedSpec):
    # Single output: the tab-separated signals table produced by
    # SignalExtraction._run_interface.
    out_file = File(exists=True, desc='tsv file containing the computed '
                    'signals, with as many columns as there are labels and as '
                    'many rows as there are timepoints in in_file, plus a '
                    'header row with values from class_labels')
60+
61+
class SignalExtraction(BaseInterface):
    '''
    Extracts signals over tissue classes or brain regions

    >>> seinterface = SignalExtraction()
    >>> seinterface.inputs.in_file = 'functional.nii'
    >>> seinterface.inputs.label_files = 'segmentation0.nii.gz'
    >>> seinterface.inputs.out_file = 'means.tsv'
    >>> segments = ['CSF', 'gray', 'white']
    >>> seinterface.inputs.class_labels = segments
    >>> seinterface.inputs.detrend = True
    >>> seinterface.inputs.include_global = True
    '''
    input_spec = SignalExtractionInputSpec
    output_spec = SignalExtractionOutputSpec

    def _run_interface(self, runtime):
        # Build one nilearn masker per label source, extract each region's
        # time series from in_file, and write the stacked table as TSV with
        # class_labels as the header row.
        maskers = self._process_inputs()

        signals = []
        for masker in maskers:
            signals.append(masker.fit_transform(self.inputs.in_file))
        region_signals = np.hstack(signals)

        # Header row (labels) stacked on top of the string-converted signals.
        output = np.vstack((self.inputs.class_labels, region_signals.astype(str)))

        # save output
        np.savetxt(self.inputs.out_file, output, fmt=b'%s', delimiter='\t')
        return runtime

    def _process_inputs(self):
        ''' validate and process inputs into useful form.
        Returns a list of nilearn maskers and the list of corresponding label names.'''
        import nilearn.input_data as nl
        import nilearn.image as nli

        # concat_imgs always yields a 4-D image, even from a single 3-D
        # label file (which then has a singleton 4th axis).
        label_data = nli.concat_imgs(self.inputs.label_files)
        maskers = []

        # determine form of label files, choose appropriate nilearn masker
        if np.amax(label_data.get_data()) > 1:  # 3d label file
            # Integer-labelled atlas: the max voxel value is the label count.
            # NOTE(review): assumes labels are consecutive 1..n — confirm.
            n_labels = np.amax(label_data.get_data())
            maskers.append(nl.NiftiLabelsMasker(label_data))
        else:  # 4d labels
            # One probability map per volume along the 4th axis.
            n_labels = label_data.get_data().shape[3]
            if self.inputs.incl_shared_variance:  # 4d labels, independent computation
                # One masker per map, so each signal is computed independently.
                for img in nli.iter_img(label_data):
                    maskers.append(nl.NiftiMapsMasker(self._4d(img.get_data(), img.affine)))
            else:  # 4d labels, one computation fitting all
                maskers.append(nl.NiftiMapsMasker(label_data))

        # check label list size
        if len(self.inputs.class_labels) != n_labels:
            raise ValueError('The length of class_labels {} does not '
                             'match the number of regions {} found in '
                             'label_files {}'.format(self.inputs.class_labels,
                                                     n_labels,
                                                     self.inputs.label_files))

        if self.inputs.include_global:
            # Binarized union of all regions serves as a whole-brain mask,
            # prepended so the 'global' column comes first in the output.
            global_label_data = label_data.get_data().sum(axis=3)  # sum across all regions
            global_label_data = np.rint(global_label_data).astype(int).clip(0, 1)  # binarize
            global_label_data = self._4d(global_label_data, label_data.affine)
            global_masker = nl.NiftiLabelsMasker(global_label_data, detrend=self.inputs.detrend)
            maskers.insert(0, global_masker)
            # NOTE(review): mutates the class_labels input trait in place;
            # a second run on the same inputs would prepend 'global' again.
            self.inputs.class_labels.insert(0, 'global')

        # Apply the detrend flag uniformly to every masker.
        for masker in maskers:
            masker.set_params(detrend=self.inputs.detrend)

        return maskers

    def _4d(self, array, affine):
        ''' takes a 3-dimensional numpy array and an affine,
        returns the equivalent 4th dimensional nifti file '''
        return nb.Nifti1Image(array[:, :, :, np.newaxis], affine)

    def _list_outputs(self):
        # Report the (already written) signals file as the interface output.
        outputs = self._outputs().get()
        outputs['out_file'] = self.inputs.out_file
        return outputs
Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,53 @@
1+
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
2+
from .....testing import assert_equal
3+
from ..utilities import GenerateEdgeMapImage
4+
5+
6+
def test_GenerateEdgeMapImage_inputs():
    # Auto-generated spec check: each GenerateEdgeMapImage input trait must
    # carry exactly the metadata recorded here (yields one assertion per key).
    expected = dict(
        args=dict(argstr='%s'),
        environ=dict(nohash=True, usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        inputMRVolumes=dict(argstr='--inputMRVolumes %s...'),
        inputMask=dict(argstr='--inputMask %s'),
        lowerPercentileMatching=dict(argstr='--lowerPercentileMatching %f'),
        maximumOutputRange=dict(argstr='--maximumOutputRange %d'),
        minimumOutputRange=dict(argstr='--minimumOutputRange %d'),
        numberOfThreads=dict(argstr='--numberOfThreads %d'),
        outputEdgeMap=dict(argstr='--outputEdgeMap %s', hash_files=False),
        outputMaximumGradientImage=dict(argstr='--outputMaximumGradientImage %s',
                                        hash_files=False),
        terminal_output=dict(nohash=True),
        upperPercentileMatching=dict(argstr='--upperPercentileMatching %f'),
    )
    spec = GenerateEdgeMapImage.input_spec()

    for trait_name, metadata in expected.items():
        trait = spec.traits()[trait_name]
        for meta_name, meta_value in metadata.items():
            yield assert_equal, getattr(trait, meta_name), meta_value
43+
44+
45+
def test_GenerateEdgeMapImage_outputs():
    # Auto-generated spec check for GenerateEdgeMapImage output traits.
    expected = dict(
        outputEdgeMap=dict(),
        outputMaximumGradientImage=dict(),
    )
    spec = GenerateEdgeMapImage.output_spec()

    for trait_name, metadata in expected.items():
        trait = spec.traits()[trait_name]
        for meta_name, meta_value in metadata.items():
            yield assert_equal, getattr(trait, meta_name), meta_value

0 commit comments

Comments
 (0)