Commit 060d4fe

Author: Josh Warner (Mac) committed
Merge branch 'master' into patch-1
2 parents 46f3249 + 9da6ebb commit 060d4fe

File tree: 7 files changed, +150 -66 lines changed


CHANGES

Lines changed: 3 additions & 0 deletions
@@ -8,6 +8,9 @@ Next release
 * FIX: Utility interface test dir (https://github.com/nipy/nipype/pull/986)
 * FIX: IPython engine directory reset after crash (https://github.com/nipy/nipype/pull/987)
 * ENH: Resting state fMRI example with NiPy realignment and no SPM (https://github.com/nipy/nipype/pull/992)
+* FIX: Corrected Freesurfer SegStats _list_outputs to avoid error if summary_file is
+  undefined (issue #994) (https://github.com/nipy/nipype/pull/996)
+* FIX: OpenfMRI support and FSL 5.0.7 changes (https://github.com/nipy/nipype/pull/1006)
 
 Release 0.10.0 (October 10, 2014)
 ============

bin/nipype_display_crash

Lines changed: 0 additions & 1 deletion
@@ -74,7 +74,6 @@ if __name__ == "__main__":
                         default=None,
                         help='Directory to run the node in' + defstr)
     args = parser.parse_args()
-    print args.debug, args.ipydebug
     debug = 'ipython' if args.ipydebug else args.debug
     if debug == 'ipython':
        import sys

doc/devel/software_using_nipype.rst

Lines changed: 10 additions & 0 deletions
@@ -28,6 +28,11 @@ Forward
 
 `Forward <http://cyclotronresearchcentre.github.io/forward/>`_ is a set of tools simplifying the preparation of accurate electromagnetic head models for EEG forward modeling. It uses Nipype Workflows and Interfaces.
 
+Limbo
+-----
+
+`Limbo <https://github.com/Gilles86/in_limbo>`_ is a toolbox for finding brain regions that are neither significantly active nor inactive, but rather “in limbo”. It was built using custom Nipype Interfaces and Workflows.
+
 Lyman
 -----
 
@@ -38,6 +43,11 @@ Medimsight
 
 `Medimsight <https://www.medimsight.com>`_ is a commercial medical imaging cloud platform. It uses Nipype to interface with various neuroimaging software.
 
+MIA
+---
+
+`MIA <http://mia.sourceforge.net>`_ is a toolkit for gray-scale medical image analysis. It provides Nipype interfaces for easy integration with other software.
+
 Mindboggle
 ----------
 

examples/fmri_ants_openfmri.py

Lines changed: 92 additions & 51 deletions
@@ -14,8 +14,8 @@
 
 from nipype import config
 config.enable_provenance()
-from nipype.external import six
 
+from nipype.external import six
 
 from glob import glob
 import os
@@ -174,8 +174,8 @@ def create_reg_workflow(name='registration'):
     reg.inputs.winsorize_upper_quantile = 0.995
     reg.inputs.args = '--float'
     reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
-    reg.inputs.num_threads = 4
-    reg.plugin_args = {'qsub_args': '-l nodes=1:ppn=4'}
+    reg.inputs.num_threads = 2
+    reg.plugin_args = {'qsub_args': '-pe orte 2'}
     register.connect(stripper, 'out_file', reg, 'moving_image')
     register.connect(inputnode, 'target_image_brain', reg, 'fixed_image')
 
@@ -284,12 +284,13 @@ def get_subjectinfo(subject_id, base_dir, task_id, model_id):
     for idx in range(n_tasks):
         taskidx = np.where(taskinfo[:, 0] == 'task%03d' % (idx + 1))
         conds.append([condition.replace(' ', '_') for condition
-                      in taskinfo[taskidx[0], 2]])
-        files = glob(os.path.join(base_dir,
-                                  subject_id,
-                                  'BOLD',
-                                  'task%03d_run*' % (idx + 1)))
-        run_ids.insert(idx, range(1, len(files) + 1))
+                      in taskinfo[taskidx[0], 2]])  # if 'junk' not in condition])
+        files = sorted(glob(os.path.join(base_dir,
+                                         subject_id,
+                                         'BOLD',
+                                         'task%03d_run*' % (idx + 1))))
+        runs = [int(val[-3:]) for val in files]
+        run_ids.insert(idx, runs)
     TR = np.genfromtxt(os.path.join(base_dir, 'scan_key.txt'))[1]
     return run_ids[task_id - 1], conds[task_id - 1], TR
 
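Note: a minimal sketch of the run-id change above (the dataset path and directory names are invented). Run numbers are now read from the trailing three digits of each sorted task###_run### directory, so a missing run no longer shifts the numbering:

    from glob import glob
    import os

    # hypothetical OpenfMRI layout: sub001/BOLD/task001_run001, task001_run003, ...
    files = sorted(glob(os.path.join('/data/ds001', 'sub001', 'BOLD', 'task001_run*')))
    runs = [int(path[-3:]) for path in files]   # e.g. [1, 3] instead of the old [1, 2]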
@@ -361,25 +362,45 @@ def analyze_openfmri_dataset(data_dir, subject=None, model_id=None,
     """
     Return data components as anat, bold and behav
     """
-
-    datasource = pe.Node(nio.DataGrabber(infields=['subject_id', 'run_id',
+    contrast_file = os.path.join(data_dir, 'models', 'model%03d' % model_id,
+                                 'task_contrasts.txt')
+    has_contrast = os.path.exists(contrast_file)
+    if has_contrast:
+        datasource = pe.Node(nio.DataGrabber(infields=['subject_id', 'run_id',
                                                    'task_id', 'model_id'],
                                           outfields=['anat', 'bold', 'behav',
                                                      'contrasts']),
                          name='datasource')
+    else:
+        datasource = pe.Node(nio.DataGrabber(infields=['subject_id', 'run_id',
+                                                       'task_id', 'model_id'],
+                                             outfields=['anat', 'bold', 'behav']),
+                             name='datasource')
     datasource.inputs.base_directory = data_dir
     datasource.inputs.template = '*'
-    datasource.inputs.field_template = {'anat': '%s/anatomy/highres001.nii.gz',
-                                        'bold': '%s/BOLD/task%03d_r*/bold.nii.gz',
-                                        'behav': ('%s/model/model%03d/onsets/task%03d_'
-                                                  'run%03d/cond*.txt'),
-                                        'contrasts': ('models/model%03d/'
-                                                      'task_contrasts.txt')}
-    datasource.inputs.template_args = {'anat': [['subject_id']],
+
+    if has_contrast:
+        datasource.inputs.field_template = {'anat': '%s/anatomy/highres001.nii.gz',
+                                            'bold': '%s/BOLD/task%03d_r*/bold.nii.gz',
+                                            'behav': ('%s/model/model%03d/onsets/task%03d_'
+                                                      'run%03d/cond*.txt'),
+                                            'contrasts': ('models/model%03d/'
+                                                          'task_contrasts.txt')}
+        datasource.inputs.template_args = {'anat': [['subject_id']],
                                        'bold': [['subject_id', 'task_id']],
                                        'behav': [['subject_id', 'model_id',
                                                   'task_id', 'run_id']],
                                        'contrasts': [['model_id']]}
+    else:
+        datasource.inputs.field_template = {'anat': '%s/anatomy/highres001.nii.gz',
+                                            'bold': '%s/BOLD/task%03d_r*/bold.nii.gz',
+                                            'behav': ('%s/model/model%03d/onsets/task%03d_'
+                                                      'run%03d/cond*.txt')}
+        datasource.inputs.template_args = {'anat': [['subject_id']],
+                                           'bold': [['subject_id', 'task_id']],
+                                           'behav': [['subject_id', 'model_id',
+                                                      'task_id', 'run_id']]}
+
     datasource.inputs.sort_filelist = True
 
     """
@@ -412,9 +433,11 @@ def get_highpass(TR, hpcutoff):
 
     def get_contrasts(contrast_file, task_id, conds):
         import numpy as np
-        contrast_def = np.genfromtxt(contrast_file, dtype=object)
-        if len(contrast_def.shape) == 1:
-            contrast_def = contrast_def[None, :]
+        import os
+        contrast_def = []
+        if os.path.exists(contrast_file):
+            with open(contrast_file, 'rt') as fp:
+                contrast_def.extend([np.array(row.split()) for row in fp.readlines() if row.strip()])
         contrasts = []
         for row in contrast_def:
             if row[0] != 'task%03d' % task_id:
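Note: how the new contrast parsing behaves, as a small sketch (the file name is a placeholder). Plain file I/O replaces np.genfromtxt, so a missing task_contrasts.txt yields an empty contrast list instead of an error:

    import os
    import numpy as np

    contrast_file = 'task_contrasts.txt'              # hypothetical path
    contrast_def = []
    if os.path.exists(contrast_file):
        with open(contrast_file, 'rt') as fp:
            contrast_def.extend(np.array(row.split()) for row in fp if row.strip())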
@@ -448,22 +471,33 @@ def get_contrasts(contrast_file, task_id, conds):
                         name="modelspec")
     modelspec.inputs.input_units = 'secs'
 
-    def check_behav_list(behav):
-        out_behav = []
+    def check_behav_list(behav, run_id, conds):
+        from nipype.external import six
+        import numpy as np
+        num_conds = len(conds)
         if isinstance(behav, six.string_types):
             behav = [behav]
-        for val in behav:
-            if not isinstance(val, list):
-                out_behav.append([val])
-            else:
-                out_behav.append(val)
-        return out_behav
+        behav_array = np.array(behav).flatten()
+        num_elements = behav_array.shape[0]
+        return behav_array.reshape(num_elements/num_conds, num_conds).tolist()
+
+    reshape_behav = pe.Node(niu.Function(input_names=['behav', 'run_id', 'conds'],
+                                         output_names=['behav'],
+                                         function=check_behav_list),
+                            name='reshape_behav')
 
     wf.connect(subjinfo, 'TR', modelspec, 'time_repetition')
-    wf.connect(datasource, ('behav', check_behav_list), modelspec, 'event_files')
+    wf.connect(datasource, 'behav', reshape_behav, 'behav')
+    wf.connect(subjinfo, 'run_id', reshape_behav, 'run_id')
+    wf.connect(subjinfo, 'conds', reshape_behav, 'conds')
+    wf.connect(reshape_behav, 'behav', modelspec, 'event_files')
+
     wf.connect(subjinfo, 'TR', modelfit, 'inputspec.interscan_interval')
     wf.connect(subjinfo, 'conds', contrastgen, 'conds')
-    wf.connect(datasource, 'contrasts', contrastgen, 'contrast_file')
+    if has_contrast:
+        wf.connect(datasource, 'contrasts', contrastgen, 'contrast_file')
+    else:
+        contrastgen.inputs.contrast_file = ''
     wf.connect(infosource, 'task_id', contrastgen, 'task_id')
     wf.connect(contrastgen, 'contrasts', modelfit, 'inputspec.contrasts')
 
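Note: the reshape performed by the new check_behav_list, in isolation (file names invented; // is used because this sketch targets Python 3, while the example script relies on Python 2 integer division). A flat list of onset files is regrouped into one sub-list of num_conds entries per run:

    import numpy as np

    behav = ['run001/cond001.txt', 'run001/cond002.txt',
             'run002/cond001.txt', 'run002/cond002.txt']
    num_conds = 2
    arr = np.array(behav).flatten()
    event_files = arr.reshape(len(arr) // num_conds, num_conds).tolist()
    # [['run001/cond001.txt', 'run001/cond002.txt'],
    #  ['run002/cond001.txt', 'run002/cond002.txt']]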
@@ -487,32 +521,39 @@ def check_behav_list(behav):
     Reorder the copes so that now it combines across runs
     """
 
-    def sort_copes(files):
-        numelements = len(files[0])
-        outfiles = []
-        for i in range(numelements):
-            outfiles.insert(i, [])
-            for j, elements in enumerate(files):
-                outfiles[i].append(elements[i])
-        return outfiles
-
-    def num_copes(files):
-        return len(files)
+    def sort_copes(copes, varcopes, contrasts):
+        import numpy as np
+        if not isinstance(copes, list):
+            copes = [copes]
+            varcopes = [varcopes]
+        num_copes = len(contrasts)
+        n_runs = len(copes)
+        all_copes = np.array(copes).flatten()
+        all_varcopes = np.array(varcopes).flatten()
+        outcopes = all_copes.reshape(len(all_copes)/num_copes, num_copes).T.tolist()
+        outvarcopes = all_varcopes.reshape(len(all_varcopes)/num_copes, num_copes).T.tolist()
+        return outcopes, outvarcopes, n_runs
+
+    cope_sorter = pe.Node(niu.Function(input_names=['copes', 'varcopes',
+                                                    'contrasts'],
+                                       output_names=['copes', 'varcopes',
+                                                     'n_runs'],
+                                       function=sort_copes),
+                          name='cope_sorter')
 
     pickfirst = lambda x: x[0]
 
+    wf.connect(contrastgen, 'contrasts', cope_sorter, 'contrasts')
     wf.connect([(preproc, fixed_fx, [(('outputspec.mask', pickfirst),
                                       'flameo.mask_file')]),
-                (modelfit, fixed_fx, [(('outputspec.copes', sort_copes),
-                                       'inputspec.copes'),
-                                      ('outputspec.dof_file',
+                (modelfit, cope_sorter, [('outputspec.copes', 'copes')]),
+                (modelfit, cope_sorter, [('outputspec.varcopes', 'varcopes')]),
+                (cope_sorter, fixed_fx, [('copes', 'inputspec.copes'),
+                                         ('varcopes', 'inputspec.varcopes'),
+                                         ('n_runs', 'l2model.num_copes')]),
+                (modelfit, fixed_fx, [('outputspec.dof_file',
                                        'inputspec.dof_files'),
-                                      (('outputspec.varcopes',
-                                        sort_copes),
-                                       'inputspec.varcopes'),
-                                      (('outputspec.copes', num_copes),
-                                       'l2model.num_copes'),
-                                      ])
+                                      ])
                 ])
 
     wf.connect(preproc, 'outputspec.mean', registration, 'inputspec.mean_image')
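Note: what the new sort_copes/cope_sorter node does, as a small sketch (file names invented). Per-run lists of cope images are transposed into per-contrast lists, and the run count feeds l2model.num_copes:

    import numpy as np

    copes = [['r1_cope1.nii', 'r1_cope2.nii'],    # run 1
             ['r2_cope1.nii', 'r2_cope2.nii']]    # run 2
    num_contrasts = 2
    flat = np.array(copes).flatten()
    by_contrast = flat.reshape(len(flat) // num_contrasts, num_contrasts).T.tolist()
    # [['r1_cope1.nii', 'r2_cope1.nii'], ['r1_cope2.nii', 'r2_cope2.nii']]
    n_runs = len(copes)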

nipype/interfaces/freesurfer/model.py

Lines changed: 3 additions & 2 deletions
@@ -635,8 +635,9 @@ class SegStats(FSCommand):
 
     def _list_outputs(self):
         outputs = self.output_spec().get()
-        outputs['summary_file'] = os.path.abspath(self.inputs.summary_file)
-        if not isdefined(outputs['summary_file']):
+        if isdefined(self.inputs.summary_file):
+            outputs['summary_file'] = os.path.abspath(self.inputs.summary_file)
+        else:
            outputs['summary_file'] = os.path.join(os.getcwd(), 'summary.stats')
         suffices = dict(avgwf_txt_file='_avgwf.txt', avgwf_file='_avgwf.nii.gz',
                         sf_avg_file='sfavg.txt')
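Note: the guard applied above, extracted into a minimal sketch (the helper function is illustrative, not part of the interface). os.path.abspath is only called when the summary_file trait is actually set; otherwise a default in the working directory is used:

    import os
    from nipype.interfaces.base import isdefined

    def summary_path(summary_file):
        if isdefined(summary_file):
            return os.path.abspath(summary_file)
        return os.path.join(os.getcwd(), 'summary.stats')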

nipype/workflows/fmri/fsl/estimate.py

Lines changed: 4 additions & 1 deletion
@@ -250,7 +250,10 @@ def get_dofvolumes(dof_files, cope_files):
     import nibabel as nb
     import numpy as np
     img = nb.load(cope_files[0])
-    out_data = np.zeros(img.get_shape())
+    if len(img.get_shape()) > 3:
+        out_data = np.zeros(img.get_shape())
+    else:
+        out_data = np.zeros(list(img.get_shape()) + [1])
     for i in range(out_data.shape[-1]):
         dof = np.loadtxt(dof_files[i])
         out_data[:, :, :, i] = dof
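Note: a toy illustration of the shape guard above. A 3-D cope image gets a singleton fourth dimension so the per-volume DOF loop can still index out_data[..., i]:

    import numpy as np

    shape = (64, 64, 30)                        # e.g. a single 3-D cope image
    if len(shape) > 3:
        out_data = np.zeros(shape)
    else:
        out_data = np.zeros(list(shape) + [1])
    print(out_data.shape)                       # (64, 64, 30, 1)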

nipype/workflows/fmri/fsl/preprocess.py

Lines changed: 38 additions & 11 deletions
@@ -8,6 +8,8 @@
 import nipype.interfaces.freesurfer as fs # freesurfer
 import nipype.interfaces.spm as spm
 
+from nipype import LooseVersion
+
 from ...smri.freesurfer.utils import create_getmask_flow
 
 def getthreshop(thresh):
@@ -415,6 +417,11 @@ def create_featreg_preproc(name='featpreproc', highpass=True, whichvol='middle')
     >>> preproc.run() # doctest: +SKIP
     """
 
+    version = 0
+    if fsl.Info.version() and \
+       LooseVersion(fsl.Info.version()) > LooseVersion('5.0.6'):
+        version = 507
+
     featpreproc = pe.Workflow(name=name)
 
     """
@@ -658,17 +665,6 @@ def create_featreg_preproc(name='featpreproc', highpass=True, whichvol='middle')
 
     featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale, 'op_string')
 
-    """
-    Perform temporal highpass filtering on the data
-    """
-
-    if highpass:
-        highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'),
-                              iterfield=['in_file'],
-                              name='highpass')
-        featpreproc.connect(inputnode, ('highpass', highpass_operand), highpass, 'op_string')
-        featpreproc.connect(meanscale, 'out_file', highpass, 'in_file')
-        featpreproc.connect(highpass, 'out_file', outputnode, 'highpassed_files')
 
     """
     Generate a mean functional image from the first run
@@ -681,6 +677,37 @@ def create_featreg_preproc(name='featpreproc', highpass=True, whichvol='middle')
 
     featpreproc.connect(meanscale, ('out_file', pickfirst), meanfunc3, 'in_file')
     featpreproc.connect(meanfunc3, 'out_file', outputnode, 'mean')
+
+    """
+    Perform temporal highpass filtering on the data
+    """
+
+    if highpass:
+        highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'),
+                              iterfield=['in_file'],
+                              name='highpass')
+        featpreproc.connect(inputnode, ('highpass', highpass_operand), highpass, 'op_string')
+        featpreproc.connect(meanscale, 'out_file', highpass, 'in_file')
+
+        if version < 507:
+            featpreproc.connect(highpass, 'out_file', outputnode, 'highpassed_files')
+        else:
+            """
+            Add back the mean removed by the highpass filter operation as of FSL 5.0.7
+            """
+            meanfunc4 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
+                                                            suffix='_mean'),
+                                   iterfield=['in_file'],
+                                   name='meanfunc4')
+
+            featpreproc.connect(meanscale, 'out_file', meanfunc4, 'in_file')
+            addmean = pe.MapNode(interface=fsl.BinaryMaths(operation='add'),
+                                 iterfield=['in_file', 'operand_file'],
+                                 name='addmean')
+            featpreproc.connect(highpass, 'out_file', addmean, 'in_file')
+            featpreproc.connect(meanfunc4, 'out_file', addmean, 'operand_file')
+            featpreproc.connect(addmean, 'out_file', outputnode, 'highpassed_files')
+
     return featpreproc
 
 
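Note: why the mean is added back, with toy numbers. From FSL 5.0.7 the temporal highpass output is zero-centred, so the workflow computes the temporal mean first (meanfunc4) and restores it after filtering (addmean):

    import numpy as np

    ts = np.array([100., 102., 98., 101.])      # one voxel's intensity-scaled series
    mean = ts.mean()                            # what meanfunc4 computes per voxel
    filtered = ts - mean                        # stand-in for the demeaned highpass output
    highpassed = filtered + mean                # what the addmean node restores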