Skip to content

Commit fe302ff

Browse files
committed
Merge branch 'fix/csv'
* fix/csv: (47 commits) added group_id to pipeline names Fixed TesselationSmooth outputs and error reporting. Fixed an unintended comment CSVFibers fix Added more global metrics Fixes for global CSV merging Number/percentage of tracts are now output from CreateMatrix as a .mat file Typo fix Group mrtrix tutorial runs properly Fixed os.path.exists call Spacing fix Uncommented subject lists Fix for std::badalloc, lmax float to int, parcellation subject dir must exist Fix for creatematrix outputs ConcatCSV interface and CreateMatrix output fixes Changed CSV concatenation to use Function interfaces Fix for single subject global network csvs' Improved CSV merging. Single subj and group level fiber csvs Number/percentage of tracts are now output from CreateMatrix as a .mat file Typo fix ...
2 parents de14916 + 9ec34d2 commit fe302ff

File tree

13 files changed

+348
-222
lines changed

13 files changed

+348
-222
lines changed

examples/dmri_connectivity_advanced.py

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,12 +56,13 @@
5656
import nipype.interfaces.cmtk as cmtk
5757
import nipype.interfaces.dipy as dipy
5858
import inspect
59-
import os.path as op # system functions
59+
import os, os.path as op # system functions
6060
from nipype.workflows.dmri.fsl.dti import create_eddy_correct_pipeline
6161
from nipype.workflows.dmri.camino.connectivity_mapping import select_aparc_annot
6262
from nipype.utils.misc import package_check
6363
import warnings
6464
from nipype.workflows.dmri.connectivity.nx import create_networkx_pipeline, create_cmats_to_csv_pipeline
65+
from nipype.workflows.smri.freesurfer import create_tessellation_flow
6566

6667
try:
6768
package_check('cmp')
@@ -82,6 +83,9 @@
8283
fs.FSCommand.set_default_subjects_dir(subjects_dir)
8384
fsl.FSLCommand.set_default_output_type('NIFTI')
8485

86+
fs_dir = os.environ['FREESURFER_HOME']
87+
lookup_file = op.join(fs_dir,'FreeSurferColorLUT.txt')
88+
8589
"""
8690
This needs to point to the fdt folder you can find after extracting
8791
@@ -328,7 +332,7 @@
328332

329333
CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter")
330334
CFFConverter.inputs.script_files = op.abspath(inspect.getfile(inspect.currentframe()))
331-
giftiSurfaces = pe.Node(interface=util.Merge(8), name="GiftiSurfaces")
335+
giftiSurfaces = pe.Node(interface=util.Merge(9), name="GiftiSurfaces")
332336
giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels")
333337
niftiVolumes = pe.Node(interface=util.Merge(3), name="NiftiVolumes")
334338
fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays")
@@ -344,6 +348,9 @@
344348
NxStatsCFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="NxStatsCFFConverter")
345349
NxStatsCFFConverter.inputs.script_files = op.abspath(inspect.getfile(inspect.currentframe()))
346350

351+
tessflow = create_tessellation_flow(name='tessflow', out_format='gii')
352+
tessflow.inputs.inputspec.lookup_file = lookup_file
353+
347354
"""
348355
Connecting the workflow
349356
=======================
@@ -371,6 +378,9 @@
371378
mapping.connect([(inputnode, FreeSurferSourceRH,[("subjects_dir","subjects_dir")])])
372379
mapping.connect([(inputnode, FreeSurferSourceRH,[("subject_id","subject_id")])])
373380

381+
mapping.connect([(inputnode, tessflow,[("subjects_dir","inputspec.subjects_dir")])])
382+
mapping.connect([(inputnode, tessflow,[("subject_id","inputspec.subject_id")])])
383+
374384
mapping.connect([(inputnode, parcellate,[("subjects_dir","subjects_dir")])])
375385
mapping.connect([(inputnode, parcellate,[("subject_id","subject_id")])])
376386
mapping.connect([(parcellate, mri_convert_ROI_scale500,[('roi_file','in_file')])])
@@ -516,6 +526,7 @@
516526
mapping.connect([(mris_convertRHinflated, giftiSurfaces,[("converted","in6")])])
517527
mapping.connect([(mris_convertLHsphere, giftiSurfaces,[("converted","in7")])])
518528
mapping.connect([(mris_convertRHsphere, giftiSurfaces,[("converted","in8")])])
529+
mapping.connect([(tessflow, giftiSurfaces,[("outputspec.meshes","in9")])])
519530

520531
mapping.connect([(mris_convertLHlabels, giftiLabels,[("converted","in1")])])
521532
mapping.connect([(mris_convertRHlabels, giftiLabels,[("converted","in2")])])

examples/dmri_group_connectivity_mrtrix.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -140,12 +140,6 @@
140140

141141
l1pipeline = create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, info)
142142

143-
# This is used to demonstrate the ease through which different parameters can be set for each group.
144-
if group_id == 'parkinsons':
145-
l1pipeline.inputs.connectivity.mapping.threshold_FA.absolute_threshold_value = 0.5
146-
else:
147-
l1pipeline.inputs.connectivity.mapping.threshold_FA.absolute_threshold_value = 0.7
148-
149143
# Here we invert the b-vectors in the Y direction and set the maximum harmonic order of the
150144
# spherical deconvolution step
151145
l1pipeline.inputs.connectivity.mapping.fsl2mrtrix.invert_y = True

nipype/algorithms/misc.py

Lines changed: 42 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -649,32 +649,33 @@ def _list_outputs(self):
649649
return outputs
650650

651651
def merge_csvs(in_list):
    """Load a list of CSV files and stack their numeric contents.

    Each file is parsed with ``np.loadtxt``; files that fail the strict
    parse fall back to progressively more permissive attempts (skip a
    header row, then additionally drop a leading label column inferred
    from the header line).  The per-file arrays are stacked along a
    third axis and squeezed before being returned.

    Parameters
    ----------
    in_list : list of str
        Paths of the CSV files to merge.  Must be non-empty.

    Returns
    -------
    numpy.ndarray
        Squeezed stack of the parsed per-file arrays.
    """
    for idx, in_file in enumerate(in_list):
        try:
            in_array = np.loadtxt(in_file, delimiter=',')
        except ValueError:
            try:
                # First fallback: assume a single header row.
                in_array = np.loadtxt(in_file, delimiter=',', skiprows=1)
            except ValueError:
                # Second fallback: header row plus a leading label
                # column.  Count columns from the header line, then
                # skip the first column.
                first = open(in_file, 'r')
                header_line = first.readline()
                first.close()  # close explicitly instead of leaking the handle
                header_list = header_line.split(',')
                n_cols = len(header_list)
                try:
                    in_array = np.loadtxt(in_file, delimiter=',', skiprows=1,
                                          usecols=range(1, n_cols))
                except ValueError:
                    # Trailing delimiter / empty last column: drop it.
                    in_array = np.loadtxt(in_file, delimiter=',', skiprows=1,
                                          usecols=range(1, n_cols - 1))
        if idx == 0:
            out_array = in_array
        else:
            out_array = np.dstack((out_array, in_array))
    out_array = np.squeeze(out_array)
    iflogger.info('Final output array shape:')
    iflogger.info(np.shape(out_array))
    return out_array
675675

676676
def remove_identical_paths(in_files):
677677
import os.path as op
678+
from nipype.utils.filemanip import split_filename
678679
if len(in_files) > 1:
679680
out_names = list()
680681
commonprefix = op.commonprefix(in_files)
@@ -699,24 +700,27 @@ def maketypelist(rowheadings, shape, extraheadingBool, extraheading):
699700
for idx in range(1,(min(shape)+1)):
700701
typelist.append((str(idx), float))
701702
else:
702-
typelist.append((str(1), float))
703+
for idx in range(1,(shape[0]+1)):
704+
typelist.append((str(idx), float))
703705
if extraheadingBool:
704706
typelist.append((extraheading, 'a40'))
705707
iflogger.info(typelist)
706708
return typelist
707709

708710
def makefmtlist(output_array, typelist, rowheadingsBool, shape, extraheadingBool):
709-
output = np.zeros(max(shape), typelist)
710711
fmtlist = []
711712
if rowheadingsBool:
712713
fmtlist.append('%s')
713714
if len(shape) > 1:
715+
output = np.zeros(max(shape), typelist)
714716
for idx in range(1,min(shape)+1):
715717
output[str(idx)] = output_array[:,idx-1]
716718
fmtlist.append('%f')
717719
else:
718-
output[str(1)] = output_array
719-
fmtlist.append('%f')
720+
output = np.zeros(1, typelist)
721+
for idx in range(1,len(output_array)+1):
722+
output[str(idx)] = output_array[idx-1]
723+
fmtlist.append('%f')
720724
if extraheadingBool:
721725
fmtlist.append('%s')
722726
fmt = ','.join(fmtlist)
@@ -727,6 +731,7 @@ class MergeCSVFilesInputSpec(TraitedSpec):
727731
out_file = File('merged.csv', usedefault=True, desc='Output filename for merged CSV file')
728732
column_headings = traits.List(traits.Str, desc='List of column headings to save in merged CSV file (must be equal to number of input files). If left undefined, these will be pulled from the input filenames.')
729733
row_headings = traits.List(traits.Str, desc='List of row headings to save in merged CSV file (must be equal to number of rows in the input files).')
734+
row_heading_title = traits.Str('label', usedefault=True, desc='Column heading for the row headings added')
730735
extra_column_heading = traits.Str(desc='New heading to add for the added field.')
731736
extra_field = traits.Str(desc='New field to add to each row. This is useful for saving the group or subject ID in the file.')
732737

@@ -756,6 +761,7 @@ class MergeCSVFiles(BaseInterface):
756761

757762
def _run_interface(self, runtime):
758763
extraheadingBool = False
764+
extraheading = ''
759765
rowheadingsBool = False
760766
"""
761767
This block defines the column headings.
@@ -775,14 +781,15 @@ def _run_interface(self, runtime):
775781
extraheading = 'type'
776782
iflogger.info('Extra column heading was not defined. Using "type"')
777783
headings.append(extraheading)
778-
extraheadingBool = True
784+
extraheadingBool = True
779785

780786
if len(self.inputs.in_files) == 1:
781787
iflogger.warn('Only one file input!')
782788

783789
if isdefined(self.inputs.row_headings):
784790
iflogger.info('Row headings have been provided. Adding "labels" column header.')
785-
csv_headings = '"labels","' + '","'.join(itertools.chain(headings)) + '"\n'
791+
prefix = '"{p}","'.format(p=self.inputs.row_heading_title)
792+
csv_headings = prefix + '","'.join(itertools.chain(headings)) + '"\n'
786793
rowheadingsBool = True
787794
else:
788795
iflogger.info('Row headings have not been provided.')
@@ -814,12 +821,16 @@ def _run_interface(self, runtime):
814821
for row_heading in row_heading_list:
815822
row_heading_with_quotes = '"' + row_heading + '"'
816823
row_heading_list_with_quotes.append(row_heading_with_quotes)
817-
row_headings = np.array(row_heading_list_with_quotes)
824+
row_headings = np.array(row_heading_list_with_quotes, dtype='|S40')
818825
output['heading'] = row_headings
819826

820827
if isdefined(self.inputs.extra_field):
821828
extrafieldlist = []
822-
for idx in range(0,max(shape)):
829+
if len(shape) > 1:
830+
mx = shape[0]
831+
else:
832+
mx = 1
833+
for idx in range(0,mx):
823834
extrafieldlist.append(self.inputs.extra_field)
824835
iflogger.info(len(extrafieldlist))
825836
output[extraheading] = extrafieldlist

0 commit comments

Comments
 (0)