 The 2mm version was generated with::

-    >>> from nipype import freesurfer as fs
+    >>> from nipype.interfaces import freesurfer as fs
     >>> rs = fs.Resample()
     >>> rs.inputs.in_file = 'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152.nii.gz'
     >>> rs.inputs.resampled_file = 'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_2mm.nii.gz'
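For readers following the docstring recipe, a runnable sketch of the corrected resampling call is below. Only the import-path fix comes from this commit; the voxel_size setting and run() call are assumptions about the rest of the recipe, and FreeSurfer's mri_convert must be available.

# Sketch of the corrected atlas-resampling recipe; voxel_size and run()
# are assumed for completeness, only the import fix is from this hunk.
from nipype.interfaces import freesurfer as fs

rs = fs.Resample()
rs.inputs.in_file = 'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152.nii.gz'
rs.inputs.resampled_file = 'OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_2mm.nii.gz'
rs.inputs.voxel_size = (2.0, 2.0, 2.0)  # assumed target resolution for the "2mm" atlas
res = rs.run()                          # requires FreeSurfer's mri_convert on the PATH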
@@ -113,7 +113,7 @@ def median(in_files):
     for idx, filename in enumerate(filename_to_list(in_files)):
         img = nb.load(filename)
         data = np.median(img.get_data(), axis=3)
-        if not average:
+        if average is None:
             average = data
         else:
             average = average + data
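Why the identity check matters here: average starts as None but becomes a NumPy array after the first file, and `not average` on a multi-element array raises a ValueError about ambiguous truth values. A minimal sketch of the difference:

import numpy as np

average = None
data = np.zeros((4, 4))

if average is None:        # first pass: average is unset
    average = data

# Second pass: `if not average:` would raise
# "ValueError: The truth value of an array with more than one element is ambiguous",
# while the identity check keeps accumulating correctly.
if average is None:
    average = data
else:
    average = average + data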
@@ -216,30 +216,20 @@ def extract_noise_components(realigned_file, mask_file, num_components=5,
     """
     imgseries = nb.load(realigned_file)
     components = None
-    variance_map = np.var(imgseries.get_data(), axis=3).squeeze()
-    variance_mask = variance_map > np.percentile(variance_map.flatten(), 98)
-    for filename in range(1): #filename_to_list(mask_file):
-        #noise_mask = nb.load(filename)
-        voxel_timecourses = imgseries.get_data()[np.nonzero(variance_mask)]
+    for filename in filename_to_list(mask_file):
+        mask = nb.load(filename)
+        voxel_timecourses = imgseries.get_data()[np.nonzero(mask)]
         voxel_timecourses = voxel_timecourses.byteswap().newbyteorder()
         voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
-        voxel_timecourses = voxel_timecourses - np.mean(voxel_timecourses, axis=1)[:, None]
-        '''
-        from sklearn.decomposition import PCA
-        pca = PCA(n_components=num_components).fit(voxel_timecourses)
+        # remove mean and normalize by variance
+        # voxel_timecourses.shape == [nvoxels, time]
+        X = voxel_timecourses.T
+        X = (X - np.mean(X, axis=0))/np.std(X, axis=0)
+        u, _, _ = sp.linalg.svd(X, full_matrices=False)
         if components is None:
-            components = pca.components_.T
+            components = u[:, :num_components]
         else:
-            components = np.hstack((components, pca.components_.T))
-        '''
-        #from scipy.stats import zscore
-        voxel_timecourses = voxel_timecourses/np.var(voxel_timecourses, axis=1)[:, None]
-        C = voxel_timecourses.T.dot(voxel_timecourses)
-        _, _, v = sp.linalg.svd(C, full_matrices=False)
-        if components is None:
-            components = v[:num_components, :].T
-        else:
-            components = np.hstack((components, v[:num_components, :].T))
+            components = np.hstack((components, u[:, :num_components]))
     if extra_regressors:
         regressors = np.genfromtxt(extra_regressors)
         components = np.hstack((components, regressors))
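This hunk drops the high-variance masking and the commented-out scikit-learn PCA in favour of an SVD over the timecourses of voxels inside the supplied noise masks (a CompCor-style approach). A self-contained sketch of that computation follows; the .get_data() call on the mask image and the single-mask signature are assumptions for illustration, not code from the commit.

# CompCor-style noise components via SVD; mirrors the logic added above,
# but loads the mask array explicitly via .get_data(), which is an assumption here.
import nibabel as nb
import numpy as np
import scipy as sp
import scipy.linalg

def noise_components_sketch(realigned_file, mask_file, num_components=5):
    imgseries = nb.load(realigned_file)
    mask = nb.load(mask_file).get_data()
    # timecourses of in-mask voxels: shape (nvoxels, ntimepoints)
    voxel_timecourses = imgseries.get_data()[np.nonzero(mask)]
    voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
    # put time on axis 0, then standardize each voxel's timecourse
    X = voxel_timecourses.T
    X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)
    # left singular vectors are orthonormal time series ordered by explained variance
    u, _, _ = sp.linalg.svd(X, full_matrices=False)
    return u[:, :num_components]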
@@ -797,6 +787,10 @@ def get_names(files, suffix):
     sample2mni.inputs.reference_image = \
         os.path.abspath('OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz')
     sample2mni.inputs.terminal_output = 'file'
+    sample2mni.inputs.num_threads = 4
+    sample2mni.plugin_args = {'qsub_args':
+                              '-q max10 -l nodes=1:ppn=%d' % sample2mni.inputs.num_threads,
+                              'overwrite': True}
     wf.connect(bandpass, 'out_file', sample2mni, 'input_image')
     wf.connect(merge, 'out', sample2mni, 'transforms')
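The added lines request 4 ANTs threads and, through plugin_args with 'overwrite': True, replace the plugin-level qsub_args for this node only, so the cluster allocation matches the threading. A minimal sketch of the same pattern on a freshly constructed node; the node construction is illustrative, and the 'max10' queue from the hunk is site-specific.

# Per-node scheduler override in nipype; only num_threads/plugin_args mirror
# the commit, the node construction here is illustrative.
import nipype.pipeline.engine as pe
from nipype.interfaces import ants

sample2mni = pe.MapNode(ants.ApplyTransforms(),
                        iterfield=['input_image'],
                        name='sample2mni')
sample2mni.inputs.num_threads = 4
# 'overwrite': True makes these qsub_args replace, rather than extend,
# whatever qsub_args the SGE/PBS execution plugin was started with.
sample2mni.plugin_args = {'qsub_args':
                          '-q max10 -l nodes=1:ppn=%d' % sample2mni.inputs.num_threads,
                          'overwrite': True}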