Skip to content
This repository was archived by the owner on Jan 27, 2023. It is now read-only.

Commit 6783c20

Browse files
author
Antonio Ulloa
committed
Added code to process auditory output data and to visualize connectivity matrix of LSNM models
1 parent c5fb9ae commit 6783c20

File tree

105 files changed

+16346
-41
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

105 files changed

+16346
-41
lines changed

analysis/compute_BOLD_balloon_model_auditory.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -75,9 +75,9 @@
7575

7676
# define the name of the input files where the synaptic activities are stored
7777
# we have four files: TC-PSL, Tones-PSL, TC-DMS, Tones-DMS
78-
SYN_file_2 = 'output.TC_PSL/synaptic_in_ROI.npy'
78+
SYN_file_4 = 'output.TC_PSL/synaptic_in_ROI.npy'
7979
SYN_file_3 = 'output.Tones_PSL/synaptic_in_ROI.npy'
80-
SYN_file_4 = 'output.TC_DMS/synaptic_in_ROI.npy'
80+
SYN_file_2 = 'output.TC_DMS/synaptic_in_ROI.npy'
8181
SYN_file_1 = 'output.Tones_DMS/synaptic_in_ROI.npy'
8282

8383
# define the name of the output file where the BOLD timeseries will be stored

analysis/compute_NAI_auditory.py

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,9 +53,23 @@
5353
# total number of trials in simulated experiment
5454
number_of_trials = 12
5555

56+
# define the names of the input files where the MEG timeseries are contained, for four conditions:
57+
# TC-PSL, Tones-PSL, TC-DMS and Tones-DMS
58+
#MEG_TC_PSL_subj = np.array(['subject_4_with_feedback/output.TC_PSL/meg_source_activity.npy',
59+
# 'subject_original_with_feedback/output.TC_PSL/meg_source_activity.npy'])
60+
#MEG_Tones_PSL_subj = np.array(['subject_4_with_feedback/output.Tones_PSL/meg_source_activity.npy',
61+
# 'subject_original_with_feedback/output.Tones_PSL/meg_source_activity.npy'])
62+
#MEG_TC_DMS_subj = np.array(['subject_4_with_feedback/output.TC_DMS/meg_source_activity.npy',
63+
# 'subject_original_with_feedback/output.TC_DMS/meg_source_activity.npy'])
64+
#MEG_Tones_DMS_subj = np.array(['subject_4_with_feedback/output.Tones_DMS/meg_source_activity.npy',
65+
# 'subject_original_with_feedback/output.Tones_DMS/meg_source_activity.npy'])
66+
5667
# name of the input file where MEG source activity is stored
5768
MEG_source_file = 'meg_source_activity.npy'
5869

70+
# name of the output file where the NAI timecourses will be stored
71+
NAI_file = 'NAI_timecourse.npy'
72+
5973
# Load MEG source activity data files
6074
MEG_source_activity = np.load(MEG_source_file)
6175

@@ -120,10 +134,15 @@
120134

121135
print 'Modulation Index:', MI
122136

137+
NAI_timecourse = aud_MEG_source_activity
138+
139+
# now, save all NAI timecourses to a single file
140+
np.save(NAI_file, NAI_timecourse)
141+
123142
# Set up figure to plot MEG source dynamics averaged across trials
124143
fig = plt.figure(1)
125144

126-
plt.suptitle('MEG SOURCE DYNAMICS AVERAGED ACROSS TRIALS')
145+
plt.suptitle('NEURAL ACTIVITY INDEX TIME COURSE AVERAGED ACROSS TRIALS')
127146

128147
# Plot MEG signal
129148
aud_plot_S1 = plt.plot(aud_MEG_source_activity[0], label='S1')

analysis/compute_PSC_auditory.py

Lines changed: 27 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@
8383
num_of_syn_blocks = 1 # we have more synaptic blocks than fmri blocks
8484
# because we get rid of blocks in BOLD timeseries
8585

86-
num_of_subjects = 1
86+
num_of_subjects = 4
8787

8888
scans_removed = 0 # number of scans removed from BOLD computation
8989
synaptic_steps_removed = 0 # number of synaptic steps removed from synaptic
@@ -102,10 +102,25 @@
102102

103103
# define the names of the input files where the BOLD timeseries are contained, for four conditions:
104104
# TC-PSL, Tones-PSL, TC-DMS and Tones-DMS
105-
BOLD_TC_PSL_subj = np.array(['subject_original_with_feedback/output.TC_PSL/lsnm_bold_balloon.npy'])
106-
BOLD_Tones_PSL_subj = np.array(['subject_original_with_feedback/output.Tones_PSL/lsnm_bold_balloon.npy'])
107-
BOLD_TC_DMS_subj = np.array(['subject_original_with_feedback/output.TC_DMS/lsnm_bold_balloon.npy'])
108-
BOLD_Tones_DMS_subj = np.array(['subject_original_with_feedback/output.Tones_DMS/lsnm_bold_balloon.npy'])
105+
BOLD_TC_PSL_subj = np.array(['subject_original_with_feedback/output.TC_PSL/lsnm_bold_balloon.npy',
106+
'subject_4_with_feedback/output.TC_PSL/lsnm_bold_balloon.npy',
107+
'subject_5_with_feedback/output.TC_PSL/lsnm_bold_balloon.npy',
108+
'subject_6_with_feedback/output.TC_PSL/lsnm_bold_balloon.npy'])
109+
110+
BOLD_Tones_PSL_subj = np.array(['subject_original_with_feedback/output.Tones_PSL/lsnm_bold_balloon.npy',
111+
'subject_4_with_feedback/output.Tones_PSL/lsnm_bold_balloon.npy',
112+
'subject_5_with_feedback/output.Tones_PSL/lsnm_bold_balloon.npy',
113+
'subject_6_with_feedback/output.Tones_PSL/lsnm_bold_balloon.npy'])
114+
115+
BOLD_TC_DMS_subj = np.array(['subject_original_with_feedback/output.TC_DMS/lsnm_bold_balloon.npy',
116+
'subject_4_with_feedback/output.TC_DMS/lsnm_bold_balloon.npy',
117+
'subject_5_with_feedback/output.TC_DMS/lsnm_bold_balloon.npy',
118+
'subject_6_with_feedback/output.TC_DMS/lsnm_bold_balloon.npy'])
119+
120+
BOLD_Tones_DMS_subj = np.array(['subject_original_with_feedback/output.Tones_DMS/lsnm_bold_balloon.npy',
121+
'subject_4_with_feedback/output.Tones_DMS/lsnm_bold_balloon.npy',
122+
'subject_5_with_feedback/output.Tones_DMS/lsnm_bold_balloon.npy',
123+
'subject_6_with_feedback/output.Tones_DMS/lsnm_bold_balloon.npy'])
109124

110125
# set matplot lib parameters to produce visually appealing plots
111126
mpl.style.use('ggplot')
@@ -155,10 +170,11 @@
155170
print BOLD_TC_PSL.shape
156171

157172
# Perform time-course normalization by converting each timeseries to percent signal change
158-
# for each subject and for each module (DMS relative to the mean of the PSL condition).
173+
# for each subject and for each module (TC and Tones DMS relative to the mean of the Tones PSL
174+
# condition, as per Husain et al (2004), pp 1711, "Simulating PET and fMRI".
159175
for s in range(0, num_of_subjects):
160176
for m in range(0, num_of_modules):
161-
timecourse_mean = np.mean(BOLD_TC_PSL[s,m])
177+
timecourse_mean = np.mean(BOLD_Tones_DMS[s,m])
162178
BOLD_TC_DMS[s,m] = BOLD_TC_DMS[s,m] / timecourse_mean * 100. - 100.
163179

164180
for s in range(0, num_of_subjects):
@@ -167,14 +183,14 @@
167183
BOLD_Tones_DMS[s,m] = BOLD_Tones_DMS[s,m] / timecourse_mean * 100. - 100.
168184

169185
# now calculate PSC of TC-DMS with respect to Tones-DMS
170-
BOLD_DMS = BOLD_TC_DMS - BOLD_Tones_DMS
186+
BOLD_DMS = BOLD_TC_DMS #- BOLD_Tones_DMS
171187

172188
mean_PSC_dms = np.zeros((num_of_subjects, num_of_modules))
173189

174-
# now, perform a mean of PSCs of chosen scans
190+
# now, perform a mean of PSCs
175191
for s in range(0, num_of_subjects):
176192
for m in range(0, num_of_modules):
177-
mean_PSC_dms[s,m] = np.mean(BOLD_DMS[s,m,8:17])
193+
mean_PSC_dms[s,m] = np.mean(BOLD_DMS[s,m][5:10])
178194

179195
# now, calculate the average PSC across subjects for each brain region and for each condition:
180196
BOLD_a1_dms_avg = np.mean(mean_PSC_dms[:,0])
@@ -303,7 +319,7 @@
303319

304320
fig, ax = plt.subplots()
305321

306-
#ax.set_ylim([0,3.5])
322+
#ax.set_ylim([0,16])
307323

308324
# now, group the values to be plotted by brain module and by task condition
309325

Lines changed: 132 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,132 @@
1+
# ============================================================================
2+
#
3+
# PUBLIC DOMAIN NOTICE
4+
#
5+
# National Institute on Deafness and Other Communication Disorders
6+
#
7+
# This software/database is a "United States Government Work" under the
8+
# terms of the United States Copyright Act. It was written as part of
9+
# the author's official duties as a United States Government employee and
10+
# thus cannot be copyrighted. This software/database is freely available
11+
# to the public for use. The NIDCD and the U.S. Government have not placed
12+
# any restriction on its use or reproduction.
13+
#
14+
# Although all reasonable efforts have been taken to ensure the accuracy
15+
# and reliability of the software and data, the NIDCD and the U.S. Government
16+
# do not and cannot warrant the performance or results that may be obtained
17+
# by using this software or data. The NIDCD and the U.S. Government disclaim
18+
# all warranties, express or implied, including warranties of performance,
19+
# merchantability or fitness for any particular purpose.
20+
#
21+
# Please cite the author in any work or product based on this material.
22+
#
23+
# ==========================================================================
24+
25+
26+
27+
# ***************************************************************************
28+
#
29+
# Large-Scale Neural Modeling software (LSNM)
30+
#
31+
# Section on Brain Imaging and Modeling
32+
# Voice, Speech and Language Branch
33+
# National Institute on Deafness and Other Communication Disorders
34+
# National Institutes of Health
35+
#
36+
# This file (compute_avg_NAI_auditory.py) was created on April 19, 2016.
37+
#
38+
#
39+
# Author: Antonio Ulloa. Last updated by Antonio Ulloa on April 19, 2016
40+
# **************************************************************************/
41+
42+
# compute_avg_NAI_auditory.py
43+
#
44+
# Compute average of Neural Activity Index (NAI) of auditory primary cortex using NAI's for
45+
# a number of subjects (previously stored in a python data file)
46+
47+
import numpy as np
48+
import matplotlib.pyplot as plt
49+
50+
num_of_subjects = 2
51+
NAI_ts_length = 30
52+
53+
# define the names of the input files where the NAI timecourses are contained, for four conditions:
54+
# TC-PSL, Tones-PSL, TC-DMS and Tones-DMS
55+
NAI_TC_PSL_subj = np.array(['subject_4_with_feedback/output.TC_PSL/NAI_timecourse.npy',
56+
'subject_original_with_feedback/output.TC_PSL/NAI_timecourse.npy'])
57+
NAI_Tones_PSL_subj = np.array(['subject_4_with_feedback/output.Tones_PSL/NAI_timecourse.npy',
58+
'subject_original_with_feedback/output.Tones_PSL/NAI_timecourse.npy'])
59+
NAI_TC_DMS_subj = np.array(['subject_4_with_feedback/output.TC_DMS/NAI_timecourse.npy',
60+
'subject_original_with_feedback/output.TC_DMS/NAI_timecourse.npy'])
61+
NAI_Tones_DMS_subj = np.array(['subject_4_with_feedback/output.Tones_DMS/NAI_timecourse.npy',
62+
'subject_original_with_feedback/output.Tones_DMS/NAI_timecourse.npy'])
63+
64+
65+
66+
# Load NAI source activity data files
67+
# open files containing NAI time-series
68+
NAI_TC_PSL = np.zeros((num_of_subjects, 2, NAI_ts_length))
69+
NAI_Tones_PSL = np.zeros((num_of_subjects, 2, NAI_ts_length))
70+
NAI_TC_DMS = np.zeros((num_of_subjects, 2, NAI_ts_length))
71+
NAI_Tones_DMS = np.zeros((num_of_subjects, 2, NAI_ts_length))
72+
for idx in range(0, num_of_subjects):
73+
NAI_TC_PSL[idx] = np.load(NAI_TC_PSL_subj[idx])
74+
NAI_Tones_PSL[idx] = np.load(NAI_Tones_PSL_subj[idx])
75+
NAI_TC_DMS[idx] = np.load(NAI_TC_DMS_subj[idx])
76+
NAI_Tones_DMS[idx] = np.load(NAI_Tones_DMS_subj[idx])
77+
78+
# average NAI across subjects
79+
for idx in range(0, num_of_subjects):
80+
NAI_TC_PSL_avg = np.mean(NAI_TC_PSL, axis=0)
81+
NAI_Tones_PSL_avg = np.mean(NAI_Tones_PSL, axis=0)
82+
NAI_TC_DMS_avg = np.mean(NAI_TC_DMS, axis=0)
83+
NAI_Tones_DMS_avg = np.mean(NAI_Tones_DMS, axis=0)
84+
85+
86+
87+
# plot the average NAI's
88+
89+
# Set up figure to plot MEG source dynamics averaged across trials
90+
fig = plt.figure(1)
91+
92+
plt.suptitle('NAI average timecourse during TC DMS')
93+
94+
# Plot NAI
95+
aud_plot_S1 = plt.plot(NAI_TC_DMS_avg[0], label='S1')
96+
aud_plot_S2 = plt.plot(NAI_TC_DMS_avg[1], label='S2')
97+
98+
plt.legend()
99+
100+
# Set up figure to plot MEG source dynamics averaged across trials
101+
fig = plt.figure(2)
102+
103+
plt.suptitle('NAI average timecourse during Tones DMS')
104+
105+
# Plot NAI
106+
aud_plot_S1 = plt.plot(NAI_Tones_DMS_avg[0], label='S1')
107+
aud_plot_S2 = plt.plot(NAI_Tones_DMS_avg[1], label='S2')
108+
109+
plt.legend()
110+
# Set up figure to plot MEG source dynamics averaged across trials
111+
fig = plt.figure(3)
112+
113+
plt.suptitle('NAI average timecourse during TC PSL')
114+
115+
# Plot NAI
116+
aud_plot_S1 = plt.plot(NAI_TC_PSL_avg[0], label='S1')
117+
aud_plot_S2 = plt.plot(NAI_TC_PSL_avg[1], label='S2')
118+
119+
plt.legend()
120+
# Set up figure to plot MEG source dynamics averaged across trials
121+
fig = plt.figure(4)
122+
123+
plt.suptitle('NAI average timecourse during Tones PSL')
124+
125+
# Plot NAI
126+
aud_plot_S1 = plt.plot(NAI_Tones_PSL_avg[0], label='S1')
127+
aud_plot_S2 = plt.plot(NAI_Tones_PSL_avg[1], label='S2')
128+
129+
plt.legend()
130+
131+
# Show the plot on the screen
132+
plt.show()

analysis/compute_syn_auditory.py

Lines changed: 4 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -116,15 +116,15 @@
116116

117117
# add all units WITHIN each region together across space to calculate
118118
# synaptic activity in EACH brain region
119-
a1_syn = np.sum(ea1u + ea1d + ia1u + ia1d, axis = 1) + np.sum(tvb_ea1+tvb_ia1, axis=1)
120-
a2_syn = np.sum(ea2u + ea2c + ea2d + ia2u + ia2c + ia2d, axis = 1) + np.sum(tvb_ea2+tvb_ia2, axis=1)
121-
st_syn = np.sum(estg + istg, axis = 1) + np.sum(tvb_est+tvb_ist, axis=1)
119+
a1_syn = np.sum(ea1u + ea1d + ia1u + ia1d, axis = 1) #+ np.sum(tvb_ea1+tvb_ia1, axis=1)
120+
a2_syn = np.sum(ea2u + ea2c + ea2d + ia2u + ia2c + ia2d, axis = 1) #+ np.sum(tvb_ea2+tvb_ia2, axis=1)
121+
st_syn = np.sum(estg + istg, axis = 1) #+ np.sum(tvb_est+tvb_ist, axis=1)
122122
d1_syn = np.sum(efd1 + ifd1, axis = 1)
123123
d2_syn = np.sum(efd2 + ifd2, axis = 1)
124124
fs_syn = np.sum(exfs + infs, axis = 1)
125125
fr_syn = np.sum(exfr + infr, axis = 1)
126126

127-
pf_syn = d1_syn + d2_syn + fs_syn + fr_syn + np.sum(tvb_epf+tvb_ipf, axis=1)
127+
pf_syn = d1_syn + d2_syn + fs_syn + fr_syn #+ np.sum(tvb_epf+tvb_ipf, axis=1)
128128

129129
# get rid of the first time point ('zero point') bc it could skew correlations later
130130
#v1_syn[0] = v1_syn[1]
@@ -136,16 +136,6 @@
136136
#fr_syn[0] = fr_syn[1]
137137
#lit_syn[0] = lit_syn[1]
138138

139-
# ...and normalize the synaptic activities of each module (convert to percentage signal change)
140-
#v1_syn = v1_syn / np.mean(v1_syn) * 100. - 100.
141-
#v4_syn = v4_syn / np.mean(v4_syn) * 100. - 100.
142-
#it_syn = it_syn / np.mean(it_syn) * 100. - 100.
143-
#d1_syn = d1_syn / np.mean(d1_syn) * 100. - 100.
144-
#d2_syn = d2_syn / np.mean(d2_syn) * 100. - 100.
145-
#fs_syn = fs_syn / np.mean(fs_syn) * 100. - 100.
146-
#fr_syn = fr_syn / np.mean(fr_syn) * 100. - 100.
147-
#lit_syn= lit_syn/ np.mean(lit_syn)* 100. - 100.
148-
149139
# create a numpy array of timeseries
150140
synaptic = np.array([a1_syn, a2_syn, st_syn, pf_syn])
151141

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
Simulation Start Time: Sun Apr 17 13:21:45 2016
2+
Simulation End Time: Sun Apr 17 15:43:14 2016
Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
1-
Simulation Start Time: Sun Apr 17 10:57:11 2016
2-
Simulation End Time: Sun Apr 17 11:02:08 2016
1+
Simulation Start Time: Sun Apr 17 11:13:38 2016
2+
Simulation End Time: Sun Apr 17 13:07:49 2016
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
Simulation Start Time: Sun Apr 17 16:03:14 2016
2+
Simulation End Time: Sun Apr 17 18:07:24 2016

auditory_model/subject_3_with_feedback/neuralnet.json

Lines changed: 1 addition & 0 deletions
Large diffs are not rendered by default.
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
Simulation Start Time: Sun Apr 17 18:12:33 2016
2+
Simulation End Time: Sun Apr 17 20:20:30 2016

0 commit comments

Comments
 (0)