This repository was archived by the owner on Jan 27, 2023. It is now read-only.

Commit 6e6f505

Author: Antonio Ulloa (committed)
Added tables that describe location of LSNM modules within TVB connectome
1 parent 8215aae commit 6e6f505

6 files changed: +196 −5 lines

analysis/compute_BOLD_balloon_66_regions.py

Lines changed: 1 addition & 1 deletion

@@ -82,7 +82,7 @@
from matplotlib import cm as CM

# define the name of the input file where the synaptic activities are stored
-SYN_file = 'synaptic_in_TVB_ROI.npy'
+SYN_file = 'synaptic_in_66_ROIs.npy'

# define the name of the output file where the BOLD timeseries will be stored
BOLD_file = 'bold_balloon_66_regions.npy'
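This rename points the BOLD balloon script at the per-ROI synaptic file written by compute_syn_visual_66_regions.py, which is added below in this same commit. A minimal sketch of loading that file for inspection (the snippet is illustrative and not part of the commit; the shape comment reflects how the new script stacks its eight ROI timeseries):

import numpy as np

# Load the integrated synaptic activity saved by compute_syn_visual_66_regions.py
syn = np.load('synaptic_in_66_ROIs.npy')

# The new script stacks one timeseries per ROI (V1, V4, IT, FS, D1, D2, FR, left IT),
# so the expected shape is (number of ROIs, number of timesteps).
print(syn.shape)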
compute_syn_visual_66_regions.py (new file)

Lines changed: 191 additions & 0 deletions

@@ -0,0 +1,191 @@
# ============================================================================
#
# PUBLIC DOMAIN NOTICE
#
# National Institute on Deafness and Other Communication Disorders
#
# This software/database is a "United States Government Work" under the
# terms of the United States Copyright Act. It was written as part of
# the author's official duties as a United States Government employee and
# thus cannot be copyrighted. This software/database is freely available
# to the public for use. The NIDCD and the U.S. Government have not placed
# any restriction on its use or reproduction.
#
# Although all reasonable efforts have been taken to ensure the accuracy
# and reliability of the software and data, the NIDCD and the U.S. Government
# do not and cannot warrant the performance or results that may be obtained
# by using this software or data. The NIDCD and the U.S. Government disclaim
# all warranties, express or implied, including warranties of performance,
# merchantability or fitness for any particular purpose.
#
# Please cite the author in any work or product based on this material.
#
# ==========================================================================


# ***************************************************************************
#
# Large-Scale Neural Modeling software (LSNM)
#
# Section on Brain Imaging and Modeling
# Voice, Speech and Language Branch
# National Institute on Deafness and Other Communication Disorders
# National Institutes of Health
#
# This file (compute_syn_visual_66_regions.py) was created on October 11, 2016.
#
# Author: Antonio Ulloa
#
# Last updated by Antonio Ulloa on October 11, 2016
#
# Based on computer code originally developed by Barry Horwitz et al
# **************************************************************************/

# compute_syn_visual_66_regions.py
#
# Calculate and plot simulated synaptic activities in 33 ROIs (right hemisphere),
# as defined by Hagmann et al, using data from a visual delay-match-to-sample (DMS)
# simulation (or a resting-state simulation of the same duration as the DMS).
# It also saves the synaptic activities for the 33 ROIs (right hemisphere) in a
# python data file (*.npy).
# The data is saved in a numpy array where the columns are the 33 ROIs.

import numpy as np

import matplotlib.pyplot as plt

import matplotlib as mpl

# set matplotlib parameters to produce visually appealing plots
mpl.style.use('ggplot')

# define the name of the output file where the integrated synaptic activity will be stored
syn_file = 'synaptic_in_66_ROIs.npy'

# The following ranges define the location of the nodes within a given ROI in Hagmann's brain.
# They were taken from the excel document:
# "Hagmann's Talairach Coordinates (obtained from TVB).xlsx",
# extracted from The Virtual Brain demo data sets.
# Please note that arrays in Python start from zero, so one needs to account for that
# and shift the indices given by the above document by one location.

# Use 6 nodes within rPCAL including host node 345
v1_loc = range(344, 350)    # Hagmann's brain nodes included within V1 ROI

# Use 6 nodes within rFUS including host node 393
v4_loc = range(390, 396)    # Hagmann's brain nodes included within V4 ROI

# Use 6 nodes within rPARH including host node 413
it_loc = range(412, 418)    # Hagmann's brain nodes included within IT ROI

# Use 6 nodes within rRMF including host node 74
d1_loc = range(73, 79)      # Hagmann's brain nodes included within D1 ROI

# Use 6 nodes within rPTRI including host node 41
d2_loc = range(39, 45)      # Hagmann's brain nodes included within D2 ROI

# Use 6 nodes within rPOPE including host node 47
fs_loc = range(47, 53)      # Hagmann's brain nodes included within FS ROI

# Use 6 nodes within rCMF including host node 125
fr_loc = range(125, 131)    # Hagmann's brain nodes included within FR ROI

# Use 6 nodes within lPARH
lit_loc = range(911, 917)   # Hagmann's brain nodes included within left IT ROI

# Load TVB nodes synaptic activity
tvb_synaptic = np.load("tvb_abs_syn.npy")

# Load TVB host node synaptic activities into separate numpy arrays
tvb_ev1 = tvb_synaptic[:, 0, v1_loc[0]:v1_loc[-1]+1, 0]
tvb_ev4 = tvb_synaptic[:, 0, v4_loc[0]:v4_loc[-1]+1, 0]
tvb_eit = tvb_synaptic[:, 0, it_loc[0]:it_loc[-1]+1, 0]
tvb_ed1 = tvb_synaptic[:, 0, d1_loc[0]:d1_loc[-1]+1, 0]
tvb_ed2 = tvb_synaptic[:, 0, d2_loc[0]:d2_loc[-1]+1, 0]
tvb_efs = tvb_synaptic[:, 0, fs_loc[0]:fs_loc[-1]+1, 0]
tvb_efr = tvb_synaptic[:, 0, fr_loc[0]:fr_loc[-1]+1, 0]
tvb_iv1 = tvb_synaptic[:, 1, v1_loc[0]:v1_loc[-1]+1, 0]
tvb_iv4 = tvb_synaptic[:, 1, v4_loc[0]:v4_loc[-1]+1, 0]
tvb_iit = tvb_synaptic[:, 1, it_loc[0]:it_loc[-1]+1, 0]
tvb_id1 = tvb_synaptic[:, 1, d1_loc[0]:d1_loc[-1]+1, 0]
tvb_id2 = tvb_synaptic[:, 1, d2_loc[0]:d2_loc[-1]+1, 0]
tvb_ifs = tvb_synaptic[:, 1, fs_loc[0]:fs_loc[-1]+1, 0]
tvb_ifr = tvb_synaptic[:, 1, fr_loc[0]:fr_loc[-1]+1, 0]

# now extract synaptic activity in the contralateral IT
tvb_elit = tvb_synaptic[:, 0, lit_loc[0]:lit_loc[-1]+1, 0]
tvb_ilit = tvb_synaptic[:, 1, lit_loc[0]:lit_loc[-1]+1, 0]

# add all units WITHIN each region together across space to calculate
# synaptic activity in EACH brain region
v1_syn = np.sum(tvb_ev1+tvb_iv1, axis=1)
v4_syn = np.sum(tvb_ev4+tvb_iv4, axis=1)
it_syn = np.sum(tvb_eit+tvb_iit, axis=1)
d1_syn = np.sum(tvb_ed1+tvb_id1, axis=1)
d2_syn = np.sum(tvb_ed2+tvb_id2, axis=1)
fs_syn = np.sum(tvb_efs+tvb_ifs, axis=1)
fr_syn = np.sum(tvb_efr+tvb_ifr, axis=1)

# now, add units across space in the contralateral IT
lit_syn = np.sum(tvb_elit + tvb_ilit, axis=1)

# create a numpy array of timeseries
synaptic = np.array([v1_syn, v4_syn, it_syn, fs_syn, d1_syn, d2_syn, fr_syn, lit_syn])

# now, save all synaptic timeseries to a single file
np.save(syn_file, synaptic)

# Extract number of timesteps from one of the matrices
timesteps = v1_syn.shape[0]
print 'Timesteps = ', timesteps

# Construct a numpy array of time points (data points provided in the data file).
# To convert from timesteps to time in seconds: each simulation time-step equals
# 5 milliseconds, but we record only once every 10 time-steps, so each data point
# in the output files represents 50 milliseconds. Thus, we multiply each data point
# index by 50 ms and divide by 1000 to convert to seconds.
#t = np.linspace(0, 659*50./1000., num=660)
t = np.linspace(0, timesteps * 50.0 / 1000., num=timesteps)


# Set up figures to plot synaptic activity
plt.figure()
plt.suptitle('SIMULATED SYNAPTIC ACTIVITY IN V1')
plt.plot(t, v1_syn)
# Set up figures to plot synaptic activity
plt.figure()
plt.suptitle('SIMULATED SYNAPTIC ACTIVITY IN V4')
plt.plot(v4_syn)
# Set up figures to plot synaptic activity
plt.figure()
plt.suptitle('SIMULATED SYNAPTIC ACTIVITY IN IT')
plt.plot(it_syn)
# Set up figures to plot synaptic activity
plt.figure()
plt.suptitle('SIMULATED SYNAPTIC ACTIVITY IN FS')
plt.plot(fs_syn)
# Set up figures to plot synaptic activity
plt.figure()
plt.suptitle('SIMULATED SYNAPTIC ACTIVITY IN D1')
plt.plot(d1_syn)
# Set up figures to plot synaptic activity
plt.figure()
plt.suptitle('SIMULATED SYNAPTIC ACTIVITY IN D2')
plt.plot(d2_syn)
# Set up figures to plot synaptic activity
plt.figure()
plt.suptitle('SIMULATED SYNAPTIC ACTIVITY IN FR')
plt.plot(fr_syn)
# Set up figures to plot synaptic activity
plt.figure()
plt.suptitle('SIMULATED SYNAPTIC ACTIVITY IN LEFT IT')
plt.plot(lit_syn)

# Show the plots on the screen
plt.show()
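The node ranges defined in this new script are the "tables" referred to in the commit message: each LSNM visual-model module is assigned a block of six consecutive nodes in Hagmann's connectome as provided with the TVB demo data. Restated in one place as an illustrative dictionary (the name LSNM_TO_TVB and the dictionary itself are not part of the commit; the ROI labels and ranges are copied verbatim from the script above):

# Illustrative summary only: LSNM module -> (Hagmann/TVB ROI label, 0-based node range)
LSNM_TO_TVB = {
    'V1':      ('rPCAL', range(344, 350)),
    'V4':      ('rFUS',  range(390, 396)),
    'IT':      ('rPARH', range(412, 418)),
    'D1':      ('rRMF',  range(73, 79)),
    'D2':      ('rPTRI', range(39, 45)),
    'FS':      ('rPOPE', range(47, 53)),
    'FR':      ('rCMF',  range(125, 131)),
    'left IT': ('lPARH', range(911, 917)),
}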

analysis/compute_syn_visual_TVB.py

Lines changed: 1 addition & 1 deletion
@@ -105,7 +105,7 @@
lit_loc= range(911, 917) # Hagmann's brain nodes included within left IT ROI

# Load TVB nodes synaptic activity
-tvb_synaptic = np.load("tvb_synaptic.npy")
+tvb_synaptic = np.load("tvb_abs_syn.npy")

# Load TVB host node synaptic activities into separate numpy arrays
tvb_ev1 = tvb_synaptic[:, 0, v1_loc[0]:v1_loc[-1]+1, 0]
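Both scripts in this commit index the loaded array as tvb_synaptic[:, 0, nodes, 0] and tvb_synaptic[:, 1, nodes, 0], and the e*/i* variable names suggest the second axis separates excitatory from inhibitory synaptic activity. A quick sanity check along those lines (the expected layout is inferred from that indexing, not documented in the commit):

import numpy as np

tvb = np.load("tvb_abs_syn.npy")

# Layout inferred from the indexing in the scripts above:
# (timesteps, excitatory/inhibitory, connectome nodes, 1)
print(tvb.shape)
assert tvb.ndim == 4 and tvb.shape[1] >= 2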
2 binary files changed (contents not shown)
Lines changed: 3 additions & 3 deletions

@@ -1,9 +1,9 @@
-* Simulation Start Time: Tue Oct 4 14:46:21 2016
-* Simulation End Time: Tue Oct 4 19:21:31 2016
+* Simulation Start Time: Wed Oct 5 14:17:40 2016
+* Simulation End Time: Thu Oct 6 14:03:51 2016
* Simulation duration (timesteps): 39600
* Model description used: /Users/Antonio/Documents/NEURALBYTES/NIHPROJECT2/lsnm_in_python/visual_model/model.txt
* Weights list used: /Users/Antonio/Documents/NEURALBYTES/NIHPROJECT2/lsnm_in_python/visual_model/subject_12/weightslist.txt
* Neural net used:
* Simulation script used: /Users/Antonio/Documents/NEURALBYTES/NIHPROJECT2/lsnm_in_python/visual_model/script_resting_state_198_seconds.py
* Were weights changed to generate a new subject? NO
-* Was TVB Connectome used in the simulation? NO
+* Was TVB Connectome used in the simulation? YES
