# ============================================================================
#
# PUBLIC DOMAIN NOTICE
#
# National Institute on Deafness and Other Communication Disorders
#
# This software/database is a "United States Government Work" under the
# terms of the United States Copyright Act. It was written as part of
# the author's official duties as a United States Government employee and
# thus cannot be copyrighted. This software/database is freely available
# to the public for use. The NIDCD and the U.S. Government have not placed
# any restriction on its use or reproduction.
#
# Although all reasonable efforts have been taken to ensure the accuracy
# and reliability of the software and data, the NIDCD and the U.S. Government
# do not and cannot warrant the performance or results that may be obtained
# by using this software or data. The NIDCD and the U.S. Government disclaim
# all warranties, express or implied, including warranties of performance,
# merchantability or fitness for any particular purpose.
#
# Please cite the author in any work or product based on this material.
#
# ==========================================================================



# ***************************************************************************
#
# Large-Scale Neural Modeling software (LSNM)
#
# Section on Brain Imaging and Modeling
# Voice, Speech and Language Branch
# National Institute on Deafness and Other Communication Disorders
# National Institutes of Health
#
# This file (compute_NAI_MI_auditory.py) was created on March 22, 2016.
#
#
# Author: Antonio Ulloa. Last updated by Antonio Ulloa on March 22, 2016
# **************************************************************************/

# compute_NAI_MI_auditory.py
#
# Compute the Neural Activity Index (NAI) of primary auditory cortex using the MEG
# source activity of A1 and A2 (previously stored in a python data file) and store
# the NAI timecourse in an npy file for later use.
# Also compute the Modulation Index (MI) of each trial and store the resulting
# array of per-trial MIs in an npy file.

import numpy as np
import matplotlib.pyplot as plt

# length of the array that contains the simulated experiment (in synaptic timesteps)
synaptic_length = 960

# total number of trials in the simulated experiment
number_of_trials = 12

# name of the input file where MEG source activity is stored
MEG_source_file = 'meg_source_activity.npy'

# name of the output file where the average NAI timecourses will be stored
# (for a given subject and condition)
NAI_file = 'NAI_timecourse.npy'

# name of the output file where the Modulation Index will be stored (for a given
# subject and condition). The stored array holds one MI per trial.
MI_file = 'modulation_index.npy'

# Load the MEG source activity data file
MEG_source_activity = np.load(MEG_source_file)

# Extract the number of timesteps from the source activity matrix
timesteps = MEG_source_activity.shape[1]

print("\rNumber of timesteps in MEG input file: ", timesteps)
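
# A minimal sanity check on the input layout (an assumption about the file
# format, not part of the original pipeline): the file is expected to hold
# one row per source (A1 and A2), each long enough to cover the experiment.
assert MEG_source_activity.shape[0] == 2, 'expected exactly two source rows (A1, A2)'
assert timesteps >= synaptic_length + 1, 'not enough timesteps to cover the experiment'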

# Extract the MEG source activity of A1 and A2 from the synaptic array
a1_MEG_source = MEG_source_activity[0]
a2_MEG_source = MEG_source_activity[1]

# Combine the A1 and A2 arrays into a single one
aud_MEG_source = a1_MEG_source + a2_MEG_source

# trim the MEG source activity to the experimental window (drop timestep 0 and
# anything past synaptic_length, i.e., timesteps that are not part of the experiment)
aud_MEG_source = aud_MEG_source[1:synaptic_length+1]

# divide the MEG source array into subarrays, each containing exactly one experimental trial
aud_MEG_source_trials = np.split(aud_MEG_source, number_of_trials)
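# with synaptic_length = 960 and number_of_trials = 12, each subarray holds
# 960 / 12 = 80 synaptic timesteps (note that np.split requires an even division)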

# compute average of MEG activities across trials
#a1_MEG_source_avg = np.mean(a1_MEG_source_trials, axis=0)
#a2_MEG_source_avg = np.mean(a2_MEG_source_trials, axis=0)

# also compute average of MEG activities for match and non-match separately, across trials
# to extract match trials, we start with item 0 and extract every other element
# to extract non-match trials, we start with item 1 and extract every other element
#a1_MEG_source_match_avg = np.mean(a1_MEG_source_trials[0::2], axis=0)
#a1_MEG_source_nonmatch_avg = np.mean(a1_MEG_source_trials[1::2], axis=0)
#a2_MEG_source_match_avg = np.mean(a2_MEG_source_trials[0::2], axis=0)
#a2_MEG_source_nonmatch_avg = np.mean(a2_MEG_source_trials[1::2], axis=0)

# combine MEG source activity of A1 and A2 together
#aud_MEG_source_activity = np.sum([a1_MEG_source_avg, a2_MEG_source_avg], axis=0)
#aud_MEG_source_activity_match = np.sum([a1_MEG_source_match_avg, a2_MEG_source_match_avg], axis=0)
#aud_MEG_source_activity_nonmatch = np.sum([a1_MEG_source_nonmatch_avg, a2_MEG_source_nonmatch_avg], axis=0)

#print('\rNumber of timesteps in each trial: ', aud_MEG_source_trials)

# each trial is 80 synaptic timesteps long and is divided up as follows:
#   1-20:  prior to S1 presentation
#   21-30: S1 presentation
#   31-50: delay period
#   51-60: S2 presentation
#   61-80: post-stimulus period
# get rid of the first 10 timesteps and the last 10 timesteps of each trial,
# so that each trial can later be split into S1 and S2 presentations
for trial in range(number_of_trials):
    aud_MEG_source_trials[trial] = aud_MEG_source_trials[trial][10:70]
#aud_MEG_source_activity_match = aud_MEG_source_activity_match[10:70]
#aud_MEG_source_activity_nonmatch = aud_MEG_source_activity_nonmatch[10:70]

# split up each trial into two equal parts to obtain the auditory responses to S1 and
# S2 separately. We therefore end up with NAI-S1 and NAI-S2 segments of 30 timesteps
# each, which means each NAI segment contains:
#   (a) 10 ts prior to S1/S2 onset,
#   (b) 10 ts during S1/S2 presentation, and
#   (c) 10 ts after S1/S2 presentation
for trial in range(number_of_trials):
    aud_MEG_source_trials[trial] = np.split(aud_MEG_source_trials[trial], 2)
#aud_MEG_source_activity_match = np.split(aud_MEG_source_activity_match, 2)
#aud_MEG_source_activity_nonmatch = np.split(aud_MEG_source_activity_nonmatch, 2)
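# each entry of aud_MEG_source_trials is now a list of two 30-timestep arrays:
#   aud_MEG_source_trials[trial][0] is the segment around S1,
#   aud_MEG_source_trials[trial][1] is the segment around S2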

# Now we calculate an AER (Auditory Evoked Response) to S1 and S2:
# (1) Normalize the NAI activity during each stimulus window by subtracting the
#     average pre-S1 baseline (note that both S1 and S2 are normalized against
#     the pre-S1 baseline, i.e., the first 10 timesteps of each trial):
normalized_NAI = np.zeros((number_of_trials, 2, 10))
for trial in range(number_of_trials):
    baseline = np.mean(aud_MEG_source_trials[trial][0][0:10])
    normalized_NAI[trial][0] = aud_MEG_source_trials[trial][0][10:20] - baseline
    normalized_NAI[trial][1] = aud_MEG_source_trials[trial][1][10:20] - baseline
#normalized_NAI_S1_match = aud_MEG_source_activity_match[0][10:20] - np.mean(aud_MEG_source_activity_match[0][0:9])
#normalized_NAI_S2_match = aud_MEG_source_activity_match[1][10:20] - np.mean(aud_MEG_source_activity_match[0][0:9])
#normalized_NAI_S1_nonmatch = aud_MEG_source_activity_nonmatch[0][10:20] - np.mean(aud_MEG_source_activity_nonmatch[0][0:9])
#normalized_NAI_S2_nonmatch = aud_MEG_source_activity_nonmatch[1][10:20] - np.mean(aud_MEG_source_activity_nonmatch[0][0:9])
# (2) Integrate the normalized NAI over the 10-timestep stimulus window
#     (the 350-ms stimulus presentation duration):
integrated_NAI = np.zeros((number_of_trials, 2))
for trial in range(number_of_trials):
    integrated_NAI[trial][0] = np.trapz(normalized_NAI[trial][0])
    integrated_NAI[trial][1] = np.trapz(normalized_NAI[trial][1])
#integrated_NAI_S1_match = np.trapz(normalized_NAI_S1_match)
#integrated_NAI_S2_match = np.trapz(normalized_NAI_S2_match)
#integrated_NAI_S1_nonmatch = np.trapz(normalized_NAI_S1_nonmatch)
#integrated_NAI_S2_nonmatch = np.trapz(normalized_NAI_S2_nonmatch)
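# np.trapz integrates the 10 normalized samples with the trapezoidal rule using
# its default unit spacing, so each AER is in units of NAI activity x timesteps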
# (3) Extract the per-trial AERs for S1 and S2:
AER_S1 = integrated_NAI[:, 0]
AER_S2 = integrated_NAI[:, 1]
#AER_S1_match = integrated_NAI_S1_match
#AER_S2_match = integrated_NAI_S2_match
#AER_S1_nonmatch = integrated_NAI_S1_nonmatch
#AER_S2_nonmatch = integrated_NAI_S2_nonmatch

# Now we calculate the per-trial Modulation Index (MI), the normalized difference
# (in percent) between the AERs evoked by S1 and S2:
MI = ((AER_S1 - AER_S2) / (AER_S1 + AER_S2)) * 100.
#MI_match = ((AER_S1_match - AER_S2_match) / (AER_S1_match + AER_S2_match)) * 100.
#MI_nonmatch = ((AER_S1_nonmatch - AER_S2_nonmatch) / (AER_S1_nonmatch + AER_S2_nonmatch)) * 100.
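# Worked example (illustrative numbers only): AER_S1 = 30.0 and AER_S2 = 15.0
# give MI = ((30 - 15) / (30 + 15)) * 100 = 33.3, i.e., the S2 response is
# suppressed relative to S1; a negative MI means S2 evoked the larger response.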

print('\rModulation Index per trial: ', MI)
#print('\rModulation Index across match trials: ', MI_match)
#print('\rModulation Index across nonmatch trials: ', MI_nonmatch)

# average the NAI timecourses across trials, yielding one 30-timestep timecourse
# for S1 and one for S2
NAI_timecourse = np.mean(np.array(aud_MEG_source_trials), axis=0)

#MI_array = np.array([MI, MI_match, MI_nonmatch])

# now, save the trial-averaged NAI timecourses to a single file
np.save(NAI_file, NAI_timecourse)

# also, save the array of per-trial Modulation Index values
np.save(MI_file, MI)
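# A downstream script could reload these results like so (sketch):
#   NAI = np.load('NAI_timecourse.npy')    # shape (2, 30): S1 and S2 timecourses
#   MI = np.load('modulation_index.npy')   # shape (number_of_trials,)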

# Set up a figure to plot the NAI timecourse averaged across trials
fig = plt.figure(1)

plt.suptitle('NEURAL ACTIVITY INDEX TIME COURSE AVERAGED ACROSS TRIALS')

# Plot the trial-averaged NAI timecourses for S1 and S2
aud_plot_S1 = plt.plot(NAI_timecourse[0], label='S1')
aud_plot_S2 = plt.plot(NAI_timecourse[1], label='S2')

#plt.ylim(75, 120)

plt.legend()

# Show the plot on the screen
plt.show()