-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathalzheimerDatabaseWordCounter.py
More file actions
179 lines (143 loc) · 6.27 KB
/
alzheimerDatabaseWordCounter.py
File metadata and controls
179 lines (143 loc) · 6.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
import xml.etree.ElementTree as ET
import re
import csv
import os
import pandas as pd
import numpy as np
import time
import traceback
from tqdm import tqdm
def getWords(text):
    """Tokenize *text* into a lowercase list of words.

    Replaces everything except word characters, whitespace, apostrophes
    and dashes with spaces, lowercases, splits on whitespace, and strips
    a possessive "'s" from the END of each word.  Tokens that become
    empty after stripping (e.g. a bare "'s") are dropped rather than
    counted as words.
    """
    # \d was redundant in the original class ([^\w\d\s'-]) since \d is a
    # subset of \w; the character set matched is unchanged.
    text = re.sub(r"[^\w\s'-]+", ' ', text)
    text = text.lower()
    tokens = text.split()
    # strip possessive "'s" only at the end of a word ("patient's" -> "patient")
    tokens = [re.sub(r"'s$", '', word) for word in tokens]
    # drop tokens emptied by the possessive strip so "" is never counted
    return [word for word in tokens if word]
#get brief_summary and detailed_description
def getSectionsWeWant(filepath):
    """Extract the study text from a ClinicalTrials.gov XML file.

    Looks for the <brief_summary> and <detailed_description> children of
    the document root.  Each is optional, but when present must occur
    exactly once and contain exactly one <textblock>.  Returns the
    concatenated textblock contents, each prefixed with a newline.

    Raises ValueError when the structure violates those expectations or
    when neither section is present.  (The original used assert for
    these checks, which is silently stripped under ``python -O``.)
    """
    root = ET.parse(filepath).getroot()
    text = ""
    for tag in ('brief_summary', 'detailed_description'):
        sections = root.findall(tag)
        if len(sections) > 1:
            raise ValueError("multiple <%s> sections in %s" % (tag, filepath))
        if len(sections) == 0:
            continue
        textblocks = sections[0].findall('textblock')
        if len(textblocks) != 1:
            raise ValueError("expected exactly one <textblock> in <%s> of %s"
                             % (tag, filepath))
        text += "\n" + textblocks[0].text
    if text == "":
        raise ValueError("no usable sections found in %s" % filepath)
    return text
#get a nice word list of the sections we want from an xml file
def preProcessItem(inputItem):
    """Read one study's XML and attach its word list to the item.

    Expects inputItem["NCTNum"] to name a file
    ``search_results/<NCTNum>.xml`` under the current working directory.
    Adds a "words" key (list of lowercase tokens) to *inputItem* in
    place and returns it.

    Any failure is logged with the offending item and full traceback,
    then re-raised so the caller's run aborts rather than silently
    producing partial counts.
    """
    try:
        filepath = os.path.join(os.getcwd(), "search_results",
                                inputItem["NCTNum"] + ".xml")
        text = getSectionsWeWant(filepath)
        inputItem["words"] = getWords(text)
        return inputItem
    except Exception:
        print("error while processing")
        print(inputItem)
        traceback.print_exc()
        # bare raise preserves the original exception and traceback
        # (the original's `raise e` re-raised the bound name instead)
        raise
def main():
    """Count word frequencies in Alzheimer's-trial text, grouped by
    study status and start year, and write one CSV per status to
    ./Output.

    Expects ./SearchResults_1.csv (a ClinicalTrials.gov search export
    with "Start Date", "Status" and "NCT Number" columns) next to this
    script, and one <NCTNum>.xml file per study under ./search_results/.
    """
    startTime = time.time()
    print("Starting")
    # optional whitelist of words to count; None means "count every word"
    wordsWeWant = None#["loneliness","depression","anxiety","distress"] #["the","alzheimer","disease","diseases","patient","patients"]
    if(wordsWeWant is not None):
        wordsWeWant = [wordInList.lower() for wordInList in wordsWeWant] #lowercase
    #change directory to current file path
    abspath = os.path.abspath(__file__)
    dname = os.path.dirname(abspath)
    os.chdir(dname)
    #check for old output and create output folder
    outputDir = os.path.abspath("./Output")
    os.makedirs(outputDir, exist_ok = True)
    #read original csv
    # statuses maps status -> year -> [word dict, number of studies];
    # the word dict maps word -> [mention count, list of per-study percents]
    statuses = dict()
    toGet = []
    inputFile = os.path.abspath("./SearchResults_1.csv")
    inputDF = pd.read_csv(inputFile, engine='python').dropna(how="all")
    for index, row in inputDF.iterrows():
        yearKeyToLookFor = "Start Date"
        year = row[yearKeyToLookFor]
        # a missing date comes through pandas as NaN (a float), not a str
        if(type(year) != str):
            year = "No "+yearKeyToLookFor
        else:
            # dates look like "Month DD, YYYY" — keep the 4-digit year
            year=year[-4:]
        status = row["Status"]
        NCTNum = row["NCT Number"]
        #prepare output
        if status not in statuses:
            statuses[status] = dict()
        if year not in statuses[status]:
            statuses[status][year] = [dict(),0] #[word dict, number of studies for that year]
        #prepare input
        toGet.append({"year":year,
                      "status":status,
                      "NCTNum":NCTNum})
    print("Starting file reads, total current runtime:",time.time()-startTime)
    #read all the xml files and get word lists
    results = [preProcessItem(item) for item in tqdm(toGet,desc= "Reading files and doing some preprocessing", leave=True)]
    print("Starting Finalization:",time.time()-startTime)
    #count statistics
    for study in results:
        # yearArr is the shared [word dict, study count] bucket; mutated in place
        yearArr = statuses[study["status"]][study["year"]]
        yearDict = yearArr[0]
        yearArr[1] = yearArr[1]+1
        wordsFoundInCurrentStudy = set()
        currentWordlist = study["words"]
        for word in currentWordlist:
            #filter only words we want
            if wordsWeWant is not None and (word not in wordsWeWant):
                continue
            #word already found in year
            if word in yearDict:
                yearDict[word][0] = yearDict[word][0]+1
            else:
                #word not already found in block
                yearDict[word] = [1,[]] #[word count, list of percent of word in respective text]
            #percent of word in respective text
            # recorded once per study (guarded by wordsFoundInCurrentStudy),
            # even though the count above increments on every occurrence
            if(word not in wordsFoundInCurrentStudy):
                wordsFoundInCurrentStudy.add(word)
                percentOfWord = 100.0*currentWordlist.count(word)/len(currentWordlist)
                yearDict[word][1].append(percentOfWord)
    #format data
    # each year occupies colsPerYear adjacent columns: word / count / avg %
    colsPerYear = 3
    for statusKey,statusVal in statuses.items():
        #count max words
        maxWords = 0
        for yearVal in statusVal.values():
            maxWords = max(maxWords,len(yearVal[0]))
        # +2 rows: one for the year/study-count header, one for column titles
        shape = (maxWords+2,colsPerYear*len(statusVal))
        output =np.full(shape, "", dtype="object", order='C')
        # numeric years sort ascending; non-numeric ("No Start Date") sort last
        sortedYears = sorted(statusVal.items(), key=lambda item: int(item[0]) if item[0].isdecimal() else 99999999)
        for yearInd,(yearKey,yearValArr) in enumerate(sortedYears):
            yearVal = yearValArr[0]
            #titles
            col = yearInd*colsPerYear
            output[0,col] = yearKey
            output[0,col+1] = "Num studies:"+str(yearValArr[1])
            output[1,col:col+colsPerYear]=["word","number of mentions","Avg percent of mentions per study"]
            #values
            # most-mentioned words first
            sortedWords = sorted(yearVal.items(), key=lambda item: item[1][0],reverse =True)
            for row,(wordKey,wordVal) in enumerate(sortedWords):
                #for average, divide by number of studies in group (not len(wordVal[1]) since wordVal[1] has no entries of 0%)
                avgPercent = str(sum(wordVal[1])/yearValArr[1])
                output[row+2,col:col+colsPerYear]=[wordKey,wordVal[0],avgPercent+"%"]
        #save the excel sheet with name of status
        # utf-8-sig BOM lets Excel open the CSV with correct encoding
        with open(os.path.join(outputDir,statusKey+'.csv'), 'w', newline='', encoding="utf-8-sig") as fp:
            writer = csv.writer(fp, quoting=csv.QUOTE_NONNUMERIC)
            writer.writerows(output.tolist())
    print("Done:",time.time()-startTime)
# run only when executed as a script, not on import
if __name__ == "__main__":
    main()