-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathparam_file.py
More file actions
239 lines (165 loc) · 9.79 KB
/
param_file.py
File metadata and controls
239 lines (165 loc) · 9.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
####################################
# EXTRACT and create SENTENCES
####################################
# Location of the user-provided (necessary) input files.
NF = '/home/tek/work/OoOM/ndl/necessary_files'
# Top-level directory under which every step of this pipeline writes its output.
TOP = '/home/tek/work/OoOM/ndl/test_location/data_preparation'
# Output directory for this extraction step.
WD_EXTRACT = f"{TOP}/extract/"
# Tagged corpus to extract sentences from (a .txt file).
TAGGED_FILE = f"{NF}/BNC.tagged.supershort"
# Extracted sentences (csv).
RESULTS = f"{WD_EXTRACT}extracted_sentences"
# Original sentences corresponding to the extracted ones (csv).
RESULTS_O_SENS = f"{WD_EXTRACT}o_sens_extracted_sentences"
# Directory that must exist for this step.
EXTRACT_SENTENCES_DIRS = [WD_EXTRACT]
# Files this step reads or creates (possibly in sub-directories).
EXTRACT_SENTENCES_FILES = [TAGGED_FILE, RESULTS]
#--------------------------------------------------------
######################################
# ANNOTATE TENSES
######################################
# Output directory for the tense-annotation step.
WD_ANNOTATE = f"{TOP}/annotate_complex_sentences/"
# Input: sentences produced by the extraction step.
SENTS = RESULTS
# Annotated tenses with infinitives removed and cleaned (csv).
TENSES_ANNOTATED_NOINF_CLEAN = f"{WD_ANNOTATE}tenses_annotated_noinf_clean"
# Directory and files this step needs.
ANNOTATE_DIRS = [WD_ANNOTATE]
ANNOTATE_FILES = [SENTS, TENSES_ANNOTATED_NOINF_CLEAN]
#--------------------------------------------------------
######################################
# PREPARE DATA
######################################
# Output directory for the event-preparation step.
WD_PREPDAT = f"{TOP}/prepare_events/"
### Define file paths
# Input: cleaned tense annotations from the previous step.
TENSES = TENSES_ANNOTATED_NOINF_CLEAN
# One sentence per verb, modals retained (.csv).
TENSES_ONE_SENT_PER_VERB_WITH_MODALS = f"{WD_PREPDAT}annotated_one_sent_per_verb_with_modals"
# Same data, randomised/shuffled (.csv.gz).
TENSES_ONE_SENT_PER_VERB_SHUF_GZ = f"{WD_PREPDAT}annotated_one_sent_per_verb_randomised"
# American-to-British spelling map; create it or obtain it from the repository (.csv).
AE2BE_LIST = f"{NF}/List_AE2BE.csv"
# Infinitive corrections for items with frequency >= 10 (.csv).
INFINITIVE_CORR_LIST = f"{NF}/Infinitive_corrections_freq10"
PREPDAT_DIRS = [WD_PREPDAT]
PREPDAT_FILES = [TENSES, TENSES_ONE_SENT_PER_VERB_WITH_MODALS,
                 TENSES_ONE_SENT_PER_VERB_SHUF_GZ,
                 AE2BE_LIST, INFINITIVE_CORR_LIST]
#--------------------------------------------------------
######################################
# PREPARE TRAIN VALID TEST
######################################
### Define file paths
WD_PREPTRAIN = f"{TOP}/prep_train"
# Train / validation / test splits (each saved as .csv.gz).
TENSES_TRAIN_GZ = f"{WD_PREPTRAIN}/tenses_one_sent_per_verb_train"
TENSES_VALID_GZ = f"{WD_PREPTRAIN}/tenses_one_sent_per_verb_valid"
TENSES_TEST_GZ = f"{WD_PREPTRAIN}/tenses_one_sent_per_verb_test"
# Proportions of the data held out for validation and test.
PROP_VALID = 1 / 20
PROP_TEST = 1 / 20
# N-gram based event files ready for training NDL (verb infinitives included
# as cues); each will be a .gz file.
NGRAM_EVENTS_MULTI_VERBS_TRAIN = f"{WD_PREPTRAIN}/ngram_eventfile_multiverbs_train"
NGRAM_EVENTS_MULTI_VERBS_VALID = f"{WD_PREPTRAIN}/ngram_eventfile_multiverbs_valid"
NGRAM_EVENTS_MULTI_VERBS_TEST = f"{WD_PREPTRAIN}/ngram_eventfile_multiverbs_test"
PREPARE_TRAIN_VALID_TEST_FILES = [
    TENSES_TRAIN_GZ, TENSES_VALID_GZ, TENSES_TEST_GZ,
    NGRAM_EVENTS_MULTI_VERBS_TRAIN,
    NGRAM_EVENTS_MULTI_VERBS_VALID,
    NGRAM_EVENTS_MULTI_VERBS_TEST,
]
CREATE_TRAIN_VALID_TEST_FILES = [
    TENSES_ONE_SENT_PER_VERB_SHUF_GZ,
    TENSES_TRAIN_GZ, TENSES_VALID_GZ, TENSES_TEST_GZ,
]
#--------------------------------------------------------
######################################
# EXTRACT INFINITIVES FOR TRAINING
######################################
WD_EXTRACT_INF = f"{TOP}/extract_infinitives"
### Define file paths
# Input: shuffled one-sentence-per-verb data.
TENSES_GZ = TENSES_ONE_SENT_PER_VERB_SHUF_GZ
# Co-occurrence frequencies (.csv).
COOC_FREQ_CSV = f"{WD_EXTRACT_INF}/Cooc_freq"
# Most frequent infinitives (filename suggests frequency >= 10) (.csv).
INFINITIVES_CSV = f"{WD_EXTRACT_INF}/infinitives_freq10"
EXTRACT_SENTENCES_FOLDERS = [WD_EXTRACT_INF]
EXTRACT_INFINITIVE_FILES = [TENSES_GZ, COOC_FREQ_CSV, INFINITIVES_CSV]
#--------------------------------------------------------
WD_EXTRACT_NGRAM = f"{TOP}/prepare_ngrams/"
### Parameters to use
NUM_THREADS = 4
# Take up to N ngrams from each ngram group such that all extracted ngrams
# have frequency >= 10.
NGRAMN = 10000
### Define file paths — per-level ngram lists (csv each)
NGRAM1 = f"{WD_EXTRACT_NGRAM}1grams"
NGRAM2 = f"{WD_EXTRACT_NGRAM}2grams"
NGRAM3 = f"{WD_EXTRACT_NGRAM}3grams"
NGRAM4 = f"{WD_EXTRACT_NGRAM}4grams"
NGRAM = f"{WD_EXTRACT_NGRAM}ngrams"
# Scratch directory for intermediate ngram data.
TEMP_DIR_EXT = f"{WD_EXTRACT_NGRAM}data"
######################################
# NGRAMs TO USE
######################################
# Final list of ngrams to use in training (.csv).
TARGETS = f"{WD_EXTRACT_NGRAM}ngrams_touse"
# Separate per-level lists of selected chunks (csv each).
TARGETS_1G = f"{WD_EXTRACT_NGRAM}1grams_touse"
TARGETS_2G = f"{WD_EXTRACT_NGRAM}2grams_touse"
TARGETS_3G = f"{WD_EXTRACT_NGRAM}3grams_touse"
TARGETS_4G = f"{WD_EXTRACT_NGRAM}4grams_touse"
# Event file built from 4-grams (csv).
EVENT_FILE = f"{WD_EXTRACT_NGRAM}events_4grams"
NGRAM_FOLDERS = [WD_EXTRACT_NGRAM, TEMP_DIR_EXT]
NGRAM_FILES = [NGRAM, NGRAM1, NGRAM2, NGRAM3, NGRAM4, EVENT_FILE]
TARGETS_FILES = [TARGETS, TARGETS_1G, TARGETS_2G, TARGETS_3G, TARGETS_4G]
K_NGRAMS = 10000
#--------------------------------------------------------
######################################
# PREPARE CUES
######################################
WD_CUES = f"{TOP}/prepare_events"
### File paths
# Ngrams selected for training (10k from each n level with 1 <= n <= 4).
NGRAMS = TARGETS
# Infinitives selected for training.
INFINITIVES = INFINITIVES_CSV
# Final combined list of all cues (csv).
ALL_CUES = f"{WD_CUES}/cues_touse"
#--------------------------------------------------------
######################################
# SIMULATIONS
######################################
# NOTE(review): Windows-style drive path, unlike the POSIX paths above —
# confirm this is intentional for the machine running the simulations.
WD_SIM = 'D:/work/OoOM/ndl/test_location/simulations/'
### Define file paths
CUE_INDEX = ALL_CUES
# User-defined csv determining the possible outcomes.
OUTCOME_INDEX = f"{NF}/outcome_index_ngram_multiverbs"
# Scratch directory for simulation data.
TEMP_DIR_SIM = f"{WD_SIM}data/"
MODEL_PATH = f"{WD_SIM}NDL_model_ngrams_multiverbs"                 # h5 file (h5py)
WEIGHTS_PATH = f"{WD_SIM}NDL_weights_ngrams_multiverbs"             # .nc file (netCDF)
RESULTS_TEST = f"{WD_SIM}results_testset_ngrams_multiverbs"         # .csv
ACTIVATION_TEST = f"{WD_SIM}activations_testset_ngrams_multiverbs"  # .csv
NO_THREADS = 15
SIM_DIR = [WD_SIM, TEMP_DIR_SIM]
SIM_FILES = [
    NGRAM_EVENTS_MULTI_VERBS_TRAIN, NGRAM_EVENTS_MULTI_VERBS_VALID,
    NGRAM_EVENTS_MULTI_VERBS_TEST, TENSES_TEST_GZ, CUE_INDEX,
    OUTCOME_INDEX, TEMP_DIR_SIM, WEIGHTS_PATH, MODEL_PATH,
    RESULTS_TEST, ACTIVATION_TEST,
]
SIM_PARAMS = [NO_THREADS]
#--------------------------------------------------------
######################################
# DATA ANALYSIS - data preparation
######################################
# NOTE(review): TOP and RESULTS_TEST are re-bound here, shadowing the values
# assigned in the data-preparation/simulation sections above — confirm the
# analysis scripts import this module after all earlier constants are consumed.
TOP = "/home/tek/work/OoOM/ndl/ndl_tense/data_analysis"
WD_ANALYSIS_PREP = TOP + "/model_results"
# Model predictions on the test set (csv).
RESULTS_TEST = WD_ANALYSIS_PREP + "/results/results_testset_ngrams_multiverbs.csv"
# Predictions for the full test set (csv).
RESULTS_ALL_TEST = WD_ANALYSIS_PREP + "/results/results_all_testset_ngrams_multiverbs.csv"
# Directories required by the analysis step.
# FIX: the original concatenated RESULTS_ALL_TEST (a .csv file path) onto the
# dated results directory ("RESULTS_ALL_TEST + WD_ANALYSIS_PREP + ..."),
# producing a nonsense path; the second entry is the dated results directory.
DATA_ANALYSIS_DIR = [WD_ANALYSIS_PREP, WD_ANALYSIS_PREP + "/results_29_09_2021"]
DATA_ANALYSIS_FILES = [RESULTS_TEST, RESULTS_ALL_TEST, ACTIVATION_TEST, OUTCOME_INDEX]
######################################
# DATA ANALYSIS - plots
######################################
WD_PLOTS_PATH = WD_ANALYSIS_PREP
# Results file - this file is too large to be on GitHub so must be created
# post-processing.
RESULTS = RESULTS_ALL_TEST
# Accuracy plots (.png): top-1 / second-choice / third-choice / top-3
# accuracy per tense, each with a compact variant.
OVERALL_ACCURACY = WD_PLOTS_PATH + "/results/Accuracy_per_tense.png"
ACCURACY_PER_COMP = WD_PLOTS_PATH + "/results/Accuracy_per_tense_compact.png"
# FIX: original path ended in a duplicated ".png.png" extension, inconsistent
# with every sibling plot path.
ACCURACY_SECOND_TENSE = WD_PLOTS_PATH + "/results/Accuracy2_per_tense.png"
ACCURACY2_TENSE_COMP = WD_PLOTS_PATH + "/results/Accuracy2_per_tense_compact.png"
ACCURACY_THIRD_TENSE = WD_PLOTS_PATH + "/results/Accuracy3_per_tense.png"
ACCURACY3_TENSE_COMP = WD_PLOTS_PATH + "/results/Accuracy3_per_tense_compact.png"
ACCURACY_TOP_THREE = WD_PLOTS_PATH + "/results/Accuracy_top3_per_tense.png"
ACCURACY_T3_COMP = WD_PLOTS_PATH + "/results/Accuracy_top3_per_tense_compact.png"
DATA_ANALYSIS_PLOTS_DIR = [WD_PLOTS_PATH]
DATA_ANALYSIS_PLOTS_FILES = [OVERALL_ACCURACY, ACCURACY_PER_COMP,
                             ACCURACY_SECOND_TENSE, ACCURACY2_TENSE_COMP,
                             ACCURACY_THIRD_TENSE, ACCURACY3_TENSE_COMP,
                             ACCURACY_TOP_THREE, ACCURACY_T3_COMP]