-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathDANN_documentation.txt
More file actions
371 lines (220 loc) · 15.1 KB
/
DANN_documentation.txt
File metadata and controls
371 lines (220 loc) · 15.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
#########################################################################
################### data processing for training ########################
#########################################################################
(1) Run for each epoch for all chromosomes
In GenerateTrainingData.py define the sample and sub-sample size(e.g. 150), window size and sliding window jump.
Also define directory of data for each epoch (e.g path='/u/project/ngarud/Garud_lab/aDNA/epochH/VCF.v51.0.H.chr')
Define the output file path as well as which chromosomes to process and the haplotype sorting style.
qsub qsub_GenerateData # I should use this code to generate training data from different epochs.
# Example output file:
Tranining.H.n150.w201.j5.dat
(2) Once I have generated the data files from all epochs, merge them together to use for training
qsub qsub_MergeData
(3) Next, generate the target.npy file I will use as input (target domain) for training the DANN
qsub qsub_DataForTraining # make sure the qsub is running the script data_for_training_target.py
#########################################################################
############################# Training DANN #############################
#########################################################################
1) After processing simulations and data for training we are ready to train the DANN
2) Train model
qsub qsub_TrainModel
3) Test model
########################################################################################################################
############### scan processing ########################################################################################
########################################################################################################################
# Annotate each per-chromosome prediction file with recombination rates from
# the deCODE sex-averaged GRCh37 genetic map, writing *_rec.txt outputs.
for chr in {1..22}; do
  python ../addRecomb.py \
    -i "chr${chr}_CEUnoMD_pred_j10_22.txt" \
    -o "chr${chr}_CEUnoMD_pred_j10_22_rec.txt" \
    -g "/u/project/ngarud/Garud_lab/aDNA/RecombMaps/DeCodeSexAveraged_GRCh37/genetic_map_decode_sex-averaged_chr${chr}.txt"
done
# Append the chromosome number as a constant final column to each *_rec.txt file
# (awk cannot edit in place, so write to a temp file and rename over the original).
for chr in {1..22}; do
  tmp_out="chr${chr}_CEUnoMD_pred_j10_22_rec_tmp.txt"
  awk -v num="$chr" '{print $0 "\t" num}' "chr${chr}_CEUnoMD_pred_j10_22_rec.txt" > "$tmp_out"
  mv "$tmp_out" "chr${chr}_CEUnoMD_pred_j10_22_rec.txt"
done
# Concatenate all per-chromosome scans into one genome-wide file, then delete
# the now-redundant per-chromosome prediction files.
cat chr*CEUnoMD*_rec.txt > scan_RowFreq_CEUnoMD_j10_22_rec.txt
rm -f chr{1..22}_CEUnoMD_pred_j10_22.txt
##### GET TOP PEAKS VCF AND ANNOTATION
# Export the full genotype data to VCF
plink --bfile v51.0.H --recode vcf --out VCFfiles/v51.0.H
# Build a BED-like table (CHROM, POS, POS, ID) of every variant.
# NOTE(review): the original label said "sort" but nothing is sorted here;
# confirm both inputs to `bedtools closest` below are coordinate-sorted.
bcftools query -f '%CHROM\t%POS\t%POS\t%ID\n' v51.0.H.vcf > v51.0.H.bed
mv peaks_H_RowDist.txt peaks_H_RowDist.bed
# Keep only chromosomes present in BOTH files before running bedtools closest
awk '{print $1}' peaks_H_RowDist.bed | sort -u > query_chroms.txt
awk '{print $1}' v51.0.H.bed | sort -u > db_chroms.txt
comm -12 query_chroms.txt db_chroms.txt > common_chroms.txt
# Filter both BED files down to the shared chromosomes.
# NOTE(review): grep -w matches a chromosome name as a word anywhere on the
# line, not only in column 1 — a position/ID field equal to a chromosome
# number also matches; confirm this over-keeping is acceptable.
grep -wFf common_chroms.txt peaks_H_RowDist.bed > filtered_query.bed
grep -wFf common_chroms.txt v51.0.H.bed > filtered_db.bed
mv filtered_query.bed peaks_H_RowDist.bed
mv filtered_db.bed v51.0.H.bed
bedtools closest -a peaks_H_RowDist.bed -b v51.0.H.bed -d > topPeaks_v51.0.H.bed
# Get positions (columns 4 and 5 of the closest output) to filter the VCF with
cut -f4,5 topPeaks_v51.0.H.bed > filter_positions.txt
# Compress + index the VCF, then subset it to the top-peak positions
bgzip v51.0.H.vcf
bcftools index v51.0.H.vcf.gz
bcftools view -R filter_positions.txt v51.0.H.vcf.gz -Oz -o topPeaks_v51.0.H.vcf.gz
#### Find closest genes
# VEP was used to get genes within 256,000 bp upstream/downstream of each variant
grep "protein_coding" VEP_Hpeaks.txt > VEP_Hpeaks_proteinCoding.txt
## Using bedtools only
wget "https://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_42/GRCh37_mapping/gencode.v42lift37.annotation.gtf.gz"
# Fix: zgrep must read the .gz file wget just downloaded — the uncompressed
# .gtf does not exist at this point.
zgrep 'gene_type "protein_coding"' gencode.v42lift37.annotation.gtf.gz > protein_coding.gtf
module load bedops
# gtf2bed requires a transcript_id attribute on every record; append an empty one where missing.
awk '{ if ($0 ~ "transcript_id") print $0; else print $0" transcript_id \"\";"; }' protein_coding.gtf | gtf2bed - > protein_coding.bed
#sort files
#bedtools sort -i peaks_H_RowDist.bed > sorted_peaks_H_RowDist.bed
bedtools sort -i protein_coding.bed > genes_sorted.bed
# NOTE(review): this strips the literal string "chr" anywhere on the line
# (gene/transcript names included), not just the chromosome column — confirm.
sed -i 's/chr//g' genes_sorted.bed
grep "protein_coding" genes_sorted.bed > genes_sorted_proteinCoding.bed
# Closest protein-coding gene per peak, signed distance (-D a), first tie only;
# keep peak coords, gene name (col 18) and distance (last col).
# (The unfiltered `bedtools closest` run that used to precede this was
# redundant — its closest_genes.tsv was immediately overwritten — so it is dropped.)
bedtools closest -a peaks_H_RowDist.bed -b genes_sorted_proteinCoding.bed -D a -t first \
| awk 'BEGIN {OFS="\t"} {print $1,$2,$3,$4,$18,$NF}' > closest_genes.tsv
###### Visualize peaks
module load htslib
module load bcftools
# Candidate peak regions to visualize (GRCh37 coordinates):
#21592609-22105845
#72584990-73282330
#109284096-109624296
#135756777-136631071 #LCT
#152521376-152940018
#167860886-168320237
#178235617-178600535
#197233200-197723715
#Neutral
#29970016-30630912
# 27550000 - 29000000 #HLA
# 28050000 - 28560000 # OCA2
# NOTE(review): the input below is VCF.v40.3..chr6.vcf (v40.3, double dot) but
# the output is named VCF.v51.0.H.chr6.vcf.gz — looks like a stale or mistyped
# input filename; confirm the intended source is VCF.v51.0.H.chr6.vcf.
bgzip -c VCF.v40.3..chr6.vcf > VCF.v51.0.H.chr6.vcf.gz
tabix -p vcf VCF.v51.0.H.chr6.vcf.gz
# Extract the region of interest for each peak to visualize
bcftools view -r 4:33679737-34856751 VCF.v40.3.EN.chr4.vcf.gz -o chr4.peakN.33679737-34856751.vcf
bcftools view -r 15:28050000-28560000 VCF.v51.0.H.chr15.vcf.gz -o chr15.peakH.OCA2.28050000-28560000.vcf
#chr6 peak 6:27550000-29000000 bcftools view -H chr6.peakH.H12.HLA.27550000-29000000.vcf | wc -l # count num. variants
qsub qsub_DataForViz # edit file path accordingly
# ------------------------------------------------------------------------------------------------------------------------------------------------
##### Find overlapping peaks with bedtools
# multiinter reports, for every genomic interval, how many input files overlap
# it (column 4) and which ones (column 5).
bedtools multiinter -names N BA IA H -i peaks_N.bed peaks_BA.bed peaks_IA.bed peaks_H.bed > peaks_overalp.bed
#bedtools multiinter -names N BA IA H -i TopPeaks_N.bed TopPeaks_BA.bed TopPeaks_IA.bed TopPeaks_H.bed > TopPeaks_overalp.bed
# Add a cluster ID to each overlapping group
bedtools cluster -i peaks_overalp.bed > peaks_overalp_clusters.bed ## the cluster ID is the column that bedtools cluster APPENDS (the last column, read as $NF below)
# For each cluster, keep the line with the most epochs in overlap (column 4);
# then drop the cluster-ID column and sort by chromosome/start.
awk '{
cluster = $NF;
if ($4 > max_count[cluster]) {
max_count[cluster] = $4;
best_line[cluster] = $0;
}
} END {
for (c in best_line) print best_line[c];
}' peaks_overalp_clusters.bed| cut -f1-9 | sort -k1,1n -k2,2n > peaks_overlap_final.bed
#### Find overlap with genes
# Download the refGene annotation for hg19
wget http://hgdownload.cse.ucsc.edu/goldenPath/hg19/database/refGene.txt.gz
gunzip refGene.txt.gz
# Convert UCSC's refGene format to BED (col 3 = chrom, 5 = txStart, 6 = txEnd, 13 = gene name)
cut -f3,5,6,13 refGene.txt > refGene.hg19.bed
# Remove the "chr" prefix so chromosome names match our peak files.
# NOTE(review): this strips "chr" anywhere on the line, gene names included.
sed -i 's/chr//g' refGene.hg19.bed
# Sort the gene BED (bedtools expects sorted input).
# Fix: the original line sorted the not-yet-existing *.sorted.bed file and
# discarded the result to stdout; sort the unsorted file into the sorted one.
sort -k1,1 -k2,2n refGene.hg19.bed > refGene.hg19.sorted.bed
# Intersect peaks with genes
bedtools intersect -a peaks_overlap_final.bed -b /u/project/ngarud/Garud_lab/aDNA/GRCh37_annotations/refGene.hg19.sorted.bed -wa -wb > overlaps_with_genes.bed # all genes overlapping each merged peak
bedtools intersect -a peaks_overalp_clusters.bed -b /u/project/ngarud/Garud_lab/aDNA/GRCh37_annotations/refGene.hg19.sorted.bed -wa -wb > overlaps_with_genes_notmerged.bed
# Find the closest gene by distance
#bedtools closest -a peaks_overlap_final.bed -b /u/project/ngarud/Garud_lab/aDNA/GRCh37_annotations/refGene.hg19.sorted.bed -d > closest_genes.bed
# Unique genes: remove repeated rows, ignoring columns 11 and 12 when comparing
# (a de-duplication key is built from every other field; first occurrence wins).
awk '{
key = ""
for (i = 1; i <= NF; i++) {
if (i != 11 && i != 12) key = key $i OFS
}
if (!seen[key]++) print
}' overlaps_with_genes.bed > overlaps_with_genes_unique.bed
# Overlap with sweeps reported by Schrider and Kern 2017.
# NOTE(review): `!seen[$2]++` keeps only the first line per column-2 value —
# confirm de-duplicating on that single coordinate column is intended.
bedtools intersect -a peaks_overlap_final.bed -b /u/project/ngarud/Garud_lab/aDNA/SweepsInLit/all_sweeps_merged_CEU_sorted.bed -wa -wb | awk '!seen[$2]++' > overlaps_with_Schrider.bed
# Same comparison, but counting peaks within 1 Mb of a reported sweep
bedtools window -a /u/project/ngarud/Garud_lab/aDNA/SweepsInLit/all_sweeps_merged_CEU_sorted.bed -b peaks_overlap_final.bed -w 1000000 | awk '!seen[$2]++' > overlaps_with_Schrider_1Mb.bed
# From my sweep list file: drop the header row, editing in place.
sed -i '1d' List_sweeps.csv
# Overlap of peaks with sweeps reported in the aDNA literature.
# Each study was handled by an identical copy-pasted stanza; factor it into one
# parameterized helper. Filenames, grep patterns, column picks, and outputs are
# unchanged from the original per-study commands.
SWEEPS_DIR=/u/project/ngarud/Garud_lab/aDNA/SweepsInLit
# NOTE(review): the sorted BEDs are written to the current directory but read
# back via ${SWEEPS_DIR} — this assumes the CWD is ${SWEEPS_DIR}; confirm.

#######################################
# Extract one study's sweeps from List_sweeps.csv and overlap with final peaks.
# Arguments:
#   $1 - study label used in output filenames
#   $2 - grep pattern selecting the study's rows in List_sweeps.csv
#   $3 $4 $5 $6 - CSV column numbers for chrom, start, end, name
#   $7 - "yes" (default) to also report peaks within 1 Mb of a sweep
# Outputs: List_sweeps_<study>{.csv,.bed,_sorted.bed},
#          overlaps_with_<study>.bed [, overlaps_with_<study>_1Mb.bed]
#######################################
sweep_overlap() {
  local study=$1 pattern=$2 c_chr=$3 c_start=$4 c_end=$5 c_name=$6
  local do_window=${7:-yes}
  grep -- "$pattern" List_sweeps.csv > "List_sweeps_${study}.csv"
  awk -F',' -v a="$c_chr" -v b="$c_start" -v c="$c_end" -v d="$c_name" \
    '{OFS="\t"; print $a, $b, $c, $d}' \
    "List_sweeps_${study}.csv" > "List_sweeps_${study}.bed"
  sort -k1,1n -k2,2n "List_sweeps_${study}.bed" > "List_sweeps_${study}_sorted.bed"
  bedtools intersect -a peaks_overlap_final.bed \
    -b "${SWEEPS_DIR}/List_sweeps_${study}_sorted.bed" -wa -wb \
    > "overlaps_with_${study}.bed"
  if [[ "$do_window" == yes ]]; then
    # Peaks within 1 Mb of a reported sweep
    bedtools window -a "${SWEEPS_DIR}/List_sweeps_${study}_sorted.bed" \
      -b peaks_overlap_final.bed -w 1000000 > "overlaps_with_${study}_1Mb.bed"
  fi
}

sweep_overlap Irving       "Irving"     3 7 8 4       # Irving-Pease
sweep_overlap Souilmi      "Souilmi et" 3 7 8 4
sweep_overlap Kerner       "Kerner et"  3 7 8 4
sweep_overlap Le           "Le et"      2 6 7 3
sweep_overlap Akbari       "Akbari"     2 6 7 3
sweep_overlap PandeyHarris "Pandey"     2 6 7 3       # Pandey & Harris
sweep_overlap Mathieson    "Mathieson"  2 6 7 3
sweep_overlap Field        "Field"      2 6 7 3 no    # Field et al: intersect only, no 1 Mb window
# Count sweeps shared between the Akbari and Schrider lists
# (de-duplicated on column 2; the count is printed to stdout, not saved).
bedtools intersect -a /u/project/ngarud/Garud_lab/aDNA/SweepsInLit/List_sweeps_Akbari_sorted.bed -b /u/project/ngarud/Garud_lab/aDNA/SweepsInLit/all_sweeps_merged_CEU_sorted.bed -wa -wb | awk '!seen[$2]++' |wc -l
# Overlap across the three DANN model scans.
# multiinter column 4 = number of scans overlapping each interval.
bedtools multiinter -i Model_ALL_RowFreq_201_ConstantNeMD43srcALLSNPs_batch64_lr1e-5/peaks_overlap_final.bed Model_ALL_RowFreq_201_SouilmiMD43srcALLSNPs_batch64_lr1e-5/peaks_overlap_final.bed Model_ALL_RowFreq_201_SouilmiMD05srcALLSNPs_batch64_lr1e-5/peaks_overlap_final.bed > peaks_DANNmodels_overalp.bed
# Add a cluster ID to each overlapping group
bedtools cluster -i peaks_DANNmodels_overalp.bed > peaks_DANNmodels_overalp_clusters.bed ## the cluster ID is the column that bedtools cluster APPENDS (the last column, read as $NF below)
# For each cluster, keep the line with the most overlapping scans (column 4);
# then drop the cluster-ID column and sort by chromosome/start.
awk '{
cluster = $NF;
if ($4 > max_count[cluster]) {
max_count[cluster] = $4;
best_line[cluster] = $0;
}
} END {
for (c in best_line) print best_line[c];
}' peaks_DANNmodels_overalp_clusters.bed| cut -f1-9 | sort -k1,1n -k2,2n > peaks_DANNmodels_overlap_final.bed
# Keep only the intervals where all three scans overlap
awk '$4 == 3' peaks_DANNmodels_overlap_final.bed > peaks_DANNmodels_AllScansoverlap_final.bed
##### Akbari table 4 online
# Keep data rows (the file's first 23 lines are header/preamble) whose
# posterior probability of a sweep (column 13) exceeds 0.99.
zcat Selection_Summary_Statistics.tsv.gz \
  | awk -F'\t' 'NR >= 24 && $13 > 0.99' > Akbari_Summary_Statistics_prob99.txt
### Merge all BED files with aDNA sweeps
# NOTE(review): the Souilmi and Field lists are not included in this merge —
# confirm that is intentional. The sorted Le list is used here for consistency
# with the other inputs (the merge is re-sorted below, so the final sorted
# output is unchanged).
cat List_sweeps_Akbari_sorted.bed List_sweeps_Irving_sorted.bed List_sweeps_Kerner_sorted.bed List_sweeps_Mathieson_sorted.bed List_sweeps_PandeyHarris_sorted.bed List_sweeps_Le_sorted.bed > List_sweeps_ALL_aDNA.bed
# Pad each peak by 500 kb / 1 Mb on both sides.
# Fix: clamp the start coordinate at 0 — a negative start is invalid BED and
# makes bedtools fail for peaks closer than the pad to the chromosome start.
awk '{OFS="\t"; $2 = ($2 > 500000 ? $2 - 500000 : 0); $3 += 500000; print }' peaks_overlap_final.bed > peaks_overlap_final_plus500Kb.bed
awk '{OFS="\t"; $2 = ($2 > 1000000 ? $2 - 1000000 : 0); $3 += 1000000; print }' peaks_overlap_final.bed > peaks_overlap_final_plus1Mb.bed
# Keep only chrom/start/end, then sort for bedtools
awk -F'\t' '{OFS="\t"; print $1, $2, $3}' List_sweeps_ALL_aDNA.bed > List_sweeps_ALL_aDNA_tmp.bed
sort -k1,1n -k2,2n List_sweeps_ALL_aDNA_tmp.bed > List_sweeps_ALL_aDNA_sorted.bed
rm List_sweeps_ALL_aDNA_tmp.bed
# Peaks with no previously reported aDNA sweep = novel candidates
bedtools intersect -v -a peaks_overlap_final.bed -b /u/project/ngarud/Garud_lab/aDNA/SweepsInLit/List_sweeps_ALL_aDNA_sorted.bed > novel_sweeps.bed
# Same with the 1 Mb padding; second command just prints the count to stdout
bedtools intersect -v -a peaks_overlap_final_plus1Mb.bed -b /u/project/ngarud/Garud_lab/aDNA/SweepsInLit/List_sweeps_ALL_aDNA_sorted.bed
bedtools intersect -v -a peaks_overlap_final_plus1Mb.bed -b /u/project/ngarud/Garud_lab/aDNA/SweepsInLit/List_sweeps_ALL_aDNA_sorted.bed | wc -l
bedtools intersect -v -a novel_sweeps.bed -b overlaps_with_Souilmi_1Mb.bed | wc -l # remove remaining overlap with Souilmi