-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathdata_helpers.py
More file actions
184 lines (140 loc) · 7.09 KB
/
data_helpers.py
File metadata and controls
184 lines (140 loc) · 7.09 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
import random
import pandas as pd
import numpy as np
import torch
import tifffile as tiff
from torch.utils.data import Dataset
# Root directories of the benchmark data: Elith et al. 2020 occurrence records,
# Valavi et al. 2022 50k background samples, and environmental raster layers.
elith_data_dir = 'data/Records/'
valavi_bg_dir = 'data/Valavi_DataS1/background_50k/'
rasters_dir = 'data/Environment'
# The six benchmark regions.
region_list = ['AWT', 'CAN', 'NSW', 'NZ', 'SA', 'SWI']
# Regions whose presence-absence test data is split into per-group files;
# an empty list means the region has a single test file.
group_dictionary = {
    'AWT': ['bird', 'plant'],
    'CAN': [],
    'NSW': ['ba', 'db', 'nb', 'ot', 'ou', 'rt', 'ru', 'sr'],
    'NZ': [],
    'SA': [],
    'SWI': []
}
# Covariates of Elith et al. 2020
covariate_dictionary = {
    'AWT': ['bc01', 'bc04', 'bc05', 'bc06', 'bc12', 'bc15', 'bc17', 'bc20', 'bc31', 'bc33', 'slope', 'topo', 'tri'],
    'CAN': ['alt', 'asp2', 'ontprec', 'ontprec4', 'ontprecsd', 'ontslp', 'onttemp', 'onttempsd', 'onttmin4', 'ontveg', 'watdist'],
    'NSW': ['cti', 'disturb', 'mi', 'rainann', 'raindq', 'rugged', 'soildepth', 'soilfert', 'solrad', 'tempann', 'tempmin', 'topo', 'vegsys'],
    'NZ': ['age', 'deficit', 'dem', 'hillshade', 'mas', 'mat', 'r2pet', 'rain', 'slope', 'sseas', 'toxicats', 'tseas', 'vpd'],
    'SA': ['sabio1', 'sabio2', 'sabio4', 'sabio5', 'sabio6', 'sabio7', 'sabio12', 'sabio15', 'sabio17', 'sabio18',],
    'SWI': ['bcc', 'calc', 'ccc', 'ddeg', 'nutri', 'pday', 'precyy', 'sfroyy', 'slope', 'sradyy', 'swb', 'tavecc', 'topo']
}
# Covariates of Valavi et al. 2022 (a subset of the Elith et al. 2020 set)
valavi_covariate_dictionary = {
    'AWT': ['bc04', 'bc05', 'bc06', 'bc12', 'bc15', 'slope', 'topo', 'tri'],
    'CAN': ['alt', 'asp2', 'ontprec', 'ontslp', 'onttemp', 'ontveg', 'watdist'],
    'NSW': ['cti', 'disturb', 'mi', 'rainann', 'raindq', 'rugged', 'soildepth', 'soilfert', 'solrad', 'tempann', 'topo', 'vegsys'],
    'NZ': ['age', 'deficit', 'hillshade', 'mas', 'mat', 'r2pet', 'slope', 'sseas', 'toxicats', 'tseas', 'vpd'],
    'SA': ['sabio12', 'sabio15', 'sabio17', 'sabio18', 'sabio2', 'sabio4', 'sabio5', 'sabio6'],
    'SWI': ['bcc', 'calc', 'ccc', 'ddeg', 'nutri', 'pday', 'precyy', 'sfroyy', 'slope', 'sradyy', 'swb', 'topo']
}
# Categorical covariates to one-hot encode; same for Valavi et al. 2022 and Elith et al. 2020
categorical_covariates = {
    'AWT': [],
    'CAN': ['ontveg'],
    'NSW': ['vegsys'],
    'NZ': [],  # 'age', 'toxicats' are ordinal variables -> no one hot encoding
    'SA': [],
    'SWI': []  # 'calc' is binary, no need to one hot encode
}
class SpeciesDataset(Dataset):
    """Presence-only dataset that pairs each sample with a random background point.

    Each item is a triple (covariates, labels, background covariates), where the
    background vector is drawn uniformly at random from the background pool on
    every access.
    """

    def __init__(self, x, y, bg):
        # x: per-sample covariates; y: per-sample labels; bg: background pool.
        self.x = x
        self.y = y
        self.bg = bg
        self.length = len(self.x)
        self.num_bg = len(self.bg)

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        # Draw a fresh uniform background index on every access.
        background_idx = random.randrange(self.num_bg)
        return self.x[idx], self.y[idx], self.bg[background_idx]
def get_data_one_region(region, co_occurrence=True, valavi=False):
    """Load training, test and background data for a given region.

    :param region: region code, one of 'AWT', 'CAN', 'NSW', 'NZ', 'SA', 'SWI'.
    :param co_occurrence: if True, species presences recorded at the same
        (x, y) location are merged into a single multi-label sample.
    :param valavi: if True, the covariates of Valavi et al. 2022 are used
        instead of those of Elith et al. 2020.
    :returns: (x_train, y_train, coordinates_train, x_test, y_test,
        coordinates_test, bg, coordinates_bg) as numpy arrays.
    """
    covs = get_covariates(region, valavi=valavi)
    cat_covs = categorical_covariates[region]
    # Get presence-only occurrence records
    train = pd.read_csv(elith_data_dir + 'train_po/' + region + 'train_po.csv')
    train = train[['spid'] + covs + ["x", "y"]].reset_index(drop=True)
    # One hot encoding of categorical variables to obtain x_train (covariates)
    x_train = pd.get_dummies(train, columns=cat_covs).drop(["spid"], axis=1)
    if co_occurrence:
        # Merge species at same location (same covariates by definition)
        x_train = x_train.groupby(["x", "y"]).mean().reset_index()
    coordinates_train = x_train[["x", "y"]].to_numpy()
    x_train = x_train.drop(["x", "y"], axis=1)
    x_train = x_train.to_numpy()
    # Encode the presence into a binary vector to obtain y_train
    y_train = pd.get_dummies(train, columns=['spid']).drop(covs, axis=1)
    if co_occurrence:
        # Merge species at same location
        y_train = y_train.groupby(["x", "y"]).sum().reset_index()
    y_train = y_train.drop(["x", "y"], axis=1)
    # clip(0, 1): summing duplicates can give counts > 1; keep labels binary
    y_train = y_train.to_numpy().clip(0, 1)
    # Get random background points (Valavi et al. 2022 background sample)
    bg = pd.read_csv(valavi_bg_dir + region + '.csv')
    coordinates_bg = bg[['x', 'y']].values
    bg = pd.get_dummies(bg[covs], columns=cat_covs).to_numpy()
    # Presence-absence test data and covariates
    groups = group_dictionary[region]
    if len(groups) > 0:
        # Test data is split into one file per species group; concatenate them.
        # NOTE(review): pd.concat of per-group frames may introduce NaN for
        # species columns absent from a group — confirm downstream handling.
        test_pa = []
        test_env = []
        for group in groups:
            test_pa.append(pd.read_csv(elith_data_dir + 'test_pa/' + region + 'test_pa_' + group + '.csv'))
            test_env.append(pd.read_csv(elith_data_dir + 'test_env/' + region + 'test_env_' + group + '.csv'))
        test_pa = pd.concat(test_pa)
        test_env = pd.concat(test_env)
    else:
        test_pa = pd.read_csv(elith_data_dir + 'test_pa/' + region + 'test_pa.csv')
        test_env = pd.read_csv(elith_data_dir + 'test_env/' + region + 'test_env.csv')
    # Sort both frames by siteid so covariates, labels and coordinates align row-wise
    x_test = np.array(pd.get_dummies(test_env.sort_values('siteid')[covs], columns=cat_covs))
    y_test = np.array(test_pa.sort_values('siteid')[get_species_list(region, remove=False)])
    coordinates_test = np.array(test_env.sort_values('siteid')[["x", "y"]])
    return x_train, y_train, coordinates_train, x_test, y_test, coordinates_test, bg, coordinates_bg
def one_hot_covariates(x, region, valavi=False):
    """One-hot encode the categorical covariates of a region's feature matrix.

    :param x: array-like of raw covariate values, columns ordered as returned
        by get_covariates(region, valavi=valavi).
    :param region: region code used to look up covariate names.
    :param valavi: if True, use the covariates of Valavi et al. 2022.
    :returns: numpy array with categorical columns expanded to dummy columns.
    """
    cov_names = get_covariates(region, valavi=valavi)
    frame = pd.DataFrame(x, columns=cov_names)
    encoded = pd.get_dummies(frame, columns=categorical_covariates[region])
    return encoded.to_numpy()
def get_species_list(region, remove=True):
    """Return the species ids occurring in the region's presence-only training records.

    :param region: region code used to locate the train_po CSV.
    :param remove: if True, drop species known to be unusable for training.
    :returns: list of unique species id strings, in order of first appearance.
    """
    po_path = elith_data_dir + 'train_po/' + region + 'train_po.csv'
    species = list(pd.read_csv(po_path)['spid'].unique())
    # 'nsw30' has only 2 occurrences in the training data
    if remove and region == 'NSW':
        species.remove('nsw30')
    return species
def get_covariates(region, valavi=True):
    """Return the list of covariate names for a given region.

    :param region: region code used as the dictionary key.
    :param valavi: if True, return the covariates of Valavi et al. 2022,
        otherwise those of Elith et al. 2020.

    NOTE(review): the default here is valavi=True, while every other helper
    in this module defaults to valavi=False — confirm this asymmetry is
    intended before relying on the default.
    """
    if valavi:
        return valavi_covariate_dictionary[region]
    return covariate_dictionary[region]
def get_rasters(region, valavi=False):
    """Load the environmental raster layers of the given region.

    :param region: region code, used as a sub-directory of rasters_dir.
    :param valavi: if True, load the covariates of Valavi et al. 2022.
    :returns: numpy array of shape #covariates x height x width.
    """
    paths = [
        f'{rasters_dir}/{region}/{covariate}.tif'
        for covariate in get_covariates(region, valavi=valavi)
    ]
    layers = [np.array(tiff.imread(path)) for path in paths]
    return np.stack(layers)
def get_all_bg(rasters):
    """Return the covariate vectors of every valid location in the rasters.

    Heuristic: the top-left pixel of the first covariate layer is assumed to
    lie outside the region polygon, so every pixel sharing its value is
    masked out. This has been verified for the 6 regions.

    :param rasters: numpy array of shape #covariates x height x width.
    :returns bg_points: numpy array of shape #locations x #covariates.
    """
    outside_value = rasters[0, 0, 0]
    inside = rasters[0] != outside_value
    # Boolean-index all layers at once, then flip to (location, covariate).
    return rasters[:, inside].T