# forked from mozboz/transformapetl
# transformap.py -- 245 lines (175 loc) / 7.02 KB
#!/usr/bin/env python
# coding: utf-8
import argparse
import json
import logging
import logging.config
import urllib
import urllib.request
from datetime import datetime

import yaml
from sqlalchemy import exists

import orm
class TMJob(object):
    '''
    Base class for ETL job steps.

    Opens a database session and loads the YAML job configuration
    that Extractor, Transformer and Loader all read from.
    '''
    def __init__(self, job_file):
        # Set up database connection
        self.session = orm.db_connect()
        # Load YAML job config file; job config is stored in self.config.
        # safe_load replaces the original bare yaml.load(f), which is
        # deprecated without an explicit Loader and can construct
        # arbitrary Python objects from the config file.
        with open(job_file, 'r') as f:
            self.config = yaml.safe_load(f)
class Extractor(TMJob):
    '''
    Fetches the source data file for a job over HTTP.
    '''
    def run(self):
        # 'file_url' comes from the YAML job config.
        file_url = self.config.get('file_url')
        return self.fetch_via_http(file_url)

    def fetch_via_http(self, file_url):
        '''
        Fetch a file via HTTP and save it to the data dir.

        Returns a dict with the saved file's basename under
        'file_name', consumed by Transformer.run().
        '''
        # The last path segment of the URL becomes the local file name.
        file_name = file_url.strip('/').split('/')[-1]
        logger.info("Retrieving file %s", file_name)
        # urllib.URLopener is Python-2-only (and was deprecated even
        # there); urllib.request.urlretrieve is the Python 3 equivalent.
        urllib.request.urlretrieve(file_url, 'data/%s' % file_name)
        return {
            'file_name': file_name,
        }
class Transformer(TMJob):
    '''
    Maps raw GeoJSON features onto the job's schema.
    '''
    def run(self, extractor_response):
        # extractor_response is the dict returned by Extractor.run().
        file_name = extractor_response.get('file_name')
        return self.transform_geojson(file_name)

    def transform_geojson(self, file_name):
        '''
        Transform map data in file according to the job schema.

        Reads data/<file_name> as GeoJSON and, for each feature,
        emits a flat dict of longitude/latitude plus the properties
        named in the schema (renamed to the schema's 'name' values).

        Returns a dict with 'map_data' (list of row dicts) and
        'file_name', consumed by Loader.run().
        '''
        logger.info("Transforming data according to schema")
        # Get schema; default to {} so a job without one yields
        # coordinate-only rows instead of crashing below.
        schema = self.config.get('schema', {})
        transformed_data = []
        # Open source file
        with open('data/%s' % file_name) as f:
            data = json.load(f)
        # Transform each feature row.
        for row in data.get('features', []):
            # BUG FIX: the original used {} as the coordinates fallback
            # and then indexed it, which raises KeyError when geometry
            # is absent. Missing/short coordinates now yield None.
            coords = row.get('geometry', {}).get('coordinates') or []
            new_row = {
                'longitude': coords[0] if len(coords) > 0 else None,
                'latitude': coords[1] if len(coords) > 1 else None,
            }
            # Keep only properties the schema knows about, renamed per
            # the schema; properties may be null in valid GeoJSON.
            for field, value in (row.get('properties') or {}).items():
                if field in schema:
                    new_row[schema[field].get('name')] = value
            transformed_data.append(new_row)
        return {
            'map_data': transformed_data,
            'file_name': file_name,
        }
class Loader(TMJob):
    '''
    Persists transformed map data and its metadata to the database.
    '''
    def run(self, transformer_response):
        # transformer_response is the dict returned by Transformer.run().
        map_data = transformer_response.get('map_data')
        file_name = transformer_response.get('file_name')
        new_map = self.initialise_map(file_name)
        self.save_map(new_map, map_data)

    def save_map(self, new_map, map_data):
        '''
        Save transformed rows to the database.

        new_map  -- dict from initialise_map() with 'map_instance' and
                    'map_schema' (the SchemaVersion row).
        map_data -- list of row dicts holding 'latitude'/'longitude'
                    plus schema-named field values.
        '''
        # Build a field-name -> SchemaField-id lookup for this map's
        # schema version.
        map_schema = {}
        schema_fields = self.session.query(orm.SchemaField).filter_by(
            schema_id=new_map['map_schema'].id).all()
        for schema_field in schema_fields:
            map_schema[schema_field.field_name] = schema_field.id
        # Save data to db.
        logger.info("Saving map data to database")
        for row in map_data:
            # Save the map object (the point geometry).
            map_object = orm.MapObject(
                longitude=row.get('longitude'),
                latitude=row.get('latitude'),
                map_instance=new_map['map_instance'],
            )
            self.session.add(map_object)
            # Save the object's schema-mapped field values.
            for field, value in row.items():
                if field in ('latitude', 'longitude'):
                    continue
                self.session.add(orm.MapData(
                    map_instance=new_map['map_instance'],
                    map_object=map_object,
                    schema_field_id=map_schema[field],
                    field_value=value,
                ))
        # One commit for the whole batch: the original committed twice
        # per row, which is far slower and leaves partial data behind
        # if a later row fails.
        self.session.commit()
        logger.info("Done!")

    def initialise_map(self, file_name):
        '''
        Save new map instance metadata to database as needed.

        Creates the MapOwner and MapDefinition if absent, then a new
        SchemaVersion, MapInstance and the schema's SchemaField rows.
        Returns a dict with 'map_instance' and 'map_schema'.
        '''
        # Map Owner (if necessary)
        map_owner = self.session.query(orm.MapOwner).filter_by(
            name=self.config.get('owner')).first()
        if not map_owner:
            logger.info("Saving new map owner %s", self.config.get('owner'))
            map_owner = orm.MapOwner(name=self.config.get('owner'))
            self.session.add(map_owner)
        # Map Definition (if necessary)
        map_definition = self.session.query(orm.MapDefinition).filter_by(
            name=self.config.get('definition')).first()
        if not map_definition:
            logger.info("Saving new map definition %s",
                        self.config.get('definition'))
            map_definition = orm.MapDefinition(
                name=self.config.get('definition'),
                owner=map_owner,
            )
            self.session.add(map_definition)
        # Schema Version
        logger.info("Saving schema version")
        schema = orm.SchemaVersion(
            map_definition=map_definition,
            map_owner=map_owner,
        )
        # BUG FIX: the original added map_definition a second time here
        # and never added the new SchemaVersion to the session.
        self.session.add(schema)
        # Map Instance
        logger.info("Saving map instance")
        map_instance = orm.MapInstance(
            schema=schema,
            map_definition=map_definition,
            retrieval_date=datetime.now(),
            source_path='data/%s' % file_name,
        )
        # BUG FIX: the original never added map_instance to the session,
        # so it was silently dropped at commit.
        self.session.add(map_instance)
        # Schema Fields
        logger.info("Saving schema fields")
        for field, field_meta in self.config.get('schema', {}).items():
            self.session.add(orm.SchemaField(
                is_base_field=False,
                schema=schema,
                field_name=field_meta.get('name'),
                field_type=field_meta.get('type'),
                field_description=field_meta.get('description'),
            ))
        # Commit to database
        self.session.commit()
        return {
            'map_instance': map_instance,
            'map_schema': schema,
        }
# Run ETL process: Extract -> Transform -> Load, driven by a YAML job file.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # BUG FIX: the original defaulted to None and passed it straight to
    # open(), crashing with a TypeError; required=True gives a clean
    # argparse usage error instead.
    parser.add_argument("-i", "--input", dest="input_file", required=True,
                        help="Path to a job file to process", metavar="input")
    results = parser.parse_args()
    # Set up logging from the project's logging config file.
    logging.config.fileConfig("logging.conf")
    logger = logging.getLogger("Transformap")
    job_config = results.input_file
    # Fetch and save file
    EX = Extractor(job_config)
    extractor_response = EX.run()
    # Transform data
    TR = Transformer(job_config)
    transformer_response = TR.run(extractor_response)
    # Load data into database
    LD = Loader(job_config)
    LD.run(transformer_response)