convert_data.py
#!/usr/bin/env python3
"""
Convert Afrobarometer .sav file to efficient formats for GitHub upload.
This script tries multiple formats and picks the most efficient one.
"""
import pandas as pd
import pyreadstat
import json
import sqlite3
from pathlib import Path
import sys
import time


def convert_sav_to_efficient_format():
    """Convert .sav file to the most efficient format possible."""
    # File paths
    sav_file = Path("data/raw_data/R9.Merge_39ctry.20Nov23.final_.release_Updated.4Jun25-3.sav")

    print("🚀 Afrobarometer Data Converter")
    print("=" * 50)

    # Check if .sav file exists
    if not sav_file.exists():
        print(f"❌ Error: {sav_file} not found!")
        print("💡 Please download the data file first:")
        print("   python download_data.py")
        return False

    try:
        # Read the .sav file
        print("📖 Reading .sav file...")
        start_time = time.time()
        df, meta = pyreadstat.read_sav(str(sav_file))
        load_time = time.time() - start_time
        print("✅ Data loaded successfully!")
        print(f"   Shape: {df.shape}")
        print(f"   Columns: {len(df.columns)}")
        print(f"   Load time: {load_time:.2f} seconds")

        # Clean data for better compression
        print("\n🧹 Cleaning data for optimal compression...")
        df_clean = df.copy()

        # Convert object columns to clean strings
        for col in df_clean.columns:
            if df_clean[col].dtype == 'object':
                df_clean[col] = df_clean[col].astype(str)
                # A dict-based replace sidesteps the pandas pitfall where
                # replace('nan', None) treats None as a fill-method argument
                # rather than a replacement value.
                df_clean[col] = df_clean[col].replace({'nan': None, 'None': None, '': None})
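
        # Stringifying object columns matters downstream: mixed-type object
        # columns can fail pyarrow/Parquet serialization, and normalizing the
        # missing-value markers keeps NaN handling consistent across Parquet,
        # CSV, and SQLite.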

        # Test different formats
        formats_to_test = []

        # 1. Parquet with different compression
        print("\n📊 Testing Parquet formats...")
        parquet_formats = [
            ('parquet_snappy', 'snappy'),
            ('parquet_gzip', 'gzip'),
            ('parquet_brotli', 'brotli'),
            ('parquet_lz4', 'lz4'),
        ]
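
        # General pyarrow tradeoff: snappy and lz4 favor speed, while gzip
        # and brotli trade write time for smaller files; trying all four lets
        # the size comparison below decide empirically.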
        for format_name, compression in parquet_formats:
            try:
                output_file = f"data/raw_data/afrobarometer_data.{format_name}.parquet"
                start_time = time.time()
                df_clean.to_parquet(output_file, index=False, compression=compression, engine='pyarrow')
                save_time = time.time() - start_time
                file_size = Path(output_file).stat().st_size
                formats_to_test.append({
                    'format': format_name,
                    'file': output_file,
                    'size_mb': file_size / (1024 * 1024),
                    'save_time': save_time,
                    'compression_ratio': sav_file.stat().st_size / file_size,
                    'type': 'parquet',
                })
                print(f"   ✅ {format_name}: {file_size / (1024 * 1024):.1f} MB ({save_time:.2f}s)")
            except Exception as e:
                print(f"   ❌ {format_name}: Failed - {e}")

        # 2. CSV with compression
        print("\n📊 Testing CSV formats...")
        csv_formats = [
            ('csv_gzip', 'gzip'),
            ('csv_bz2', 'bz2'),
            ('csv_xz', 'xz'),
        ]
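
        # The output name ends in a bare ".csv", so pandas cannot infer the
        # codec from the extension; passing compression= explicitly keeps the
        # write in sync with the reads in test_optimized_file() below.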
        for format_name, compression in csv_formats:
            try:
                output_file = f"data/raw_data/afrobarometer_data.{format_name}.csv"
                start_time = time.time()
                df_clean.to_csv(output_file, index=False, compression=compression)
                save_time = time.time() - start_time
                file_size = Path(output_file).stat().st_size
                formats_to_test.append({
                    'format': format_name,
                    'file': output_file,
                    'size_mb': file_size / (1024 * 1024),
                    'save_time': save_time,
                    'compression_ratio': sav_file.stat().st_size / file_size,
                    'type': 'csv',
                })
                print(f"   ✅ {format_name}: {file_size / (1024 * 1024):.1f} MB ({save_time:.2f}s)")
            except Exception as e:
                print(f"   ❌ {format_name}: Failed - {e}")

        # 3. SQLite database
        print("\n📊 Testing SQLite format...")
        try:
            output_file = "data/raw_data/afrobarometer_data.sqlite"
            start_time = time.time()
            # Create SQLite database
            conn = sqlite3.connect(output_file)
            df_clean.to_sql('afrobarometer_data', conn, if_exists='replace', index=False)
            conn.close()
            save_time = time.time() - start_time
            file_size = Path(output_file).stat().st_size
            formats_to_test.append({
                'format': 'sqlite',
                'file': output_file,
                'size_mb': file_size / (1024 * 1024),
                'save_time': save_time,
                'compression_ratio': sav_file.stat().st_size / file_size,
                'type': 'sqlite',
            })
            print(f"   ✅ sqlite: {file_size / (1024 * 1024):.1f} MB ({save_time:.2f}s)")
        except Exception as e:
            print(f"   ❌ sqlite: Failed - {e}")

        # Find the best format
        if formats_to_test:
            # Pick the highest compression ratio (original size / output size)
            best_format = max(formats_to_test, key=lambda x: x['compression_ratio'])

            print("\n🏆 BEST FORMAT SELECTED:")
            print(f"   Format: {best_format['format']}")
            print(f"   File: {best_format['file']}")
            print(f"   Size: {best_format['size_mb']:.1f} MB")
            print(f"   Compression ratio: {best_format['compression_ratio']:.1f}x")
            print(f"   Size reduction: {((sav_file.stat().st_size - best_format['size_mb'] * 1024 * 1024) / sav_file.stat().st_size * 100):.1f}%")

            # Create the final optimized file with the correct extension
            if best_format['type'] == 'parquet':
                final_file = "data/raw_data/afrobarometer_data.parquet"
            elif best_format['type'] == 'csv':
                final_file = "data/raw_data/afrobarometer_data.csv"
            elif best_format['type'] == 'sqlite':
                final_file = "data/raw_data/afrobarometer_data.sqlite"
            else:
                final_file = "data/raw_data/afrobarometer_data.parquet"

            if best_format['file'] != final_file:
                # Path.replace overwrites an existing target on every
                # platform, where Path.rename would raise on Windows.
                Path(best_format['file']).replace(final_file)
                print(f"   ✅ Renamed to: {final_file}")

            # Save metadata
            metadata_file = "data/raw_data/afrobarometer_metadata.json"
            metadata_dict = {
                # column_names_to_labels maps variable names to their labels;
                # the bare column_labels attribute is only a list of labels.
                'var_labels': meta.column_names_to_labels if hasattr(meta, 'column_names_to_labels') else {},
                'value_labels': meta.variable_value_labels if hasattr(meta, 'variable_value_labels') else {},
                'file_encoding': meta.file_encoding if hasattr(meta, 'file_encoding') else 'unknown',
                'file_format_version': meta.file_format_version if hasattr(meta, 'file_format_version') else 'unknown',
                'creation_time': meta.creation_time if hasattr(meta, 'creation_time') else 'unknown',
                'modification_time': meta.modification_time if hasattr(meta, 'modification_time') else 'unknown',
                'variable_count': len(df.columns),
                'case_count': len(df),
                'data_format': best_format['format'],
                'compression_ratio': best_format['compression_ratio'],
            }
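            # default=str below makes the pyreadstat datetime fields
            # (creation_time, modification_time) JSON-serializable.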
            with open(metadata_file, 'w', encoding='utf-8') as f:
                json.dump(metadata_dict, f, indent=2, ensure_ascii=False, default=str)
            print(f"   ✅ Metadata saved to: {metadata_file}")

            # Clean up other test files
            print("\n🧹 Cleaning up test files...")
            for format_info in formats_to_test:
                if format_info['file'] != final_file and Path(format_info['file']).exists():
                    Path(format_info['file']).unlink()
                    print(f"   🗑️ Removed: {format_info['file']}")

            print("\n🎉 Conversion completed successfully!")
            print(f"✅ Original .sav: {sav_file.stat().st_size / (1024 * 1024):.1f} MB")
            print(f"✅ Optimized file: {best_format['size_mb']:.1f} MB")
            print(f"✅ Size reduction: {((sav_file.stat().st_size - best_format['size_mb'] * 1024 * 1024) / sav_file.stat().st_size * 100):.1f}%")
            print(f"✅ Data preserved: {df.shape[0]:,} rows × {df.shape[1]} columns")
            return True
        else:
            print("\n❌ No formats could be created successfully!")
            return False

    except Exception as e:
        print(f"❌ Error during conversion: {e}")
        return False


def test_optimized_file():
    """Test that the optimized file can be loaded correctly."""
    metadata_file = Path("data/raw_data/afrobarometer_metadata.json")

    print("\n🧪 Testing optimized file...")
    try:
        # Load metadata to determine file type
        with open(metadata_file, 'r', encoding='utf-8') as f:
            metadata = json.load(f)
        data_format = metadata.get('data_format', 'unknown')

        # Determine the file path based on format
        if 'parquet' in data_format:
            data_file = Path("data/raw_data/afrobarometer_data.parquet")
        elif 'csv' in data_format:
            data_file = Path("data/raw_data/afrobarometer_data.csv")
        elif 'sqlite' in data_format:
            data_file = Path("data/raw_data/afrobarometer_data.sqlite")
        else:
            data_file = Path("data/raw_data/afrobarometer_data.parquet")

        # Test loading based on format
        start_time = time.time()
        if 'parquet' in data_format:
            df = pd.read_parquet(data_file)
        elif 'csv' in data_format:
            # Determine compression from the format name recorded in metadata
            if 'gzip' in data_format:
                df = pd.read_csv(data_file, compression='gzip')
            elif 'bz2' in data_format:
                df = pd.read_csv(data_file, compression='bz2')
            elif 'xz' in data_format:
                df = pd.read_csv(data_file, compression='xz')
            else:
                df = pd.read_csv(data_file)
        elif 'sqlite' in data_format:
            conn = sqlite3.connect(data_file)
            df = pd.read_sql_query("SELECT * FROM afrobarometer_data", conn)
            conn.close()
        else:
            df = pd.read_parquet(data_file)
        load_time = time.time() - start_time

        print("✅ Optimized file loading successful!")
        print(f"   Shape: {df.shape}")
        print(f"   Load time: {load_time:.2f} seconds")
        print(f"   Variable labels: {len(metadata.get('var_labels', {}))}")
        print(f"   Value labels: {len(metadata.get('value_labels', {}))}")
        print(f"   Data format: {data_format}")
        return True

    except Exception as e:
        print(f"❌ Error testing optimized file: {e}")
        return False


if __name__ == "__main__":
    print("🚀 Afrobarometer Data Converter")
    print("=" * 50)

    # Convert the file
    success = convert_sav_to_efficient_format()

    if success:
        # Test the conversion
        test_success = test_optimized_file()
        if test_success:
            print("\n🎉 All done! The data has been optimized for GitHub upload.")
            print("💡 You can now exclude the .sav file from Git and use the optimized file.")
        else:
            print("\n⚠️ Conversion completed but testing failed. Please check the files.")
    else:
        print("\n❌ Conversion failed. Please check the error messages above.")
        sys.exit(1)