From 263d123a35532690dccbcb2e0cb4514b38d4a41a Mon Sep 17 00:00:00 2001
From: jasperschroeder
Date: Sun, 16 Nov 2025 16:49:51 +0100
Subject: [PATCH 1/4] Refactor GitHub Actions workflow to use micromamba for environment setup and streamline unit test execution

---
 .github/workflows/unittests.yml | 27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)

diff --git a/.github/workflows/unittests.yml b/.github/workflows/unittests.yml
index df1fb46..8315533 100644
--- a/.github/workflows/unittests.yml
+++ b/.github/workflows/unittests.yml
@@ -18,23 +18,18 @@ jobs:
     runs-on: ubuntu-latest
 
     steps:
-      # Check out repository
       - name: Checkout repository
        uses: actions/checkout@v3
 
-      # Set up Python
-      - name: Set up Python 3.10
-        uses: actions/setup-python@v4
+      - name: Set up micromamba environment
+        uses: mamba-org/setup-micromamba@v2
         with:
-          python-version: '3.10'
-
-      # Install dependencies
-      - name: Install dependencies
-        run: |
-          # $CONDA is an environment variable pointing to the root of the miniconda directory
-          $CONDA/bin/conda env update --file tools/code/rdl-tools.yml --name base
-
-      - name: Test with pytest
-        run: |
-          conda install pytest --solver=classic
-          $CONDA/bin/pytest
+          environment-file: tools/code/rdl-tools.yml
+          cache-environment: true
+          create-args: >-
+            python=3.10
+          init-shell: bash
+
+      - name: Run unit tests
+        run: |
+          pytest

From 2302945ee2d6667ec2cfd8d01f309b29fe360cf8 Mon Sep 17 00:00:00 2001
From: jasperschroeder
Date: Sun, 16 Nov 2025 16:50:19 +0100
Subject: [PATCH 2/4] Refactor GitHub Actions workflow to enhance unit test execution with micromamba

---
 .github/workflows/unittests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/unittests.yml b/.github/workflows/unittests.yml
index 8315533..6b67a6d 100644
--- a/.github/workflows/unittests.yml
+++ b/.github/workflows/unittests.yml
@@ -32,4 +32,4 @@ jobs:
 
       - name: Run unit tests
         run: |
-          pytest
+          micromamba run -n rdl-tools pytest

From 6510a2f357d67ce4ae622eb8e58424d257065729 Mon Sep 17 00:00:00 2001
From: jasperschroeder
Date: Sun, 16 Nov 2025 17:01:58 +0100
Subject: [PATCH 3/4] Add pytest to dependencies for improved testing support

---
 tools/code/rdl-tools.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tools/code/rdl-tools.yml b/tools/code/rdl-tools.yml
index 27a4760..5528294 100644
--- a/tools/code/rdl-tools.yml
+++ b/tools/code/rdl-tools.yml
@@ -47,6 +47,9 @@ dependencies:
   # File formats and I/O
   - openpyxl
   - python-dotenv
+
+  # Testing
+  - pytest
 
   # Progress bars and utilities
   - tqdm

From 2880cefac34e1e7b7de07ad0641436de05c313a7 Mon Sep 17 00:00:00 2001
From: jasperschroeder
Date: Sun, 16 Nov 2025 17:58:41 +0100
Subject: [PATCH 4/4] Remove unused tkinter imports in gui_f3_utils.py to clean up the codebase.
--- tools/code/common.py | 4 +- tools/code/custom_hazard_analysis.py | 243 ++++++------- tools/code/damageFunctions.py | 24 +- tools/code/gui_bivariate_utils.py | 509 ++++++++++++++------------- tools/code/gui_f3_utils.py | 2 - 5 files changed, 387 insertions(+), 395 deletions(-) diff --git a/tools/code/common.py b/tools/code/common.py index 52d51c5..30b4375 100644 --- a/tools/code/common.py +++ b/tools/code/common.py @@ -4,9 +4,9 @@ from dotenv import dotenv_values, find_dotenv config = dotenv_values(find_dotenv()) -DATA_DIR = config["DATA_DIR"] +DATA_DIR = config["DATA_DIR"] OUTPUT_DIR = config["OUTPUT_DIR"] -CACHE_DIR = config["CACHE_DIR"] +CACHE_DIR = config["CACHE_DIR"] # Ensure output and cache dirs exist os.makedirs(OUTPUT_DIR, exist_ok=True) diff --git a/tools/code/custom_hazard_analysis.py b/tools/code/custom_hazard_analysis.py index 95827f1..5d65020 100644 --- a/tools/code/custom_hazard_analysis.py +++ b/tools/code/custom_hazard_analysis.py @@ -1,27 +1,20 @@ import os import gc -import warnings import numpy as np import pandas as pd import geopandas as gpd -import folium -from branca.colormap import LinearColormap import rasterio from rasterio.warp import Resampling, calculate_default_transform, reproject -import rioxarray as rxr -from rasterstats import gen_zonal_stats, zonal_stats -import concurrent.futures +from rasterstats import zonal_stats from functools import partial import multiprocess as mp -from pathlib import Path import tempfile # Importing internal libraries import common import input_utils from runAnalysis import ( - calc_EAEI, result_df_reorder_columns, merge_dfs, - chunks, zonal_stats_parallel + calc_EAEI, result_df_reorder_columns ) DATA_DIR = common.DATA_DIR @@ -30,6 +23,7 @@ # Cache for reprojected rasters to avoid redundant calculations RASTER_CACHE = {} + def run_analysis_with_custom_hazard( country, haz_type, haz_cat, period, scenario, return_periods, min_haz_threshold, @@ -75,7 +69,7 @@ def run_analysis_with_custom_hazard( # Create a custom hazard folder if needed custom_haz_folder = os.path.join(DATA_DIR, "HZD", "CUSTOM", haz_cat) os.makedirs(custom_haz_folder, exist_ok=True) - + # Define folder for exposure data exp_folder = f"{DATA_DIR}/EXP" @@ -119,7 +113,7 @@ def run_analysis_with_custom_hazard( # IMPROVEMENT #1: Load and preprocess exposure data once print(f"Processing exposure data for {exp_cat}") exp_ras, _ = process_exposure_data(country, haz_type, exp_cat, exp_nam, exp_year, exp_folder) - + # Load exposure as a numpy array with proper masking to avoid I/O operations later with rasterio.open(exp_ras) as src: exp_meta = src.meta @@ -127,14 +121,14 @@ def run_analysis_with_custom_hazard( exp_crs = src.crs exp_data_array = src.read(1) exp_nodata = src.nodata - + # Properly mask nodata values if exp_nodata is not None: exp_data_array = np.where(exp_data_array == exp_nodata, np.nan, exp_data_array) - + # Ensure negative values are set to 0 exp_data_array = np.where(exp_data_array < 0, 0, exp_data_array) - + # Store the exposure metadata for reuse exp_metadata = { 'transform': exp_transform, @@ -148,16 +142,16 @@ def run_analysis_with_custom_hazard( print("Creating memory-mapped exposure data for shared access...") with tempfile.NamedTemporaryFile(suffix='.npy', delete=False) as tmp: exp_memmap_path = tmp.name - + # Save exposure array to memmap for multiprocessing exp_memmap = np.memmap(exp_memmap_path, dtype='float32', mode='w+', shape=exp_data_array.shape) exp_memmap[:] = exp_data_array[:] exp_memmap.flush() - + # Calculate total exposure per admin 
area once print(f"Calculating total {exp_cat} exposure using {zonal_stats_type}...") zones_geojson = [feature['geometry'] for feature in adm_data.__geo_interface__['features']] - + # IMPROVEMENT #7: Optimize zonal_stats parameters zonal_stats_params = { 'stats': zonal_stats_type, @@ -206,29 +200,26 @@ def run_analysis_with_custom_hazard( 'prob_RPs_UB':prob_RPs_UB, 'prob_RPs_Mean':prob_RPs_Mean}) prob_RPs_df.to_csv(os.path.join(OUTPUT_DIR, f"{country}_{haz_cat}_prob_RPs.csv"), index=False) - + # IMPROVEMENT #1: Preprocess all hazard rasters ahead of time - preprocessed_hazards = {} - print("Pre-processing hazard rasters to match exposure grid...") - valid_rp_files = [] for rp in return_periods: if rp in hazard_files and os.path.exists(hazard_files[rp]): valid_rp_files.append((rp, hazard_files[rp])) else: print(f"Warning: No valid hazard file found for RP {rp}") - + if not valid_rp_files: raise ValueError("No valid hazard files found for any return period") - + # Set up parameters for multiprocessing n_valid_RPs_gt_1 = len(valid_rp_files) > 1 cores = min(len(valid_rp_files), mp.cpu_count()) if n_cores is None else n_cores print(f"Using {cores} cores for parallel processing") - + # Process all return periods in parallel with optimized approach print(f"Processing {len(valid_rp_files)} return periods...") - + # Prepare parameters dictionary for the workers params = { "analysis_type": analysis_type, @@ -246,60 +237,60 @@ def run_analysis_with_custom_hazard( "exp_metadata": exp_metadata, "zones_geojson": zones_geojson } - + # Use multiprocessing to process each return period if len(valid_rp_files) > 1: with mp.Pool(cores) as pool: func = partial(process_return_period_optimized, **params) result_dfs = pool.map(func, valid_rp_files) - + # Filter out None results result_dfs = [df for df in result_dfs if df is not None] else: # Process single return period directly result_dfs = [process_return_period_optimized(valid_rp_files[0], **params)] - + # Clean up memory-mapped file safe_delete_file(exp_memmap_path) - + # Combine results if result_dfs and all(df is not None for df in result_dfs): # Concatenate the return period results to_concat = [result_df] + result_dfs result_df = pd.concat(to_concat, axis=1) result_df = result_df.replace(np.nan, 0) - + # Reorder columns and calculate expected annual impact/exposure result_df = result_df_reorder_columns( result_df, return_periods, analysis_type, exp_cat, adm_level, all_adm_codes, all_adm_names ) - + # Calculate EAI/EAE if multiple return periods if n_valid_RPs_gt_1: result_df = calc_EAEI( result_df, return_periods, prob_RPs_df, 'LB', analysis_type, exp_cat, adm_level, num_bins, n_valid_RPs_gt_1 ) - + result_df = calc_EAEI( result_df, return_periods, prob_RPs_df, 'UB', analysis_type, exp_cat, adm_level, num_bins, n_valid_RPs_gt_1 ) - + result_df = calc_EAEI( result_df, return_periods, prob_RPs_df, 'Mean', analysis_type, exp_cat, adm_level, num_bins, n_valid_RPs_gt_1 ) - + # Round values for better presentation result_df = result_df.round(3) - + # Simplify column names if needed replace_string = '_Mean' if n_valid_RPs_gt_1 else 'RP1' result_df_colnames = [s.replace(replace_string, '') for s in result_df.columns] result_df.columns = result_df_colnames - + print(f"Analysis completed successfully for {len(result_dfs)} return periods") return result_df else: @@ -317,40 +308,41 @@ def run_analysis_with_custom_hazard( # Run garbage collection gc.collect() + def preprocess_hazard_raster(hazard_file, exp_metadata, memmap_dir=None): """ Preprocess hazard raster to 
match exposure grid - returns a path to the reprojected raster - + IMPROVEMENT #1: This function reprojests hazard data once and caches it """ # Check if we already processed this file global RASTER_CACHE if hazard_file in RASTER_CACHE: return RASTER_CACHE[hazard_file] - + # Create a temporary directory if not provided if memmap_dir is None: memmap_dir = tempfile.mkdtemp() - + output_path = os.path.join(memmap_dir, f"reprojected_{os.path.basename(hazard_file)}") - + # If already processed, return the path if os.path.exists(output_path): RASTER_CACHE[hazard_file] = output_path return output_path - + try: # Read source hazard raster with rasterio.open(hazard_file) as src: # Calculate reprojection parameters - transform, width, height = calculate_default_transform( + _, _, _ = calculate_default_transform( src.crs, exp_metadata['crs'], src.width, src.height, *src.bounds, dst_width=exp_metadata['shape'][1], dst_height=exp_metadata['shape'][0] ) - + # Update metadata for output output_meta = src.meta.copy() output_meta.update({ @@ -360,7 +352,7 @@ def preprocess_hazard_raster(hazard_file, exp_metadata, memmap_dir=None): 'height': exp_metadata['shape'][0], 'nodata': src.nodata }) - + # Create output raster with rasterio.open(output_path, 'w', **output_meta) as dst: # Reproject and save @@ -373,26 +365,27 @@ def preprocess_hazard_raster(hazard_file, exp_metadata, memmap_dir=None): dst_crs=exp_metadata['crs'], resampling=Resampling.bilinear ) - + # Cache the path RASTER_CACHE[hazard_file] = output_path return output_path - + except Exception as e: print(f"Error preprocessing hazard raster: {str(e)}") raise + def process_return_period_optimized(rp_file_tuple, **kwargs): """ Process a single return period with optimized approach - + IMPROVEMENT #5: Reduced I/O operations with better memory management """ rp, hazard_file = rp_file_tuple - + try: print(f"Processing return period {rp}...") - + # Unpack required parameters analysis_type = kwargs.get('analysis_type') exp_cat = kwargs.get('exp_cat') @@ -408,52 +401,48 @@ def process_return_period_optimized(rp_file_tuple, **kwargs): exp_memmap_path = kwargs.get('exp_memmap_path') exp_metadata = kwargs.get('exp_metadata') zones_geojson = kwargs.get('zones_geojson') - + # Load exposure data from memmap exp_shape = exp_metadata['shape'] exp_array = np.memmap(exp_memmap_path, dtype='float32', mode='r', shape=exp_shape) - + # Create a tempdir for this process process_temp_dir = tempfile.mkdtemp() - + # IMPROVEMENT #1: Preprocess hazard raster to match exposure grid reprojected_hazard = preprocess_hazard_raster(hazard_file, exp_metadata, process_temp_dir) - + # Load hazard data once with rasterio.open(reprojected_hazard) as src: haz_array = src.read(1) - transform = src.transform + _ = src.transform haz_nodata = src.nodata - + # Apply threshold to hazard data if haz_nodata is not None: haz_array = np.where(haz_array == haz_nodata, np.nan, haz_array) - + # Apply minimum threshold haz_array = np.where(haz_array <= min_haz_threshold, np.nan, haz_array) - + # Create result dataframe result_df = pd.DataFrame(index=adm_data.index) - + # Identify affected exposure (where hazard > 0 and not nan) valid_mask = ~np.isnan(haz_array) affected_exp = np.copy(exp_array) affected_exp[~valid_mask] = np.nan - + # Save affected exposure if requested if save_check_raster: affected_exp_path = os.path.join(process_temp_dir, f"affected_exp_rp{rp}.tif") - with rasterio.open(affected_exp_path, 'w', - driver='GTiff', - height=exp_shape[0], - width=exp_shape[1], - count=1, - 
dtype='float32', - crs=exp_metadata['crs'], - transform=exp_metadata['transform'], - nodata=np.nan) as dst: + with rasterio.open( + affected_exp_path, 'w', driver='GTiff', height=exp_shape[0], width=exp_shape[1], + count=1, dtype='float32', crs=exp_metadata['crs'], + transform=exp_metadata['transform'], nodata=np.nan + ) as dst: dst.write(affected_exp, 1) - + # Process differently based on analysis approach if analysis_type == "Classes": # For classes approach @@ -466,29 +455,25 @@ def process_return_period_optimized(rp_file_tuple, **kwargs): else: bin_mask = (haz_array >= threshold) & valid_mask bin_idx[bin_mask] = i - + # For each class, calculate exposure for bin_x in reversed(range(num_bins)): # Create a mask for this class class_mask = (bin_idx == bin_x) - + # Apply the mask to the affected exposure class_exp = np.copy(affected_exp) class_exp[~class_mask] = np.nan - + # Calculate zonal statistics for this class class_path = os.path.join(process_temp_dir, f"class_{bin_x}.tif") - with rasterio.open(class_path, 'w', - driver='GTiff', - height=exp_shape[0], - width=exp_shape[1], - count=1, - dtype='float32', - crs=exp_metadata['crs'], - transform=exp_metadata['transform'], - nodata=np.nan) as dst: + with rasterio.open( + class_path, 'w', driver='GTiff', height=exp_shape[0], width=exp_shape[1], + count=1, dtype='float32', crs=exp_metadata['crs'], + transform=exp_metadata['transform'], nodata=np.nan + ) as dst: dst.write(class_exp, 1) - + # IMPROVEMENT #7: Optimized zonal_stats zonal_result = zonal_stats( zones_geojson, @@ -501,34 +486,30 @@ def process_return_period_optimized(rp_file_tuple, **kwargs): boundary_only=False, boundless=True ) - + # Add the results to the dataframe result_df[f"RP{rp}_{exp_cat}_C{bin_x}_exp"] = [x.get(zonal_stats_type, 0) for x in zonal_result] - + # Delete temporary class file os.remove(class_path) - + # Calculate cumulative exposure for classes for bin_x in reversed(range(num_bins-1)): result_df[f"RP{rp}_{exp_cat}_C{bin_x}_exp"] = ( result_df[f"RP{rp}_{exp_cat}_C{bin_x}_exp"] + result_df[f"RP{rp}_{exp_cat}_C{bin_x+1}_exp"] ) - + else: # Function approach # Calculate affected exposure per admin area affected_path = os.path.join(process_temp_dir, f"affected_exp_rp{rp}_total.tif") - with rasterio.open(affected_path, 'w', - driver='GTiff', - height=exp_shape[0], - width=exp_shape[1], - count=1, - dtype='float32', - crs=exp_metadata['crs'], - transform=exp_metadata['transform'], - nodata=np.nan) as dst: + with rasterio.open( + affected_path, 'w', driver='GTiff', height=exp_shape[0], width=exp_shape[1], + count=1, dtype='float32', crs=exp_metadata['crs'], transform=exp_metadata['transform'], + nodata=np.nan + ) as dst: dst.write(affected_exp, 1) - + affected_stats = zonal_stats( zones_geojson, affected_path, @@ -537,54 +518,46 @@ def process_return_period_optimized(rp_file_tuple, **kwargs): nodata=np.nan, boundless=True ) - + result_df[f"RP{rp}_{exp_cat}_exp"] = [x.get(zonal_stats_type, 0) for x in affected_stats] - + # IMPROVEMENT #2: Vectorized damage function application try: # Create a copy of hazard array for damage calculation damage_values = np.zeros_like(haz_array) - + # Apply the damage function only to valid pixels # This is a fully vectorized operation that applies custom_damage_func to all elements at once if valid_mask.any(): # Apply custom damage function to get damage factors # wb_region is passed as a scalar to all elements damage_values[valid_mask] = custom_damage_func(haz_array[valid_mask], wb_region) - + # Ensure damage values are between 0 and 1 
damage_values = np.clip(damage_values, 0, 1) - + # Calculate impact (exposure * damage factor) impact_values = affected_exp * damage_values - + # Save impact raster if requested if save_check_raster: impact_path = os.path.join(OUTPUT_DIR, f"{country}_CUSTOM_{rp}_{exp_cat}_impact.tif") - with rasterio.open(impact_path, 'w', - driver='GTiff', - height=exp_shape[0], - width=exp_shape[1], - count=1, - dtype='float32', - crs=exp_metadata['crs'], - transform=exp_metadata['transform'], - nodata=np.nan) as dst: + with rasterio.open( + impact_path, 'w', driver='GTiff', height=exp_shape[0], width=exp_shape[1], + count=1, dtype='float32', crs=exp_metadata['crs'], transform=exp_metadata['transform'], + nodata=np.nan + ) as dst: dst.write(impact_values, 1) - + # Write impact to temporary file for zonal stats impact_path = os.path.join(process_temp_dir, f"impact_rp{rp}.tif") - with rasterio.open(impact_path, 'w', - driver='GTiff', - height=exp_shape[0], - width=exp_shape[1], - count=1, - dtype='float32', - crs=exp_metadata['crs'], - transform=exp_metadata['transform'], - nodata=np.nan) as dst: + with rasterio.open( + impact_path, 'w', driver='GTiff', height=exp_shape[0], width=exp_shape[1], + count=1, dtype='float32', crs=exp_metadata['crs'], transform=exp_metadata['transform'], + nodata=np.nan + ) as dst: dst.write(impact_values, 1) - + # Calculate zonal statistics for impact impact_stats = zonal_stats( zones_geojson, @@ -594,17 +567,17 @@ def process_return_period_optimized(rp_file_tuple, **kwargs): nodata=np.nan, boundless=True ) - + result_df[f"RP{rp}_{exp_cat}_imp"] = [x.get(zonal_stats_type, 0) for x in impact_stats] - + # Delete temporary impact file os.remove(impact_path) - + except Exception as e: print(f"Error applying damage function for RP {rp}: {str(e)}") # Add empty columns as fallback result_df[f"RP{rp}_{exp_cat}_imp"] = [0] * len(adm_data) - + # Clean up tempdir after processing try: for file in os.listdir(process_temp_dir): @@ -615,15 +588,16 @@ def process_return_period_optimized(rp_file_tuple, **kwargs): os.rmdir(process_temp_dir) except: pass - + return result_df - + except Exception as e: print(f"Error processing return period {rp}: {str(e)}") import traceback traceback.print_exc() return None + def process_exposure_data(country, haz_type, exp_cat, exp_nam, exp_year, exp_folder): """Process exposure data - adapted from runAnalysis.py""" try: @@ -632,7 +606,7 @@ def process_exposure_data(country, haz_type, exp_cat, exp_nam, exp_year, exp_fol exp_ras = f"{exp_folder}/{exp_nam}.tif" if not os.path.exists(exp_ras): raise FileNotFoundError(f"Custom exposure data not found: {exp_ras}") - + else: # Use default exposure data based on exp_cat if exp_cat == 'POP': @@ -659,7 +633,7 @@ def process_exposure_data(country, haz_type, exp_cat, exp_nam, exp_year, exp_fol raise FileNotFoundError(f"Failed to fetch agricultural data for {country}") else: raise ValueError(f"Missing or unknown exposure category: {exp_cat}") - + if not os.path.exists(exp_ras): raise FileNotFoundError(f"Exposure raster not found after processing: {exp_ras}") @@ -672,21 +646,22 @@ def process_exposure_data(country, haz_type, exp_cat, exp_nam, exp_year, exp_fol print(f"Error in process_exposure_data: {str(e)}") raise + # Add this function to custom_hazard_analysis.py def safe_delete_file(file_path): """ Safely delete a file, handling locked files gracefully. 
- + Args: file_path (str): Path to the file to delete """ import os import gc import time - + # Run garbage collection to release any lingering references gc.collect() - + # Try to delete the file, with a few retries max_attempts = 3 for attempt in range(max_attempts): @@ -702,4 +677,4 @@ def safe_delete_file(file_path): else: # On last attempt, just warn instead of failing print(f"Warning: Could not delete temporary file {file_path}: {str(e)}") - print("This is not critical, but you may want to clean up temporary files manually later.") \ No newline at end of file + print("This is not critical, but you may want to clean up temporary files manually later.") diff --git a/tools/code/damageFunctions.py b/tools/code/damageFunctions.py index b892895..ff8d69b 100644 --- a/tools/code/damageFunctions.py +++ b/tools/code/damageFunctions.py @@ -4,6 +4,7 @@ # Defining the damage functions + # Floods (river and coastal) over Population mortality def FL_mortality_factor(x: np.array, wb_region: str = None): """A polynomial fit to average population mortality due to nearby flooding. @@ -16,14 +17,14 @@ def FL_mortality_factor(x: np.array, wb_region: str = None): x = x/100 # convert cm to m return np.maximum(0.0, np.minimum(1.0, 0.985 / (1 + np.exp(6.32 - 1.412 * x)))) # Floods - Global -# Floods (river and coastal) over Built-Up areas +# Floods (river and coastal) over Built-Up areas def FL_damage_factor_builtup(x: np.array, wb_region: str): """A polynomial fit to average damage across builtup land cover relative to water depth in meters. The sectors are commercial, industry, transport, infrastructure and residential. Values are capped between 0 and 1 - + References ---------- Huizinga et al., 2017 - Global flood depth-damage functions: Methodology and the database. EU-JRC. @@ -39,14 +40,13 @@ def FL_damage_factor_builtup(x: np.array, wb_region: str): # CRITICAL: Map wb_region to the actual region key first region = wb_to_region.get(wb_region, 'GLOBAL') damage_func = function_mapping.get(region) - + # FIXED: Actually call the function instead of returning the function object result = damage_func(x) return result.astype(np.float32) # Floods (river and coastal) impact function over Agricultural areas - def FL_damage_factor_agri(x: np.array, wb_region: str): """A polynomial fit to average damage across agricultural land cover relative to water depth in meters. Values are capped between 0 and 1. @@ -66,8 +66,8 @@ def FL_damage_factor_agri(x: np.array, wb_region: str): region = wb_to_region.get(wb_region, 'GLOBAL') return function_mapping.get(region) -# Tropical Cyclone - Regional equations +# Tropical Cyclone - Regional equations def TC_damage_factor_builtup(x: np.array, country_iso3: str): """Calculate damage factor for tropical cyclone wind impact on built-up areas based on region-specific vulnerability curves. @@ -77,13 +77,13 @@ def TC_damage_factor_builtup(x: np.array, country_iso3: str): Wind speed in meters per second country_iso3 : str ISO3 country code to determine regional vulnerability curve - + Returns ------- np.array Damage factor between 0 and 1 - - Asigmoidal function is applied in the calibration process, based on the general impact function by Emanuel (2011). + + Asigmoidal function is applied in the calibration process, based on the general impact function by Emanuel (2011). While Vhalf is fitted during the calibration process, the lower threshold Vthresh is kept constant throughout the study. 
References @@ -94,7 +94,7 @@ def TC_damage_factor_builtup(x: np.array, country_iso3: str): # Get region from country code, default to GLOBAL if not found region = tc_region_mapping.get(country_iso3, 'GLOBAL') - + # Regional v_half values (wind speed at which 50% damage occurs) v_half = { 'NA1': 59.6, # Caribbean and Mexico @@ -108,13 +108,13 @@ def TC_damage_factor_builtup(x: np.array, country_iso3: str): 'WP4': 183.7, # North West Pacific 'GLOBAL': 83.7 # Global average } - + # Get Vhalf value for specified region Vhalf = v_half.get(region) # Global threshold value Vthres = 25.7 # m/s, below which no damage occurs - + # Calculate damage factor v = np.maximum(0.0, (x - Vthres))/(Vhalf - Vthres) - return (v**3)/(1 + (v**3)) \ No newline at end of file + return (v**3)/(1 + (v**3)) diff --git a/tools/code/gui_bivariate_utils.py b/tools/code/gui_bivariate_utils.py index 17f6c8f..6f29c38 100644 --- a/tools/code/gui_bivariate_utils.py +++ b/tools/code/gui_bivariate_utils.py @@ -9,7 +9,6 @@ import ipywidgets as widgets from IPython.display import display, clear_output, HTML import tkinter as tk -from tkinter import filedialog import warnings import common @@ -56,6 +55,7 @@ layer_selector_id = f'layer-selector-{id(layer_selector)}' layer_selector.add_class(layer_selector_id) + #TODO: Merge this with run_input_check class from notebook_utils.py once it is implemented as a class def validate_input( id_field_selector, @@ -74,9 +74,8 @@ def validate_input( ): print("Ensure all fields are selected.") return False - - return True + return True # Field selection dropdowns @@ -225,6 +224,7 @@ def validate_input( button_style='danger' ) + # Function to select file def select_file(b): root = tk.Tk() @@ -240,12 +240,13 @@ def select_file(b): update_layer_options(file_path) root.destroy() + # Function to update layer options based on selected file def update_layer_options(file_path): layer_selector.options = [] layer_selector.value = None layer_selector.disabled = True - + with output: clear_output() try: @@ -254,12 +255,12 @@ def update_layer_options(file_path): import fiona layers = fiona.listlayers(file_path) print(f"Available layers: {', '.join(layers)}") - + if layers: layer_selector.options = layers layer_selector.value = layers[0] layer_selector.disabled = False - + # Load the first layer to display preview update_field_selectors(file_path, layers[0]) else: @@ -274,6 +275,7 @@ def update_layer_options(file_path): except Exception as e2: print(f"Also failed to read as shapefile: {str(e2)}") + # Function to update field selector options when layer is selected def update_field_selectors(file_path, layer_name): id_field_selector.options = [] @@ -281,13 +283,13 @@ def update_field_selectors(file_path, layer_name): population_field_selector.options = [] wealth_field_selector.options = [] hazard_field_selector.options = [] - + id_field_selector.disabled = True name_field_selector.disabled = True population_field_selector.disabled = True wealth_field_selector.disabled = True hazard_field_selector.disabled = True - + with output: try: print(f"Loading layer: {layer_name}") @@ -297,28 +299,29 @@ def update_field_selectors(file_path, layer_name): except Exception as e: print(f"Error loading layer: {str(e)}") + # Function to update field options from loaded GeoDataFrame def update_field_options_from_gdf(gdf): # Get all columns columns = list(gdf.columns) columns_without_geometry = [col for col in columns if col.lower() != 'geometry'] - + print(f"Available fields: {', '.join(columns_without_geometry)}") - + # Update 
field selectors id_field_selector.options = columns_without_geometry name_field_selector.options = columns_without_geometry population_field_selector.options = columns_without_geometry wealth_field_selector.options = columns_without_geometry hazard_field_selector.options = columns_without_geometry - + # Try to guess appropriate fields based on common naming patterns id_fields = [f for f in columns_without_geometry if any(id_name in f.lower() for id_name in ['id', 'code', 'fid', 'hasc', 'key'])] name_fields = [f for f in columns_without_geometry if any(name in f.lower() for name in ['name', 'nam', 'title', 'label'])] pop_fields = [f for f in columns_without_geometry if any(pop in f.lower() for pop in ['pop', 'population', 'people'])] wealth_fields = [f for f in columns_without_geometry if any(wealth in f.lower() for wealth in ['rwi', 'wealth', 'income', 'gdp', 'poverty'])] hazard_fields = [f for f in columns_without_geometry if any(hazard in f.lower() for hazard in ['fl', 'flood', 'hazard', 'risk', 'tc', 'storm', 'eai', 'damage'])] - + # Set default values if matches found if id_fields: id_field_selector.value = id_fields[0] @@ -330,23 +333,24 @@ def update_field_options_from_gdf(gdf): wealth_field_selector.value = wealth_fields[0] if hazard_fields: hazard_field_selector.value = hazard_fields[0] - + # Enable selectors id_field_selector.disabled = False name_field_selector.disabled = False population_field_selector.disabled = False wealth_field_selector.disabled = False hazard_field_selector.disabled = False - + # Show map preview update_preview_map(gdf) + # Function to update preview map def update_preview_map(gdf): try: # Create a simple folium map with the boundaries m = folium.Map() - + # Add boundary layer with proper transformation folium.GeoJson( gdf, @@ -357,7 +361,7 @@ def update_preview_map(gdf): 'fillOpacity': 0 } ).add_to(m) - + # Fit map to bounds m.fit_bounds(m.get_bounds()) map_widget.value = m._repr_html_() @@ -365,27 +369,28 @@ def update_preview_map(gdf): with output: print(f"Error updating preview map: {str(e)}") + # Function to calculate population density-weighted RWI def calculate_weighted_rwi(gdf, pop_field, rwi_field): # Create a copy to avoid modifying the original result_gdf = gdf.copy() - + # Check CRS and reproject to a projected CRS if needed if result_gdf.crs is None: print("Warning: CRS is not defined. 
Assuming EPSG:4326 for area calculation.") result_gdf.set_crs(epsg=4326, inplace=True) - + # If using a geographic CRS (like WGS84/EPSG:4326), reproject to a suitable projected CRS if result_gdf.crs.is_geographic: print("Converting to projected CRS for accurate area calculation...") # Get centroid to determine appropriate UTM zone centroid = result_gdf.unary_union.centroid lon, lat = centroid.x, centroid.y - + # Calculate UTM zone utm_zone = int(1 + (lon + 180) // 6) epsg = 32600 + utm_zone + (0 if lat >= 0 else 100) # North vs South hemisphere - + # Reproject for area calculation area_gdf = result_gdf.to_crs(epsg=epsg) # Calculate area in square kilometers @@ -393,33 +398,33 @@ def calculate_weighted_rwi(gdf, pop_field, rwi_field): else: # Already in projected CRS result_gdf['area_km2'] = result_gdf.geometry.area / 10**6 # Convert from m² to km² - + # Calculate population density (people per km²) result_gdf['pop_density'] = result_gdf[pop_field] / result_gdf['area_km2'] - + # Calculate population density-weighted RWI result_gdf['rwi_x_popdens'] = result_gdf[rwi_field] * result_gdf['pop_density'] total_weighted_rwi = result_gdf['rwi_x_popdens'].sum() - + # Calculate the normalized weighted RWI result_gdf['w_RWIxPOP'] = result_gdf['rwi_x_popdens'] / total_weighted_rwi - + # Scale to 0-100 for easier interpretation min_val = result_gdf['w_RWIxPOP'].min() max_val = result_gdf['w_RWIxPOP'].max() result_gdf['w_RWIxPOP_scaled'] = 100 * (result_gdf['w_RWIxPOP'] - min_val) / (max_val - min_val) - + return result_gdf # Function to create quantile classifications # Here's a fixed version of the classify_data function: -# Here's a completely revised approach to fix the quantile mapping problem: +# Here's a completely revised approach to fix the quantile mapping problem: def classify_data(gdf, wealth_field, hazard_field, num_quantiles, wealth_field_for_classification=None): """ Classify data into quantiles for bivariate mapping. - + Parameters: ----------- gdf : GeoDataFrame @@ -432,29 +437,29 @@ def classify_data(gdf, wealth_field, hazard_field, num_quantiles, wealth_field_f Number of quantiles to create wealth_field_for_classification : str, optional Field to use for wealth classification, if different from wealth_field - + Returns: -------- result_gdf : GeoDataFrame Copy of input geodataframe with added classification fields """ - + # Create a copy to avoid modifying the original result_gdf = gdf.copy() - + # Determine which field to use for wealth classification if wealth_field_for_classification is None: wealth_field_for_classification = wealth_field - + # Get wealth values and check for pre-binned data wealth_values = sorted(result_gdf[wealth_field_for_classification].unique()) unique_wealth_values = len(wealth_values) - + print(f"Unique wealth values: {unique_wealth_values}") - + # Flag for whether to use pre-binned direct mapping approach use_direct_mapping = False - + # Check if data appears to be pre-binned (small number of discrete values) if unique_wealth_values <= 10: # Reasonable limit for pre-binned data # Check if values are all integers or integer-like floats @@ -462,14 +467,14 @@ def classify_data(gdf, wealth_field, hazard_field, num_quantiles, wealth_field_f (isinstance(v, float) and v.is_integer()) for v in wealth_values): use_direct_mapping = True print("Detected pre-binned integer-like wealth data. 
Using direct mapping approach.") - + # Process wealth quantiles if use_direct_mapping: # Direct mapping approach for pre-binned data min_val = min(wealth_values) max_val = max(wealth_values) range_val = max_val - min_val - + # Create mapping dictionary from original values to quantiles wealth_mapping = {} for val in wealth_values: @@ -478,43 +483,43 @@ def classify_data(gdf, wealth_field, hazard_field, num_quantiles, wealth_field_f # Map to quantile (0 to num_quantiles-1) using floor to ensure full range quantile = min(int(rel_pos * num_quantiles), num_quantiles - 1) wealth_mapping[val] = quantile - + print(f"Wealth value mapping: {wealth_mapping}") - + # Apply mapping to create wealth quantiles result_gdf['wealth_quantile'] = result_gdf[wealth_field_for_classification].map(wealth_mapping) else: # Standard quantile approach for continuous data try: result_gdf['wealth_quantile'] = pd.qcut( - result_gdf[wealth_field_for_classification], - q=num_quantiles, - labels=False, + result_gdf[wealth_field_for_classification], + q=num_quantiles, + labels=False, duplicates='drop' ) - + # Check if we got the full range of quantiles if result_gdf['wealth_quantile'].max() < (num_quantiles - 1): print(f"Warning: Wealth quantiles only range from 0 to {result_gdf['wealth_quantile'].max()}") print("Using manual quantile calculation...") - + # Try manual quantile calculation as fallback wealth_values = result_gdf[wealth_field_for_classification].values percentiles = np.linspace(0, 100, num_quantiles+1)[1:-1] breaks = np.percentile(wealth_values, percentiles) result_gdf['wealth_quantile'] = np.digitize(wealth_values, breaks) - + # Ensure proper range from 0 to num_quantiles-1 result_gdf['wealth_quantile'] = result_gdf['wealth_quantile'].clip(0, num_quantiles-1) except Exception as e: print(f"Error in wealth quantile calculation: {str(e)}") print("Falling back to linear mapping...") - + # Linear mapping from min to max as emergency fallback vals = result_gdf[wealth_field_for_classification].values min_val, max_val = vals.min(), vals.max() range_val = max_val - min_val - + if range_val > 0: # Create normalized values and map to quantiles norm_vals = (vals - min_val) / range_val @@ -523,39 +528,39 @@ def classify_data(gdf, wealth_field, hazard_field, num_quantiles, wealth_field_f else: # If all values are the same, assign to middle quantile result_gdf['wealth_quantile'] = (num_quantiles - 1) // 2 - + # Process hazard quantiles try: # Calculate hazard quantiles result_gdf['hazard_quantile'] = pd.qcut( - result_gdf[hazard_field], - q=num_quantiles, - labels=False, + result_gdf[hazard_field], + q=num_quantiles, + labels=False, duplicates='drop' ) - + # Check if hazard quantiles are limited if result_gdf['hazard_quantile'].max() < (num_quantiles - 1): print(f"Warning: Hazard quantiles only range from 0 to {result_gdf['hazard_quantile'].max()}") print("Using manual hazard quantile calculation...") - + # Try manual quantile calculation as fallback hazard_values = result_gdf[hazard_field].values percentiles = np.linspace(0, 100, num_quantiles+1)[1:-1] breaks = np.percentile(hazard_values, percentiles) result_gdf['hazard_quantile'] = np.digitize(hazard_values, breaks) - + # Ensure proper range from 0 to num_quantiles-1 result_gdf['hazard_quantile'] = result_gdf['hazard_quantile'].clip(0, num_quantiles-1) except Exception as e: print(f"Error in hazard quantile calculation: {str(e)}") print("Falling back to linear hazard mapping...") - + # Linear mapping from min to max as emergency fallback vals = 
result_gdf[hazard_field].values min_val, max_val = vals.min(), vals.max() range_val = max_val - min_val - + if range_val > 0: # Create normalized values and map to quantiles norm_vals = (vals - min_val) / range_val @@ -564,49 +569,50 @@ def classify_data(gdf, wealth_field, hazard_field, num_quantiles, wealth_field_f else: # If all values are the same, assign to middle quantile result_gdf['hazard_quantile'] = (num_quantiles - 1) // 2 - + # Ensure quantile columns are integers and have no nulls result_gdf['wealth_quantile'] = result_gdf['wealth_quantile'].fillna(0).astype(int) result_gdf['hazard_quantile'] = result_gdf['hazard_quantile'].fillna(0).astype(int) - + # Verify quantile ranges and print diagnostics wealth_range = (result_gdf['wealth_quantile'].min(), result_gdf['wealth_quantile'].max()) hazard_range = (result_gdf['hazard_quantile'].min(), result_gdf['hazard_quantile'].max()) - + print(f"Wealth quantile range: {wealth_range}") print(f"Hazard quantile range: {hazard_range}") - + # Confirm we have the full range of quantiles for the wealth dimension if wealth_range[1] < (num_quantiles - 1): print(f"Warning: Still missing some wealth quantiles. Expected max {num_quantiles-1}, got {wealth_range[1]}") - + # Create combined classification result_gdf['bivariate_class'] = result_gdf['wealth_quantile'] * num_quantiles + result_gdf['hazard_quantile'] - + # Calculate and print the bivariate class range bivariate_range = (result_gdf['bivariate_class'].min(), result_gdf['bivariate_class'].max()) print(f"Bivariate class range: {bivariate_range}") expected_max = (num_quantiles - 1) * num_quantiles + (num_quantiles - 1) if bivariate_range[1] < expected_max: print(f"Warning: Bivariate class does not reach expected maximum of {expected_max}") - + return result_gdf + # Function to generate bivariate color scheme with maximum saturation def create_bivariate_colormap(palette_key, num_quantiles): """ Create a bivariate colormap using the Stevens palette approach. - + This implementation uses predefined Stevens palettes and handles interpolation for larger grid sizes. 
- + Parameters: ----------- palette_key : str Key identifying which Stevens palette to use (e.g., 'blue_red') num_quantiles : int Number of quantiles for each variable (3, 4, or 5) - + Returns: -------- bivariate_colors : numpy.ndarray @@ -614,12 +620,12 @@ def create_bivariate_colormap(palette_key, num_quantiles): colors_list : list Flattened list of colors """ - + # Function to convert hex to RGB float values (0-1) def hex_to_rgb(hex_color): hex_color = hex_color.lstrip('#') return [int(hex_color[i:i+2], 16)/255 for i in (0, 2, 4)] + [1.0] # R,G,B + Alpha - + # Define Stevens 3×3 palettes stevens_palettes = { 'blue_red': { # Blue-Red palette @@ -678,39 +684,39 @@ def hex_to_rgb(hex_color): (2, 2): '#804d36', # High hazard, High poverty } } - + # Create a 4×4 palette based on the 3×3 palette (interpolation) def create_4x4_palette(palette_3x3): palette_4x4 = {} - + # Define corner points from 3×3 (these stay the same) palette_4x4[(0, 0)] = palette_3x3[(0, 0)] # Low-Low palette_4x4[(0, 3)] = palette_3x3[(0, 2)] # Low-High palette_4x4[(3, 0)] = palette_3x3[(2, 0)] # High-Low palette_4x4[(3, 3)] = palette_3x3[(2, 2)] # High-High - + # Convert all colors to RGB floats for interpolation colors_rgb = {pos: hex_to_rgb(color) for pos, color in palette_3x3.items()} - + # Generate the 4×4 palette through bilinear interpolation for i in range(4): for j in range(4): # Skip already defined corners if (i, j) in palette_4x4: continue - + # Map 4×4 position to 3×3 position for interpolation x = i / 3 * 2 # Map 0-3 to 0-2 y = j / 3 * 2 # Map 0-3 to 0-2 - + # Find the four surrounding points in the 3×3 grid x0, y0 = int(x), int(y) x1, y1 = min(x0 + 1, 2), min(y0 + 1, 2) - + # Calculate interpolation weights wx = x - x0 wy = y - y0 - + # Bilinear interpolation of RGB values color = [0, 0, 0, 1.0] for c in range(3): # RGB channels @@ -720,45 +726,45 @@ def create_4x4_palette(palette_3x3): (1-wx)*wy * colors_rgb[(x0, y1)][c] + wx*wy * colors_rgb[(x1, y1)][c] ) - + # Convert to hex and add to palette hex_color = mcolors.rgb2hex(color[:3]) palette_4x4[(i, j)] = hex_color - + return palette_4x4 - + # Create a 5×5 palette (similar approach to 4×4) def create_5x5_palette(palette_3x3): palette_5x5 = {} - + # Define corner points palette_5x5[(0, 0)] = palette_3x3[(0, 0)] # Low-Low palette_5x5[(0, 4)] = palette_3x3[(0, 2)] # Low-High palette_5x5[(4, 0)] = palette_3x3[(2, 0)] # High-Low palette_5x5[(4, 4)] = palette_3x3[(2, 2)] # High-High - + # Convert all colors to RGB floats for interpolation colors_rgb = {pos: hex_to_rgb(color) for pos, color in palette_3x3.items()} - + # Generate the 5×5 palette through bilinear interpolation for i in range(5): for j in range(5): # Skip already defined corners if (i, j) in palette_5x5: continue - + # Map 5×5 position to 3×3 position for interpolation x = i / 4 * 2 # Map 0-4 to 0-2 y = j / 4 * 2 # Map 0-4 to 0-2 - + # Find the four surrounding points in the 3×3 grid x0, y0 = int(x), int(y) x1, y1 = min(x0 + 1, 2), min(y0 + 1, 2) - + # Calculate interpolation weights wx = x - x0 wy = y - y0 - + # Bilinear interpolation of RGB values color = [0, 0, 0, 1.0] for c in range(3): # RGB channels @@ -768,20 +774,20 @@ def create_5x5_palette(palette_3x3): (1-wx)*wy * colors_rgb[(x0, y1)][c] + wx*wy * colors_rgb[(x1, y1)][c] ) - + # Convert to hex and add to palette hex_color = mcolors.rgb2hex(color[:3]) palette_5x5[(i, j)] = hex_color - + return palette_5x5 - + # Fallback to blue_red if an invalid palette key is provided if palette_key not in stevens_palettes: palette_key = 'blue_red' - + # 
Get the appropriate base palette base_palette = stevens_palettes[palette_key] - + # Select or generate the palette for the requested number of quantiles match num_quantiles: case 3: @@ -794,7 +800,7 @@ def create_5x5_palette(palette_3x3): # Fall back to 3×3 for any other value palette = base_palette num_quantiles = 3 - + # Create the color matrix from the palette using list comprehension bivariate_colors = np.array([ [hex_to_rgb(palette[(i, j)]) for j in range(num_quantiles)] @@ -803,14 +809,15 @@ def create_5x5_palette(palette_3x3): # Create a flattened list of colors colors_list = [bivariate_colors[i, j, :] for i in range(num_quantiles) for j in range(num_quantiles)] - + return bivariate_colors, colors_list + # Also update the legend creation to reflect the new color mixing logic def create_bivariate_legend(bivariate_colors, poverty_label, hazard_label, num_quantiles, palette_name, max_exposure): """ Create a legend for the bivariate map with proper labels based on palette type. - + Parameters: ----------- bivariate_colors : numpy.ndarray @@ -825,7 +832,7 @@ def create_bivariate_legend(bivariate_colors, poverty_label, hazard_label, num_q Name of the palette (e.g., 'blue_red') max_exposure : float Maximum exposure value for the scale - + Returns: -------- fig : matplotlib.figure.Figure @@ -835,28 +842,28 @@ def create_bivariate_legend(bivariate_colors, poverty_label, hazard_label, num_q palette_parts = palette_name.split('_') hazard_color = palette_parts[0].capitalize() if len(palette_parts) > 0 else "Blue" poverty_color = palette_parts[1].capitalize() if len(palette_parts) > 1 else "Red" - + fig, ax = plt.subplots(figsize=(4, 4)) - + # Remove axes ax.set_axis_off() - + # Create the legend grid - with fixed orientation for i in range(num_quantiles): # i capturing hazard (rows) for j in range(num_quantiles): # j capturing poverty (columns) # Add colored square ax.add_patch( plt.Rectangle( - (j, i), 1, 1, + (j, i), 1, 1, facecolor=bivariate_colors[i, j, :], edgecolor='k', linewidth=0.5 ) ) - + # Set labels at the middle of each axis with color information ax.text(num_quantiles/2, -0.7, f"{poverty_label} ({poverty_color})", ha='center', va='center', fontsize=12) ax.text(-1.2, num_quantiles/2, f"{hazard_label} ({hazard_color})", ha='center', va='center', rotation=90, fontsize=12) - + # Calculate percentage labels for fixed scale exposure exposure_percentages = [f"{int(x*100)}%" for x in np.linspace(0, max_exposure, num_quantiles+1)] @@ -864,22 +871,23 @@ def create_bivariate_legend(bivariate_colors, poverty_label, hazard_label, num_q # Position further to the left to avoid overlap for i in range(num_quantiles+1): ax.text(-0.1, i, exposure_percentages[i], ha='right', va='center', fontsize=10) - + # For poverty (x-axis): Low at left, High at right ax.text(0, -0.3, "Low", ha='center', va='top', fontsize=10) # Poverty low (left) ax.text(num_quantiles, -0.3, "High", ha='center', va='top', fontsize=10) # Poverty high (right) - + # Add title with palette name plt.title(f"Bivariate Legend: {hazard_color}-{poverty_color}", fontsize=12) - + # Set axis limits with more space for labels ax.set_xlim(-1.5, num_quantiles + 0.2) ax.set_ylim(-1.2, num_quantiles + 0.2) - + plt.tight_layout() - + return fig + # Function to create a summary table figure def create_summary_table(gdf, pop_field, wealth_field, hazard_field, bivariate_palette, num_quantiles): """Create a summary table with key statistics""" @@ -898,34 +906,34 @@ def create_summary_table(gdf, pop_field, wealth_field, hazard_field, bivariate_p 
'Quantile Grid': f"{num_quantiles}×{num_quantiles}", 'Bivariate Palette': f"{bivariate_palette}" } - + # Additional stats for smaller datasets if len(gdf) <= 50: # Add quantile information for better interpretation wealth_quantiles = [f"{q:.2f}" for q in np.percentile(gdf[wealth_field], np.linspace(0, 100, num_quantiles+1))] hazard_quantiles = [f"{q:.2f}" for q in np.percentile(gdf[hazard_field], np.linspace(0, 100, num_quantiles+1))] - + stats['Poverty Quantile Breaks'] = ", ".join(wealth_quantiles) stats['Exposure Quantile Breaks'] = ", ".join(hazard_quantiles) - + # Create figure fig, ax = plt.subplots(figsize=(6, 4)) ax.axis('tight') ax.axis('off') - + # Create table with statistics table_data = [[k, v] for k, v in stats.items()] - table = ax.table(cellText=table_data, + table = ax.table(cellText=table_data, colLabels=['Statistic', 'Value'], loc='center', cellLoc='left', colWidths=[0.4, 0.6]) - + # Style the table table.auto_set_font_size(False) table.set_fontsize(9) table.scale(1, 1.5) # Adjust row height - + # Style header for (i, j), cell in table.get_celld().items(): if i == 0: # Header row @@ -933,32 +941,33 @@ def create_summary_table(gdf, pop_field, wealth_field, hazard_field, bivariate_p cell.set_facecolor('#E6E6E6') elif i % 2 == 0: # Alternating row colors cell.set_facecolor('#F5F5F5') - + # Add title plt.title('Bivariate Analysis Summary', fontsize=12, pad=10) plt.tight_layout() - + return fig + # Function to create a folium bivariate choropleth map without title overlay def create_bivariate_map(gdf, colors_list, id_field, name_field, num_quantiles): # Convert colors to hex hex_colors = [mcolors.to_hex(c) for c in colors_list] - + # Create a style function with fixed class calculation def style_function(feature): feature_id = feature['properties'][id_field] feature_data = gdf[gdf[id_field] == feature_id] - + if not feature_data.empty: # Get wealth and hazard quantiles wealth_q = feature_data['wealth_quantile'].values[0] hazard_q = feature_data['hazard_quantile'].values[0] - + # Calculate bivariate class with fixed orientation # hazard is the row (i), wealth is the column (j) bivariate_class = int(hazard_q * num_quantiles + wealth_q) - + color = hex_colors[bivariate_class] return { 'fillColor': color, @@ -973,7 +982,7 @@ def style_function(feature): 'weight': 1, 'fillOpacity': 0.5 } - + # Create hover function def highlight_function(feature): return { @@ -981,7 +990,7 @@ def highlight_function(feature): 'color': '#FFFFFF', 'fillOpacity': 0.8 } - + # Create popup function with added area and density information html = """
@@ -1001,7 +1010,7 @@ def highlight_function(feature): def popup_function(feature): feature_id = feature['properties'][id_field] feature_data = gdf[gdf[id_field] == feature_id] - + if not feature_data.empty: row = feature_data.iloc[0] try: @@ -1025,10 +1034,10 @@ def popup_function(feature): return folium.Popup(f"Error displaying data for {row[name_field]}", max_width=350) else: return folium.Popup("No data available", max_width=350) - + # Create the map m = folium.Map() - + # Add GeoJSON layer folium.GeoJson( gdf.__geo_interface__, @@ -1038,22 +1047,23 @@ def popup_function(feature): tooltip=folium.GeoJsonTooltip(fields=[name_field], aliases=['Name'], sticky=True), popup=popup_function ).add_to(m) - + # Add the MiniMap MiniMap(toggle_display=True, position='bottomright').add_to(m) - + # Add the Fullscreen button Fullscreen(position='bottomleft').add_to(m) - + # Fit to data bounds m.fit_bounds(m.get_bounds()) - + return m + def create_qgis_style(gdf, colors_list, num_quantiles, palette_name): """ Create a QGIS-compatible style (QML format) for the bivariate map. - + Parameters: ----------- gdf : GeoDataFrame @@ -1064,7 +1074,7 @@ def create_qgis_style(gdf, colors_list, num_quantiles, palette_name): Number of quantiles used palette_name : str Name of the palette used - + Returns: -------- qgis_style : str @@ -1072,14 +1082,14 @@ def create_qgis_style(gdf, colors_list, num_quantiles, palette_name): """ hex_colors = [mcolors.to_hex(c[:3]) for c in colors_list] - + # Start building the QML style qml = """ """ - + # Create a category for each bivariate class for j in range(num_quantiles): # j capturing poverty/wealth (columns) for i in range(num_quantiles): # i capturing hazard (rows) @@ -1087,21 +1097,21 @@ def create_qgis_style(gdf, colors_list, num_quantiles, palette_name): # In the classify_data function: # result_gdf['bivariate_class'] = result_gdf['wealth_quantile'] * num_quantiles + result_gdf['hazard_quantile'] bivariate_class = j * num_quantiles + i - + # Get the color for this class from colors_list # The colors_list is constructed by flattening the bivariate_colors array # which is filled by iterating over i (hazard, rows) first, then j (poverty, columns) # So we need to convert our i,j to the correct index in colors_list colors_list_index = i * num_quantiles + j color = hex_colors[colors_list_index] - + # Strip the # from the color color_code = color.lstrip('#') - + # Create a category for this class with the correct descriptions qml += f""" """ - + # Continue with symbols qml += """ @@ -1112,19 +1122,19 @@ def create_qgis_style(gdf, colors_list, num_quantiles, palette_name): for i in range(num_quantiles): # i capturing hazard (rows) # Calculate bivariate class CORRECTLY bivariate_class = j * num_quantiles + i - + # Get the color from the colors_list (using the correct index) colors_list_index = i * num_quantiles + j color = hex_colors[colors_list_index] - + # Strip the # from the color color_code = color.lstrip('#') - + # Convert hex to RGB values (QGIS format) r = int(color_code[0:2], 16) g = int(color_code[2:4], 16) b = int(color_code[4:6], 16) - + # Create a symbol for this class qml += f""" @@ -1162,7 +1172,7 @@ def create_qgis_style(gdf, colors_list, num_quantiles, palette_name): """ - + # Complete the QML qml += """ @@ -1208,13 +1218,14 @@ def create_qgis_style(gdf, colors_list, num_quantiles, palette_name): 0 """ - + return qml + def save_geopackage_with_qgis_style(gdf, output_path, colors_list, id_field, name_field, num_quantiles, palette_name): """ Save GeoDataFrame to 
GeoPackage with embedded QGIS styling information. - + Parameters: ----------- gdf : GeoDataFrame @@ -1232,20 +1243,19 @@ def save_geopackage_with_qgis_style(gdf, output_path, colors_list, id_field, nam palette_name : str Name of the palette used """ - import os import sqlite3 - + # First save the geodataframe to GeoPackage gdf.to_file(output_path, driver="GPKG", layer="bivariate_map") - + # Create the QGIS style qgis_style = create_qgis_style(gdf, colors_list, num_quantiles, palette_name) - + try: # Connect to the GeoPackage database (it's a SQLite database) conn = sqlite3.connect(output_path) cursor = conn.cursor() - + # Check if layer_styles table exists cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='layer_styles'") if not cursor.fetchone(): @@ -1267,7 +1277,7 @@ def save_geopackage_with_qgis_style(gdf, output_path, colors_list, id_field, nam update_time DATETIME DEFAULT CURRENT_TIMESTAMP ) """) - + # Insert the style into the layer_styles table cursor.execute(""" INSERT INTO layer_styles @@ -1282,23 +1292,24 @@ def save_geopackage_with_qgis_style(gdf, output_path, colors_list, id_field, nam f'Bivariate choropleth map using {palette_name.replace("_", "-")} palette ({num_quantiles}x{num_quantiles})', 'CCDR_RDL_Tool' )) - + # Commit changes conn.commit() - + print(f"Successfully embedded QGIS style information in GeoPackage at {output_path}") - + except Exception as e: print(f"Error adding styling information to GeoPackage: {str(e)}") finally: if conn: conn.close() + def export_static_map(gdf, colors_list, output_path, num_quantiles, bivariate_palette, - max_exposure, dpi=300, figsize=(10, 10), title=None): + max_exposure, dpi=300, figsize=(10, 10), title=None): """ Export a high-quality static map of the bivariate choropleth using matplotlib. 
- + Parameters: ----------- gdf : GeoDataFrame @@ -1320,67 +1331,67 @@ def export_static_map(gdf, colors_list, output_path, num_quantiles, bivariate_pa """ # Close any existing plots to avoid interference plt.close('all') - + # Create a new figure for the map _, ax = plt.subplots(figsize=figsize) - + # Create a mapping from bivariate_class to color color_map = {} for j in range(num_quantiles): # j capturing poverty (columns) for i in range(num_quantiles): # i capturing hazard (rows) # Calculate class as used in the GeoDataFrame bivariate_class = j * num_quantiles + i - + # Get color from colors_list (with different indexing) color_index = i * num_quantiles + j color_map[bivariate_class] = colors_list[color_index][:3] # Use only RGB - + # Plot each polygon with its appropriate color from the bivariate classification for idx, row in gdf.iterrows(): # Get the bivariate class for this feature biv_class = row['bivariate_class'] - + # Get the corresponding color if biv_class in color_map: color = color_map[biv_class] else: # Fallback if class is outside expected range color = [0.8, 0.8, 0.8] # Light gray - + # Handle both Polygon and MultiPolygon geometries geom = row.geometry - + if geom.geom_type == 'Polygon': # Plot a single polygon xs, ys = geom.exterior.xy ax.fill(xs, ys, color=color, edgecolor='black', linewidth=0.5) - + # Also plot any interior rings (holes) for interior in geom.interiors: xs, ys = interior.xy ax.fill(xs, ys, color='white', edgecolor='black', linewidth=0.5) - + elif geom.geom_type == 'MultiPolygon': # Plot each polygon in the multipolygon for part in geom.geoms: # Plot the exterior xs, ys = part.exterior.xy ax.fill(xs, ys, color=color, edgecolor='black', linewidth=0.5) - + # Plot any interior rings (holes) for interior in part.interiors: xs, ys = interior.xy ax.fill(xs, ys, color='white', edgecolor='black', linewidth=0.5) - + # Remove axis ticks and labels for a clean map ax.set_xticks([]) ax.set_yticks([]) ax.set_xticklabels([]) ax.set_yticklabels([]) - + # Set aspect ratio to equal to avoid distortion ax.set_aspect('equal') - + # Add a title if provided if title: plt.title(title, fontsize=14, pad=20) @@ -1388,35 +1399,35 @@ def export_static_map(gdf, colors_list, output_path, num_quantiles, bivariate_pa # Generate a default title based on palette and grid size default_title = f"Bivariate Map: {bivariate_palette.replace('_', '-')} ({num_quantiles}×{num_quantiles})" plt.title(default_title, fontsize=14, pad=20) - + # Set the map extent to match the data bounds with a small buffer bounds = gdf.total_bounds buffer = (bounds[2] - bounds[0]) * 0.05 # 5% buffer ax.set_xlim(bounds[0] - buffer, bounds[2] + buffer) ax.set_ylim(bounds[1] - buffer, bounds[3] + buffer) - + # Save the map plt.tight_layout() plt.savefig(output_path, dpi=dpi, bbox_inches='tight') print(f"Exported map to {output_path}") - + # Create a separate figure for the legend plt.close('all') # Close the map figure _, legend_ax = plt.subplots(figsize=(4, 4)) legend_ax.set_axis_off() - + # Parse the palette name to get descriptive colors for labels palette_parts = bivariate_palette.split('_') hazard_color = palette_parts[0].capitalize() if len(palette_parts) > 0 else "Blue" poverty_color = palette_parts[1].capitalize() if len(palette_parts) > 1 else "Red" - + # Create a grid of colored squares for the legend for i in range(num_quantiles): # i capturing hazard (rows) for j in range(num_quantiles): # j capturing poverty (columns) # Get color from colors_list color_index = i * num_quantiles + j color = 
colors_list[color_index][:3] # Use only RGB - + # Add a colored rectangle legend_ax.add_patch( plt.Rectangle( @@ -1426,49 +1437,50 @@ def export_static_map(gdf, colors_list, output_path, num_quantiles, bivariate_pa linewidth=0.5 ) ) - + # Add labels for the axes with proper positioning legend_ax.text(num_quantiles/2, -0.7, f"Poverty ({poverty_color})", ha='center', va='center', fontsize=12) legend_ax.text(-1.2, num_quantiles/2, f"Exposure ({hazard_color})", ha='center', va='center', rotation=90, fontsize=12) - + # Calculate percentage labels for fixed scale exposure exposure_percentages = [f"{int(x*100)}%" for x in np.linspace(0, max_exposure, num_quantiles+1)] - + # For exposure (y-axis): Add percentage labels at each level for i in range(num_quantiles+1): legend_ax.text(-0.1, i, exposure_percentages[i], ha='right', va='center', fontsize=10) - + # Add "Low" and "High" labels only for the poverty axis legend_ax.text(0, -0.3, "Low", ha='center', va='top', fontsize=10) # Poverty low (left) legend_ax.text(num_quantiles, -0.3, "High", ha='center', va='top', fontsize=10) # Poverty high (right) - + # Set limits for legend with more space for labels legend_ax.set_xlim(-1.5, num_quantiles + 0.2) legend_ax.set_ylim(-1.2, num_quantiles + 0.2) - + # Add a title to the legend legend_ax.set_title(f"Bivariate Legend: {hazard_color}-{poverty_color}", fontsize=12, pad=20) - + # Save the legend legend_path = os.path.splitext(output_path)[0] + "_legend.png" plt.tight_layout() plt.savefig(legend_path, dpi=dpi, bbox_inches='tight') print(f"Exported legend to {legend_path}") - + # Close all figures plt.close('all') - + return output_path, legend_path + # Function to run the analysis def run_analysis(b): run_button.disabled = True run_button.description = "Creating Map..." - + try: with output: output.clear_output(wait=True) - + if not validate_input( id_field_selector, name_field_selector, population_field_selector, wealth_field_selector, hazard_field_selector @@ -1482,10 +1494,10 @@ def run_analysis(b): run_button.disabled = False run_button.description = "Create Bivariate Map" return - + # Get layer name (if applicable) layer_name = layer_selector.value if not layer_selector.disabled else None - + # Check required fields id_field = id_field_selector.value name_field = name_field_selector.value @@ -1494,26 +1506,26 @@ def run_analysis(b): hazard_field = hazard_field_selector.value normalize_rwi = normalize_rwi_chk.value print(f"RWI normalization: {'Enabled' if normalize_rwi else 'Disabled'}") - + if not (id_field and name_field and pop_field and wealth_field and hazard_field): print("Error: Please select all required fields") run_button.disabled = False run_button.description = "Create Bivariate Map" return - + # Get other parameters num_quantiles = quantiles_selector.value bivariate_palette = bivariate_palette_selector.value - + print(f"Loading data from: {file_path}") if layer_name: print(f"Layer: {layer_name}") gdf = gpd.read_file(file_path, layer=layer_name) else: gdf = gpd.read_file(file_path) - + print(f"Loaded {len(gdf)} features") - + # Check for numerical data in required fields try: gdf[pop_field] = pd.to_numeric(gdf[pop_field]) @@ -1525,24 +1537,24 @@ def run_analysis(b): run_button.disabled = False run_button.description = "Create Bivariate Map" return - + # Check for negative or zero values if (gdf[pop_field] <= 0).any(): print("Warning: Some population values are zero or negative. 
These may cause issues in calculations.") - + if (gdf[wealth_field] <= 0).all(): print("Warning: All wealth values are zero or negative. This may cause issues in wealth calculations.") - + if (gdf[hazard_field] <= 0).all(): print("Warning: All hazard values are zero or negative. This may cause issues in hazard calculations.") - + # Ensure geometry is valid for area calculation print("Validating geometries...") invalid_geoms = ~gdf.geometry.is_valid if invalid_geoms.any(): print(f"Warning: Found {invalid_geoms.sum()} invalid geometries. Attempting to fix them...") gdf.geometry = gdf.geometry.buffer(0) # Standard fix for many invalid geometries - + # Get normalization preference normalize_rwi = normalize_rwi_chk.value print(f"RWI normalization: {'Enabled' if normalize_rwi else 'Disabled'}") @@ -1556,23 +1568,23 @@ def run_analysis(b): print("Using raw wealth index values (no normalization)...") # Create a copy to avoid modifying the original, just as in calculate_weighted_rwi result_gdf = gdf.copy() - + # Check CRS and reproject to a projected CRS if needed if result_gdf.crs is None: print("Warning: CRS is not defined. Assuming EPSG:4326 for area calculation.") result_gdf.set_crs(epsg=4326, inplace=True) - + # If using a geographic CRS (like WGS84/EPSG:4326), reproject to a suitable projected CRS if result_gdf.crs.is_geographic: print("Converting to projected CRS for accurate area calculation...") # Get centroid to determine appropriate UTM zone centroid = result_gdf.unary_union.centroid lon, lat = centroid.x, centroid.y - + # Calculate UTM zone utm_zone = int(1 + (lon + 180) // 6) epsg = 32600 + utm_zone + (0 if lat >= 0 else 100) # North vs South hemisphere - + # Reproject for area calculation area_gdf = result_gdf.to_crs(epsg=epsg) # Calculate area in square kilometers @@ -1580,29 +1592,29 @@ def run_analysis(b): else: # Already in projected CRS result_gdf['area_km2'] = result_gdf.geometry.area / 10**6 # Convert from m² to km² - + # Calculate population density (people per km²) result_gdf['pop_density'] = result_gdf[pop_field] / result_gdf['area_km2'] - + # Create a copy of the wealth field for the scaled version even when not normalizing # This ensures consistent field names in subsequent code result_gdf['w_RWIxPOP'] = result_gdf[wealth_field] - + # Scale to 0-100 for easier interpretation min_val = result_gdf[wealth_field].min() max_val = result_gdf[wealth_field].max() result_gdf['w_RWIxPOP_scaled'] = 100 * (result_gdf[wealth_field] - min_val) / (max_val - min_val) - + # Assign back to gdf to maintain consistency with the rest of the function gdf = result_gdf - + # Use the scaled version of the original wealth field for classification wealth_field_for_classification = 'w_RWIxPOP_scaled' # Now use the classify_data function to create the quantiles print(f"Classifying data with {num_quantiles}×{num_quantiles} grid...") gdf = classify_data(gdf, wealth_field, hazard_field, num_quantiles, wealth_field_for_classification) - + # Print summary statistics print("\nSummary Statistics:") print(f"Total Population: {gdf[pop_field].sum():,.0f}") @@ -1611,7 +1623,7 @@ def run_analysis(b): print(f"RWI Range: {gdf[wealth_field].min():.4f} to {gdf[wealth_field].max():.4f}") print(f"Weighted RWI Range: {gdf['w_RWIxPOP_scaled'].min():.2f} to {gdf['w_RWIxPOP_scaled'].max():.2f}") print(f"Exposure to Hazard Range: {gdf[hazard_field].min():.4f} to {gdf[hazard_field].max():.4f}\n") - + # Calculate relative exposure (per capita) print("Calculating relative Exposure to Hazard") gdf['relative_exposure'] = 
gdf[hazard_field] / gdf[pop_field] @@ -1673,56 +1685,56 @@ def run_analysis(b): # Create bivariate map print("Creating bivariate map...") bivariate_map = create_bivariate_map(gdf, colors_list, id_field, name_field, num_quantiles) - + # Create summary table print("Creating summary statistics table...") summary_table = create_summary_table(gdf, pop_field, wealth_field, hazard_field, bivariate_palette, num_quantiles) # Update the map widget with the new map map_widget.value = bivariate_map._repr_html_() - + # Display the legend and summary table side by side with chart_output: clear_output(wait=True) # Create two columns layout - from ipywidgets import HBox, VBox, Output + from ipywidgets import HBox, Output left_output = Output() right_output = Output() - + with left_output: display(legend_fig) with right_output: display(summary_table) - + display(HBox([left_output, right_output])) - + # Export outputs if requested if export_maps_chk.value: print("Exporting maps...") # Create output directory output_dir = os.path.join(common.OUTPUT_DIR, "bivariate_maps") os.makedirs(output_dir, exist_ok=True) - + # Save summary table summary_path = os.path.join(output_dir, "bivariate_summary.png") summary_table.savefig(summary_path, dpi=300, bbox_inches='tight') print(f"Saved summary table to {summary_path}") - + # Save interactive map as HTML html_map_path = os.path.join(output_dir, "bivariate_map.html") bivariate_map.save(html_map_path) print(f"Saved interactive map to {html_map_path}") - + # Export high-quality static map using matplotlib print("Generating high-quality static map with matplotlib...") static_map_path = os.path.join(output_dir, f"bivariate_map_{bivariate_palette}_{num_quantiles}x{num_quantiles}.png") try: map_title = f"Bivariate Risk-Poverty Map ({bivariate_palette.replace('_', '-')} palette)" map_path, legend_path = export_static_map( - gdf, - colors_list, - static_map_path, - num_quantiles, + gdf, + colors_list, + static_map_path, + num_quantiles, bivariate_palette, max_exposure_slider.value, dpi=300, @@ -1743,18 +1755,18 @@ def run_analysis(b): # Create output directory output_dir = os.path.join(common.OUTPUT_DIR, "bivariate_maps") os.makedirs(output_dir, exist_ok=True) - + # Create output file name with palette information output_file = os.path.join(output_dir, f"bivariate_map_{bivariate_palette}_{num_quantiles}x{num_quantiles}.gpkg") - + # Save to GeoPackage with embedded QGIS style information save_geopackage_with_qgis_style( - gdf, - output_file, - colors_list, - id_field, - name_field, - num_quantiles, + gdf, + output_file, + colors_list, + id_field, + name_field, + num_quantiles, bivariate_palette ) print(f"Saved data with embedded QGIS styling to {output_file}") @@ -1768,6 +1780,7 @@ def run_analysis(b): run_button.disabled = False run_button.description = "Create Bivariate Map" + # Function to create header widget def create_header_widget(): header_html = """ @@ -1789,9 +1802,10 @@ def create_header_widget():
""" - + return widgets.HTML(value=header_html, layout=widgets.Layout(width='99%')) + # Function to create footer def create_footer(): preview_container = widgets.HBox([ @@ -1799,12 +1813,13 @@ def create_footer(): export_maps_chk, export_data_chk ], layout=widgets.Layout(width='100%', justify_content='space-around')) - + return widgets.VBox([ preview_container, widgets.HBox([run_button], layout=widgets.Layout(display='flex', justify_content='center', width='100%')) ], layout=widgets.Layout(width='100%', height='100px', padding='10px')) + # Function to create sidebar def create_sidebar(info_box, tabs, output, footer): sidebar_content = widgets.VBox([ @@ -1818,18 +1833,19 @@ def create_sidebar(info_box, tabs, output, footer): footer ], layout=widgets.Layout(width='370px', height='100%')) + # Function to create UI components def get_ui_components(sidebar, header): map_and_chart = widgets.VBox( [map_widget, chart_output], layout=widgets.Layout(width='750px', height='100%') ) - + content_layout = widgets.HBox( [sidebar, map_and_chart], layout=widgets.Layout(width='100%', height='800px') ) - + final_layout = widgets.VBox( [header, content_layout], layout=widgets.Layout(width='100%') @@ -1837,6 +1853,7 @@ def get_ui_components(sidebar, header): return map_and_chart, content_layout, final_layout + # Function to create JavaScript code for tooltips def create_js_code(): return f""" @@ -1877,9 +1894,10 @@ def create_js_code(): """ + # Function to create tabs for the UI def create_tabs(): - + HTML_STYLE = "
" # Create data tab data_tab = widgets.VBox([ @@ -1894,7 +1912,7 @@ def create_tabs(): wealth_field_selector, hazard_field_selector, ], layout=widgets.Layout(padding='10px')) - + # Create bivariate mapping tab bivariate_tab = widgets.VBox([ widgets.Label("Bivariate Map Settings:"), @@ -1907,46 +1925,47 @@ def create_tabs(): max_exposure_slider, normalize_rwi_chk, ], layout=widgets.Layout(padding='10px')) - + # Create tabs tabs = widgets.Tab(layout={'width': '350px', 'height': '500px'}) tabs.children = [data_tab, bivariate_tab] tabs.set_title(0, 'Data') tabs.set_title(1, 'Bivariate Map') - + return tabs + # Function to initialize the tool def initialize_tool(): # Connect event handlers select_file_button.on_click(select_file) run_button.on_click(run_analysis) - + # Create header header = create_header_widget() - + # Create tabs tabs = create_tabs() - + # Create footer footer = create_footer() - + # Create sidebar sidebar = create_sidebar(info_box, tabs, output, footer) - + # Combine components to create final layout _, _, final_layout = get_ui_components(sidebar, header) - + # Display the layout display(final_layout) - + # Add JavaScript for tooltips display(HTML(create_js_code())) - + # Initialize empty map m = folium.Map(location=[0, 0], zoom_start=2) map_widget.value = m._repr_html_() - + # Print welcome message with output: print("Welcome to the Bivariate Risk-Poverty Mapping Tool") @@ -1957,4 +1976,4 @@ def initialize_tool(): print("\nResults will include:") print("- Interactive map with data exploration capabilities") print("- Color legend showing the bivariate classification") - print("- Summary statistics table for additional context") \ No newline at end of file + print("- Summary statistics table for additional context") diff --git a/tools/code/gui_f3_utils.py b/tools/code/gui_f3_utils.py index 896c89c..a232ba8 100644 --- a/tools/code/gui_f3_utils.py +++ b/tools/code/gui_f3_utils.py @@ -8,8 +8,6 @@ import numpy as np import pandas as pd import time -import tkinter as tk -from tkinter import filedialog import common from damageFunctions import FL_mortality_factor, FL_damage_factor_builtup, FL_damage_factor_agri