Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
CABE_DATA_DIR=data/Version_0_4_2
CABE_START_YEAR=startyear_2021
CABE_ASSUMPTIONSET=GHG_incl
CABE_EFFORT_SHARING_CONFIG=../effort-sharing/notebooks/input.yml

# Needed for SvelteKit application
CABE_API_URL=http://127.0.0.1:5000

10 changes: 9 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,14 +13,15 @@ Should have the following data files:
1. `{CABE_DATA_DIR} / "ne_110m_admin_0_countries.geojson"` - can be downloaded with `npm run download:borders` and move downloaded file to CABE_DATA_DIR directory.
1. `{CABE_DATA_DIR} / "xr_policyscen.nc"`- Policy scenario data
1. `{CABE_DATA_DIR} / {CABE_START_YEAR} / "xr_dataread.nc"` - Global data
1. `{CABE_DATA_DIR} / {CABE_START_YEAR} / {CABE_ASSUMPTIONSET} / "Allocations" / "xr_alloc_{REGION}.nc"` - Region specific data
1. `{CABE_DATA_DIR} / {CABE_START_YEAR} / {CABE_ASSUMPTIONSET} / "Aggregated_files" / "xr_alloc_{YEAR}.nc"` - Aggregated data
1. `{CABE_EFFORT_SHARING_CONFIG}` - Effort sharing configuration file.

The `CABE_DATA_DIR` variable is the path to the data directory.
The `CABE_START_YEAR` variable is the start year of the allocation.
The `CABE_ASSUMPTIONSET` variable encodes assumptions on which gases are included (GHG or CO2_only) and land use (included/excluded).
The `REGION` variable is the 3 letter ISO code of the region.
The `YEAR` variable is the year of the allocation.
The `CABE_EFFORT_SHARING_CONFIG` variable is the path to the [effort sharing configuration file](https://github.com/imagepbl/effort-sharing/blob/main/notebooks/input.yml).

The `CABE_` variables are defined in the `.env` file.
See [.env.example](.env.example) file for an example.
Expand All @@ -36,6 +37,7 @@ Dependencies can be installed with
# From the root of the repository
# To install Node.js dependencies
npm install
# Activate Python environment from effort-sharing repo
# To install Python dependencies
pip install -r requirements.txt
```
Expand Down Expand Up @@ -146,6 +148,12 @@ The end-to-end test can be run with
npm run test
```

When developing the effort-sharing package at the same time, you can install the effortsharing package in editable mode with

```bash
pip install -e ../effort-sharing
```

## Building

To create a production version of your app:
Expand Down
4 changes: 3 additions & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,6 @@ gunicorn>=23.0.0; platform_system != "Windows"
sentry-sdk[flask]>=2.20.0
python-dotenv>=1.0.1
gevent>=24.11.1
supervisor>=4.2.5
# TODO: once PR 96 is merged, point this dependency at the main branch
git+https://github.com/imagepbl/effort-sharing.git@refactor-data-read#egg=effortsharing
14 changes: 9 additions & 5 deletions tests/region.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,8 @@ import { expect, test } from '@playwright/test';
* Look at screenshots/ folder for screenshots of each region page.
* Use `npx playwright show-report` for test results.
*/
test.skip('Renders region pages', async ({ page }) => {
test.setTimeout(600_000); // 10 minutes
test('Renders region pages', async ({ page }) => {
test.setTimeout(3600_000); // 1 hour

// Fetch list of regions
await page.goto('/map?allocTime=2030');
Expand All @@ -39,16 +39,20 @@ test.skip('Renders region pages', async ({ page }) => {
async () => {
await page.getByText('Select country ▼').click();
await page.getByRole('link', { name: region, exact: true }).click();
await expect(page).toHaveURL(new RegExp(`/regions/${encodeURIComponent(region)}`));
await expect(page).toHaveURL(new RegExp(`/regions/${encodeURIComponent(region)}`), {
timeout: 20_000
});

const path = `screenshots/${region}.png`;
await page.screenshot({ path });

await page.getByRole('heading', { name: 'Global budget' });
await page.getByRole('link', { name: 'Back to map' }).click();
await expect(page).toHaveURL(new RegExp(`/map`));
await expect(page).toHaveURL(new RegExp(`/map`), {
timeout: 20_000
});
},
{ timeout: 2_000 }
{ timeout: 30_000 }
);
}
});
109 changes: 85 additions & 24 deletions ws.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
"""

from dataclasses import dataclass
from functools import lru_cache
from json import loads
from pathlib import Path

Expand All @@ -19,6 +20,9 @@
import sentry_sdk
import xarray as xr
from dotenv import dotenv_values
from effortsharing.allocation import allocation
from effortsharing.datareading import datareading
from effortsharing.policyscens import policyscenadding
from flask import Flask, jsonify, request
from flask_cors import CORS

Expand All @@ -28,6 +32,7 @@ class Config:
data_dir: Path
start_year: str
assumption_set: str
effort_sharing_config: Path


def load_env() -> Config:
Expand All @@ -38,17 +43,21 @@ def load_env() -> Config:
raise ValueError("CABE_START_YEAR not set in .env file")
if "CABE_ASSUMPTIONSET" not in config or config["CABE_ASSUMPTIONSET"] is None:
raise ValueError("CABE_ASSUMPTIONSET not set in .env file")
if "CABE_EFFORT_SHARING_CONFIG" not in config or config["CABE_EFFORT_SHARING_CONFIG"] is None:
raise ValueError("CABE_EFFORT_SHARING_CONFIG not set in .env file")
return Config(
data_dir=Path(config["CABE_DATA_DIR"]),
start_year=config["CABE_START_YEAR"],
assumption_set=config["CABE_ASSUMPTIONSET"],
effort_sharing_config=Path(config["CABE_EFFORT_SHARING_CONFIG"]),
)


config = load_env()

sentry_sdk.init(
    # DSN intentionally left unset for development: without a dsn the Sentry
    # SDK initializes as a no-op and reports nothing.
    # TODO re-enable for non-dev environments
    # dsn="https://12eb01a8df644a3596e747a145f14033@app.glitchtip.com/10011",
    traces_sample_rate=0.0,
    profiles_sample_rate=0.0,
)
Expand All @@ -60,7 +69,32 @@ def load_env() -> Config:
# TODO write tests with dummy data

# Global data (xr_dataread.nc)
ds_global = xr.open_dataset(config.data_dir / config.start_year / "xr_dataread.nc")
def read_ds_global():
    """Build the global emissions dataset via the effortsharing data pipeline.

    Replaces the previous direct read of ``xr_dataread.nc``: runs the full
    effort-sharing data-reading sequence (general settings, SSPs, UN data,
    HDI, historical emissions, AR6 scenarios, non-CO2 variation, global
    budgets and trajectories, baselines, NDCs), merges everything into one
    xarray object and adds country groups.

    Returns:
        The merged dataset (``datareader.xr_total``) restricted to the
        peak-temperature levels listed in the effort-sharing settings under
        ``dimension_ranges.peak_temperature_saved``.

    NOTE(review): the read_*/determine_* calls below are presumably
    order-sensitive (later steps consuming earlier results) — keep the
    sequence as-is; confirm against the effort-sharing package docs.
    """
    # Reader is configured from the CABE_EFFORT_SHARING_CONFIG file.
    datareader = datareading(config.effort_sharing_config)
    datareader.read_general()
    datareader.read_ssps()
    datareader.read_undata()
    datareader.read_hdi()
    datareader.read_historicalemis_jones()
    datareader.read_ar6()
    datareader.nonco2variation()
    datareader.determine_global_nonco2_trajectories()
    datareader.determine_global_budgets()
    datareader.determine_global_co2_trajectories()
    datareader.read_baseline()
    datareader.read_ndc()
    datareader.read_ndc_climateresource()
    datareader.merge_xr()
    datareader.add_country_groups()
    # Keep only the configured peak temperatures; values are rounded to two
    # decimals to match the coordinate labels in the dataset.
    xr_normal = datareader.xr_total.sel(
        Temperature=np.array(datareader.settings["dimension_ranges"]["peak_temperature_saved"])
        .astype(float)
        .round(2)
    )
    print("Global data read")
    return xr_normal

ds_global = read_ds_global()

# PCC convergence year is standard on 2050
DEFAULT_CONVERGENCE_YEAR = 2050
Expand Down Expand Up @@ -635,8 +669,18 @@ def allocation_map(year, allocation_method):
return {"data": rows, "domain": domain}


def read_policy_scenarios():
    """Generate the reference/policy scenario dataset in-memory.

    Replaces the previous direct read of ``xr_policyscen.nc``: loads ENGAGE
    scenario data through the effortsharing package and filters/converts it
    against the global dataset ``ds_global``.

    Returns:
        The xarray object produced by ``policyscenadding._to_xr`` from the
        ENGAGE data and the combined total.
    """
    policyscenner = policyscenadding(
        input_file=config.effort_sharing_config,
        xr_total=ds_global,
    )
    policyscenner.read_engage_data()
    policyscenner.filter_and_convert()
    print("Policy scenarios dataset generated")
    # NOTE(review): _to_xr is a private method of policyscenadding — consider
    # requesting a public accessor upstream so this call doesn't break on a
    # package refactor.
    return policyscenner._to_xr(policyscenner.xr_eng, policyscenner.xr_total)

# Reference pathway data (formerly read from xr_policyscen.nc on disk, now
# generated in-memory by the effortsharing package at startup).
ds_policyscen = read_policy_scenarios()


@app.get("/timeseries/<region>/policies/<policy>")
Expand Down Expand Up @@ -753,17 +797,11 @@ def ndc_projections(region):
return {"ndc_inventory": ndc_range_inventory(region), "ndc_jones": ndc_range_jones(region)}


def get_ds(region):
if region not in available_region_files:
raise ValueError(f"Region {region} not found")
fn = available_region_files[region]
return xr.open_dataset(fn)
allocation_methods = {"PC", "PCC", "AP", "GDR", "ECPC", "GF"}


def emission_allocation_per_method(region, allocation_method):
selection = global_pathway_choices()
ds = get_ds(region)[allocation_method].sel(**selection).rename(Time="time")
# set time as the first dimension
def emission_allocation_per_method(allocator_ds, allocation_method):
ds = allocator_ds.rename(Time="time")
dim_order = ["time"] + [dim for dim in ds.dims if dim != "time"]
ds = ds.transpose(*dim_order)

Expand Down Expand Up @@ -794,23 +832,46 @@ def emission_allocation_per_method(region, allocation_method):
return pd.concat([mr_df, min_df, max_df], axis=1).reset_index().dropna().to_dict(orient="records")


allocation_methods = {"PC", "PCC", "AP", "GDR", "ECPC", "GF"}


@app.get("/timeseries/<region>/emissions/allocations")
def emission_allocations(region):
    """Return per-method emission allocation time series for *region*.

    Builds (or fetches a cached) effortsharing allocator for the region, then
    computes one time series per allocation method; methods that yield no data
    for the current selection are omitted from the response.

    Example:
    http://127.0.0.1:5000/timeseries/USA/emissions/allocations?exceedanceRisk=0.67&negativeEmissions=0.4&temperature=1.8: 36.94ms
    """
    allocator = create_allocator(region)
    allocations = {}
    # Pathway choices (temperature, exceedance risk, negative emissions) come
    # from the query string and are shared across all allocation methods.
    selection = global_pathway_choices()
    for allocation_method in allocation_methods:
        allocation_data = emission_allocation_per_method(
            allocator.xr_total[allocation_method].sel(**selection),
            allocation_method,
        )
        if allocation_data is None:
            continue
        allocations[allocation_method] = allocation_data
    return allocations


# Building an allocator runs the full effort-sharing allocation pipeline and
# is expensive, so cache recent regions. maxsize=1 would thrash as soon as
# requests alternate between two regions; keep the bound small because each
# allocator holds sizeable xarray data in memory.
@lru_cache(maxsize=4)
def create_allocator(region):
    """Create (and cache) an effortsharing allocator for *region*.

    Runs every allocation rule (GF, PC, PCC, PCB, ECPC, AP, GDR) so that
    ``allocator.xr_total`` contains all methods for later selection.

    Args:
        region: Region identifier accepted by the effortsharing package
            (ISO3 code or a named country group such as "African Group").

    Returns:
        The fully populated ``allocation`` object.
    """
    # TODO(review): lulucf/gas are hard-coded here while CABE_ASSUMPTIONSET
    # (config.assumption_set) encodes the same choices — confirm they agree.
    lulucf = "incl"
    gas = "GHG"
    input_file = config.effort_sharing_config
    allocator = allocation(
        region,
        lulucf=lulucf,
        gas=gas,
        input_file=input_file,
    )
    allocator.gf()
    allocator.pc()
    allocator.pcc()
    allocator.pcb()
    # Restrict the dimension ranges to the app defaults before running the
    # remaining rules, to avoid computing unused combinations.
    # NOTE(review): these dims are assigned after gf/pc/pcc/pcb but before
    # ecpc/ap/gdr — presumably only the latter consume them; confirm that
    # pcc() does not read dim_convyears.
    allocator.dim_histstartyear = [DEFAULT_HISTORICAL_STARTYEAR]
    allocator.dim_convyears = [DEFAULT_CONVERGENCE_YEAR]
    allocator.dim_discountrates = [DEFAULT_DISCOUNT_FACTOR]
    allocator.ecpc()
    allocator.ap()
    allocator.gdr()
    return allocator


@app.get("/statistics/reductions/<region>")
def allocation_reduction(region):
periods = (2030, 2040)
Expand All @@ -820,8 +881,8 @@ def allocation_reduction(region):
)

hist = ds_global.GHG_hist.sel(Region=region, Time=1990).values + 0
ds = get_ds(region)

allocator = create_allocator(region)
reductions = {}
for allocation_method in allocation_methods:
pselection = selection.copy()
Expand All @@ -832,7 +893,7 @@ def allocation_reduction(region):
reductions[allocation_method] = {}
for period in periods:
pselection.update(Time=period)
es = ds[allocation_method].sel(**pselection).mean().values + 0
es = allocator.xr_total[allocation_method].sel(**pselection).mean().values + 0
if np.isnan(es) or np.isnan(hist) or hist == 0:
reductions[allocation_method][period] = None
else:
Expand All @@ -842,4 +903,4 @@ def allocation_reduction(region):


if __name__ == "__main__":
    # Smoke test: build an allocator for a multi-country group.
    print(create_allocator("African Group"))
Loading