
Commit 0e7857f

Add hdf5 IO for ImpactForecast (#1190)
* implement changes in Impact IO and ImpactForecast IO
* Update tests
* Fix small issue when reading H5 for ImpactForecast

Co-authored-by: Lukas Riedel <[email protected]>
1 parent bb99794 commit 0e7857f

3 files changed: +110 -4 lines changed

climada/engine/impact.py

Lines changed: 22 additions & 4 deletions
@@ -1431,6 +1431,8 @@ def write_attribute(group, name, value):
 
         def write_dataset(group, name, value):
             """Write a dataset"""
+            if name == "lead_time":
+                value = value.astype("timedelta64[ns]").astype("int64")
             group.create_dataset(name, data=value, dtype=_str_type_helper(value))
 
         def write_dict(group, name, value):
@@ -1618,7 +1620,9 @@ def read_excel(self, *args, **kwargs):
         self.__dict__ = Impact.from_excel(*args, **kwargs).__dict__
 
     @classmethod
-    def from_hdf5(cls, file_path: Union[str, Path]):
+    def from_hdf5(
+        cls, file_path: Union[str, Path], *, add_scalar_attrs=None, add_array_attrs=None
+    ):
         """Create an impact object from an H5 file.
 
         This assumes a specific layout of the file. If values are not found in the
@@ -1663,6 +1667,10 @@ def from_hdf5(cls, file_path: Union[str, Path]):
         ----------
         file_path : str or Path
            The file path of the file to read.
+        add_scalar_attrs : Iterable of str, optional
+            Scalar attributes to read from file. Defaults to None.
+        add_array_attrs : Iterable of str, optional
+            Array attributes to read from file. Defaults to None.
 
         Returns
         -------
@@ -1691,17 +1699,27 @@ def from_hdf5(cls, file_path: Union[str, Path]):
             # Scalar attributes
             scalar_attrs = set(
                 ("crs", "tot_value", "unit", "aai_agg", "frequency_unit", "haz_type")
-            ).intersection(file.attrs.keys())
+            )
+            if add_scalar_attrs is not None:
+                scalar_attrs = scalar_attrs.union(add_scalar_attrs)
+            scalar_attrs = scalar_attrs.intersection(file.attrs.keys())
             kwargs.update({attr: file.attrs[attr] for attr in scalar_attrs})
 
             # Array attributes
             # NOTE: Need [:] to copy array data. Otherwise, it would be a view that is
             # invalidated once we close the file.
             array_attrs = set(
                 ("event_id", "date", "coord_exp", "eai_exp", "at_event", "frequency")
-            ).intersection(file.keys())
+            )
+            if add_array_attrs is not None:
+                array_attrs = array_attrs.union(add_array_attrs)
+            array_attrs = array_attrs.intersection(file.keys())
             kwargs.update({attr: file[attr][:] for attr in array_attrs})
-
+            # correct lead_time attribute to timedelta
+            if "lead_time" in kwargs:
+                kwargs["lead_time"] = np.array(file["lead_time"][:]).astype(
+                    "timedelta64[ns]"
+                )
             # Special handling for 'event_name' because it should be a list of strings
             if "event_name" in file:
                 # pylint: disable=no-member
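
The lead-time handling added here is a plain NumPy round trip: write_dataset stores "lead_time" as int64 nanosecond counts, and from_hdf5 casts the stored integers back to timedelta64[ns]. A minimal standalone sketch of that conversion (the array values are illustrative, not taken from the commit):

    import numpy as np

    # Illustrative lead times (hypothetical values, not from the commit)
    lead_time = np.array([6, 12, 24], dtype="timedelta64[h]")

    # Writing: cast to nanoseconds, then to plain int64 (as write_dataset does for "lead_time")
    stored = lead_time.astype("timedelta64[ns]").astype("int64")

    # Reading: reinterpret the stored integers as timedelta64[ns] (as from_hdf5 does)
    restored = np.array(stored).astype("timedelta64[ns]")

    assert (restored == lead_time).all()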

climada/engine/impact_forecast.py

Lines changed: 58 additions & 0 deletions
@@ -20,6 +20,8 @@
 """
 
 import logging
+from pathlib import Path
+from typing import Union
 
 import numpy as np
 import scipy.sparse as sparse
@@ -173,6 +175,62 @@ def calc_freq_curve(self, return_per=None):
         LOGGER.error("calc_freq_curve is not defined for ImpactForecast")
         raise NotImplementedError("calc_freq_curve is not defined for ImpactForecast")
 
+    @classmethod
+    def from_hdf5(cls, file_path: Union[str, Path]):
+        """Create an ImpactForecast object from an H5 file.
+
+        This assumes a specific layout of the file. If values are not found in the
+        expected places, they will be set to the default values for an ``Impact`` object.
+
+        The following H5 file structure is assumed (H5 groups are terminated with ``/``,
+        attributes are denoted by ``.attrs/``)::
+
+            file.h5
+            ├─ at_event
+            ├─ coord_exp
+            ├─ eai_exp
+            ├─ event_id
+            ├─ event_name
+            ├─ frequency
+            ├─ imp_mat
+            ├─ lead_time
+            ├─ member
+            ├─ .attrs/
+            │  ├─ aai_agg
+            │  ├─ crs
+            │  ├─ frequency_unit
+            │  ├─ haz_type
+            │  ├─ tot_value
+            │  ├─ unit
+
+        As per the :py:func:`climada.engine.impact.Impact.__init__`, any of these entries
+        is optional. If it is not found, the default value will be used when constructing
+        the Impact.
+
+        The impact matrix ``imp_mat`` can either be an H5 dataset, in which case it is
+        interpreted as dense representation of the matrix, or an H5 group, in which case
+        the group is expected to contain the following data for instantiating a
+        `scipy.sparse.csr_matrix <https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html>`_::
+
+            imp_mat/
+            ├─ data
+            ├─ indices
+            ├─ indptr
+            ├─ .attrs/
+            │  ├─ shape
+
+        Parameters
+        ----------
+        file_path : str or Path
+            The file path of the file to read.
+
+        Returns
+        -------
+        imp : ImpactForecast
+            ImpactForecast with data from the given file
+        """
+        return super().from_hdf5(file_path, add_array_attrs={"member", "lead_time"})
+
     def _check_sizes(self):
         """Check sizes of forecast data vs. impact data.
 
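
Together with the writer changes in impact.py, this enables a straightforward HDF5 round trip for forecasts. A hedged usage sketch, assuming an existing ImpactForecast instance named imp_fc (the output file name is arbitrary; import paths follow the module layout shown in this commit):

    from pathlib import Path

    from climada.engine import Impact
    from climada.engine.impact_forecast import ImpactForecast

    # imp_fc: an existing ImpactForecast instance, assumed to be constructed elsewhere
    out_file = Path("impact_forecast.h5")  # arbitrary output path

    # Write with a sparse impact matrix (dense_imp_mat=True would store it densely)
    imp_fc.write_hdf5(out_file, dense_imp_mat=False)

    # Reading back as ImpactForecast also restores `member` and `lead_time`,
    # the latter converted back to timedelta64[ns]
    imp_fc_read = ImpactForecast.from_hdf5(out_file)

    # Reading the same file as a plain Impact skips the forecast-specific datasets
    imp_read = Impact.from_hdf5(out_file)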

climada/engine/test/test_impact_forecast.py

Lines changed: 30 additions & 0 deletions
@@ -233,6 +233,36 @@ def test_impact_forecast_blocked_methods(impact_forecast):
         impact_forecast.calc_freq_curve(np.array([10, 50, 100]))
 
 
+@pytest.mark.parametrize("dense", [True, False])
+def test_write_read_hdf5(impact_forecast, tmp_path, dense):
+
+    file_name = tmp_path / "test_hazard_forecast.h5"
+    # replace dummy_impact event_names with strings
+    impact_forecast.event_name = [str(name) for name in impact_forecast.event_name]
+    impact_forecast.write_hdf5(file_name, dense_imp_mat=dense)
+
+    def compare_attr(obj, attr):
+        actual = getattr(obj, attr)
+        expected = getattr(impact_forecast, attr)
+        if isinstance(actual, csr_matrix):
+            npt.assert_array_equal(actual.todense(), expected.todense())
+        else:
+            npt.assert_array_equal(actual, expected)
+
+    # Read ImpactForecast
+    impact_forecast_read = ImpactForecast.from_hdf5(file_name)
+    assert impact_forecast_read.lead_time.dtype.kind == np.dtype("timedelta64").kind
+    for attr in impact_forecast.__dict__.keys():
+        compare_attr(impact_forecast_read, attr)
+
+    # Read Impact
+    impact_read = Impact.from_hdf5(file_name)
+    for attr in impact_read.__dict__.keys():
+        compare_attr(impact_read, attr)
+    assert "member" not in impact_read.__dict__
+    assert "lead_time" not in impact_read.__dict__
+
+
 @pytest.fixture
 def impact_forecast_stats(impact_kwargs, lead_time, member):
     max_index = 4
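
The new test is parametrized over dense and sparse imp_mat storage. One way to run only this test, sketched here with pytest's Python entry point (the repository-relative path is the one shown in this commit):

    import pytest

    # Select the new round-trip test by node ID; both "dense" parametrizations run
    pytest.main(["-v", "climada/engine/test/test_impact_forecast.py::test_write_read_hdf5"])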
