-
Notifications
You must be signed in to change notification settings - Fork 322
Address h5netcdf scalar issue #3331
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from 3 commits
5972885
fc59629
3c31b69
5f8e051
ef10d3b
8e2cfa5
33a4356
31d1c67
d35889e
5b3efb3
b2076ef
42ee530
2f6a413
aa0edbb
db89a49
f6c5a0c
30f469a
ee97b2f
606ce2b
99ff436
27298b3
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -62,6 +62,11 @@ def load_readers(filenames=None, reader=None, reader_kwargs=None): | |
|
|
||
| loadables = reader_instance.select_files_from_pathnames(readers_files) | ||
| if loadables: | ||
| # WARN: This is very confusing, but it seems to work: The reader_kwargs, when passed without a reader key, | ||
| # are put into a dictionary where keys are individual letters of the reader name, and the value is the | ||
| # reader kwargs; however, notice the `reader[idx]` part in the call here, as idx is 0 and thus the first | ||
| # letter of the reader is used to index `reader_kwargs_without_filter`, thus retrieving the correct | ||
| # reader_kwargs. | ||
|
Comment on lines
+65
to
+69
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. So basically this logic is incorrect and works by luck/chance and needs to be fixed?
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I'm not sure if it was the intended behaviour or not... It's working in the end, so maybe it is? But yeah, it would definitely benefit from a dedicated test and then a refactor...
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. So what did you do to run into the issue? You called it from the Scene or noticed it within a test you already had?
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. no, I was debugging why my "engine" keyword arg was not being taken into account. It was another issue, but I spent a while here trying to understand what was happening… |
||
| reader_instance.create_storage_items( | ||
| loadables, | ||
| fh_kwargs=reader_kwargs_without_filter[None if reader is None else reader[idx]]) | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -18,6 +18,7 @@ | |
| """Module for testing the satpy.readers.core.netcdf module.""" | ||
|
|
||
| import os | ||
| from contextlib import closing | ||
|
|
||
| import numpy as np | ||
| import pytest | ||
|
|
@@ -68,22 +69,30 @@ def get_test_content(self, filename, filename_info, filetype_info): | |
| @pytest.fixture | ||
| def netcdf_file(tmp_path): | ||
| """Create a test NetCDF4 file.""" | ||
| from netCDF4 import Dataset | ||
| filename = tmp_path / "test.nc" | ||
| with Dataset(filename, "w") as nc: | ||
| from multiprocessing import Process | ||
| p = Process(target=_create_netcdf_file, args=(filename, )) | ||
| p.start() | ||
| p.join() | ||
|
||
| return filename | ||
|
|
||
|
|
||
| def _create_netcdf_file(filename): | ||
| from netCDF4 import Dataset | ||
| with closing(Dataset(filename, "w")) as nc: | ||
|
||
| # Create dimensions | ||
| nc.createDimension("rows", 10) | ||
| nc.createDimension("cols", 100) | ||
|
|
||
| # Create Group | ||
| g1 = nc.createGroup("test_group") | ||
|
|
||
| # Add datasets | ||
| ds1_f = g1.createVariable("ds1_f", np.float32, | ||
| dimensions=("rows", "cols")) | ||
| ds1_f[:] = np.arange(10. * 100).reshape((10, 100)) | ||
| ds1_i = g1.createVariable("ds1_i", np.int32, | ||
| dimensions=("rows", "cols")) | ||
| ds1_f.set_auto_scale(True) | ||
|
||
| ds1_i[:] = np.arange(10 * 100).reshape((10, 100)) | ||
| ds2_f = nc.createVariable("ds2_f", np.float32, | ||
| dimensions=("rows", "cols")) | ||
|
|
@@ -95,7 +104,7 @@ def netcdf_file(tmp_path): | |
| dimensions=("rows",)) | ||
| ds2_s[:] = np.arange(10) | ||
| ds2_sc = nc.createVariable("ds2_sc", np.int8, dimensions=()) | ||
| ds2_sc[:] = 42 | ||
| ds2_sc[:] = np.int8(42) | ||
|
|
||
| # Add attributes | ||
| nc.test_attr_str = "test_string" | ||
|
|
@@ -230,21 +239,34 @@ def test_filenotfound(self): | |
| with pytest.raises(IOError, match=".*(No such file or directory|Unknown file format).*"): | ||
| NetCDF4FileHandler("/thisfiledoesnotexist.nc", {}, {}) | ||
|
|
||
| def test_get_and_cache_npxr_is_xr(self, netcdf_file): | ||
| @pytest.mark.parametrize("engine", ["netcdf4", "h5netcdf"]) | ||
| def test_get_and_cache_npxr_is_xr(self, netcdf_file, engine): | ||
| """Test that get_and_cache_npxr() returns xr.DataArray.""" | ||
| import xarray as xr | ||
|
|
||
| from satpy.readers.core.netcdf import NetCDF4FileHandler | ||
| file_handler = NetCDF4FileHandler(netcdf_file, {}, {}, cache_handle=True) | ||
| file_handler = NetCDF4FileHandler(netcdf_file, {}, {}, cache_handle=True, engine=engine) | ||
|
|
||
| data = file_handler.get_and_cache_npxr("test_group/ds1_f") | ||
| assert isinstance(data, xr.DataArray) | ||
|
|
||
| def test_get_and_cache_npxr_data_is_cached(self, netcdf_file): | ||
| @pytest.mark.parametrize("engine", ["netcdf4", "h5netcdf"]) | ||
| def test_get_and_cache_npxr_for_scalar(self, netcdf_file, engine): | ||
| """Test that get_and_cache_npxr() returns xr.DataArray.""" | ||
| from satpy.readers.core.netcdf import NetCDF4FileHandler | ||
| file_handler = NetCDF4FileHandler(netcdf_file, {}, {}, cache_handle=True, engine=engine) | ||
|
|
||
| data = file_handler.get_and_cache_npxr("ds2_sc") | ||
| # WARN: h5netcdf returns an int64! | ||
| assert data.dtype in [np.int8, np.int64], "Scalar should be of type int8" | ||
| assert data == 42 | ||
|
|
||
| @pytest.mark.parametrize("engine", ["netcdf4", "h5netcdf"]) | ||
| def test_get_and_cache_npxr_data_is_cached(self, netcdf_file, engine): | ||
| """Test that the data are cached when get_and_cache_npxr() is called.""" | ||
| from satpy.readers.core.netcdf import NetCDF4FileHandler | ||
|
|
||
| file_handler = NetCDF4FileHandler(netcdf_file, {}, {}, cache_handle=True) | ||
| file_handler = NetCDF4FileHandler(netcdf_file, {}, {}, cache_handle=True, engine=engine) | ||
| data = file_handler.get_and_cache_npxr("test_group/ds1_f") | ||
|
|
||
| # Delete the dataset from the file content dict, it should be available from the cache | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.