Skip to content

Commit 5a5a07f

Browse files
committed
Fix for dataless; avoid FUTURE global state change from temporary tests.
1 parent c3f9192 commit 5a5a07f

File tree

2 files changed

+29
-20
lines changed

2 files changed

+29
-20
lines changed

lib/iris/fileformats/netcdf/saver.py

Lines changed: 14 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1813,7 +1813,7 @@ def _create_generic_cf_array_var(
18131813
if cube is not None and data is not None and cube.shape != data.shape:
18141814
compression_kwargs = {}
18151815

1816-
if np.issubdtype(data.dtype, np.str_):
1816+
if not is_dataless and np.issubdtype(data.dtype, np.str_):
18171817
# Deal with string-type variables.
18181818
# Typically CF label variables, but also possibly ancil-vars ?
18191819

@@ -1898,8 +1898,13 @@ def _create_generic_cf_array_var(
18981898
else:
18991899
# A normal (numeric) variable.
19001900
# ensure a valid datatype for the file format.
1901-
element_type = type(element).__name__
1902-
data = self._ensure_valid_dtype(data, element_type, element)
1901+
if is_dataless:
1902+
dtype = self._DATALESS_DTYPE
1903+
fill_value = self._DATALESS_FILLVALUE
1904+
else:
1905+
element_type = type(element).__name__
1906+
data = self._ensure_valid_dtype(data, element_type, element)
1907+
dtype = data.dtype.newbyteorder("=")
19031908

19041909
# Check if this is a dim-coord.
19051910
is_dimcoord = cube is not None and element in cube.dim_coords
@@ -1913,7 +1918,7 @@ def _create_generic_cf_array_var(
19131918
# Create the CF-netCDF variable.
19141919
cf_var = self._dataset.createVariable(
19151920
cf_name,
1916-
data.dtype.newbyteorder("="),
1921+
dtype,
19171922
element_dims,
19181923
fill_value=fill_value,
19191924
**compression_kwargs,
@@ -2365,19 +2370,12 @@ def _create_cf_data_variable(
23652370
# be removed.
23662371
# Get the values in a form which is valid for the file format.
23672372
is_dataless = cube.is_dataless()
2368-
if is_dataless:
2369-
data = None
2370-
else:
2371-
data = self._ensure_valid_dtype(cube.core_data(), "cube", cube)
23722373

2373-
if is_dataless:
2374-
# The variable must have *some* dtype, and it must be maskable
2375-
dtype = self._DATALESS_DTYPE
2376-
fill_value = self._DATALESS_FILLVALUE
2377-
elif not packing:
2378-
dtype = data.dtype.newbyteorder("=")
2379-
else:
2380-
if isinstance(packing, dict):
2374+
if not is_dataless:
2375+
data = self._ensure_valid_dtype(cube.core_data(), "cube", cube)
2376+
if not packing:
2377+
dtype = data.dtype.newbyteorder("=")
2378+
elif isinstance(packing, dict):
23812379
if "dtype" not in packing:
23822380
msg = "The dtype attribute is required for packing."
23832381
raise ValueError(msg)

lib/iris/tests/integration/netcdf/test_chararrays.py

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,6 @@
33
import pytest
44

55
import iris
6-
7-
iris.FUTURE.save_split_attrs = True
86
from iris.coords import AuxCoord, DimCoord
97
from iris.cube import Cube
108

@@ -18,6 +16,12 @@
1816
TEST_COORD_VALS[-1] = "Xsandwich" # makes the max coord strlen same as data one
1917

2018

19+
@pytest.fixture(scope="module", autouse=True)
20+
def enable_split_attrs():
21+
with iris.FUTURE.context(save_split_attrs=True):
22+
yield
23+
24+
2125
def convert_strings_to_chararray(string_array_1d, maxlen, encoding="utf-8"):
2226
bbytes = [text.encode(encoding) for text in string_array_1d]
2327
pad = b"\0" * maxlen
@@ -192,8 +196,15 @@ def test_save_encodings(encoding):
192196
)
193197
print(cube)
194198
filepath = f"tmp_save_{str(encoding)}.nc"
195-
iris.save(cube, filepath)
196-
show_result(filepath)
199+
if encoding == "ascii":
200+
with pytest.raises(
201+
UnicodeEncodeError,
202+
match="'ascii' codec can't encode character.*not in range",
203+
):
204+
iris.save(cube, filepath)
205+
else:
206+
iris.save(cube, filepath)
207+
show_result(filepath)
197208

198209

199210
# @pytest.mark.parametrize("ndim", [1, 2])

0 commit comments

Comments (0)