1+ # Copyright Iris contributors
2+ #
3+ # This file is part of Iris and is released under the BSD license.
4+ # See LICENSE in the root of the repository for full licensing details.
5+ """Integration tests for string data handling."""
6+
7+ import subprocess
8+
19import netCDF4 as nc
210import numpy as np
311import pytest
412
513import iris
614from iris .coords import AuxCoord , DimCoord
715from iris .cube import Cube
16+ from iris .tests import env_bin_path
817
918NX , N_STRLEN = 3 , 64
1019TEST_STRINGS = ["Münster" , "London" , "Amsterdam" ]
1625 TEST_COORD_VALS [- 1 ] = "Xsandwich" # makes the max coord strlen same as data one
1726
1827
28+ # Ensure all tests run with "split attrs" turned on.
1929@pytest .fixture (scope = "module" , autouse = True )
2030def enable_split_attrs ():
2131 with iris .FUTURE .context (save_split_attrs = True ):
@@ -119,12 +129,19 @@ def make_testcube(
119129 return cube
120130
121131
122- def show_result (filepath ):
123- from pp_utils import ncdump
132+ NCDUMP_PATHSTR = str (env_bin_path ("ncdump" ))
133+
134+
def ncdump(nc_path: str, *args: str) -> None:
    """Call the ``ncdump`` utility to print a dump of a netCDF file.

    Parameters
    ----------
    nc_path : str
        Path of the netCDF file to dump.
    *args : str
        Extra command-line options passed through to ``ncdump``
        (e.g. ``"-h"`` for header-only output).

    Raises
    ------
    subprocess.CalledProcessError
        If ``ncdump`` exits with a non-zero status (``check=True``).
    """
    # Bug fix: the original used ``list(*args)``, which star-unpacks the
    # varargs tuple into ``list()``.  With one extra string argument that
    # splits the string into single characters, and with two or more it
    # raises TypeError.  ``list(args)`` keeps each option intact.
    call_args = [NCDUMP_PATHSTR, nc_path] + list(args)
    subprocess.run(call_args, check=True)
139+
124140
141+ def show_result (filepath ):
125142 print (f"File { filepath } " )
126143 print ("NCDUMP:" )
127- ncdump (filepath , "" )
144+ ncdump (filepath )
128145 # with nc.Dataset(filepath, "r") as ds:
129146 # v = ds.variables["v"]
130147 # print("\n----\nNetcdf data readback (basic)")
@@ -159,6 +176,13 @@ def show_result(filepath):
159176 print (repr (err ))
160177
161178
@pytest.fixture(scope="session")
def save_dir(tmp_path_factory):
    """Session-scoped temporary directory for files written by the save tests."""
    result_dir = tmp_path_factory.mktemp("save_files")
    return result_dir
182+
183+
184+ # TODO: the tests don't test things properly yet, they just exercise the code and print
185+ # things for manual debugging.
162186tsts = (
163187 None ,
164188 "ascii" ,
@@ -172,10 +196,10 @@ def show_result(filepath):
172196
173197
174198@pytest .mark .parametrize ("encoding" , tsts )
175- def test_load_encodings (encoding ):
199+ def test_load_encodings (encoding , save_dir ):
176200 # small change
177201 print (f"\n =========\n Testing encoding: { encoding } " )
178- filepath = f"tmp_ { str (encoding )} .nc"
202+ filepath = save_dir / f"tmp_load_ { str (encoding )} .nc"
179203 do_as = encoding
180204 if encoding != "utf-32" :
181205 do_as = "utf-8"
@@ -190,12 +214,12 @@ def test_load_encodings(encoding):
190214
191215
192216@pytest .mark .parametrize ("encoding" , tsts )
193- def test_save_encodings (encoding ):
217+ def test_save_encodings (encoding , save_dir ):
194218 cube = make_testcube (
195219 dataarray = TEST_STRINGS , coordarray = TEST_COORD_VALS , encoding_str = encoding
196220 )
197221 print (cube )
198- filepath = f"tmp_save_{ str (encoding )} .nc"
222+ filepath = save_dir / f"tmp_save_{ str (encoding )} .nc"
199223 if encoding == "ascii" :
200224 with pytest .raises (
201225 UnicodeEncodeError ,
@@ -205,19 +229,3 @@ def test_save_encodings(encoding):
205229 else :
206230 iris .save (cube , filepath )
207231 show_result (filepath )
208-
209-
210- # @pytest.mark.parametrize("ndim", [1, 2])
211- # def test_convert_bytes_to_strings(ndim: int):
212- # if ndim == 1:
213- # source = convert_strings_to_chararray(TEST_STRINGS, 16)
214- # elif ndim == 2:
215- # source = np.stack([
216- # convert_strings_to_chararray(TEST_STRINGS, 16),
217- # convert_strings_to_chararray(TEST_COORD_VALS, 16),
218- # ])
219- # else:
220- # raise ValueError(f"Unexpected param ndim={ndim}.")
221- # # convert the strings to bytes
222- # result = convert_bytesarray_to_strings(source)
223- # print(result)
0 commit comments