Skip to content

Commit 479ad52 — "Use distinct file paths" (1 parent: 0106750)

File tree

2 files changed: +13 additions, −12 deletions

virtualizarr/tests/test_readers/conftest.py

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -372,18 +372,22 @@ def scalar_fill_value_hdf5_file(tmpdir):
372372
compound_fill = (-9999, -9999.0)
373373

374374
fill_values = [
375-
{"fill_value": -9999, "data": np.random.randint(0, 10, size=(5))},
376-
{"fill_value": -9999.0, "data": np.random.random(5)},
377-
{"fill_value": np.nan, "data": np.random.random(5)},
378-
{"fill_value": False, "data": np.array([True, False, False, True, True])},
379-
{"fill_value": "NaN", "data": np.array(["three"], dtype="S10")},
380-
{"fill_value": compound_fill, "data": compound_data},
375+
{"label": "int", "fill_value": -9999, "data": np.random.randint(0, 10, size=(5))},
376+
{"label": "float", "fill_value": -9999.0, "data": np.random.random(5)},
377+
{"label": "npNan", "fill_value": np.nan, "data": np.random.random(5)},
378+
{
379+
"label": "False",
380+
"fill_value": False,
381+
"data": np.array([True, False, False, True, True]),
382+
},
383+
{"label": "NaN", "fill_value": "NaN", "data": np.array(["three"], dtype="S10")},
384+
{"label": "compound", "fill_value": compound_fill, "data": compound_data},
381385
]
382386

383387

384388
@pytest.fixture(params=fill_values)
385389
def cf_fill_value_hdf5_file(tmpdir, request):
386-
filepath = f"{tmpdir}/cf_fill_value.nc"
390+
filepath = f"{tmpdir}/cf_fill_value_{request.param['label']}.nc"
387391
f = h5py.File(filepath, "w")
388392
dset = f.create_dataset(name="data", data=request.param["data"], chunks=True)
389393
dim_scale = f.create_dataset(

virtualizarr/tests/test_readers/test_kerchunk.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -201,7 +201,7 @@ def test_open_virtual_dataset_existing_kerchunk_refs(
201201
# Test valid json and parquet reference formats
202202

203203
if reference_format == "json":
204-
ref_filepath = tmp_path / "ref.json"
204+
ref_filepath = tmp_path / "ref1.json"
205205

206206
import ujson
207207

@@ -233,14 +233,11 @@ def test_open_virtual_dataset_existing_kerchunk_refs(
233233

234234

235235
@requires_kerchunk
236-
@pytest.mark.xfail(
237-
reason="Test hangs after https://github.com/zarr-developers/VirtualiZarr/pull/420"
238-
)
239236
def test_notimplemented_read_inline_refs(tmp_path, netcdf4_inlined_ref):
240237
# For now, we raise a NotImplementedError if we read existing references that have inlined data
241238
# https://github.com/zarr-developers/VirtualiZarr/pull/251#pullrequestreview-2361916932
242239

243-
ref_filepath = tmp_path / "ref.json"
240+
ref_filepath = tmp_path / "ref2.json"
244241

245242
import ujson
246243

0 commit comments

Comments (0)