Skip to content

Commit a75bfdf

Browse files
author
wpbonelli
committed
lists of strings as records
1 parent 7d41fcc commit a75bfdf

File tree

3 files changed

+41
-24
lines changed

3 files changed

+41
-24
lines changed

flopy4/mf6/converter.py

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
from datetime import datetime
12
from pathlib import Path
23
from typing import Any
34

@@ -14,7 +15,7 @@ def _transform_path_to_record(field_name: str, path_value: Path) -> tuple:
1415
if field_name.endswith("_file"):
1516
base_name = field_name.replace("_file", "").upper()
1617
return (base_name, "FILEOUT", str(path_value))
17-
18+
1819
# Default fallback
1920
return (field_name.upper(), "FILEOUT", str(path_value))
2021

@@ -27,11 +28,23 @@ def unstructure_component(value: Component) -> dict[str, Any]:
2728
blocks[block_name] = {}
2829
for field_name in block.keys():
2930
field_value = data[field_name]
30-
31+
3132
# Transform Path fields to record format
3233
if isinstance(field_value, Path) and field_value is not None:
3334
field_value = _transform_path_to_record(field_name, field_value)
34-
35+
36+
# Transform datetime fields to string format
37+
elif isinstance(field_value, datetime) and field_value is not None:
38+
field_value = field_value.isoformat()
39+
40+
# Transform auxiliary fields to tuple for single-line record format
41+
elif (
42+
field_name == "auxiliary"
43+
and hasattr(field_value, "values")
44+
and field_value is not None
45+
):
46+
field_value = tuple(field_value.values.tolist())
47+
3548
blocks[block_name][field_name] = field_value
3649
return blocks
3750

flopy4/mf6/filters.py

Lines changed: 25 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -100,25 +100,31 @@ def array_chunks(value: xr.DataArray, chunks: Mapping[Hashable, int] | None = No
100100
- If the array is 1D or 2D, yield it as a single chunk.
101101
"""
102102

103-
if value.chunks is None:
104-
if chunks is None:
105-
match value.ndim:
106-
case 1:
107-
# 1D array, single chunk
108-
chunks = {value.dims[0]: value.shape[0]}
109-
case 2:
110-
# 2D array, single chunk
111-
chunks = {value.dims[0]: value.shape[0], value.dims[1]: value.shape[1]}
112-
case 3:
113-
# 3D array, chunk for each layer
114-
chunks = {
115-
value.dims[0]: 1,
116-
value.dims[1]: value.shape[1],
117-
value.dims[2]: value.shape[2],
118-
}
119-
value = value.chunk(chunks)
120-
for chunk in value.data.blocks:
121-
yield np.squeeze(chunk.compute())
103+
# Check if it's a dask array (has .blocks attribute)
104+
if hasattr(value.data, "blocks"):
105+
# Dask array - use chunking logic
106+
if value.chunks is None:
107+
if chunks is None:
108+
match value.ndim:
109+
case 1:
110+
# 1D array, single chunk
111+
chunks = {value.dims[0]: value.shape[0]}
112+
case 2:
113+
# 2D array, single chunk
114+
chunks = {value.dims[0]: value.shape[0], value.dims[1]: value.shape[1]}
115+
case 3:
116+
# 3D array, chunk for each layer
117+
chunks = {
118+
value.dims[0]: 1,
119+
value.dims[1]: value.shape[1],
120+
value.dims[2]: value.shape[2],
121+
}
122+
value = value.chunk(chunks)
123+
for chunk in value.data.blocks:
124+
yield np.squeeze(chunk.compute())
125+
else:
126+
# Regular numpy array - yield as single chunk
127+
yield np.squeeze(value.values)
122128

123129

124130
def array2string(value: NDArray) -> str:

test/test_codec.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -275,8 +275,6 @@ def test_dumps_wel_with_auxiliary():
275275
print("WEL with auxiliary sparse result:")
276276
print(result)
277277

278-
assert "AUXILIARY well_id" in result
279-
280278
period_section = result.split("BEGIN PERIOD 1")[1].split("END PERIOD 1")[0].strip()
281279
lines = [line.strip() for line in period_section.split("\n") if line.strip()]
282280

0 commit comments

Comments (0)