@@ -1858,6 +1858,20 @@ def open_mfdataset(
     return combined
 
 
+def _get_netcdf_autoclose(dataset: Dataset, engine: T_NetcdfEngine) -> bool:
+    """Should we close files after each write operation?"""
+    scheduler = get_dask_scheduler()
+    have_chunks = any(v.chunks is not None for v in dataset.variables.values())
+
+    autoclose = have_chunks and scheduler in ["distributed", "multiprocessing"]
+    if autoclose and engine == "scipy":
+        raise NotImplementedError(
+            f"Writing netCDF files with the {engine} backend "
+            f"is not currently supported with dask's {scheduler} scheduler"
+        )
+    return autoclose
+
+
 WRITEABLE_STORES: dict[T_NetcdfEngine, Callable] = {
     "netcdf4": backends.NetCDF4DataStore.open,
     "scipy": backends.ScipyDataStore,
@@ -2064,16 +2078,7 @@ def to_netcdf(
     # sanitize unlimited_dims
     unlimited_dims = _sanitize_unlimited_dims(dataset, unlimited_dims)
 
-    # handle scheduler specific logic
-    scheduler = get_dask_scheduler()
-    have_chunks = any(v.chunks is not None for v in dataset.variables.values())
-
-    autoclose = have_chunks and scheduler in ["distributed", "multiprocessing"]
-    if autoclose and engine == "scipy":
-        raise NotImplementedError(
-            f"Writing netCDF files with the {engine} backend "
-            f"is not currently supported with dask's {scheduler} scheduler"
-        )
+    autoclose = _get_netcdf_autoclose(dataset, engine)
 
     if path_or_file is None:
         if not compute:
@@ -2116,7 +2121,7 @@ def to_netcdf(
         writes = writer.sync(compute=compute)
 
     finally:
-        if not multifile:
+        if not multifile and not autoclose:  # type: ignore[redundant-expr,unused-ignore]
             if compute:
                 store.close()
             else:
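End to end, the change matters when a dask-chunked dataset is written while a distributed or multiprocessing scheduler is active: autoclose is True, so the files are reopened and closed per write and the store is deliberately not closed in the finally block above. A hedged sketch of that path; the file name and cluster setup are illustrative.

# Illustrative only: the autoclose path exercised by the hunks above.
import numpy as np
import xarray as xr
from dask.distributed import Client

if __name__ == "__main__":
    client = Client()  # "distributed" scheduler -> autoclose applies to chunked data
    ds = xr.Dataset({"a": ("x", np.arange(100))}).chunk({"x": 10})
    ds.to_netcdf("out.nc", engine="netcdf4")
    # ds.to_netcdf("out.nc", engine="scipy")  # would raise NotImplementedError here
    client.close()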