From e14048d4efe6fa0ec145b34d3f155c8194ad3666 Mon Sep 17 00:00:00 2001
From: jemmajeffree <98864717+jemmajeffree@users.noreply.github.com>
Date: Fri, 25 Jul 2025 19:31:09 +1000
Subject: [PATCH 1/2] Update open_mfdataset docstring

---
 xarray/backends/api.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/xarray/backends/api.py b/xarray/backends/api.py
index cfd3ff7fc0f..8b21f5a33f4 100644
--- a/xarray/backends/api.py
+++ b/xarray/backends/api.py
@@ -1494,10 +1494,10 @@ def open_mfdataset(
     chunks : int, dict, 'auto' or None, optional
         Dictionary with keys given by dimension names and values given by chunk sizes.
         In general, these should divide the dimensions of each dataset. If int, chunk
-        each dimension by ``chunks``. By default, chunks will be chosen to load entire
-        input files into memory at once. This has a major impact on performance: please
-        see the full documentation for more details [2]_. This argument is evaluated
-        on a per-file basis, so chunk sizes that span multiple files will be ignored.
+        each dimension by ``chunks``. By default, chunks will be chosen to match the
+        chunks on disk. This may impact performance: please see the full documentation
+        for more details [2]_. This argument is evaluated on a per-file basis, so chunk
+        sizes that span multiple files will be ignored.
     concat_dim : str, DataArray, Index or a Sequence of these or None, optional
         Dimensions to concatenate files along. You only need to provide this argument
         if ``combine='nested'``, and if any of the dimensions along which you want to

From 16be93ea0d434ed7a90de5e2ac282935ac563219 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Fri, 25 Jul 2025 09:43:25 +0000
Subject: [PATCH 2/2] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 xarray/backends/api.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/xarray/backends/api.py b/xarray/backends/api.py
index 8b21f5a33f4..1fd75dbbfa5 100644
--- a/xarray/backends/api.py
+++ b/xarray/backends/api.py
@@ -1494,9 +1494,9 @@ def open_mfdataset(
     chunks : int, dict, 'auto' or None, optional
         Dictionary with keys given by dimension names and values given by chunk sizes.
         In general, these should divide the dimensions of each dataset. If int, chunk
-        each dimension by ``chunks``. By default, chunks will be chosen to match the
-        chunks on disk. This may impact performance: please see the full documentation
-        for more details [2]_. This argument is evaluated on a per-file basis, so chunk
+        each dimension by ``chunks``. By default, chunks will be chosen to match the
+        chunks on disk. This may impact performance: please see the full documentation
+        for more details [2]_. This argument is evaluated on a per-file basis, so chunk
         sizes that span multiple files will be ignored.
     concat_dim : str, DataArray, Index or a Sequence of these or None, optional
         Dimensions to concatenate files along. You only need to provide this argument
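
A minimal usage sketch (not part of the patch) of the ``chunks`` behaviour the updated docstring describes; the file glob and chunk sizes below are hypothetical, and dask needs to be installed for chunked opening.

```python
import xarray as xr

# Hypothetical collection of netCDF files; the glob pattern is illustrative only.
paths = "data/ocean_*.nc"

# Default: chunks are chosen per file (per the docstring, matching the chunks on disk).
ds_default = xr.open_mfdataset(paths, combine="by_coords")

# Explicit per-dimension chunking; evaluated on a per-file basis, so a chunk size
# spanning multiple files is effectively ignored at the file boundaries.
ds_chunked = xr.open_mfdataset(paths, combine="by_coords", chunks={"time": 12})

print(ds_chunked.chunks)  # mapping of dimension name -> tuple of chunk sizes
```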