@@ -270,7 +270,7 @@ def save_pretrained(
270
270
save_function: Optional[Callable] = None,
271
271
safe_serialization: bool = True,
272
272
variant: Optional[str] = None,
273
- max_shard_size: Union[int, str] = "5GB",
273
+ max_shard_size: Union[int, str] = "10GB",
274
274
push_to_hub: bool = False,
275
275
**kwargs,
276
276
):
@@ -293,10 +293,13 @@ def save_pretrained(
293
293
Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
294
294
variant (`str`, *optional*):
295
295
If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
296
- max_shard_size (`int` or `str`, defaults to `"5GB"`):
296
+ max_shard_size (`int` or `str`, defaults to `"10GB"`):
297
297
The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be of a size
298
298
lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5GB"`).
299
- If expressed as an integer, the unit is bytes.
299
+ If expressed as an integer, the unit is bytes. Note that this limit will be decreased after a certain
300
+ period of time (starting from Oct 2024) to allow users to upgrade to the latest version of `diffusers`.
301
+ This is to establish a common default size for this argument across different libraries in the Hugging
302
+ Face ecosystem (`transformers`, and `accelerate`, for example).
300
303
push_to_hub (`bool`, *optional*, defaults to `False`):
301
304
Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the
302
305
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
0 commit comments