@@ -1445,14 +1445,14 @@ def load(cls,
14451445 filename: The path to the file in which to save the trained model.
14461446 backend: A string identifying the used backend.
14471447 weights_only: Indicates whether unpickler should be restricted to loading only tensors, primitive types,
1448- dictionaries and any types added via ` py:func:torch.serialization.add_safe_globals`.
1449- See ` py:func:torch.load` with ``weights_only=True`` for more details. It it recommended to leave this
1448+                 dictionaries and any types added via :py:func:`torch.serialization.add_safe_globals`.
1449+                 See :py:func:`torch.load` with ``weights_only=True`` for more details. It is recommended to leave this
14501450 at the default value of ``None``, which sets the argument to ``False`` for torch<2.6, and ``True`` for
1451- higher versions of torch. If you experience issues with loading custom models (specified outside
1451+ higher versions of torch. If you experience issues with loading custom models (specified outside
14521452 of the CEBRA package), you can try to set this to ``False`` if you trust the source of the model.
14531453 kwargs: Optional keyword arguments passed directly to the loader.
14541454
1455-         Return :
1455+         Returns:
14561456 The model to load.
14571457
14581458 Note:
@@ -1462,7 +1462,6 @@ def load(cls,
14621462 For information about the file format please refer to :py:meth:`cebra.CEBRA.save`.
14631463
14641464 Example:
1465-
14661465 >>> import cebra
14671466 >>> import numpy as np
14681467 >>> import tempfile
@@ -1476,7 +1475,6 @@ def load(cls,
14761475 >>> loaded_model = cebra.CEBRA.load(tmp_file)
14771476 >>> embedding = loaded_model.transform(dataset)
14781477 >>> tmp_file.unlink()
1479-
14801478 """
14811479 supported_backends = ["auto" , "sklearn" , "torch" ]
14821480 if backend not in supported_backends :
0 commit comments