Fix docstring interlinks #4221
Changes from 14 commits: 55a7dd6, c013c2f, 12c569d, e30b99b, c83eb3a, 58a4459, 3766769, aa08bb2, 6e0eac5, a5f6af2, fc5b90d, a28c517, 9fa47e2, 334e439, 496bfc9, c4dfded
```diff
@@ -57,14 +57,14 @@
 class PreTrainedModelWrapper(nn.Module):
-    r"""
-    A wrapper class around a (`transformers.PreTrainedModel`) to be compatible with the (`~transformers.PreTrained`)
-    class in order to keep some attributes and methods of the (`~transformers.PreTrainedModel`) class.
+    """
+    A wrapper class around a [`~transformers.PreTrainedModel`] to be compatible with the (`~transformers.PreTrained`)
+    class in order to keep some attributes and methods of the [`~transformers.PreTrainedModel`] class.

     Attributes:
-        pretrained_model (`transformers.PreTrainedModel`):
+        pretrained_model ([`~transformers.PreTrainedModel`]):
             The model to be wrapped.
-        parent_class (`transformers.PreTrainedModel`):
+        parent_class ([`~transformers.PreTrainedModel`]):
             The parent class of the model to be wrapped.
         supported_args (`list`):
             The list of arguments that are supported by the wrapper class.
```
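For context on the change itself: Hugging Face's doc-builder turns bracketed references such as [`~transformers.PreTrainedModel`] into links in the rendered documentation, with the `~` prefix shortening the displayed name to the last component, whereas plain backticks only render as inline code. A minimal sketch of the convention (the class name and wording below are illustrative, not part of this diff):

```python
class ExampleWrapper:
    """
    Illustrative docstring only: `SomeClass` renders as plain inline code, while
    [`~transformers.PreTrainedModel`] is rendered by doc-builder as a link to the
    `transformers.PreTrainedModel` API page, displayed simply as "PreTrainedModel".
    """
```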
```diff
@@ -111,19 +111,20 @@ def __init__(
     def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
         r"""
         Instantiates a new model from a pretrained model from `transformers`. The pretrained model is loaded using the
-        `from_pretrained` method of the `transformers.PreTrainedModel` class. The arguments that are specific to the
-        `transformers.PreTrainedModel` class are passed along this method and filtered out from the `kwargs` argument.
+        `from_pretrained` method of the [`~transformers.PreTrainedModel`] class. The arguments that are specific to the
+        [`~transformers.PreTrainedModel`] class are passed along this method and filtered out from the `kwargs`
+        argument.

         Args:
-            pretrained_model_name_or_path (`str` or `transformers.PreTrainedModel`):
+            pretrained_model_name_or_path (`str` or [`~transformers.PreTrainedModel`]):
                 The path to the pretrained model or its name.
-            *model_args (`list`, *optional*)):
+            *model_args (`list`, *optional*):
                 Additional positional arguments passed along to the underlying model's `from_pretrained` method.
             **kwargs (`dict`, *optional*):
                 Additional keyword arguments passed along to the underlying model's `from_pretrained` method. We also
-                pre-process the kwargs to extract the arguments that are specific to the `transformers.PreTrainedModel`
-                class and the arguments that are specific to trl models. The kwargs also support
-                `prepare_model_for_kbit_training` arguments from `peft` library.
+                pre-process the kwargs to extract the arguments that are specific to the
+                [`~transformers.PreTrainedModel`] class and the arguments that are specific to trl models. The kwargs
+                also support `prepare_model_for_kbit_training` arguments from `peft` library.
         """
         if kwargs is not None:
             peft_config = kwargs.pop("peft_config", None)
```
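As a usage sketch of the pass-through behavior this docstring describes (the checkpoint id and the value-head kwarg are illustrative assumptions, not taken from this diff):

```python
from trl import AutoModelForCausalLMWithValueHead

# "gpt2" is only an example checkpoint; the positional argument and standard
# kwargs are forwarded to transformers' from_pretrained, while trl-specific
# kwargs are filtered out beforehand.
model = AutoModelForCausalLMWithValueHead.from_pretrained(
    "gpt2",
    v_head_init_strategy="normal",  # illustrative trl-specific kwarg, not passed to transformers
)
```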
```diff
@@ -507,8 +508,8 @@ def add_and_load_reward_modeling_adapter(
     def push_to_hub(self, *args, **kwargs):
         r"""
         Push the pretrained model to the hub. This method is a wrapper around
-        `transformers.PreTrainedModel.push_to_hub`. Please refer to the documentation of
-        `transformers.PreTrainedModel.push_to_hub` for more information.
+        [`~transformers.PreTrainedModel.push_to_hub`]. Please refer to the documentation of
+        [`~transformers.PreTrainedModel.push_to_hub`] for more information.

         Args:
             *args (`list`, *optional*):
```
```diff
@@ -521,8 +522,8 @@ def push_to_hub(self, *args, **kwargs):
     def save_pretrained(self, *args, **kwargs):
         r"""
         Save the pretrained model to a directory. This method is a wrapper around
-        `transformers.PreTrainedModel.save_pretrained`. Please refer to the documentation of
-        `transformers.PreTrainedModel.save_pretrained` for more information.
+        [`~transformers.PreTrainedModel.save_pretrained`]. Please refer to the documentation of
+        [`~transformers.PreTrainedModel.save_pretrained`] for more information.

         Args:
             *args (`list`, *optional*):
```
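Both methods simply forward their arguments to the corresponding `transformers` methods, so they are called the same way as on any `PreTrainedModel` (continuing the earlier sketch; the directory and repo id below are placeholders):

```python
# Arguments are forwarded verbatim to transformers.PreTrainedModel.save_pretrained
# and transformers.PreTrainedModel.push_to_hub.
model.save_pretrained("./my-wrapped-model")        # hypothetical local directory
model.push_to_hub("my-username/my-wrapped-model")  # hypothetical Hub repo id
```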
```diff
@@ -596,14 +597,14 @@ def create_reference_model(
     Creates a static reference copy of a model. Note that model will be in `.eval()` mode.

     Args:
-        model (`PreTrainedModelWrapper`): The model to be copied.
+        model ([`PreTrainedModelWrapper`]): The model to be copied.
         num_shared_layers (`int`, *optional*):
             The number of initial layers that are shared between both models and kept frozen.
         pattern (`str`, *optional*): The shared layers are selected with a string pattern
             (e.g. "transformer.h.{layer}" for GPT2) and if a custom pattern is necessary it can be passed here.

     Returns:
-        `PreTrainedModelWrapper`
+        [`PreTrainedModelWrapper`]
     """
     if is_deepspeed_zero3_enabled():
         raise ValueError(
```
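A short usage sketch of `create_reference_model` as documented above (the layer count is an arbitrary example):

```python
from trl import create_reference_model

# Full frozen copy of the wrapped model, returned in .eval() mode.
ref_model = create_reference_model(model)

# Alternatively, keep the first 6 layers shared between model and reference and freeze them.
ref_model_shared = create_reference_model(model, num_shared_layers=6)
```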
```diff
@@ -665,13 +666,13 @@ def create_reference_model(
 class GeometricMixtureWrapper(GenerationMixin):
-    r"""
+    """
     Geometric Mixture generation wrapper that samples from the logits of two model's geometric mixture.

     Args:
-        model (`PreTrainedModel`): The model to be wrapped.
-        ref_model (`PreTrainedModel`): The reference model.
-        generation_config (`GenerationConfig`): The generation config.
+        model ([`~transformers.PreTrainedModel`]): The model to be wrapped.
+        ref_model ([`~transformers.PreTrainedModel`]): The reference model.
+        generation_config ([`~transformers.GenerationConfig`]): The generation config.
         mixture_coef (`float`, *optional* - default: 0.5): The mixture coefficient.
     """
```
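The docstring describes sampling from a geometric mixture of the two models' distributions; in logit space that amounts to a convex combination of logits followed by renormalization, roughly as sketched below (an illustrative reading of the docstring, not the exact trl implementation):

```python
import torch
import torch.nn.functional as F

def geometric_mixture_logits(model_logits: torch.Tensor,
                             ref_model_logits: torch.Tensor,
                             mixture_coef: float = 0.5) -> torch.Tensor:
    # A geometric mixture of two softmax distributions corresponds to a weighted
    # sum of their logits, renormalized here via log_softmax.
    return F.log_softmax(
        mixture_coef * ref_model_logits + (1 - mixture_coef) * model_logits,
        dim=-1,
    )
```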
For some reason, this one doesn't render.
Yes, I know, @qgallouedec: see my previous comment in:
@qgallouedec this is fixed after the merge of my PR in doc-builder: huggingface/doc-builder#628 (comment)