@@ -1318,12 +1318,12 @@ def _set_model_specific_special_tokens(self, special_tokens: List[str]):
13181318 Whether to return token type IDs. If left to the default, will return the token type IDs according to
13191319 the specific tokenizer's default, defined by the `return_outputs` attribute.
13201320
1321- [What are token type IDs?](../glossary#token-type-ids)
1321+ [What are token type IDs?](../glossary#token-type-ids) @lint-ignore
13221322 return_attention_mask (`bool`, *optional*):
13231323 Whether to return the attention mask. If left to the default, will return the attention mask according
13241324 to the specific tokenizer's default, defined by the `return_outputs` attribute.
13251325
1326- [What are attention masks?](../glossary#attention-mask)
1326+ [What are attention masks?](../glossary#attention-mask) @lint-ignore
13271327 return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
13281328 Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
13291329 of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
@@ -1346,17 +1346,17 @@ def _set_model_specific_special_tokens(self, special_tokens: List[str]):
13461346
13471347 - **input_ids** -- List of token ids to be fed to a model.
13481348
1349- [What are input IDs?](../glossary#input-ids)
1349+ [What are input IDs?](../glossary#input-ids) @lint-ignore
13501350
13511351 - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
13521352 if *"token_type_ids"* is in `self.model_input_names`).
13531353
1354- [What are token type IDs?](../glossary#token-type-ids)
1354+ [What are token type IDs?](../glossary#token-type-ids) @lint-ignore
13551355
13561356 - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
13571357 `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
13581358
1359- [What are attention masks?](../glossary#attention-mask)
1359+ [What are attention masks?](../glossary#attention-mask) @lint-ignore
13601360
13611361 - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
13621362 `return_overflowing_tokens=True`).
@@ -3495,7 +3495,7 @@ def pad(
34953495 Whether to return the attention mask. If left to the default, will return the attention mask according
34963496 to the specific tokenizer's default, defined by the `return_outputs` attribute.
34973497
3498- [What are attention masks?](../glossary#attention-mask)
3498+ [What are attention masks?](../glossary#attention-mask) @lint-ignore
34993499 return_tensors (`str` or [`~utils.TensorType`], *optional*):
35003500 If set, will return tensors instead of list of python integers. Acceptable values are:
35013501
@@ -3621,7 +3621,7 @@ def create_token_type_ids_from_sequences(
36213621 ) -> List[int]:
36223622 """Create the token type IDs corresponding to the sequences passed.
36233623
3624- [What are token type IDs?](../glossary#token-type-ids)
3624+ [What are token type IDs?](../glossary#token-type-ids) @lint-ignore
36253625
36263626 Should be overridden in a subclass if the model has a special way of building those.
36273627
0 commit comments