 from transformers.models.mistral3.configuration_mistral3 import Mistral3Config as HFMistral3Config
 from transformers.models.mistral3.modeling_mistral3 import Mistral3ForConditionalGeneration
 from transformers.processing_utils import Unpack
-from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
+from transformers.utils import TransformersKwargs, can_return_tuple, logging
 
 logger = logging.get_logger(__name__)
 
@@ -160,7 +160,6 @@ def rotate_half(x):
     return torch.cat((-x2, x1), dim=-1)
 
 
-@auto_docstring
 class Ministral3PreTrainedModel(PreTrainedModel):
     config: Ministral3Config
     base_model_prefix = "model"
@@ -177,21 +176,11 @@ class Ministral3PreTrainedModel(PreTrainedModel):
 
 
 @dataclass
-@auto_docstring(
-    custom_intro="""
-    Base class for Ministral3 outputs, with hidden states and attentions.
-    """
-)
 class Ministral3ModelOutputWithPast(BaseModelOutputWithPast):
     image_hidden_states: Optional[torch.FloatTensor] = None
 
 
 @dataclass
-@auto_docstring(
-    custom_intro="""
-    Base class for Ministral3 causal language model outputs.
-    """
-)
 class Ministral3CausalLMOutputWithPast(CausalLMOutputWithPast):
     pass
 
@@ -433,7 +422,6 @@ def forward(
         return hidden_states
 
 
-@auto_docstring
 class Ministral3Model(Ministral3PreTrainedModel):
     def __init__(self, config: Ministral3Config):
         super().__init__(config)
@@ -451,7 +439,6 @@ def __init__(self, config: Ministral3Config):
         self.post_init()
 
     @can_return_tuple
-    @auto_docstring
     def forward(
         self,
         input_ids: Optional[torch.LongTensor] = None,
@@ -512,7 +499,6 @@ def forward(
         )
 
 
-@auto_docstring
 class Ministral3ForCausalLM(Ministral3PreTrainedModel, GenerationMixin):
     _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
     _tp_plan = {"lm_head": "colwise_rep"}
@@ -526,7 +512,6 @@ def __init__(self, config: Ministral3Config):
         self.post_init()
 
     @can_return_tuple
-    @auto_docstring
     def forward(
         self,
         input_ids: Optional[torch.LongTensor] = None,