@@ -10,11 +10,41 @@ class TestTasks(ExtTestCase):
1010 @hide_stdout ()
1111 def test_text2text_generation (self ):
1212 mid = "sshleifer/tiny-marian-en-de"
13- data = get_untrained_model_with_inputs (mid , verbose = 1 )
13+ data = get_untrained_model_with_inputs (mid , verbose = 1 , add_second_input = True )
14+ self .assertEqual (data ["task" ], "text2text-generation" )
1415 self .assertIn ((data ["size" ], data ["n_weights" ]), [(473928 , 118482 )])
1516 model , inputs , ds = data ["model" ], data ["inputs" ], data ["dynamic_shapes" ]
1617 raise unittest .SkipTest (f"not working for { mid !r} " )
1718 model (** inputs )
19+ model (** data ["inputs2" ])
20+ with bypass_export_some_errors (patch_transformers = True , verbose = 10 ):
21+ torch .export .export (
22+ model , (), kwargs = inputs , dynamic_shapes = use_dyn_not_str (ds ), strict = False
23+ )
24+
25+ @hide_stdout ()
26+ def test_text_generation (self ):
27+ mid = "arnir0/Tiny-LLM"
28+ data = get_untrained_model_with_inputs (mid , verbose = 1 , add_second_input = True )
29+ self .assertEqual (data ["task" ], "text-generation" )
30+ self .assertIn ((data ["size" ], data ["n_weights" ]), [(51955968 , 12988992 )])
31+ model , inputs , ds = data ["model" ], data ["inputs" ], data ["dynamic_shapes" ]
32+ model (** inputs )
33+ model (** data ["inputs2" ])
34+ with bypass_export_some_errors (patch_transformers = True , verbose = 10 ):
35+ torch .export .export (
36+ model , (), kwargs = inputs , dynamic_shapes = use_dyn_not_str (ds ), strict = False
37+ )
38+
39+ @hide_stdout ()
40+ def test_image_classification (self ):
41+ mid = "hf-internal-testing/tiny-random-BeitForImageClassification"
42+ data = get_untrained_model_with_inputs (mid , verbose = 1 , add_second_input = True )
43+ self .assertEqual (data ["task" ], "image-classification" )
44+ self .assertIn ((data ["size" ], data ["n_weights" ]), [(56880 , 14220 )])
45+ model , inputs , ds = data ["model" ], data ["inputs" ], data ["dynamic_shapes" ]
46+ model (** inputs )
47+ model (** data ["inputs2" ])
1848 with bypass_export_some_errors (patch_transformers = True , verbose = 10 ):
1949 torch .export .export (
2050 model , (), kwargs = inputs , dynamic_shapes = use_dyn_not_str (ds ), strict = False
@@ -23,9 +53,11 @@ def test_text2text_generation(self):
2353 @hide_stdout ()
2454 def test_automatic_speech_recognition (self ):
2555 mid = "openai/whisper-tiny"
26- data = get_untrained_model_with_inputs (mid , verbose = 1 )
56+ data = get_untrained_model_with_inputs (mid , verbose = 1 , add_second_input = True )
57+ self .assertEqual (data ["task" ], "automatic-speech-recognition" )
2758 self .assertIn ((data ["size" ], data ["n_weights" ]), [(132115968 , 33028992 )])
2859 model , inputs , ds = data ["model" ], data ["inputs" ], data ["dynamic_shapes" ]
60+ model (** data ["inputs2" ])
2961 Dim = torch .export .Dim
3062 self .maxDiff = None
3163 self .assertIn ("{0:Dim(batch),1:DYN(seq_length)}" , self .string_type (ds ))
@@ -91,13 +123,15 @@ def test_automatic_speech_recognition(self):
91123 )
92124
93125 @hide_stdout ()
94- def test_imagetext2text_generation (self ):
126+ def test_image_text_to_text (self ):
95127 mid = "HuggingFaceM4/tiny-random-idefics"
96- data = get_untrained_model_with_inputs (mid , verbose = 1 )
128+ data = get_untrained_model_with_inputs (mid , verbose = 1 , add_second_input = True )
129+ self .assertEqual (data ["task" ], "image-text-to-text" )
97130 self .assertIn ((data ["size" ], data ["n_weights" ]), [(12742888 , 3185722 )])
98131 model , inputs , ds = data ["model" ], data ["inputs" ], data ["dynamic_shapes" ]
99132 model (** inputs )
100- if not has_torch ("2.10" ):
133+ model (** data ["inputs2" ])
134+ if not has_torch ("2.8" ):
101135 raise unittest .SkipTest ("sym_max does not work with dynamic dimension" )
102136 with bypass_export_some_errors (patch_transformers = True , verbose = 10 ):
103137 torch .export .export (
@@ -107,10 +141,12 @@ def test_imagetext2text_generation(self):
107141 @hide_stdout ()
108142 def test_fill_mask (self ):
109143 mid = "google-bert/bert-base-multilingual-cased"
110- data = get_untrained_model_with_inputs (mid , verbose = 1 )
144+ data = get_untrained_model_with_inputs (mid , verbose = 1 , add_second_input = True )
145+ self .assertEqual (data ["task" ], "fill-mask" )
111146 self .assertIn ((data ["size" ], data ["n_weights" ]), [(428383212 , 107095803 )])
112147 model , inputs , ds = data ["model" ], data ["inputs" ], data ["dynamic_shapes" ]
113148 model (** inputs )
149+ model (** data ["inputs2" ])
114150 with bypass_export_some_errors (patch_transformers = True , verbose = 10 ):
115151 torch .export .export (
116152 model , (), kwargs = inputs , dynamic_shapes = use_dyn_not_str (ds ), strict = False
@@ -119,10 +155,12 @@ def test_fill_mask(self):
119155 @hide_stdout ()
120156 def test_feature_extraction (self ):
121157 mid = "facebook/bart-base"
122- data = get_untrained_model_with_inputs (mid , verbose = 1 )
158+ data = get_untrained_model_with_inputs (mid , verbose = 1 , add_second_input = True )
159+ self .assertEqual (data ["task" ], "feature-extraction" )
123160 self .assertIn ((data ["size" ], data ["n_weights" ]), [(557681664 , 139420416 )])
124161 model , inputs , ds = data ["model" ], data ["inputs" ], data ["dynamic_shapes" ]
125162 model (** inputs )
163+ model (** data ["inputs2" ])
126164 with bypass_export_some_errors (patch_transformers = True , verbose = 10 ):
127165 torch .export .export (
128166 model , (), kwargs = inputs , dynamic_shapes = use_dyn_not_str (ds ), strict = False
@@ -131,10 +169,12 @@ def test_feature_extraction(self):
131169 @hide_stdout ()
132170 def test_text_classification (self ):
133171 mid = "Intel/bert-base-uncased-mrpc"
134- data = get_untrained_model_with_inputs (mid , verbose = 1 )
172+ data = get_untrained_model_with_inputs (mid , verbose = 1 , add_second_input = True )
173+ self .assertEqual (data ["task" ], "text-classification" )
135174 self .assertIn ((data ["size" ], data ["n_weights" ]), [(154420232 , 38605058 )])
136175 model , inputs , ds = data ["model" ], data ["inputs" ], data ["dynamic_shapes" ]
137176 model (** inputs )
177+ model (** data ["inputs2" ])
138178 with bypass_export_some_errors (patch_transformers = True , verbose = 10 ):
139179 torch .export .export (
140180 model , (), kwargs = inputs , dynamic_shapes = use_dyn_not_str (ds ), strict = False
@@ -143,10 +183,12 @@ def test_text_classification(self):
143183 @hide_stdout ()
144184 def test_sentence_similary (self ):
145185 mid = "sentence-transformers/all-MiniLM-L6-v1"
146- data = get_untrained_model_with_inputs (mid , verbose = 1 )
186+ data = get_untrained_model_with_inputs (mid , verbose = 1 , add_second_input = True )
187+ self .assertEqual (data ["task" ], "sentence-similarity" )
147188 self .assertIn ((data ["size" ], data ["n_weights" ]), [(62461440 , 15615360 )])
148189 model , inputs , ds = data ["model" ], data ["inputs" ], data ["dynamic_shapes" ]
149190 model (** inputs )
191+ model (** data ["inputs2" ])
150192 with bypass_export_some_errors (patch_transformers = True , verbose = 10 ):
151193 torch .export .export (
152194 model , (), kwargs = inputs , dynamic_shapes = use_dyn_not_str (ds ), strict = False
@@ -155,9 +197,11 @@ def test_sentence_similary(self):
155197 @hide_stdout ()
156198 def test_falcon_mamba_dev (self ):
157199 mid = "tiiuae/falcon-mamba-tiny-dev"
158- data = get_untrained_model_with_inputs (mid , verbose = 1 )
200+ data = get_untrained_model_with_inputs (mid , verbose = 1 , add_second_input = True )
201+ self .assertEqual (data ["task" ], "text-generation" )
159202 model , inputs , ds = data ["model" ], data ["inputs" ], data ["dynamic_shapes" ]
160203 model (** inputs )
204+ model (** data ["inputs2" ])
161205 self .assertIn ((data ["size" ], data ["n_weights" ]), [(138640384 , 34660096 )])
162206 if not has_transformers ("4.55" ):
163207 raise unittest .SkipTest ("The model has control flow." )
@@ -166,6 +210,20 @@ def test_falcon_mamba_dev(self):
166210 model , (), kwargs = inputs , dynamic_shapes = use_dyn_not_str (ds ), strict = False
167211 )
168212
213+ @hide_stdout ()
214+ def test_zero_shot_image_classification (self ):
215+ mid = "openai/clip-vit-base-patch16"
216+ data = get_untrained_model_with_inputs (mid , verbose = 1 , add_second_input = True )
217+ self .assertEqual (data ["task" ], "zero-shot-image-classification" )
218+ self .assertIn ((data ["size" ], data ["n_weights" ]), [(188872708 , 47218177 )])
219+ model , inputs , ds = data ["model" ], data ["inputs" ], data ["dynamic_shapes" ]
220+ model (** inputs )
221+ model (** data ["inputs2" ])
222+ with bypass_export_some_errors (patch_transformers = True , verbose = 10 ):
223+ torch .export .export (
224+ model , (), kwargs = inputs , dynamic_shapes = use_dyn_not_str (ds ), strict = False
225+ )
226+
169227
170228if __name__ == "__main__" :
171229 unittest .main (verbosity = 2 )