@@ -1,6 +1,6 @@
 import unittest
 import torch
-from onnx_diagnostic.ext_test_case import ExtTestCase, hide_stdout, has_transformers, has_torch
+from onnx_diagnostic.ext_test_case import ExtTestCase, hide_stdout, has_transformers
 from onnx_diagnostic.torch_models.hghub.model_inputs import get_untrained_model_with_inputs
 from onnx_diagnostic.torch_export_patches import bypass_export_some_errors
 from onnx_diagnostic.torch_export_patches.patch_inputs import use_dyn_not_str
@@ -36,22 +36,6 @@ def test_text_generation(self): |
                 model, (), kwargs=inputs, dynamic_shapes=use_dyn_not_str(ds), strict=False
             )
 
-    @hide_stdout()
-    def test_image_classification(self):
-        mid = "hf-internal-testing/tiny-random-BeitForImageClassification"
-        data = get_untrained_model_with_inputs(mid, verbose=1, add_second_input=True)
-        self.assertEqual(data["task"], "image-classification")
-        self.assertIn((data["size"], data["n_weights"]), [(56880, 14220)])
-        model, inputs, ds = data["model"], data["inputs"], data["dynamic_shapes"]
-        model(**inputs)
-        model(**data["inputs2"])
-        if not has_transformers("4.51"):
-            raise unittest.SkipTest("_patch_make_causal_mask patch fails when an issue arises")
-        with bypass_export_some_errors(patch_transformers=True, verbose=10):
-            torch.export.export(
-                model, (), kwargs=inputs, dynamic_shapes=use_dyn_not_str(ds), strict=False
-            )
-
     @hide_stdout()
     def test_automatic_speech_recognition(self):
         mid = "openai/whisper-tiny"
@@ -124,22 +108,6 @@ def test_automatic_speech_recognition(self): |
                 model, (), kwargs=inputs, dynamic_shapes=use_dyn_not_str(ds), strict=False
             )
 
-    @hide_stdout()
-    def test_image_text_to_text(self):
-        mid = "HuggingFaceM4/tiny-random-idefics"
-        data = get_untrained_model_with_inputs(mid, verbose=1, add_second_input=True)
-        self.assertEqual(data["task"], "image-text-to-text")
-        self.assertIn((data["size"], data["n_weights"]), [(12742888, 3185722)])
-        model, inputs, ds = data["model"], data["inputs"], data["dynamic_shapes"]
-        model(**inputs)
-        model(**data["inputs2"])
-        if not has_torch("2.7.99"):
-            raise unittest.SkipTest("sym_max does not work with dynamic dimension")
-        with bypass_export_some_errors(patch_transformers=True, verbose=10):
-            torch.export.export(
-                model, (), kwargs=inputs, dynamic_shapes=use_dyn_not_str(ds), strict=False
-            )
-
     @hide_stdout()
     def test_fill_mask(self):
         mid = "google-bert/bert-base-multilingual-cased"
@@ -212,22 +180,6 @@ def test_falcon_mamba_dev(self): |
                 model, (), kwargs=inputs, dynamic_shapes=use_dyn_not_str(ds), strict=False
             )
 
-    @hide_stdout()
-    def test_zero_shot_image_classification(self):
-        mid = "openai/clip-vit-base-patch16"
-        data = get_untrained_model_with_inputs(mid, verbose=1, add_second_input=True)
-        self.assertEqual(data["task"], "zero-shot-image-classification")
-        self.assertIn((data["size"], data["n_weights"]), [(188872708, 47218177)])
-        model, inputs, ds = data["model"], data["inputs"], data["dynamic_shapes"]
-        model(**inputs)
-        model(**data["inputs2"])
-        if not has_transformers("4.51"):
-            raise unittest.SkipTest("_patch_make_causal_mask patch fails when an issue arises")
-        with bypass_export_some_errors(patch_transformers=True, verbose=10):
-            torch.export.export(
-                model, (), kwargs=inputs, dynamic_shapes=use_dyn_not_str(ds), strict=False
-            )
-
 
 if __name__ == "__main__":
     unittest.main(verbosity=2)
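
For reference, every test in this file (kept and removed alike) follows the same pattern. Below is a minimal sketch of that pattern assembled from the calls visible in the diff; the model id is taken from the removed test_image_classification and is purely illustrative, and the assertions on size/weights are omitted since those values are model-specific.

import torch
from onnx_diagnostic.torch_models.hghub.model_inputs import get_untrained_model_with_inputs
from onnx_diagnostic.torch_export_patches import bypass_export_some_errors
from onnx_diagnostic.torch_export_patches.patch_inputs import use_dyn_not_str

# Model id taken from one of the removed tests; illustrative only.
mid = "hf-internal-testing/tiny-random-BeitForImageClassification"
data = get_untrained_model_with_inputs(mid, verbose=1, add_second_input=True)
model, inputs, ds = data["model"], data["inputs"], data["dynamic_shapes"]

model(**inputs)           # sanity check: the generated inputs run eagerly
model(**data["inputs2"])  # second input set exercises the dynamic dimensions

# Patch transformers so torch.export can trace the model, then export with
# dynamic shapes (string axis names converted via use_dyn_not_str).
with bypass_export_some_errors(patch_transformers=True, verbose=10):
    torch.export.export(
        model, (), kwargs=inputs, dynamic_shapes=use_dyn_not_str(ds), strict=False
    )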