
Commit b9fa045

More unit test (#146)
* patch for _compute_dynamic_ntk_parameters
* change
* fix
* custom patch
* patch
* doc
* file
* doc
* fix unittest
* badge
* remove one unit test
* add more cache
* more patches
* fix
* fix
* atol
* fix
1 parent 094f23d commit b9fa045

14 files changed, +239 -33 lines


CHANGELOGS.rst

Lines changed: 2 additions & 0 deletions
@@ -4,6 +4,8 @@ Change Logs
 0.7.0
 +++++
 
+* :pr:`146`: patch for IdeficsAttention, IdeficsEmbedding
+* :pr:`145`: patch for _compute_dynamic_ntk_parameters (Phi3RotaryEmbedding)
 * :pr:`144`: support for second inputs with different dimension,
   rename test_helper into validate,
   support ``interpolate_pos_encoding`` for ``VitModel``,

README.rst

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ onnx-diagnostic: investigate onnx models
 .. image:: https://img.shields.io/badge/code%20style-black-000000.svg
     :target: https://github.com/psf/black
 
-.. image:: https://codecov.io/gh/sdpython/onnx-diagnostic/branch/main/graph/badge.svg?token=Wb9ZGDta8J
+.. image:: https://codecov.io/gh/sdpython/onnx-diagnostic/graph/badge.svg?token=91T5ZVIP96
     :target: https://codecov.io/gh/sdpython/onnx-diagnostic
 
 The main feature is about `patches <https://github.com/sdpython/onnx-diagnostic/tree/main/onnx_diagnostic/torch_export_patches>`_:

_doc/examples/plot_export_hub_codellama.py

Lines changed: 7 additions & 1 deletion
@@ -20,6 +20,7 @@
 import pprint
 import torch
 from onnx_diagnostic import doc
+from onnx_diagnostic.ext_test_case import unit_test_going
 from onnx_diagnostic.helpers import string_type
 from onnx_diagnostic.torch_models.hghub import (
     get_untrained_model_with_inputs,
@@ -32,7 +33,12 @@
 from onnx_diagnostic.torch_export_patches import torch_export_patches
 from onnx_diagnostic.torch_export_patches.patch_inputs import use_dyn_not_str
 
-model_id = "codellama/CodeLlama-7b-Python-hf"
+model_id = (
+    "HuggingFaceM4/tiny-random-idefics"
+    if unit_test_going()
+    else "codellama/CodeLlama-7b-Python-hf"
+)
+print(f"model_id={model_id!r}")
 print("info", get_model_info(model_id))
 
 # %%
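
The example above now falls back to a tiny checkpoint when it runs as part of the unit tests. A minimal sketch of the same guard, assuming ``unit_test_going()`` returns True in that situation and reusing ``get_untrained_model_with_inputs`` as the other tests in this commit do:

# Sketch of the fallback pattern introduced above; assumes unit_test_going()
# is True when the documentation is built by the unit tests.
from onnx_diagnostic.ext_test_case import unit_test_going
from onnx_diagnostic.torch_models.hghub import get_untrained_model_with_inputs

model_id = (
    "HuggingFaceM4/tiny-random-idefics"      # small, fast to instantiate
    if unit_test_going()
    else "codellama/CodeLlama-7b-Python-hf"  # full-size model for the rendered doc
)
data = get_untrained_model_with_inputs(model_id, verbose=1)
model, inputs = data["model"], data["inputs"]
model(**inputs)  # quick sanity check before any export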

_doc/index.rst

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ onnx-diagnostic: investigate onnx models
 .. image:: https://img.shields.io/badge/code%20style-black-000000.svg
     :target: https://github.com/psf/black
 
-.. image:: https://codecov.io/gh/sdpython/onnx-diagnostic/branch/main/graph/badge.svg?token=Wb9ZGDta8J
+.. image:: https://codecov.io/gh/sdpython/onnx-diagnostic/graph/badge.svg?token=91T5ZVIP96
     :target: https://codecov.io/gh/sdpython/onnx-diagnostic
 
 The main feature is about `patches <https://github.com/sdpython/onnx-diagnostic/tree/main/onnx_diagnostic/torch_export_patches>`_:

_unittests/ut_helpers/test_doc_helper.py

Lines changed: 1 addition & 1 deletion
@@ -56,7 +56,7 @@ def test_custom_doc_kernels_layer_normalization(self):
         )
         expected = torch_sess.run(None, feeds)
         got = torch_sess_custom.run(None, feeds)
-        self.assertEqualAny(expected, got, atol=1e-3)
+        self.assertEqualAny(expected, got, atol=2e-3)
 
     def test_custom_doc_kernels_matmul(self):
         model = oh.make_model(

_unittests/ut_helpers/test_helper.py

Lines changed: 1 addition & 1 deletion
@@ -584,7 +584,7 @@ def test_flatten_encoder_decoder_cache(self):
         self.assertIn("EncoderDecoderCache", s)
 
     def test_string_typeçconfig(self):
-        conf = get_pretrained_config("microsoft/phi-2")
+        conf = get_pretrained_config("microsoft/phi-2", use_only_preinstalled=True)
         s = string_type(conf)
         self.assertStartsWith("PhiConfig(**{", s)

_unittests/ut_tasks/test_tasks_image_text_to_text.py

Lines changed: 8 additions & 5 deletions
@@ -1,13 +1,20 @@
 import unittest
 import torch
-from onnx_diagnostic.ext_test_case import ExtTestCase, hide_stdout, has_transformers, has_torch
+from onnx_diagnostic.ext_test_case import (
+    ExtTestCase,
+    hide_stdout,
+    requires_transformers,
+    requires_torch,
+)
 from onnx_diagnostic.torch_models.hghub.model_inputs import get_untrained_model_with_inputs
 from onnx_diagnostic.torch_export_patches import torch_export_patches
 from onnx_diagnostic.torch_export_patches.patch_inputs import use_dyn_not_str
 
 
 class TestTasks(ExtTestCase):
     @hide_stdout()
+    @requires_transformers("4.52")
+    @requires_torch("2.7.99")
     def test_image_text_to_text(self):
         mid = "HuggingFaceM4/tiny-random-idefics"
         data = get_untrained_model_with_inputs(mid, verbose=1, add_second_input=True)
@@ -16,10 +23,6 @@ def test_image_text_to_text(self):
         model, inputs, ds = data["model"], data["inputs"], data["dynamic_shapes"]
         model(**inputs)
         model(**data["inputs2"])
-        if not has_transformers("4.55"):
-            raise unittest.SkipTest("The model has control flow.")
-        if not has_torch("2.7.99"):
-            raise unittest.SkipTest("sym_max does not work with dynamic dimension")
         with torch_export_patches(patch_transformers=True, verbose=10):
             torch.export.export(
                 model, (), kwargs=inputs, dynamic_shapes=use_dyn_not_str(ds), strict=False
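
Outside the test class, the flow exercised by ``test_image_text_to_text`` reads roughly as follows; a minimal sketch, assuming a transformers release newer than 4.52 and a torch build newer than 2.7.99, as the decorators above require:

# Sketch of the export path covered by test_image_text_to_text (not the test itself).
import torch
from onnx_diagnostic.torch_models.hghub.model_inputs import get_untrained_model_with_inputs
from onnx_diagnostic.torch_export_patches import torch_export_patches
from onnx_diagnostic.torch_export_patches.patch_inputs import use_dyn_not_str

data = get_untrained_model_with_inputs(
    "HuggingFaceM4/tiny-random-idefics", verbose=1, add_second_input=True
)
model, inputs, ds = data["model"], data["inputs"], data["dynamic_shapes"]
model(**inputs)           # run eagerly with the first set of inputs
model(**data["inputs2"])  # and with a second set using different dimensions

# The Idefics patches added by this commit are active inside the context manager.
with torch_export_patches(patch_transformers=True, verbose=10):
    ep = torch.export.export(
        model, (), kwargs=inputs, dynamic_shapes=use_dyn_not_str(ds), strict=False
    )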

_unittests/ut_torch_models/test_hghub_api.py

Lines changed: 4 additions & 2 deletions
@@ -72,14 +72,16 @@ def test_task_from_id_long(self):
     @requires_torch("2.7")
     @hide_stdout()
     def test_get_pretrained_config(self):
-        conf = get_pretrained_config("microsoft/phi-2")
+        conf = get_pretrained_config("microsoft/phi-2", use_only_preinstalled=True)
         self.assertNotEmpty(conf)
 
     @requires_transformers("4.50")
     @requires_torch("2.7")
     @hide_stdout()
     def test_get_pretrained_config_options(self):
-        conf = get_pretrained_config("microsoft/phi-2", num_key_value_heads=16)
+        conf = get_pretrained_config(
+            "microsoft/phi-2", num_key_value_heads=16, use_only_preinstalled=True
+        )
         self.assertNotEmpty(conf)
         self.assertEqual(conf.num_key_value_heads, 16)
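
A short sketch of what the updated calls check: ``use_only_preinstalled=True`` is the argument added by this commit, ``num_key_value_heads=16`` overrides that field on the configuration, and ``string_type`` (imported in the documentation example earlier in this commit) renders the result. The import path of ``get_pretrained_config`` is an assumption, the diff does not show where the tests import it from:

# Sketch only; the import of get_pretrained_config is assumed, it is not shown above.
from onnx_diagnostic.helpers import string_type
from onnx_diagnostic.torch_models.hghub import get_pretrained_config

conf = get_pretrained_config(
    "microsoft/phi-2", num_key_value_heads=16, use_only_preinstalled=True
)
print(conf.num_key_value_heads)  # 16, the overridden value asserted by the test
print(string_type(conf))         # starts with "PhiConfig(**{" (see test_helper.py above)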

_unittests/ut_torch_models/test_hghub_model.py

Lines changed: 3 additions & 13 deletions
@@ -75,16 +75,6 @@ def test_get_untrained_model_with_inputs_beit(self):
         # different expected value for different version of transformers
         self.assertIn((data["size"], data["n_weights"]), [(111448, 27862), (56880, 14220)])
 
-    @hide_stdout()
-    @ignore_errors(OSError)
-    def test_get_untrained_model_with_inputs_codellama(self):
-        mid = "codellama/CodeLlama-7b-Python-hf"
-        data = get_untrained_model_with_inputs(mid, verbose=1)
-        model, inputs = data["model"], data["inputs"]
-        model(**inputs)
-        # different expected value for different version of transformers
-        self.assertIn((data["size"], data["n_weights"]), [(547377152, 136844288)])
-
     @hide_stdout()
     @ignore_errors(OSError)
     def test_get_untrained_model_with_inputs_clip_vit(self):
@@ -129,11 +119,11 @@ def _diff(c1, c2):
         try:
             model(**inputs)
         except Exception as e:
-            diff = _diff(get_pretrained_config(mid), data["configuration"])
+            cf = get_pretrained_config(mid, use_only_preinstalled=True)
+            diff = _diff(cf, data["configuration"])
             raise AssertionError(
                 f"Computation failed due to {e}.\n--- pretrained\n"
-                f"{pprint.pformat(get_pretrained_config(mid))}\n"
-                f"--- modified\n{data['configuration']}\n"
+                f"{pprint.pformat(cf)}\n--- modified\n{data['configuration']}\n"
                 f"--- diff\n{diff}"
             ) from e
         # different expected value for different version of transformers
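
The failure path above prints a diff between the pretrained configuration and the modified one attached to the untrained model. The real ``_diff`` helper is defined earlier in the test file and is not part of this commit; a hypothetical sketch of such a comparison, assuming both configurations expose ``to_dict()`` as transformers configurations do:

# Hypothetical sketch of a configuration diff; this is not the _diff helper used above.
def config_diff(c1, c2):
    d1, d2 = c1.to_dict(), c2.to_dict()
    keys = sorted(set(d1) | set(d2))
    # keep only the fields whose values disagree between the two configurations
    return {k: (d1.get(k), d2.get(k)) for k in keys if d1.get(k) != d2.get(k)}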

_unittests/ut_torch_models/test_validate_whole_models.py

Lines changed: 24 additions & 1 deletion
@@ -270,7 +270,30 @@ def test_validate_phi35_mini_instruct(self):
             inputs2=True,
             patch=True,
             rewrite=True,
-            # model_options={"rope_scaling": {"rope_type": "dynamic", "factor": 10.0}},
+        )
+        self.assertIsInstance(summary, dict)
+        self.assertIsInstance(data, dict)
+        onnx_filename = data["onnx_filename"]
+        onx = onnx.load(onnx_filename)
+        op_types = set(n.op_type for n in onx.graph.node)
+        self.assertIn("If", op_types)
+
+    @requires_torch("2.7")
+    @hide_stdout()
+    @ignore_warnings(FutureWarning)
+    @requires_transformers("4.51")
+    def test_validate_phi35_4k_mini_instruct(self):
+        mid = "microsoft/Phi-3-mini-4k-instruct"
+        summary, data = validate_model(
+            mid,
+            do_run=True,
+            verbose=10,
+            exporter="custom",
+            dump_folder="dump_test/validate_phi35_mini_instruct",
+            inputs2=True,
+            patch=True,
+            rewrite=True,
+            model_options={"rope_scaling": {"rope_type": "dynamic", "factor": 10.0}},
         )
        self.assertIsInstance(summary, dict)
        self.assertIsInstance(data, dict)
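
The new assertions load the exported model and verify that an ``If`` node is present, i.e. that the exported graph keeps a control-flow node. A minimal sketch of that check with the standard onnx API, assuming ``data["onnx_filename"]`` points to the file written by ``validate_model`` as it does in the test:

# Sketch of the graph inspection added above.
import onnx

onx = onnx.load(data["onnx_filename"])                # path returned by validate_model
op_types = {node.op_type for node in onx.graph.node}  # operator types in the main graph
assert "If" in op_types, "the exported model is expected to contain a control-flow node"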
