
Commit 413f3df

Remove DORT related tests since it was removed from PyTorch (#2465)
Signed-off-by: Justin Chu <[email protected]>
1 parent 3f2f7d3 commit 413f3df

File tree

5 files changed: +15 −152 lines

onnxscript/tools/training_helper.py

Lines changed: 0 additions & 47 deletions
This file was deleted.
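For context: this deleted helper module is what backed the DORT tests (DORT compiled a model with torch.compile using an ONNX Runtime backend). A minimal sketch of how the removed test_phi_dort_static consumed it, condensed from the deleted test code shown further down in this diff; it no longer runs because the DORT backend was removed from PyTorch, and make_aot_ort/train_loop existed only in the deleted module:

import copy
import torch

import onnxscript.tools.training_helper  # deleted by this commit
import onnxscript.tools.transformers_models.phi

# Build the torch.compile backend that routed graphs through ONNX Runtime (DORT).
model, input_tensors_many, _ = onnxscript.tools.transformers_models.phi.get_phi_model()
input_tensors = input_tensors_many[0]
local_aot_ort = onnxscript.tools.training_helper.make_aot_ort()

# Compile a copy of the eager model with that backend and compare forward outputs.
compiled_model = torch.compile(
    copy.deepcopy(model), backend=local_aot_ort, dynamic=False, fullgraph=True
)
expected = model(*input_tensors)
results = compiled_model(*input_tensors)
torch.testing.assert_close(expected[0], results[0], atol=1e-5, rtol=1e-5)

# train_loop ran a backward pass so gradients from both paths could be compared.
expected_gradients = onnxscript.tools.training_helper.train_loop(model, *input_tensors)
gradients = onnxscript.tools.training_helper.train_loop(compiled_model, *input_tensors)
torch.testing.assert_close(expected_gradients[0], gradients[0], atol=1e-5, rtol=1e-5)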

onnxscript/tools/transformers_models/llama_test.py

Lines changed: 5 additions & 24 deletions
@@ -9,7 +9,6 @@
 import onnxruntime
 import torch

-import onnxscript.tools.training_helper
 import onnxscript.tools.transformers_models
 import onnxscript.tools.transformers_models.llama
 from onnxscript._internal.version_utils import (
@@ -34,13 +33,7 @@ def test_llama_export_cpu(self):
         )
         input_tensors = input_tensors_many[0]
         expected = model(*input_tensors)
-        try:
-            proto = onnxscript.tools.transformers_models.export_to_onnx(model, *input_tensors)
-        except torch._export.verifier.SpecViolationError as e:  # pylint: disable=protected-access
-            # see https://github.com/pytorch/pytorch/issues/128394
-            if "Node.meta _enter_autocast is missing val field." in str(e):
-                raise unittest.SkipTest(str(e))
-            raise
+        proto = onnxscript.tools.transformers_models.export_to_onnx(model, *input_tensors)
         names = [i.name for i in proto.graph.input]
         np_input_tensors = [x.numpy() for x in input_tensors]
         feeds = dict(zip(names, np_input_tensors))
@@ -63,15 +56,9 @@ def test_llama_export_cpu_export_api(self):
         )
         input_tensors = input_tensors_many[0]
         expected = model(*input_tensors)
-        try:
-            proto = onnxscript.tools.transformers_models.export_to_onnx(
-                model, *input_tensors, export_api=True
-            )
-        except torch._export.verifier.SpecViolationError as e:  # pylint: disable=protected-access
-            # see https://github.com/pytorch/pytorch/issues/128394
-            if "Node.meta _enter_autocast is missing val field." in str(e):
-                raise unittest.SkipTest(str(e))
-            raise
+        proto = onnxscript.tools.transformers_models.export_to_onnx(
+            model, *input_tensors, export_api=True
+        )
         names = [i.name for i in proto.graph.input]
         np_input_tensors = [x.numpy() for x in input_tensors]
         feeds = dict(zip(names, np_input_tensors))
@@ -94,13 +81,7 @@ def test_llama_export_cuda(self):
         model = model.to("cuda")
         input_tensors = [i.to("cuda") for i in input_tensors_cpu]
         expected = model(*input_tensors)
-        try:
-            proto = onnxscript.tools.transformers_models.export_to_onnx(model, *input_tensors)
-        except torch._export.verifier.SpecViolationError as e:  # pylint: disable=protected-access
-            # see https://github.com/pytorch/pytorch/issues/128394
-            if "Node.meta _enter_autocast is missing val field." in str(e):
-                raise unittest.SkipTest(str(e))
-            raise
+        proto = onnxscript.tools.transformers_models.export_to_onnx(model, *input_tensors)
         names = [i.name for i in proto.graph.input]
         np_input_tensors = [x.detach().cpu().numpy() for x in input_tensors]
         feeds = dict(zip(names, np_input_tensors))
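The same simplification repeats in the mistral and phi3 tests below: the SpecViolationError workaround for pytorch/pytorch#128394 is dropped and export_to_onnx is called directly. Stitched together, the retained flow looks roughly like this; the InferenceSession construction and the get_llama_model name are assumptions, since those lines sit outside the diff context:

import numpy as np
import onnxruntime

import onnxscript.tools.transformers_models
import onnxscript.tools.transformers_models.llama

# get_llama_model is assumed by analogy with get_phi_model in phi_test.py.
model, input_tensors_many, _ = onnxscript.tools.transformers_models.llama.get_llama_model()
input_tensors = input_tensors_many[0]
expected = model(*input_tensors)

# Export directly; no try/except around SpecViolationError is needed any more.
proto = onnxscript.tools.transformers_models.export_to_onnx(model, *input_tensors)

# Run the exported model in ONNX Runtime on the same inputs and compare.
names = [i.name for i in proto.graph.input]
feeds = dict(zip(names, [x.numpy() for x in input_tensors]))
sess = onnxruntime.InferenceSession(  # assumed construction, not shown in the hunks
    proto.SerializeToString(), providers=["CPUExecutionProvider"]
)
results = sess.run(None, feeds)
np.testing.assert_allclose(expected[0].detach().numpy(), results[0], atol=1e-5)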

onnxscript/tools/transformers_models/mistral_test.py

Lines changed: 5 additions & 26 deletions
@@ -9,9 +9,6 @@
 import onnxruntime
 import torch

-import onnxscript.optimizer
-import onnxscript.rewriter
-import onnxscript.tools.training_helper
 import onnxscript.tools.transformers_models
 import onnxscript.tools.transformers_models.mistral
 from onnxscript._internal.version_utils import (
@@ -36,13 +33,7 @@ def test_mistral_export_cpu(self):
         )
         input_tensors = input_tensors_many[0]
         expected = model(*input_tensors)
-        try:
-            proto = onnxscript.tools.transformers_models.export_to_onnx(model, *input_tensors)
-        except torch._export.verifier.SpecViolationError as e:  # pylint: disable=protected-access
-            # see https://github.com/pytorch/pytorch/issues/128394
-            if "Node.meta _enter_autocast is missing val field." in str(e):
-                raise unittest.SkipTest(str(e))
-            raise
+        proto = onnxscript.tools.transformers_models.export_to_onnx(model, *input_tensors)
         names = [i.name for i in proto.graph.input]
         np_input_tensors = [x.numpy() for x in input_tensors]
         feeds = dict(zip(names, np_input_tensors))
@@ -65,15 +56,9 @@ def test_mistral_export_cpu_export_api(self):
         )
         input_tensors = input_tensors_many[0]
         expected = model(*input_tensors)
-        try:
-            proto = onnxscript.tools.transformers_models.export_to_onnx(
-                model, *input_tensors, export_api=True
-            )
-        except torch._export.verifier.SpecViolationError as e:  # pylint: disable=protected-access
-            # see https://github.com/pytorch/pytorch/issues/128394
-            if "Node.meta _enter_autocast is missing val field." in str(e):
-                raise unittest.SkipTest(str(e))
-            raise
+        proto = onnxscript.tools.transformers_models.export_to_onnx(
+            model, *input_tensors, export_api=True
+        )
         names = [i.name for i in proto.graph.input]
         np_input_tensors = [x.numpy() for x in input_tensors]
         feeds = dict(zip(names, np_input_tensors))
@@ -95,13 +80,7 @@ def test_phi_export_cuda(self):
         model = model.to("cuda")
         input_tensors = [i.to("cuda") for i in input_tensors_cpu]
         expected = model(*input_tensors)
-        try:
-            proto = onnxscript.tools.transformers_models.export_to_onnx(model, *input_tensors)
-        except torch._export.verifier.SpecViolationError as e:  # pylint: disable=protected-access
-            # see https://github.com/pytorch/pytorch/issues/128394
-            if "Node.meta _enter_autocast is missing val field." in str(e):
-                raise unittest.SkipTest(str(e))
-            raise
+        proto = onnxscript.tools.transformers_models.export_to_onnx(model, *input_tensors)
         names = [i.name for i in proto.graph.input]
         np_input_tensors = [x.detach().cpu().numpy() for x in input_tensors]
         feeds = dict(zip(names, np_input_tensors))

onnxscript/tools/transformers_models/phi3_test.py

Lines changed: 5 additions & 26 deletions
@@ -9,9 +9,6 @@
 import onnxruntime
 import torch

-import onnxscript.optimizer
-import onnxscript.rewriter
-import onnxscript.tools.training_helper
 import onnxscript.tools.transformers_models
 import onnxscript.tools.transformers_models.phi3
 from onnxscript._internal.version_utils import (
@@ -35,13 +32,7 @@ def test_phi3_export_cpu(self):
         )
         input_tensors = input_tensors_many[0]
         expected = model(*input_tensors)
-        try:
-            proto = onnxscript.tools.transformers_models.export_to_onnx(model, *input_tensors)
-        except torch._export.verifier.SpecViolationError as e:  # pylint: disable=protected-access
-            # see https://github.com/pytorch/pytorch/issues/128394
-            if "Node.meta _enter_autocast is missing val field." in str(e):
-                raise unittest.SkipTest(str(e))
-            raise
+        proto = onnxscript.tools.transformers_models.export_to_onnx(model, *input_tensors)
         names = [i.name for i in proto.graph.input]
         np_input_tensors = [x.numpy() for x in input_tensors]
         feeds = dict(zip(names, np_input_tensors))
@@ -62,15 +53,9 @@ def test_phi3_export_cpu_export_api(self):
         )
         input_tensors = input_tensors_many[0]
         expected = model(*input_tensors)
-        try:
-            proto = onnxscript.tools.transformers_models.export_to_onnx(
-                model, *input_tensors, export_api=True
-            )
-        except torch._export.verifier.SpecViolationError as e:  # pylint: disable=protected-access
-            # see https://github.com/pytorch/pytorch/issues/128394
-            if "Node.meta _enter_autocast is missing val field." in str(e):
-                raise unittest.SkipTest(str(e))
-            raise
+        proto = onnxscript.tools.transformers_models.export_to_onnx(
+            model, *input_tensors, export_api=True
+        )
         names = [i.name for i in proto.graph.input]
         np_input_tensors = [x.numpy() for x in input_tensors]
         feeds = dict(zip(names, np_input_tensors))
@@ -93,13 +78,7 @@ def test_phi3_export_cuda(self):
         model = model.to("cuda")
         input_tensors = [i.to("cuda") for i in input_tensors_cpu]
         expected = model(*input_tensors)
-        try:
-            proto = onnxscript.tools.transformers_models.export_to_onnx(model, *input_tensors)
-        except torch._export.verifier.SpecViolationError as e:  # pylint: disable=protected-access
-            # see https://github.com/pytorch/pytorch/issues/128394
-            if "Node.meta _enter_autocast is missing val field." in str(e):
-                raise unittest.SkipTest(str(e))
-            raise
+        proto = onnxscript.tools.transformers_models.export_to_onnx(model, *input_tensors)
         names = [i.name for i in proto.graph.input]
         np_input_tensors = [x.detach().cpu().numpy() for x in input_tensors]
         feeds = dict(zip(names, np_input_tensors))

onnxscript/tools/transformers_models/phi_test.py

Lines changed: 0 additions & 29 deletions
@@ -2,15 +2,13 @@
 # Licensed under the MIT License.
 # pylint: disable=not-callable

-import copy
 import sys
 import unittest

 import numpy as np
 import onnxruntime
 import torch

-import onnxscript.tools.training_helper
 import onnxscript.tools.transformers_models
 import onnxscript.tools.transformers_models.phi
 from onnxscript._internal.version_utils import (
@@ -79,33 +77,6 @@ def test_phi_export_cuda(self):
         results = sess.run(None, feeds)
         np.testing.assert_allclose(expected[0].detach().cpu().numpy(), results[0], atol=1e-5)

-    @unittest.skipIf(sys.platform == "win32", reason="not supported yet on Windows")
-    @unittest.skipIf(not has_transformers(), reason="transformers is missing")
-    @unittest.skipIf(
-        not hasattr(onnxruntime, "training"), reason="ORT training removed since 1.22"
-    )
-    @ignore_warnings(UserWarning)
-    def test_phi_dort_static(self):
-        model, input_tensors_many, _ = onnxscript.tools.transformers_models.phi.get_phi_model()
-        input_tensors = input_tensors_many[0]
-        expected = model(*input_tensors)
-
-        local_aot_ort = onnxscript.tools.training_helper.make_aot_ort()
-
-        compiled_model = torch.compile(
-            copy.deepcopy(model),
-            backend=local_aot_ort,
-            dynamic=False,
-            fullgraph=True,
-        )
-
-        results = compiled_model(*input_tensors)
-        torch.testing.assert_close(expected[0], results[0], atol=1e-5, rtol=1e-5)
-
-        expected_gradients = onnxscript.tools.training_helper.train_loop(model, *input_tensors)
-        gradients = onnxscript.tools.training_helper.train_loop(compiled_model, *input_tensors)
-        torch.testing.assert_close(expected_gradients[0], gradients[0], atol=1e-5, rtol=1e-5)
-

 if __name__ == "__main__":
     unittest.main(verbosity=2)
