Commit a754737: less ambitious goal

1 parent fdc724a

9 files changed, +33 -1 lines

.github/workflows/ci.yml

Lines changed: 1 addition & 1 deletion

@@ -17,7 +17,7 @@ jobs:
         os: [ubuntu-latest]
         python: ['3.11', '3.12']
         transformers: ['4.48', '4.50', 'main']
-        torch: ['2.6', 'main']
+        torch: ['main']

     steps:
       - uses: actions/checkout@v3

_doc/examples/plot_export_locate_issue.py

Lines changed: 4 additions & 0 deletions

@@ -25,6 +25,7 @@

 import traceback
 import torch
+from onnx_diagnostic import doc
 from onnx_diagnostic.torch_export_patches import bypass_export_some_errors


@@ -99,3 +100,6 @@ def forward(self, x: torch.Tensor, ys: list[torch.Tensor]):
 #
 # File "onnx-diagnostic/_doc/examples/plot_export_locate_issue.py", line 25, in forward
 #     z = x * caty
+
+
+doc.plot_legend("was inferred to be a constant", "torch.export.export", "tomato")

_doc/examples/plot_export_tiny_llm.py

Lines changed: 3 additions & 0 deletions

@@ -30,6 +30,7 @@
 import pprint
 import torch
 import transformers
+from onnx_diagnostic import doc
 from onnx_diagnostic.helpers import string_type
 from onnx_diagnostic.torch_models.llms import get_tiny_llm

@@ -170,3 +171,5 @@ def _forward_(*args, _f=None, **kwargs):
 # %%
 # If you have any error, then look at example
 # :ref:`l-plot-tiny-llm-export-patched`.
+
+doc.plot_legend("Tiny-LLM fails", "torch.export.export", "tomato")

_doc/examples/plot_export_tiny_llm_patched.py

Lines changed: 4 additions & 0 deletions

@@ -66,6 +66,7 @@
 import pprint
 import torch
 import transformers
+from onnx_diagnostic import doc
 from onnx_diagnostic.helpers import string_type
 from onnx_diagnostic.torch_export_patches.onnx_export_errors import bypass_export_some_errors
 from onnx_diagnostic.torch_models.llms import get_tiny_llm
@@ -122,3 +123,6 @@
 )
 print("It worked:")
 print(ep)
+
+# %%
+doc.plot_legend("Tiny-LLM patched", "torch.export.export", "green")

_doc/examples/plot_export_with_dynamic_cache.py

Lines changed: 5 additions & 0 deletions

@@ -23,6 +23,7 @@

 import pprint
 import torch
+from onnx_diagnostic import doc
 from onnx_diagnostic.cache_helpers import make_dynamic_cache
 from onnx_diagnostic.helpers import string_type
 from onnx_diagnostic.export import ModelInputs
@@ -221,3 +222,7 @@ def forward(self, cache, z):
     model, modificator(inputs[0]), dynamic_shapes=ds[0], strict=False
 )
 print(ep)
+
+# %%
+
+doc.plot_legend("dynamic shapes", "torch.export.export", "tomato")

_doc/examples/plot_export_with_dynamic_shapes_auto.py

Lines changed: 5 additions & 0 deletions

@@ -12,6 +12,7 @@
 """

 import torch
+from onnx_diagnostic import doc


 class Model(torch.nn.Module):
@@ -90,3 +91,7 @@ def forward(self, x, y, z):
         dynamic_shapes=({0: AUTO, 1: AUTO}, {0: AUTO, 1: AUTO}, {0: AUTO, 1: AUTO}),
     )
 )
+
+# %%
+
+doc.plot_legend("dynamic shapes inferred", "torch.export.export", "tomato")

_doc/examples/plot_failing_model_extract.py

Lines changed: 5 additions & 0 deletions

@@ -25,6 +25,7 @@
 import onnx
 import onnx.helper as oh
 import onnxruntime
+from onnx_diagnostic import doc
 from onnx_diagnostic.helpers import from_array_extended
 from onnx_diagnostic.ort_session import investigate_onnxruntime_issue

@@ -96,3 +97,7 @@
     onnx.shape_inference.infer_shapes(model, strict_mode=True)
 except onnx.onnx_cpp2py_export.shape_inference.InferenceError as e:
     print(e)
+
+# %%
+
+doc.plot_legend("Run until it fails", "onnxruntime.InferenceSession", "lightgrey")

_doc/examples/plot_failing_onnxruntime_evaluator.py

Lines changed: 3 additions & 0 deletions

@@ -28,6 +28,7 @@
 import onnx.helper as oh
 import torch
 import onnxruntime
+from onnx_diagnostic import doc
 from onnx_diagnostic.ext_test_case import has_cuda
 from onnx_diagnostic.helpers import from_array_extended
 from onnx_diagnostic.reference import OnnxruntimeEvaluator
@@ -104,3 +105,5 @@
 # This runtime is useful when it fails for a numerical reason.
 # It is possible to insert prints in the python code to print
 # more information or debug if needed.
+
+doc.plot_legend("onnxruntime running step by step", "OnnxruntimeEvaluator", "lightgrey")

_doc/examples/plot_failing_reference_evaluator.py

Lines changed: 3 additions & 0 deletions

@@ -19,6 +19,7 @@
 import onnx
 import onnx.helper as oh
 import onnxruntime
+from onnx_diagnostic import doc
 from onnx_diagnostic.helpers import from_array_extended
 from onnx_diagnostic.reference import ExtendedReferenceEvaluator

@@ -79,3 +80,5 @@
 # This runtime is useful when it fails for a numerical reason.
 # It is possible to insert prints in the python code to print
 # more information or debug if needed.
+
+doc.plot_legend("Python Runtime for ONNX", "ExtendedReferenceEvaluator", "lightgrey")
