From 4f6c0415e5e76bd82a3f543c265fb706042dd441 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Xavier=20Dupr=C3=A9?=
Date: Tue, 4 Nov 2025 18:35:35 +0100
Subject: [PATCH 1/6] Adds variable to track random operators

---
 onnx_diagnostic/_command_lines_parser.py |  2 +-
 onnx_diagnostic/helpers/log_helper.py    | 11 +++++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/onnx_diagnostic/_command_lines_parser.py b/onnx_diagnostic/_command_lines_parser.py
index 83015322..953fdb16 100644
--- a/onnx_diagnostic/_command_lines_parser.py
+++ b/onnx_diagnostic/_command_lines_parser.py
@@ -940,7 +940,7 @@ def get_parser_agg() -> ArgumentParser:
         "n_model_faster2x,n_model_faster3x,n_model_faster4x,n_node_attention,"
         "n_node_attention23,n_node_rotary_embedding,n_node_rotary_embedding23,"
         "n_node_gqa,n_node_layer_normalization,n_node_layer_normalization23,"
-        "peak_gpu_torch,peak_gpu_nvidia,n_node_control_flow,"
+        "peak_gpu_torch,peak_gpu_nvidia,n_node_control_flow,n_node_random,"
         "n_node_constant,n_node_shape,n_node_expand,"
         "n_node_function,n_node_initializer,n_node_scatter,"
         "time_export_unbiased,onnx_n_nodes_no_cst,n_node_initializer_small",
diff --git a/onnx_diagnostic/helpers/log_helper.py b/onnx_diagnostic/helpers/log_helper.py
index 612c3466..0b4fa25e 100644
--- a/onnx_diagnostic/helpers/log_helper.py
+++ b/onnx_diagnostic/helpers/log_helper.py
@@ -1611,6 +1611,7 @@ def __init__(
             "n_node_initializer_small",
             "n_node_layer_normalization",
             "n_node_layer_normalization23",
+            "n_node_random",
             "n_node_reshape",
             "n_node_rotary_embedding",
             "n_node_rotary_embedding23",
@@ -1802,6 +1803,16 @@ def first_err(df: pandas.DataFrame) -> pandas.Series:
                 + gdf(df, "op_onnx__InstanceNormlization", 0)
                 + gdf(df, "op_onnx__GroupNormalization", 0),
             ),
+            n_node_random=lambda df: gpreserve(
+                df,
+                "time_latency_eager",
+                gdf(df, "op_onnx__RandomNormal", 0)
+                + gdf(df, "op_onnx__RandomNormalLike", 0)
+                + gdf(df, "op_onnx__RandomUniform", 0)
+                + gdf(df, "op_onnx__RandomUniformLike", 0)
+                + gdf(df, "op_onnx__Multinomial", 0)
+                + gdf(df, "op_onnx__Bernoulli", 0),
+            ),
             n_node_attention=lambda df: gpreserve(
                 df,
                 "time_latency_eager",

From 1c61dd03a5c7f08d4a8e2fe8b035a0c120d3efa8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Xavier=20Dupr=C3=A9?=
Date: Tue, 4 Nov 2025 18:37:31 +0100
Subject: [PATCH 2/6] doc

---
 CHANGELOGS.rst | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOGS.rst b/CHANGELOGS.rst
index d15ed81b..d9c2cf5f 100644
--- a/CHANGELOGS.rst
+++ b/CHANGELOGS.rst
@@ -4,6 +4,8 @@ Change Logs
 0.8.1
 +++++
 
+* :pr:`286`: adds variable to track random nodes in models
+
 0.8.0
 +++++
 

From 0ef903255ae834bc519684f678dbea65c8e15461 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Xavier=20Dupr=C3=A9?=
Date: Thu, 6 Nov 2025 10:24:42 +0100
Subject: [PATCH 3/6] fix onnx_dtype_name

---
 onnx_diagnostic/helpers/onnx_helper.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/onnx_diagnostic/helpers/onnx_helper.py b/onnx_diagnostic/helpers/onnx_helper.py
index ee7eedc3..44098aec 100644
--- a/onnx_diagnostic/helpers/onnx_helper.py
+++ b/onnx_diagnostic/helpers/onnx_helper.py
@@ -331,7 +331,7 @@ def onnx_dtype_name(itype: int, exc: bool = True) -> str:
         print(onnx_dtype_name(7))
     """
     for k in dir(TensorProto):
-        if "FLOAT" in k or "INT" in k or "TEXT" in k or "BOOL" in k:
+        if k.upper() == k:
             v = getattr(TensorProto, k)
             if v == itype:
                 return k

From 9efcc5138288fe1cb7999c2704d8f85b56e5603b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Xavier=20Dupr=C3=A9?=
Date: Thu, 6 Nov 2025 10:52:37 +0100
Subject: [PATCH 4/6] fix unittest

---
 _unittests/ut_helpers/test_log_helper.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/_unittests/ut_helpers/test_log_helper.py b/_unittests/ut_helpers/test_log_helper.py
index 38c1ee20..875015a5 100644
--- a/_unittests/ut_helpers/test_log_helper.py
+++ b/_unittests/ut_helpers/test_log_helper.py
@@ -268,7 +268,7 @@ def test_cube_logs_performance_cube_time(self):
         cube = CubeLogsPerformance(dfs, keep_last_date=True)
         cube.load()
         ct = cube.clone()
-        self.assertEqual((52, 116), ct.shape)
+        self.assertEqual((52, 117), ct.shape)
 
     def test_duplicate(self):
         df = pandas.DataFrame(

From 190f7092b40743639bd141a53ce8dba1397e735e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Xavier=20Dupr=C3=A9?=
Date: Thu, 6 Nov 2025 12:11:51 +0100
Subject: [PATCH 5/6] better coverage

---
 _unittests/ut_helpers/test_args_helper.py    |  5 +++-
 _unittests/ut_helpers/test_bench_run.py      |  4 +++
 _unittests/ut_helpers/test_graph_helper.py   |  2 ++
 _unittests/ut_helpers/test_helper.py         | 22 ++++++++++++---
 _unittests/ut_helpers/test_log_helper.py     |  9 ++++++
 _unittests/ut_helpers/test_memory_peak.py    | 14 +++++++++-
 .../ut_helpers/test_mini_onnx_builder.py     | 28 +++++++++++++++++++
 _unittests/ut_helpers/test_onnx_helper.py    |  9 ++++++
 _unittests/ut_helpers/test_ort_session.py    | 16 +++++++++++
 _unittests/ut_helpers/test_rt_helper.py      |  8 ++++++
 _unittests/ut_torch_onnx/test_sbs.py         |  2 +-
 onnx_diagnostic/helpers/log_helper.py        |  3 +-
 onnx_diagnostic/helpers/memory_peak.py       |  2 ++
 onnx_diagnostic/helpers/mini_onnx_builder.py |  2 +-
 onnx_diagnostic/helpers/onnx_helper.py       |  2 +-
 onnx_diagnostic/helpers/rt_helper.py         | 10 ++-----
 16 files changed, 121 insertions(+), 17 deletions(-)

diff --git a/_unittests/ut_helpers/test_args_helper.py b/_unittests/ut_helpers/test_args_helper.py
index 43cf8050..aa3fbbaa 100644
--- a/_unittests/ut_helpers/test_args_helper.py
+++ b/_unittests/ut_helpers/test_args_helper.py
@@ -1,9 +1,12 @@
 import unittest
 from onnx_diagnostic.ext_test_case import ExtTestCase
-from onnx_diagnostic.helpers.args_helper import get_parsed_args
+from onnx_diagnostic.helpers.args_helper import get_parsed_args, check_cuda_availability
 
 
 class TestHelpers(ExtTestCase):
+    def test_check_cuda_availability(self):
+        check_cuda_availability()
+
     def test_args(self):
         try:
             args = get_parsed_args(
diff --git a/_unittests/ut_helpers/test_bench_run.py b/_unittests/ut_helpers/test_bench_run.py
index 1ac68ea5..12dbae6b 100644
--- a/_unittests/ut_helpers/test_bench_run.py
+++ b/_unittests/ut_helpers/test_bench_run.py
@@ -9,11 +9,15 @@
     get_machine,
     make_configs,
     run_benchmark,
+    _clean_string,
 )
 from onnx_diagnostic.helpers.cache_helper import make_dynamic_cache, CacheKeyValue
 
 
 class TestBenchRun(ExtTestCase):
+    def test__clean_string(self):
+        self.assertEqual("r", _clean_string("r"))
+
     def test_reg(self):
         text = ":m,6;"
         m = _extract_metrics(text)
diff --git a/_unittests/ut_helpers/test_graph_helper.py b/_unittests/ut_helpers/test_graph_helper.py
index 4990870d..ace57055 100644
--- a/_unittests/ut_helpers/test_graph_helper.py
+++ b/_unittests/ut_helpers/test_graph_helper.py
@@ -33,6 +33,8 @@ def test_computation_order(self):
             proto.graph.node, [i.name for i in [*proto.graph.input, *proto.graph.initializer]]
         )
         self.assertEqual([1, 2, 3], order)
+        gr = GraphRendering(proto)
+        self.assertEqual(repr(gr), "GraphRendering()")
 
     def test_graph_positions1(self):
         proto = oh.make_model(
diff --git a/_unittests/ut_helpers/test_helper.py b/_unittests/ut_helpers/test_helper.py
index d1bde2db..8798aab3 100644
--- a/_unittests/ut_helpers/test_helper.py
+++ b/_unittests/ut_helpers/test_helper.py
@@ -62,19 +62,21 @@ def test_string_type(self):
         s = string_type(obj)
         self.assertEqual(s, "dict(a:A7r1,b:#1[float],c:(int,))")
 
+    @hide_stdout()
     def test_string_dict(self):
         a = np.array([1], dtype=np.float32)
         obj = {"a": a, "b": {"r": 5.6}, "c": {1}}
-        s = string_type(obj)
+        s = string_type(obj, verbose=10)
         self.assertEqual(s, "dict(a:A1r1,b:dict(r:float),c:{int})")
 
+    @hide_stdout()
     def test_string_type_array(self):
         a = np.array([1], dtype=np.float32)
         t = torch.tensor([1])
         obj = {"a": a, "b": t}
-        s = string_type(obj, with_shape=False)
+        s = string_type(obj, with_shape=False, verbose=10)
         self.assertEqual(s, "dict(a:A1r1,b:T7r1)")
-        s = string_type(obj, with_shape=True)
+        s = string_type(obj, with_shape=True, verbose=10)
         self.assertEqual(s, "dict(a:A1s1,b:T7s1)")
 
     def test_string_sig_f(self):
@@ -92,6 +94,17 @@ def __init__(self, a, b=3, c=4, e=5):
         ssig = string_sig(A(1, c=8))
         self.assertEqual(ssig, "A(a=1, c=8)")
 
+    def test_ort_value(self):
+        import onnxruntime as onnxrt
+
+        numpy_arr_input = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
+        ortvalue = onnxrt.OrtValue.ortvalue_from_numpy(numpy_arr_input)
+        self.assertEqual("OV1r2", string_type(ortvalue))
+        self.assertEqual("OV1s3x2", string_type(ortvalue, with_shape=True))
+        self.assertEqual(
+            "OV(NO-NUMPY:FIXIT)", string_type(ortvalue, with_shape=True, with_min_max=True)
+        )
+
     def test_pretty_onnx(self):
         proto = oh.make_model(
             oh.make_graph(
@@ -122,7 +135,8 @@ def test_print_pretty_onnx(self):
                 [
                     oh.make_node("Sigmoid", ["Y"], ["sy"]),
                     oh.make_node("Mul", ["Y", "sy"], ["ysy"]),
-                    oh.make_node("Mul", ["X", "ysy"], ["final"]),
+                    oh.make_node("Cast", ["ysy"], ["ysyy"], to=1),
+                    oh.make_node("Mul", ["X", "ysyy"], ["final"]),
                 ],
                 "-nd-",
                 [
diff --git a/_unittests/ut_helpers/test_log_helper.py b/_unittests/ut_helpers/test_log_helper.py
index 875015a5..96fc88d7 100644
--- a/_unittests/ut_helpers/test_log_helper.py
+++ b/_unittests/ut_helpers/test_log_helper.py
@@ -183,6 +183,7 @@ def test_cube_logs_excel(self):
                     ["version.*", "model.*"],
                     ["time_latency", "time_baseline"],
                     key_agg=["model_name"],
+                    plots=True,
                 ),
             },
             verbose=1,
@@ -523,6 +524,14 @@ def test_cube_sbs_no_time(self):
         self.assertEqual(sbs_agg.shape, (2, 11))
         self.assertEqual(sbs_agg.index.names, ["date", "METRICS"])
         self.assertEqual(sorted(sbs_agg.columns.names), ["CONF", "exporter"])
+        output = self.get_dump_file("test_cube_sbs_no_time.xlsx")
+        cube.to_excel(
+            output,
+            views=["time_p"],
+            time_mask=True,
+            verbose=0,
+            sbs=dict(CFA=dict(exporter="E1", opt="O"), CFB=dict(exporter="E2", opt="O")),
+        )
 
     def test_cube_sbs_with_time(self):
         df = pandas.DataFrame(
diff --git a/_unittests/ut_helpers/test_memory_peak.py b/_unittests/ut_helpers/test_memory_peak.py
index 4204b03e..df3205a7 100644
--- a/_unittests/ut_helpers/test_memory_peak.py
+++ b/_unittests/ut_helpers/test_memory_peak.py
@@ -9,10 +9,22 @@
     ignore_warnings,
     requires_cuda,
 )
-from onnx_diagnostic.helpers.memory_peak import get_memory_rss, start_spying_on
+from onnx_diagnostic.helpers.memory_peak import get_memory_rss, start_spying_on, Monitor
 
 
 class TestMemoryPeak(ExtTestCase):
+    def test_basic(self):
+        m = Monitor()
+        self.assertEqual(
+            repr(m),
+            "Monitor(begin=0, end=0, peak=0, average=0, n=0, d_end=0, d_peak=0, d_avg=0)",
+        )
+        m.update(1)
+        self.assertEqual(
+            repr(m),
+            "Monitor(begin=1, end=1, peak=1, average=1, n=1, d_end=0, d_peak=0, d_avg=0.0)",
+        )
+
     @skipif_ci_apple("stuck")
     def test_memory(self):
         mem = get_memory_rss(os.getpid())
diff --git a/_unittests/ut_helpers/test_mini_onnx_builder.py b/_unittests/ut_helpers/test_mini_onnx_builder.py
index ae73acc8..a9843fb7 100644
--- a/_unittests/ut_helpers/test_mini_onnx_builder.py
+++ b/_unittests/ut_helpers/test_mini_onnx_builder.py
@@ -1,11 +1,13 @@
 import unittest
 import numpy as np
+import onnx
 import torch
 from onnx_diagnostic.ext_test_case import ExtTestCase
 from onnx_diagnostic.reference import ExtendedReferenceEvaluator
 from onnx_diagnostic.helpers.mini_onnx_builder import (
     create_onnx_model_from_input_tensors,
     create_input_tensors_from_onnx_model,
+    proto_from_array,
     MiniOnnxBuilder,
 )
 from onnx_diagnostic.helpers.cache_helper import make_dynamic_cache, CacheKeyValue
@@ -13,6 +15,13 @@
 
 
 class TestMiniOnnxBuilder(ExtTestCase):
+    def test_proto_from_array(self):
+        self.assertRaise(lambda: proto_from_array(None), TypeError)
+        t = torch.tensor([[0, 2.0], [3, 0]]).to_sparse()
+        self.assertRaise(lambda: proto_from_array(t), NotImplementedError)
+        tp = proto_from_array(torch.tensor([[0, 2.0], [3, 0]]).to(torch.bfloat16))
+        self.assertEqual(tp.data_type, onnx.TensorProto.BFLOAT16)
+
     def test_mini_onnx_builder_sequence_onnx(self):
         builder = MiniOnnxBuilder()
         builder.append_output_sequence("name", [np.array([6, 7])])
@@ -31,6 +40,25 @@ def test_mini_onnx_builder_sequence_ort(self):
         got = ref.run(None, {})
         self.assertEqualAny([np.array([6, 7])], got[0])
 
+    def test_mini_onnx_builder_sequence_ort_randomize(self):
+        from onnxruntime import InferenceSession
+
+        builder = MiniOnnxBuilder()
+        builder.append_output_initializer(
+            "name1", np.array([6, 7], dtype=np.float32), randomize=True
+        )
+        builder.append_output_initializer(
+            "name2", np.array([-6, 7], dtype=np.float32), randomize=True
+        )
+        onx = builder.to_onnx()
+        ref = InferenceSession(onx.SerializeToString(), providers=["CPUExecutionProvider"])
+        got = ref.run(None, {})
+        self.assertEqual((2,), got[0].shape)
+        self.assertEqual(np.float32, got[0].dtype)
+        self.assertGreaterOrEqual(got[0].min(), 0)
+        self.assertEqual((2,), got[1].shape)
+        self.assertEqual(np.float32, got[1].dtype)
+
     def test_mini_onnx_builder(self):
         data = [
             (
diff --git a/_unittests/ut_helpers/test_onnx_helper.py b/_unittests/ut_helpers/test_onnx_helper.py
index 5c22cae0..bd7b4c81 100644
--- a/_unittests/ut_helpers/test_onnx_helper.py
+++ b/_unittests/ut_helpers/test_onnx_helper.py
@@ -18,6 +18,7 @@
     tensor_statistics,
     enumerate_results,
     shadowing_names,
+    onnx_dtype_name,
 )
 
 
@@ -295,6 +296,7 @@ def test_enumerate_results(self):
         self.assertEqual(2, len(list(enumerate_results(model, "X", verbose=2))))
         self.assertEqual(2, len(list(enumerate_results(model, "Z", verbose=2))))
 
+    @hide_stdout()
     def test_enumerate_results_loop(self):
         x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
 
@@ -467,6 +469,13 @@ def _mkv_(name):
             shadowing_names(model),
         )
 
+    def test_onnx_dtype_name(self):
+        for k in dir(TensorProto):
+            if k.upper() == k and k not in {"DESCRIPTOR", "EXTERNAL"}:
+                self.assertEqual(k, onnx_dtype_name(getattr(TensorProto, k)))
+        self.assertRaise(lambda: onnx_dtype_name(1000), ValueError)
+        self.assertEqual(onnx_dtype_name(1000, exc=False), "UNEXPECTED")
+
 
 if __name__ == "__main__":
     unittest.main(verbosity=2)
diff --git a/_unittests/ut_helpers/test_ort_session.py b/_unittests/ut_helpers/test_ort_session.py
index dc899f2e..0ec5af44 100644
--- a/_unittests/ut_helpers/test_ort_session.py
+++ b/_unittests/ut_helpers/test_ort_session.py
@@ -295,6 +295,22 @@ def test_init_torch_bfloat16(self):
         self.assertIsInstance(got[0], torch.Tensor)
         self.assertEqualArray(expected[0], got[0])
 
+    def test_profiling(self):
+        model, feeds, expected = self._get_model_init(onnx.TensorProto.BFLOAT16)
+        wrap = InferenceSessionForTorch(
+            model,
+            providers="cpu",
+            graph_optimization_level=False,
+            enable_profiling=True,
+            optimized_model_filepath=self.get_dump_file("test_init_torch_bfloat16.onnx"),
+            log_severity_level=2,
+            log_verbosity_level=2,
+            disable_aot_function_inlining=1,
+        )
+        got = wrap.run(None, feeds)
+        self.assertIsInstance(got[0], torch.Tensor)
+        self.assertEqualArray(expected[0], got[0])
+
 
 if __name__ == "__main__":
     unittest.main(verbosity=2)
diff --git a/_unittests/ut_helpers/test_rt_helper.py b/_unittests/ut_helpers/test_rt_helper.py
index f165fa16..c11d2f16 100644
--- a/_unittests/ut_helpers/test_rt_helper.py
+++ b/_unittests/ut_helpers/test_rt_helper.py
@@ -1,5 +1,6 @@
 import os
 import unittest
+import onnx
 import torch
 from onnx_diagnostic.ext_test_case import (
     ExtTestCase,
@@ -12,6 +13,7 @@
     onnx_generate,
     generate_and_validate,
     onnx_generate_with_genai,
+    name_type_to_onnx_dtype,
 )
 from onnx_diagnostic.torch_models.hghub import get_untrained_model_with_inputs
 from onnx_diagnostic.torch_export_patches import torch_export_patches
@@ -76,6 +78,12 @@ def test_onnx_generate(self):
         self.assertNotEmpty(session)
         self.assertEqualArray(expected, res)
 
+    def test_name_type_to_onnx_dtype(self):
+        for name in ["int64", "int32", "int64", "float16", "float", "double", "bfloat16"]:
+            look = f"tensor({name})"
+            expected = getattr(onnx.TensorProto, name.upper())
+            self.assertEqual(expected, name_type_to_onnx_dtype(look))
+
 
 if __name__ == "__main__":
     unittest.main(verbosity=2)
diff --git a/_unittests/ut_torch_onnx/test_sbs.py b/_unittests/ut_torch_onnx/test_sbs.py
index 28f934a1..63732455 100644
--- a/_unittests/ut_torch_onnx/test_sbs.py
+++ b/_unittests/ut_torch_onnx/test_sbs.py
@@ -48,7 +48,7 @@ def forward(self, x):
                 verbose=1,
             ),
         )
-        self.assertEqual(len(results), 4)
+        self.assertEqual(len(results), 5)
 
     @hide_stdout()
     @ignore_warnings((DeprecationWarning, FutureWarning, UserWarning))
diff --git a/onnx_diagnostic/helpers/log_helper.py b/onnx_diagnostic/helpers/log_helper.py
index 0b4fa25e..66bb3962 100644
--- a/onnx_diagnostic/helpers/log_helper.py
+++ b/onnx_diagnostic/helpers/log_helper.py
@@ -1169,7 +1169,8 @@ def to_excel(
             assuming they should remain stale
         :param sbs: configurations to compare side-by-side, this adds two tabs,
             one gathering raw data about the two configurations, the other one
-            is aggregated by metrics
+            is aggregated by metrics, example:
+            ``=dict(CFA=dict(exporter="E1", opt="O"), CFB=dict(exporter="E2", opt="O"))``
         """
         if verbose:
             print(f"[CubeLogs.to_excel] create Excel file {output}, shape={self.shape}")
diff --git a/onnx_diagnostic/helpers/memory_peak.py b/onnx_diagnostic/helpers/memory_peak.py
index 0fd5ab6f..5ff44bd6 100644
--- a/onnx_diagnostic/helpers/memory_peak.py
+++ b/onnx_diagnostic/helpers/memory_peak.py
@@ -47,6 +47,8 @@ def delta_end(self):
 
     @property
     def delta_avg(self):
+        if self.n_measures == 0:
+            return 0
         return self.average / self.n_measures - self.begin
 
     def __repr__(self):
diff --git a/onnx_diagnostic/helpers/mini_onnx_builder.py b/onnx_diagnostic/helpers/mini_onnx_builder.py
index d3327ea2..2d727259 100644
--- a/onnx_diagnostic/helpers/mini_onnx_builder.py
+++ b/onnx_diagnostic/helpers/mini_onnx_builder.py
@@ -52,7 +52,7 @@ def proto_from_array(
 
     tensor = TensorProto()
     tensor.dims.extend(arr_cpu.shape)
-    tensor.name = name
+    tensor.name = name or ""
     itype = dtype_to_tensor_dtype(arr_cpu.dtype)
     assert not hasattr(TensorProto, "INT4") or itype not in {
         TensorProto.INT4,
diff --git a/onnx_diagnostic/helpers/onnx_helper.py b/onnx_diagnostic/helpers/onnx_helper.py
index 44098aec..52d2ecba 100644
--- a/onnx_diagnostic/helpers/onnx_helper.py
+++ b/onnx_diagnostic/helpers/onnx_helper.py
@@ -331,7 +331,7 @@ def onnx_dtype_name(itype: int, exc: bool = True) -> str:
         print(onnx_dtype_name(7))
     """
     for k in dir(TensorProto):
-        if k.upper() == k:
+        if k.upper() == k and k != "EXTERNAL":
             v = getattr(TensorProto, k)
             if v == itype:
                 return k
diff --git a/onnx_diagnostic/helpers/rt_helper.py b/onnx_diagnostic/helpers/rt_helper.py
index d03f3c24..2ab19681 100644
--- a/onnx_diagnostic/helpers/rt_helper.py
+++ b/onnx_diagnostic/helpers/rt_helper.py
@@ -10,13 +10,9 @@
 
 
 def name_type_to_onnx_dtype(name: str) -> int:
-    if name == "tensor(int64)":
-        return onnx.TensorProto.INT64
-    if name == "tensor(float)":
-        return onnx.TensorProto.FLOAT
-    if name == "tensor(float16)":
-        return onnx.TensorProto.FLOAT16
-    raise AssertionError(f"Unexpected value {name!r}")
+    assert name.startswith("tensor(") and name.endswith(")"), f"Invalid value name={name!r}"
+    look = name[7:-1]
+    return getattr(onnx.TensorProto, look.upper())
 
 
 def make_feeds(

From 1c6f50124ad22f9e2a8ebd1a247c2b3b051b2faf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Xavier=20Dupr=C3=A9?=
Date: Thu, 6 Nov 2025 12:16:49 +0100
Subject: [PATCH 6/6] fix

---
 _unittests/ut_export/test_shape_helper.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/_unittests/ut_export/test_shape_helper.py b/_unittests/ut_export/test_shape_helper.py
index 8de7c807..0615c819 100644
--- a/_unittests/ut_export/test_shape_helper.py
+++ b/_unittests/ut_export/test_shape_helper.py
@@ -106,10 +106,15 @@ def test_all_dynamic_shape_all_transformers_cache(self):
         ]
         with torch_export_patches(patch_transformers=True):
             for cache, exds in caches:
-                with self.subTest(cache_name=cache.__class__.__name__):
+                with self.subTest(cache_name=cache.__class__.__name__, patch=True):
                     ds = all_dynamic_shapes_from_inputs(cache)
                     self.assertEqual(exds, ds)
 
+        for cache, exds in caches:
+            with self.subTest(cache_name=cache.__class__.__name__, patch=False):
+                ds = all_dynamic_shapes_from_inputs(cache)
+                self.assertEqual(exds, ds)
+
     @requires_transformers("4.52")
     @requires_torch("2.7.99")
     def test_all_dynamic_shapes_from_inputs(self):