"""
Compares two ONNX models exported from the same PyTorch model: runs both with
onnxruntime on identical inputs, measures the output discrepancy and, when it
is too large, bisects the graphs to locate the first diverging node.
"""

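# Each import is printed so that a hanging or failing dependency is easy to
# identify when the script starts.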
print("-- import onnx")
import onnx

print("-- import onnx.helper")
from onnx.helper import tensor_dtype_to_np_dtype

print("-- import onnxruntime")
import onnxruntime

print("-- import torch")
import torch

print("-- import transformers")
import transformers

print("-- import huggingface_hub")
import huggingface_hub

print("-- import onnx-diagnostic.helper")
from onnx_diagnostic.helpers.helper import flatten_object, string_type, max_diff, string_diff

print("-- import onnx-diagnostic.torch_models.hghub")
from onnx_diagnostic.torch_models.hghub import get_untrained_model_with_inputs

print("-- done")

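# Two exports of the same model to compare; judging by the file names, they
# differ by the opset they target (20 versus 21).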
model_id = "arnir0/Tiny-LLM"
onnx1 = (
    "dump_test/arnir0_Tiny-LLM-custom-default-f16-cuda-op20/"
    "arnir0_Tiny-LLM-custom-default-f16-cuda-op20.onnx"
)
onnx2 = (
    "dump_test/arnir0_Tiny-LLM-custom-default-f16-cuda-op21/"
    "arnir0_Tiny-LLM-custom-default-f16-cuda-op21.onnx"
)
providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]

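# Load both protos to inspect their input signatures.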
print(f"-- load {onnx1!r}")
onx1 = onnx.load(onnx1)
print(f"-- load {onnx2!r}")
onx2 = onnx.load(onnx2)

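# get_untrained_model_with_inputs builds a randomly initialized copy of the
# model together with suitable sample inputs; only the inputs are used here.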
print(f"-- getting inputs for model_id {model_id!r}")
data = get_untrained_model_with_inputs(model_id)
inputs = data["inputs"]
print(f"-- inputs: {string_type(inputs, with_shape=True)}")
flatten_inputs = flatten_object(inputs, drop_keys=True)
print(f"-- flat inputs: {string_type(flatten_inputs, with_shape=True)}")

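# Build the feeds: pair the flattened torch inputs with the input names of the
# first model and cast every tensor to the dtype the ONNX graph declares.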
names = [i.name for i in onx1.graph.input]
itypes = [i.type.tensor_type.elem_type for i in onx1.graph.input]
assert names == [
    i.name for i in onx2.graph.input
], f"Not the same names for both models {names} != {[i.name for i in onx2.graph.input]}"
feeds = {
    n: t.numpy().astype(tensor_dtype_to_np_dtype(itype))
    for n, itype, t in zip(names, itypes, flatten_inputs)
}
print(f"-- feeds: {string_type(feeds, with_shape=True)}")

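# optimized_model_filepath tells onnxruntime to dump the graph it actually runs
# after optimization; severity/verbosity 0 enables the most verbose logging,
# which shows how nodes are assigned to execution providers.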
print(f"-- creating session 1 from {onnx1!r}")
opts = onnxruntime.SessionOptions()
opts.optimized_model_filepath = "debug1_full.onnx"
opts.log_severity_level = 0
opts.log_verbosity_level = 0
sess1 = onnxruntime.InferenceSession(onnx1, opts, providers=providers)
print(f"-- creating session 2 from {onnx2!r}")
opts.optimized_model_filepath = "debug2_full.onnx"
sess2 = onnxruntime.InferenceSession(onnx2, opts, providers=providers)

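# Run both models on identical feeds.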
print("-- run session1")
expected1 = sess1.run(None, feeds)
print(f"-- got {string_type(expected1, with_shape=True)}")
print("-- run session2")
expected2 = sess2.run(None, feeds)
print(f"-- got {string_type(expected2, with_shape=True)}")

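# max_diff computes discrepancy measures over the two sets of outputs,
# including the maximum absolute difference used as a threshold below.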
print("-- compute differences")
diff = max_diff(expected1, expected2)
print(f"-- diff={string_diff(diff)}")


def get_names(onx: onnx.ModelProto) -> list[tuple[str, str, str]]:
    "Returns one tuple (result name, op_type, node name) per intermediate result."
    names = []
    for node in onx.graph.node:
        for o in node.output:
            names.append((o, node.op_type, node.name))
    return names


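# If the outputs disagree by more than 0.1, look for the first diverging
# intermediate result: truncate both models at every common result name,
# run the truncated models on the same feeds, and compare.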
if diff["abs"] > 0.1:
    print("--")
    print("-- import select_model_inputs_outputs")
    from onnx_extended.tools.onnx_nodes import select_model_inputs_outputs

    print("-- looking into intermediate results")
    names1 = get_names(onx1)
    names2 = get_names(onx2)
    # Match on result names only: node names may differ between the two exports.
    set2 = {n[0] for n in names2}
    common = [n for n in names1 if n[0] in set2]
    print(f"-- {len(common)} names / {len(names1)}-{len(names2)}")
    print(f"-- first names {common[:5]}")
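    # Walk through the common results in graph order and compare them pairwise.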
    for name, op_type, op_name in common:
        x1 = select_model_inputs_outputs(onx1, [name])
        x2 = select_model_inputs_outputs(onx2, [name])
        s1 = onnxruntime.InferenceSession(x1.SerializeToString(), providers=providers)
        s2 = onnxruntime.InferenceSession(x2.SerializeToString(), providers=providers)
        # The truncated models may consume only a subset of the original inputs,
        # and onnxruntime rejects feeds with unknown names, so filter them.
        f1 = {i.name: feeds[i.name] for i in s1.get_inputs()}
        f2 = {i.name: feeds[i.name] for i in s2.get_inputs()}
        e1 = s1.run(None, f1)
        e2 = s2.run(None, f2)
        diff = max_diff(e1, e2)
        print(
            f"-- name={name!r}: diff={string_diff(diff)} "
            f"- op_type={op_type!r}, op_name={op_name!r}"
        )
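        # First diverging result: save both truncated models as optimized by
        # onnxruntime for manual inspection, print the tensors, and stop.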
        if diff["abs"] > 0.1:
            opts = onnxruntime.SessionOptions()
            opts.optimized_model_filepath = "debug1.onnx"
            onnxruntime.InferenceSession(x1.SerializeToString(), opts, providers=providers)
            opts.optimized_model_filepath = "debug2.onnx"
            onnxruntime.InferenceSession(x2.SerializeToString(), opts, providers=providers)
            print("--")
            print("-- break here")
            print(f"-- feeds {string_type(feeds, with_shape=True)}")
            print(f"-- e1={string_type(e1, with_shape=True, with_min_max=True)}")
            print(f"-- e2={string_type(e2, with_shape=True, with_min_max=True)}")
            break