diff --git a/CHANGELOGS.rst b/CHANGELOGS.rst
index bce920d9..c2a91eeb 100644
--- a/CHANGELOGS.rst
+++ b/CHANGELOGS.rst
@@ -4,6 +4,7 @@ Change Logs
 0.8.3
 +++++
 
+* :pr:`322`: support rerunning onnx kernels with torch intermediate results in side-by-side
 * :pr:`314`: fix modelbuilder download needed after this change https://github.com/microsoft/onnxruntime-genai/pull/1862
 * :pr:`311`: use custom and local function to use PackedMultiHeadAttention from onnxruntime
 * :pr:`310`: splits patches into multiple files
diff --git a/_doc/cmds/sbs.rst b/_doc/cmds/sbs.rst
index 8ced069e..1a411da1 100644
--- a/_doc/cmds/sbs.rst
+++ b/_doc/cmds/sbs.rst
@@ -20,3 +20,40 @@ CPU, CUDA
 Inputs are saved :func:`torch.save`.
 The execution will run on CUDA if the device of the inputs is CUDA,
 same goes on CPU.
+
+Example
++++++++
+
+.. code-block::
+
+    python -m onnx_diagnostic sbs \
+        -i qwen_2_5_vl_instruct_visual.inputs.pt \
+        --ep test_imagetext2text_qwen_2_5_vl_instruct_visual.cuda.float16.custom.graph.ep.pt2 \
+        -m test_imagetext2text_qwen_2_5_vl_instruct_visual.cuda.float16.custom.onnx \
+        -o results.dynamo.float16.xlsx \
+        -v 1 --atol=0.1 --rtol=1 \
+        --replay-names conv3d,rsqrt,to_4,mul_48,linear,linear_2,linear_84,linear_89,mul_172,linear_156,linear_159 \
+        -2 --reset conv3d
+
+A snippet of the table it produces:
+
+::
+
+    ep_name       onnx_name     ep_target               onnx_op_type                      onnx_id_output  ep_shape_type      onnx_shape_type    err_abs
+    transpose_18  transpose_18  aten.transpose.int      Transpose                         0               GT10s16x1292x80    GT10s16x1292x80    0.0083
+    unsqueeze_50  unsqueeze_50  aten.unsqueeze.default  Unsqueeze                         0               GT10s1x16x1292x80  GT10s1x16x1292x80  0.0083
+    eq_20         eq_20         aten.eq.Scalar          Equal                             0               GT9s1292x1292      GT9s1292x1292      0
+    unsqueeze_56  unsqueeze_56  aten.unsqueeze.default  Unsqueeze                         0               GT9s1x1x1292x1292  GT9s1x1x1292x1292  0
+    slice_29      slice_29      aten.slice.Tensor       Slice                             0               GT9s1x1x1292x1292  GT9s1x1x1292x1292  0
+    transpose_19  transpose_19  aten.transpose.int      Transpose                         0               GT10s1x1292x16x80  GT10s1x1292x16x80  0.0071
+    reshape_20    reshape_20    aten.reshape.default    Reshape                           0               GT10s1292x1280     GT10s1292x1280     0.0071
+    linear_21     linear_21     aten.linear.default     Gemm                              0               GT10s1292x1280     GT10s1292x1280     0.0015
+    mul_54        mul_54        aten.mul.Tensor         SkipSimplifiedLayerNormalization  0               GT10s1292x1280     GT10s1292x1280     0.0098
+    add_32        add_32        aten.add.Tensor         SkipSimplifiedLayerNormalization  3               GT10s1292x1280     GT10s1292x1280     0.0313
+    linear_22     linear_22     aten.linear.default     Gemm                              0               GT10s1292x3420     GT10s1292x3420     0.0078
+    silu_4        silu_4        aten.silu.default       QuickGelu                         0               GT10s1292x3420     GT10s1292x3420     0.0059
+
+The available columns are described by
+:class:`RunAlignedRecord <onnx_diagnostic.torch_onnx.sbs_dataclasses.RunAlignedRecord>`.
+It is possible to dump pieces of the model to study a particular input
+with :class:`ReplayConfiguration <onnx_diagnostic.torch_onnx.sbs_dataclasses.ReplayConfiguration>`.
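
The spreadsheet written by ``-o`` can be post-processed directly. A minimal
sketch, assuming the column names shown in the table above and that pandas
with openpyxl is available:

    import pandas

    df = pandas.read_excel("results.dynamo.float16.xlsx")
    # rows without err_abs correspond to results that could not be paired
    cmp = df.dropna(subset=["err_abs"])
    # nodes whose absolute error exceeds the tolerance given on the command line
    suspects = cmp[cmp["err_abs"] > 0.1]
    print(suspects[["ep_name", "onnx_op_type", "ep_shape_type", "err_abs"]])
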
diff --git a/_unittests/ut_torch_onnx/test_sbs.py b/_unittests/ut_torch_onnx/test_sbs.py
index 5c52111a..ac994033 100644
--- a/_unittests/ut_torch_onnx/test_sbs.py
+++ b/_unittests/ut_torch_onnx/test_sbs.py
@@ -379,7 +379,7 @@ def forward(self, x):
                 use_tensor=True,
             ),
         )
-        df = pandas.DataFrame(list(results))
+        df = pandas.DataFrame(list(results)).dropna(axis=1, how="all")
         df.to_excel(self.get_dump_file("test_sbs_model_with_weights_custom.xlsx"))
         self.assertEqual(
             [
@@ -390,8 +390,8 @@ def forward(self, x):
                 "ep_time_run",
                 "err_abs",
                 "err_dev",
+                "err_h001",
                 "err_h01",
-                "err_nan",
                 "err_rel",
                 "onnx_id_node",
                 "onnx_id_output",
@@ -445,7 +445,7 @@ def forward(self, x):
                 use_tensor=True,
             ),
         )
-        df = pandas.DataFrame(list(results))
+        df = pandas.DataFrame(list(results)).dropna(axis=1, how="all")
         df.to_excel(self.get_dump_file("test_sbs_model_with_weights_dynamo.xlsx"))
         self.assertEqual(
             [
@@ -456,8 +456,8 @@ def forward(self, x):
                 "ep_time_run",
                 "err_abs",
                 "err_dev",
+                "err_h001",
                 "err_h01",
-                "err_nan",
                 "err_rel",
                 "onnx_id_node",
                 "onnx_id_output",
@@ -542,7 +542,7 @@ def forward(self, x):
                 reset_names=["linear"],
             ),
         )
-        df = pandas.DataFrame(list(results))
+        df = pandas.DataFrame(list(results)).dropna(axis=1, how="all")
         df.to_excel(self.get_dump_file("test_sbs_model_with_weights_custom_reset.xlsx"))
         onnx_op_type = df["onnx_op_type"].tolist()
         self.assertEqual(onnx_op_type.count("reset"), 1)
@@ -593,10 +593,83 @@ def forward(self, x):
                 ),
             ),
         )
-        df = pandas.DataFrame(list(results))
+        df = pandas.DataFrame(list(results)).dropna(axis=1, how="all")
         df.to_excel(self.get_dump_file("test_sbs_replay.xlsx"))
-        print(df)
-        # self.clean_dump()
+        self.assertEqual(df.shape, (8, 16))
+        self.clean_dump()
+
+    @hide_stdout()
+    @ignore_warnings((DeprecationWarning, FutureWarning, UserWarning))
+    def test_sbs_run_onnx_with_torch_inputs(self):
+        torch = self.torch
+
+        class Model(self.torch.nn.Module):
+            def __init__(self):
+                super(Model, self).__init__()
+                self.fc1 = torch.nn.Linear(10, 32)  # input size 10 → hidden size 32
+                self.relu = torch.nn.ReLU()
+                self.fc2 = torch.nn.Linear(32, 1)  # hidden → output
+
+            def forward(self, x):
+                x = self.relu(self.fc1(x))
+                x = self.fc2(x)
+                return x
+
+        inputs = dict(x=self.torch.randn((5, 10)))
+        ds = dict(x={0: "batch"})
+        Model()(**inputs)
+        ep = self.torch.export.export(
+            Model(), (), kwargs=inputs, dynamic_shapes=use_dyn_not_str(ds)
+        )
+        filename = self.get_dump_file("test_sbs_run_onnx_with_torch_inputs.onnx")
+        to_onnx(ep, exporter="custom", filename=filename)
+        onx = onnx.load(filename)
+        results = list(
+            run_aligned(
+                ep,
+                onx,
+                kwargs=inputs,
+                run_cls=OnnxruntimeEvaluator,
+                verbose=11,
+                use_tensor=True,
+                run_onnx_with_torch_inputs=True,
+            ),
+        )
+        df = pandas.DataFrame(list(results)).dropna(axis=1, how="all")
+        df.to_excel(self.get_dump_file("test_sbs_run_onnx_with_torch_inputs.xlsx"))
+        self.assertEqual(
+            [
+                "comment",
+                "ep_id_node",
+                "ep_name",
+                "ep_shape_type",
+                "ep_target",
+                "ep_time_run",
+                "err_abs",
+                "err_abs2",
+                "err_dev",
+                "err_dev2",
+                "err_h001",
+                "err_h0012",
+                "err_h01",
+                "err_h012",
+                "err_rel",
+                "err_rel2",
+                "onnx_id_node",
+                "onnx_id_output",
+                "onnx_name",
+                "onnx_op_type",
+                "onnx_shape_type",
+                "onnx_time_run",
+            ],
+            sorted(df.columns),
+        )
+        self.assertEqual(len(results), 8)
+        self.assertEqual([0, 0, 0, 0, None, 0, 0, 0], [r.err_dev for r in results])
+        self.assertEqual(
+            [-1, -1, -1, -1, -1, 0, 1, 2], df["onnx_id_node"].fillna(-10).tolist()
+        )
+        self.clean_dump()


 if __name__ == "__main__":
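
The expected column lists in these tests change because the dataframe is now
pruned with ``dropna(axis=1, how="all")``: a column that never receives a value,
such as ``err_nan`` here, disappears, while the new ``err_h001`` column (count of
values off by more than 0.01) shows up. A one-line illustration of that pruning:

    import numpy
    import pandas

    df = pandas.DataFrame({"err_abs": [0.0, 0.1], "err_nan": [numpy.nan, numpy.nan]})
    print(sorted(df.dropna(axis=1, how="all").columns))  # ['err_abs'], err_nan is dropped
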
diff --git a/onnx_diagnostic/_command_lines_parser.py b/onnx_diagnostic/_command_lines_parser.py
index f7d83943..f63dc722 100644
--- a/onnx_diagnostic/_command_lines_parser.py
+++ b/onnx_diagnostic/_command_lines_parser.py
@@ -1217,6 +1217,19 @@ def get_parser_sbs() -> ArgumentParser:
         default=False,
         help="First runs the whole model.",
     )
+    parser.add_argument(
+        "-2",
+        "--second-run",
+        action=BooleanOptionalAction,
+        default=False,
+        help=textwrap.dedent(
+            """
+            Tries to run all onnx nodes with the torch results produced by the exported
+            program, then measures the discrepancies again. This helps identify kernels
+            that introduce discrepancies rather than kernels just propagating them.
+            """
+        ),
+    )
     parser.add_argument(
         "--reset",
         required=False,
@@ -1365,6 +1378,7 @@ def _size(name):
         reset_names=args.reset.split(","),
         exc=False,
         replay_configuration=replay_configuration,
+        run_onnx_with_torch_inputs=args.second_run,
     ):
         data.append(obs)
         if (
@@ -1377,8 +1391,10 @@ def _size(name):
             )
             df.to_excel(args.output)
             print(f"-- final saves into {args.output!r}")
-    df = pandas.DataFrame(data).apply(
-        lambda col: col.fillna("") if col.dtype == "object" else col
+    df = (
+        pandas.DataFrame(data)
+        .apply(lambda col: col.fillna("") if col.dtype == "object" else col)
+        .dropna(axis=1, how="all")
     )
     df.to_excel(args.output, index=False)
     print("-- done")
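
With ``--second-run``, every ``err_*`` column gets an ``err_*2`` counterpart
measured after feeding the same kernel with the torch tensors instead of the
upstream onnx results. A hedged sketch of how the two columns could be split
apart (the file name and the 0.5 threshold are arbitrary choices for the example):

    import pandas

    df = pandas.read_excel("results.dynamo.float16.xlsx")
    both = df.dropna(subset=["err_abs", "err_abs2"])
    # if the error survives exact inputs, the kernel introduces it,
    # otherwise it mostly propagates what it received
    introduces = both[both["err_abs2"] >= 0.5 * both["err_abs"]]
    print(introduces[["onnx_name", "onnx_op_type", "err_abs", "err_abs2"]])
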
diff --git a/onnx_diagnostic/torch_onnx/sbs.py b/onnx_diagnostic/torch_onnx/sbs.py
index fe2016fc..03912685 100644
--- a/onnx_diagnostic/torch_onnx/sbs.py
+++ b/onnx_diagnostic/torch_onnx/sbs.py
@@ -9,7 +9,12 @@
 from ..helpers.onnx_helper import pretty_onnx
 from ..helpers.torch_helper import to_numpy, from_numpy, to_tensor, torch_dtype_to_onnx_dtype
 from ..helpers.torch_fx_graph_helper import prepare_args_kwargs, run_fx_node
-from .sbs_dataclasses import ReplayConfiguration, RunAlignedRecord, StatusRunAligned
+from .sbs_dataclasses import (
+    ReplayConfiguration,
+    RunAlignedRecord,
+    StatusRunAligned,
+    make_torch_inputs,
+)
 
 
 def _check_tensor_(use_tensor, name, obj, flip_type=False):
@@ -41,7 +46,8 @@ def _loop_cmp(
     torch_results: Dict[str, torch.Tensor],
     onnx_results: Dict[str, Any],
     onnx_name: str,
-    torch_result: torch.Tensor,
+    onnx_result: torch.Tensor,
+    second_onnx_result: torch.Tensor,
     verbose: int,
     atol: Optional[float],
     rtol: Optional[float],
@@ -51,13 +57,13 @@ def _loop_cmp(
     exc: bool,
     use_tensor: bool,
 ) -> Optional[RunAlignedRecord]:
-    onnx_results[onnx_name] = _check_tensor_(use_tensor, onnx_name, torch_result)
+    onnx_results[onnx_name] = _check_tensor_(use_tensor, onnx_name, onnx_result)
     if verbose > 1:
-        print(f"[run_aligned-nx] +res: {onnx_name}={string_type(torch_result, **str_kws)}")
+        print(f"[run_aligned-nx] +res: {onnx_name}={string_type(onnx_result, **str_kws)}")
 
     to = mapping_onnx_to_torch.get(onnx_name, onnx_name)
     if to in torch_results:
-        d = max_diff(torch_results[to], torch_result, hist=[0.1])
+        d = max_diff(torch_results[to], onnx_result, hist=[0.1, 0.01])
         if verbose > 1:
             if onnx_name == to:
                 print(f"[run_aligned-==] cmp {to}: {string_diff(d)}")
@@ -68,9 +74,9 @@ def _loop_cmp(
                 raise ValueError(
                     f"discrepancies detected for results [{to}/{onnx_name}]: "
                     f"{string_diff(d)}"
                     f"\n-- torch_results: {string_type(torch_results[to], **str_kws)}"
-                    f"\n-- onnx_results: {string_type(torch_result, **str_kws)}"
+                    f"\n-- onnx_result: {string_type(onnx_result, **str_kws)}"
                     f"\n-- torch\n{torch_results[to]}"
                 )
             else:
                 print(
@@ -82,9 +88,12 @@ def _loop_cmp(
             ep_name=to,
             onnx_name=onnx_name,
             ep_shape_type=string_type(torch_results[to], **str_kws),
-            onnx_shape_type=string_type(torch_result, **str_kws),
+            onnx_shape_type=string_type(onnx_result, **str_kws),
         )
         r.set_diff(d)
+        if second_onnx_result is not None:
+            d2 = max_diff(torch_results[to], second_onnx_result, hist=[0.1, 0.01])
+            r.set_diff2(d2)
         mapping_onnx_to_torch[onnx_name] = to
         return r
     return None
@@ -104,6 +113,23 @@ def _duplicated_values(d):
     return final
 
 
+def _validation_nn_functional(
+    node: onnx.NodeProto, new_feeds: Dict[str, torch.Tensor], expected: List[torch.Tensor]
+) -> Optional[str]:
+    if node.op_type == "Gemm" and len(node.input) == 3:
+        atts = {}
+        for att in node.attribute:
+            if att.name in ("alpha", "beta"):
+                atts[att.name] = att.f
+            elif att.name in ("transA", "transB"):
+                atts[att.name] = att.i
+        if atts == {"transB": 1}:
+            res = torch.nn.functional.linear(*[new_feeds[i] for i in node.input])
+            diff = max_diff(res, expected[0])
+            return f"function.linear:{string_diff(diff)}"
+    return None
+
+
 def _loop_onnx_node(
     onx: onnx.ModelProto,
     ep_graph_nodes: List[torch.fx.Node],
@@ -129,6 +155,7 @@ def _loop_onnx_node(
     has_cuda: bool,
     run_cls: type,
     loop: Any,
+    run_onnx_with_torch_inputs: bool,
 ) -> Iterator[Optional[RunAlignedRecord]]:
 
     if i_onnx in already_run_onnx:
@@ -140,11 +167,11 @@
             f"{node.op_type}({', '.join(node.input)}) -> {', '.join(node.output)}"
         )
     elif verbose == 1:
-        loop.update(i_torch + i_onnx)
         loop.set_description(
             f"ep {i_torch}/{len(ep_graph_nodes)} nx {i_onnx}/{len(onx.graph.node)} "
             f"{status.to_str()}"
         )
+        loop.update(min(1, 1 + i_torch + i_onnx))
 
     ref = run_cls(node, **run_cls_kwargs)
     # We need to clone because the runtime maybe using dlpack to create OrtValue
     feeds = (
         {k: onnx_results[k].clone() for k in node.input if k}
         if use_tensor
@@ -154,6 +181,8 @@
         else {k: onnx_results[k].copy() for k in node.input if k}
     )
     assert "" not in feeds, f"Unexpected feeds={string_type(feeds, **str_kws)}"
+    if verbose > 1:
+        print(f"[run_aligned] feeds={string_type(feeds, **str_kws)}")
     begin = time.perf_counter()
     try:
         res = ref.run(None, feeds)  # type: ignore[attr-defined]
@@ -163,6 +192,8 @@
             f"with inputs={node.input}, feeds={string_type(feeds, **str_kws)}"
         ) from e
     duration = time.perf_counter() - begin
+    if verbose > 1:
+        print(f"[run_aligned] res={string_type(res, **str_kws)}")
     assert (
         not has_cuda
         or not any(t is not None and t.is_cuda for t in feeds.values())
@@ -186,9 +217,43 @@
         f"res={string_type(res, with_device=True, with_shape=True)}, "
         f"node is {pretty_onnx(node)}"
     )
+
+    comment = None
+    cross = None
+    if run_onnx_with_torch_inputs:
+        # Let's run the operator with torch results if they are available.
+        new_feeds, removed = make_torch_inputs(
+            node.input,
+            {
+                **{v: k for k, v in torch_names_to_onnx_names.items()},
+                **mapping_onnx_to_torch,
+            },
+            onnx_results,
+            torch_results,
+            submodel=None,
+        )
+        if not removed:
+            if verbose > 1:
+                print(
+                    f"[run_aligned] feeds for second run="
+                    f"{string_type(new_feeds, **str_kws)}"
+                )
+            cross = ref.run(None, new_feeds)
+            if verbose > 1:
+                print(f"[run_aligned] got for second run={string_type(cross, **str_kws)}")
+            # Gemm maps to torch.nn.functional.linear, in that case we run it as well.
+            to = mapping_onnx_to_torch.get(node.output[0], node.output[0])
+            if to in torch_results:
+                comment = _validation_nn_functional(node, new_feeds, [torch_results[to]])
+        elif verbose > 1:
+            print(f"[run_aligned] second run not possible because of missing {removed}")
+
+    if cross is None:
+        cross = [None for _ in res]
+
     list_node_output = list(node.output)
     node_output = [o for o in list_node_output if o]
-    for o, r in zip(node_output, res):
+    for o, r, r2 in zip(node_output, res, cross):
         if r is None or not o:
             continue
         tmp = _loop_cmp(
@@ -197,6 +262,7 @@ def _loop_onnx_node(
             onnx_results,
             o,
             r,
+            r2,
             verbose,
             atol,
             rtol,
@@ -221,6 +287,7 @@ def _loop_onnx_node(
             status.yielded_nodes += 1
             if tmp.err_abs is not None:
                 status.update(tmp.err_abs)
+            tmp.comment = comment
             yield tmp
 
     # do we need to dump pieces if graph the user can replay?
@@ -258,6 +325,7 @@ def _loop_onnx_node(
                 onnx_results,
                 o,
                 torch_results[tmp.ep_name],
+                None,
                 verbose,
                 atol,
                 rtol,
@@ -499,6 +567,7 @@ def run_aligned(
     exc: bool = True,
     reset_names: Optional[List[str]] = None,
     replay_configuration: Optional[ReplayConfiguration] = None,
+    run_onnx_with_torch_inputs: bool = False,
 ) -> Iterator[RunAlignedRecord]:
     """
     Runs in parallel both the exported program
@@ -524,7 +593,10 @@ def run_aligned(
         piece of the onnx graph he wants to replay in order to investigate later,
         see :class:
        `ReplayConfiguration <onnx_diagnostic.torch_onnx.sbs_dataclasses.ReplayConfiguration>`
-    :return: a list of :class:`RunAlignedRecord`
+    :param run_onnx_with_torch_inputs: run an onnx operator with the torch results
+        if they are available
+    :return: a list of :class:`RunAlignedRecord
+        <onnx_diagnostic.torch_onnx.sbs_dataclasses.RunAlignedRecord>`
 
     Example:
 
@@ -567,7 +639,6 @@ def forward(self, x):
         df = df.apply(lambda col: col.fillna("") if col.dtype == "object" else col)
         print(df)
 
-
     This example uses :class:`onnx.reference.ReferenceEvaluator` to run the onnx model
     but onnxruntime can also be used through
     :class:`onnx_diagnostic.helpers.ort_session.InferenceSessionForTorch`.
@@ -711,7 +782,6 @@ def forward(self, x):
     positions: Dict[str, Dict[str, int]] = {}
     ep_graph_nodes = list(ep.graph.nodes)
     torch_results: Dict[str, Any] = {}
-    last_position = 0
     torch_output_names = None
     torch_input_names: List[str] = []
     name_to_ep_node = {}
@@ -774,7 +844,10 @@ def forward(self, x):
 
     # starts the side-by-side
     if verbose:
-        print(f"[run_aligned] ep: starts side-by-side with {len(ep_graph_nodes)} nodes")
+        print(
+            f"[run_aligned] ep: starts side-by-side with {len(ep_graph_nodes)} "
+            f"fx nodes and {len(onx.graph.node)} onnx nodes"
+        )
     if verbose == 1:
         import tqdm
 
@@ -785,6 +858,7 @@ def forward(self, x):
     already_run: Set[int] = set()
     ep_durations = {}
     status = StatusRunAligned()
+    last_position = 0
     for i_torch, node in enumerate(ep_graph_nodes):
         if verbose > 1:
             if node.op == "call_function":
@@ -797,11 +871,11 @@ def forward(self, x):
                 f"[run_aligned] run ep.graph.nodes[{i_torch}]: {node.op} -> {node.name!r}"
             )
         elif verbose == 1:
-            loop.update(i_torch + last_position)
             loop.set_description(
                 f"ep {i_torch}/{len(ep_graph_nodes)} nx {last_position}/{len(onx.graph.node)} "
                 f"{status.to_str()}"
             )
+            loop.update(min(1, 1 + i_torch + last_position))
 
         if node.op == "placeholder":
             is_input = node.name not in placeholders
@@ -868,7 +942,7 @@ def forward(self, x):
                         max_diff(
                             t,
                             onnx_results[torch_names_to_onnx_names[node.name]],
-                            hist=[0.1],
+                            hist=[0.1, 0.01],
                         )
                     )
                     yield record.check(already_yielded)
@@ -958,6 +1032,7 @@ def forward(self, x):
             has_cuda,
             run_cls,
             loop,
+            run_onnx_with_torch_inputs,
         ):
             if r:
                 yield r.check(already_yielded)
@@ -966,7 +1041,7 @@ def forward(self, x):
             last_position = next_to_visit
 
     # complete the execution of the onnx graph
-    if verbose:
+    if verbose > 1:
         print(
             f"[run_aligned] complete execution of onnx graph from pos={last_position} "
             f"to {len(onx.graph.node)}"
@@ -999,9 +1074,12 @@ def forward(self, x):
         has_cuda,
         run_cls,
         loop,
+        run_onnx_with_torch_inputs,
     ):
         if r:
             yield r.check(already_yielded)
+    if loop is not None:
+        loop.close()
 
     if verbose:
         print(f"[run_aligned] done with status={status.to_str()}")
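
``_validation_nn_functional`` above relies on the identity behind the lowering
of ``aten.linear``: ``Gemm`` with ``transB=1`` and default ``alpha``/``beta``
computes ``X @ W.T + B``, which is exactly ``torch.nn.functional.linear(X, W, B)``.
A standalone sketch of that equivalence (onnx and onnxruntime assumed installed):

    import numpy as np
    import onnx
    import onnx.helper as oh
    import onnxruntime
    import torch

    # a single Gemm node with transB=1, the pattern _validation_nn_functional matches
    model = oh.make_model(
        oh.make_graph(
            [oh.make_node("Gemm", ["X", "W", "B"], ["Y"], transB=1)],
            "g",
            [
                oh.make_tensor_value_info("X", onnx.TensorProto.FLOAT, [5, 10]),
                oh.make_tensor_value_info("W", onnx.TensorProto.FLOAT, [32, 10]),
                oh.make_tensor_value_info("B", onnx.TensorProto.FLOAT, [32]),
            ],
            [oh.make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [5, 32])],
        ),
        opset_imports=[oh.make_opsetid("", 18)],
    )
    sess = onnxruntime.InferenceSession(
        model.SerializeToString(), providers=["CPUExecutionProvider"]
    )
    x, w, b = (np.random.rand(*s).astype(np.float32) for s in ((5, 10), (32, 10), (32,)))
    got = sess.run(None, {"X": x, "W": w, "B": b})[0]
    expected = torch.nn.functional.linear(*map(torch.from_numpy, (x, w, b)))
    print(abs(got - expected.numpy()).max())  # tiny, around 1e-6 on CPU
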
diff --git a/onnx_diagnostic/torch_onnx/sbs_dataclasses.py b/onnx_diagnostic/torch_onnx/sbs_dataclasses.py
index 74ac8e13..234e424d 100644
--- a/onnx_diagnostic/torch_onnx/sbs_dataclasses.py
+++ b/onnx_diagnostic/torch_onnx/sbs_dataclasses.py
@@ -1,7 +1,7 @@
 import os
 import textwrap
 from dataclasses import dataclass
-from typing import Any, Dict, Optional, Set, Tuple
+from typing import Any, Dict, List, Optional, Set, Tuple
 
 try:
     from typing import Self
@@ -15,6 +15,40 @@
 from ..helpers.torch_helper import torch_dtype_to_onnx_dtype
 
 
+def make_torch_inputs(
+    input_names: List[str],
+    onnx_name_to_ep_name: Dict[str, str],
+    onnx_results: Dict[str, torch.Tensor],
+    torch_results: Dict[str, torch.Tensor],
+    submodel: Optional[onnx.ModelProto],
+) -> Tuple[Dict[str, torch.Tensor], Set[str]]:
+    """
+    Gathers torch tensors instead of onnx tensors (tensors produced by the onnx model).
+
+    :param input_names: tensors to gather
+    :param onnx_name_to_ep_name: mapping from onnx names to names in the exported program
+    :param onnx_results: all onnx results (produced by the onnx model)
+    :param torch_results: all tensors produced by the exported program
+    :param submodel: onnx model, any tensor missing from `torch_results` is
+        added as an initializer to this model
+    :return: the dictionary of tensors and the set of inputs for which no tensor
+        coming from the exported program was found
+    """
+    torch_inputs = {}
+    removed_inputs = set()
+    for n in input_names:
+        if n in onnx_name_to_ep_name:
+            torch_inputs[n] = torch_results[onnx_name_to_ep_name[n]]
+        else:
+            removed_inputs.add(n)
+            if submodel is not None:
+                # We add that input as an initializer because it is probably a constant.
+                submodel.graph.initializer.append(from_array_extended(onnx_results[n], name=n))
+            else:
+                torch_inputs[n] = onnx_results[n]
+    return torch_inputs, removed_inputs
+
+
 @dataclass
 class ReplayConfiguration:
     """
@@ -171,6 +205,17 @@ def get_replay_code(self) -> str:
             print()
             print("-- end --")
             print()
+
+            if False:
+                # CUDA profiling, flip False to True to enable it.
+                with torch.profiler.profile(
+                    activities=[torch.profiler.ProfilerActivity.CUDA],
+                    record_shapes=True,
+                    with_stack=True,
+                ) as prof:
+                    sess.run(None, ep_feeds)
+                obj = prof.key_averages()
+                print(obj.table())
             """
         )
 
@@ -231,15 +276,9 @@ def dump(
 
         if verbose:
             print(f"[ReplayConfiguration.dump] dumps into folder {folder!r}")
-        torch_inputs = {}
-        removed_inputs = set()
-        for n in input_names:
-            if n in onnx_name_to_ep_name:
-                torch_inputs[n] = torch_results[onnx_name_to_ep_name[n]]
-            else:
-                # We add that input as an initializer because it is probably a constant.
-                submodel.graph.initializer.append(from_array_extended(onnx_results[n], name=n))
-                removed_inputs.add(n)
+        torch_inputs, removed_inputs = make_torch_inputs(
+            input_names, onnx_name_to_ep_name, onnx_results, torch_results, submodel
+        )
 
         if removed_inputs:
             input_names = [i for i in input_names if i not in removed_inputs]
@@ -301,9 +340,17 @@ class RunAlignedRecord:
     :param err_dev: 0 if the device is the same, 1 if not
     :param err_nan: number of nan values disagreeing
     :param err_h01: number of values for which the discrepancy is above 0.1
+    :param err_h001: number of values for which the discrepancy is above 0.01
    :param ep_time_run: execution time for the exported program
     :param onnx_time_run: execution time for the onnx model, that includes the creation
         of the onnx model so that's probably not very usable
+    :param err_abs2: same as `err_abs` when the onnx kernel is run with torch results
+    :param err_rel2: same as `err_rel` when the onnx kernel is run with torch results
+    :param err_dev2: same as `err_dev` when the onnx kernel is run with torch results
+    :param err_nan2: same as `err_nan` when the onnx kernel is run with torch results
+    :param err_h012: same as `err_h01` when the onnx kernel is run with torch results
+    :param err_h0012: same as `err_h001` when the onnx kernel is run with torch results
+    :param comment: any additional information
     """
 
     ep_id_node: Optional[int] = None
@@ -320,8 +367,16 @@
     err_dev: Optional[float] = None
     err_nan: Optional[float] = None
     err_h01: Optional[float] = None
+    err_h001: Optional[float] = None
     ep_time_run: Optional[float] = None
     onnx_time_run: Optional[float] = None
+    err_abs2: Optional[float] = None
+    err_rel2: Optional[float] = None
+    err_dev2: Optional[float] = None
+    err_nan2: Optional[float] = None
+    err_h012: Optional[float] = None
+    err_h0012: Optional[float] = None
+    comment: Optional[str] = None
 
     def __post_init__(self):
         "Validation."
@@ -344,6 +399,24 @@ def set_diff(self, diff: Dict[str, Any]) -> Self:
             self.err_nan = diff["nan"]
         if "rep" in diff:
             self.err_h01 = diff["rep"][">0.1"]
+            self.err_h001 = diff["rep"][">0.01"]
+        return self
+
+    def set_diff2(self, diff: Dict[str, Any]) -> Self:
+        """Sets the errors measured on the second run."""
+        if diff is None:
+            return self
+        if "abs" in diff:
+            self.err_abs2 = diff["abs"]
+        if "rel" in diff:
+            self.err_rel2 = diff["rel"]
+        if "dev" in diff:
+            self.err_dev2 = diff["dev"]
+        if "nan" in diff:
+            self.err_nan2 = diff["nan"]
+        if "rep" in diff:
+            self.err_h012 = diff["rep"][">0.1"]
+            self.err_h0012 = diff["rep"][">0.01"]
         return self
 
     @property
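
``make_torch_inputs`` can also be used on its own when investigating a single
node. A hedged sketch with made-up names, following the behavior implemented
above: when no submodel is given, an input without a torch counterpart falls
back to the onnx tensor and is reported in the second return value.

    import torch
    from onnx_diagnostic.torch_onnx.sbs_dataclasses import make_torch_inputs

    onnx_results = {"linear_out": torch.ones(2, 3), "weight": torch.eye(3)}
    torch_results = {"linear": torch.ones(2, 3) * 1.0001}
    mapping = {"linear_out": "linear"}  # onnx name -> exported program name

    feeds, removed = make_torch_inputs(
        ["linear_out", "weight"], mapping, onnx_results, torch_results, submodel=None
    )
    print(feeds["linear_out"] is torch_results["linear"])  # True
    print(removed)  # {'weight'}: no torch tensor, the onnx one is kept instead
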