
Commit b756784

fix
1 parent 3aba9a3 commit b756784

2 files changed: 4 additions, 4 deletions


CHANGELOGS.rst

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@ Change Logs
 * :pr:`311`: use custom and local function to use PackedMultiHeadAttention from onnxruntime
 * :pr:`310`: splits patches into multiple files
 * :pr:`308`: add option --save_ep to dump the exported program as well as torch input
-* :pr:`304`, :pr:`306`, :pr:`316`: improves side-by-side comparison, creates command line sbs
+* :pr:`304`, :pr:`306`, :pr:`316`, :pr:`317`: improves side-by-side comparison, creates command line sbs
 
 0.8.2
 +++++

onnx_diagnostic/torch_onnx/sbs.py

Lines changed: 3 additions & 3 deletions
@@ -450,7 +450,7 @@ def forward(self, x):
        -v 1 --atol=0.1 --rtol=1
    """
    assert callable(run_cls), f"run_cls={run_cls} not a callable"
-   reset_names = set(reset_names) if reset_names else set()
+   reset_names: Set[str] = set(reset_names) if reset_names else set()
    str_kws = dict(with_shape=True, with_device=True)
    has_cuda = any(
        (isinstance(t, torch.Tensor) and t.is_cuda)
@@ -867,13 +867,13 @@ def _gemm_linear(node, feeds, sess):
                t = torch_results[init.name]
                torch_names_to_onnx_names[init.name] = init.name
            elif init.name not in skip_onnx_name and init.name in rev_init_aliases:
-               new_names = [
+               new_names = [  # type: ignore[assignment]
                    k
                    for k in rev_init_aliases[init.name]
                    if k in torch_results and k not in skip_mapping_torch_onnx
                ]
                if new_names and len(new_names) == 1:
-                   new_name = new_names[0]
+                   new_name = new_names[0]  # type: ignore[assignment]
                    t = torch_results[new_name]
                    if (
                        t.shape == tuple(init.dims)
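
Both sbs.py hunks are static-typing fixes rather than behaviour changes: reset_names gains an explicit Set[str] annotation, and the two rebindings of new_names and new_name get targeted "# type: ignore[assignment]" comments so mypy accepts reusing those variable names. Below is a minimal, self-contained sketch of the annotation pattern only; the function name and the usage lines are illustrative, not the project's actual API.

from typing import Iterable, Optional, Set


def normalize_reset_names(reset_names: Optional[Iterable[str]] = None) -> Set[str]:
    # Mirror of the pattern changed at sbs.py line 453: normalize an optional
    # iterable of names into a set, with an explicit annotation so mypy pins
    # the local to Set[str] on both branches of the conditional expression.
    names: Set[str] = set(reset_names) if reset_names else set()
    return names


# Illustrative usage (hypothetical values, not taken from the repository):
print(normalize_reset_names(["k", "k", "v"]))  # {'k', 'v'} (order may vary)
print(normalize_reset_names())                 # set()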
