We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 85a0556 · commit ff3ce36 (Copy full SHA for ff3ce36)
backends/qualcomm/quantizer/custom_annotation.py
@@ -22,7 +22,9 @@
22
from torch.fx import Node
23
24
25
-def annotate_matmul_16a8w(gm: torch.fx.GraphModule, traverse_input1=True) -> None:
+def annotate_matmul_16a8w(
26
+ gm: torch.fx.GraphModule, traverse_input1=True
27
+) -> None: # noqa: C901
28
"""
29
This function is specific for matmul op 16a8w.
30
0 commit comments