@@ -976,7 +976,7 @@ def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
976976 return result
977977
978978
979- @register_cadence_pass(CadencePassAttribute(opt_level=1))
979+ @register_cadence_pass(CadencePassAttribute(opt_level=2))
980 980 class ReplaceTrivialConvWithLinear(ExportPass):
981981 """
982982 In nn.Conv1d, the operand shapes are:
@@ -1256,7 +1256,7 @@ def call_operator(
12561256 return self .transpose_dims (new_op , meta , 0 , dim )
12571257
12581258
1259- @register_cadence_pass(CadencePassAttribute(opt_level=1))
1259+ @register_cadence_pass(CadencePassAttribute(opt_level=2))
1260 1260 class ReplaceConvWithIm2RowAndLinear(ExportPass):
12611261 """
12621262 Replace convolution where groups=1 with im2row followed by a linear op.
@@ -1449,7 +1449,7 @@ def call_operator(self, op, args, kwargs, meta):
14491449 )
14501450
14511451
1452- @register_cadence_pass(CadencePassAttribute(opt_level=1))
1452+ @register_cadence_pass(CadencePassAttribute(opt_level=2))
1453 1453 class ReplaceTransposedConvWithLinearPass(ExportPass):
14541454 """
14551455 Replace transposed convolution where groups=1 with transposed_im2row
@@ -1686,7 +1686,7 @@ def call(self, graph_module: torch.fx.GraphModule) -> PassResult:
16861686 return result
16871687
16881688
1689- @register_cadence_pass(CadencePassAttribute(opt_level=1))
1689+ @register_cadence_pass(CadencePassAttribute(opt_level=2))
1690 1690 class ReplaceLinearWithFullyConnectedOpPass(ExportPass):
16911691 """
16921692 If the input of linear/quantized_linear op is a vector, replace it with
0 commit comments