     exir_ops.edge.aten.upsample_bilinear2d.default,
     exir_ops.edge.aten.mean.dim,
     exir_ops.edge.aten.max.dim,
+    exir_ops.edge.aten.max_pool2d_with_indices.default,
     exir_ops.edge.aten.hardtanh.default,
     exir_ops.edge.aten.sqrt.default,
     exir_ops.edge.aten.ceil.default,
...
     exir_ops.edge.aten.abs.default,
     exir_ops.edge.aten._prelu_kernel.default,
     exir_ops.edge.aten.slice_copy.Tensor,
+    exir_ops.edge.aten.relu.default,
+    exir_ops.edge.aten.hardtanh.default,
+    exir_ops.edge.aten.permute_copy.default,
+    exir_ops.edge.aten.sigmoid.default,
+    exir_ops.edge.aten._softmax.default,
+    exir_ops.edge.aten.cat.default,
+    exir_ops.edge.aten.elu.default,
+    exir_ops.edge.aten.avg_pool2d.default,
+    exir_ops.edge.aten.leaky_relu.default,
 ]
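A minimal sketch of how a partitioner might consult SUPPORTED_OPS when deciding whether to claim a node; this is an illustration only, not code from this file, and `is_node_supported` is a hypothetical helper name:

import torch.fx

def is_node_supported(node: torch.fx.Node) -> bool:
    # Edge-dialect ops appear as the target of call_function nodes,
    # so support is a direct membership test against SUPPORTED_OPS.
    return node.op == "call_function" and node.target in SUPPORTED_OPS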

 SUPPORTED_MODULES = [
     torch.nn.Conv1d,
     # TODO(T161981984) recompose hardswish into a single node
-    torch.nn.Hardswish,
-    torch.nn.Hardsigmoid,
-    torch.nn.Conv2d,
-    torch.nn.ReLU,
-    torch.nn.Sigmoid,
-    torch.nn.Softmax,
-    torch.nn.BatchNorm1d,
+    torch.nn.Hardswish,  # we need to recompose
+    torch.nn.Hardsigmoid,  # we can handle decomposition
     torch.nn.BatchNorm2d,
+    torch.nn.BatchNorm1d,
+    torch.nn.Conv2d,
     torch.nn.Linear,
     torch.nn.functional.linear,
-    torch.nn.Hardtanh,
-    torch.nn.MaxPool2d,
-    torch.nn.LeakyReLU,
-    torch.nn.ELU,
-    torch.nn.AvgPool2d,
     torch.nn.PReLU,  # Without this, the PReLU weight is not captured as a get_attr node
-    torch.cat,
-    torch.concat,
-    torch.concatenate,
 ]
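A minimal sketch of how module-level entries like these could be matched, assuming the exported graph records each node's originating module or function in node.meta under a "source_fn_stack"-style key (that key is an assumption about the export metadata, not something defined in this file):

import torch.fx

def source_type(node: torch.fx.Node):
    # Assumed meta key: "source_fn_stack" holds (name, module-class-or-function) pairs.
    stack = node.meta.get("source_fn_stack")
    return stack[-1][1] if stack else None

def is_module_supported(node: torch.fx.Node) -> bool:
    # Compare the recovered source (e.g. torch.nn.Conv2d) against SUPPORTED_MODULES.
    return source_type(node) in SUPPORTED_MODULES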

 # TODO: delete this and use SUPPORTED_OPS instead once we align fp32 and quant support
 SUPPORTED_QUANT_OPS = [
     exir_ops.edge.aten.add.Tensor,
+    exir_ops.edge.aten.clamp.default,
+    exir_ops.edge.aten.relu.default,
     exir_ops.edge.aten.sub.Tensor,
     exir_ops.edge.aten.mul.Tensor,
     exir_ops.edge.aten.mean.dim,
-    exir_ops.edge.aten.hardtanh.default,  # TODO - which one module or op or both?
+    exir_ops.edge.aten.hardtanh.default,
     exir_ops.edge.aten.slice_copy.Tensor,
+    exir_ops.edge.aten.permute_copy.default,
+    exir_ops.edge.aten.hardtanh.default,
+    exir_ops.edge.aten.mean.dim,
+    exir_ops.edge.aten.cat.default,
+    exir_ops.edge.aten.max_pool2d_with_indices.default,
+    exir_ops.edge.aten.max_pool2d.default,
+    exir_ops.edge.aten.constant_pad_nd.default,
+    exir_ops.edge.aten.elu.default,
+    exir_ops.edge.aten.leaky_relu.default,
 ]

 SUPPORTED_IMPLICIT_Q_DQ_OP_NAMES_SET = {
...
         SUPPORTED_QUANT_OPS
         + [
             exir_ops.edge.aten._to_copy.default,
-            exir_ops.edge.aten.max_pool2d.default,
             exir_ops.edge.aten.linear.default,
         ]
     )
...

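Judging by its name, SUPPORTED_IMPLICIT_Q_DQ_OP_NAMES_SET appears to hold op name strings rather than the op objects themselves; under that assumption, a lookup would convert the node's target the same way, roughly as sketched below (hypothetical helper, not code from this file):

def supports_implicit_q_dq(node) -> bool:
    # Assumes the set stores the strings returned by each op's .name() method,
    # so the node's target is converted to its name before the lookup.
    return (
        node.op == "call_function"
        and node.target.name() in SUPPORTED_IMPLICIT_Q_DQ_OP_NAMES_SET
    )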
 # TODO: delete this and use SUPPORTED_MODULES instead once we align fp32 and quant support
 SUPPORTED_QUANT_MODULES = [
-    torch.clamp,
-    torch.mean,
-    torch.permute,
-    torch.permute_copy,
-    torch.cat,
-    torch.concat,
-    torch.concatenate,
     torch.nn.Linear,
     torch.nn.functional.linear,
     # TODO - T158982884
     # torch.ao.nn.quantized.reference.modules.linear.Linear,
-    torch.nn.MaxPool2d,
     torch.nn.Conv1d,
     torch.nn.functional.conv1d,
     torch.ao.nn.quantized.reference.modules.conv.Conv1d,
     torch.nn.Conv2d,
     torch.nn.functional.conv2d,
-    torch.nn.functional.pad,
-    torch.nn.functional.elu,
     torch.ao.nn.quantized.reference.modules.conv.Conv2d,
     torch.nn.BatchNorm1d,
     torch.nn.BatchNorm2d,
-    torch.nn.ConstantPad2d,
-    torch.nn.ELU,
-    torch.nn.Hardtanh,
-    torch.nn.ReLU,
-    torch.nn.functional.relu,
-    torch.nn.functional.relu_,
-    torch.nn.functional.leaky_relu,
-    torch.nn.functional.leaky_relu_,
-    torch.nn.LeakyReLU,
 ]

 SUPPORTED_IMPLICIT_Q_DQ_MODULES_SET = set(SUPPORTED_QUANT_MODULES)