     register_node_visitor,
 )
 from executorch.backends.arm.operators.operator_validation_utils import (
+    adjust_pooling_pad_if_needed,
     validate_num_inputs,
     validate_same_dtype,
 )
 from executorch.backends.arm.tosa_mapping import TosaArg
 from executorch.backends.arm.tosa_specification import TosaSpecification


-# Similarly to Conv2d, the TOSA spec requires that following is exactly divisible:
-# `(input + 2 * pad - kernel_size) / stride`
-# PyTorch however, does not require this, so as needed, we must adjust the padding.
-def adjust_pad_if_needed(
-    input_size: int, kernel_size: int, stride: int, pad: int
-) -> int:
-    if pad == 0:
-        return pad
-
-    mod_remainder = (input_size + 2 * pad - kernel_size) % stride
-
-    # No need to adjust
-    if mod_remainder == 0:
-        return pad
-
-    return pad - mod_remainder
-
-
 @register_node_visitor
 class MaxPool2dVisitor_0_80(NodeVisitor):
     target = "aten.max_pool2d.default"
@@ -82,13 +65,13 @@ def define_node( |
         pad_size_list = [0, 0, 0, 0]

         # Adjust the padding as necessary
-        pad_size_list[1] = adjust_pad_if_needed(
+        pad_size_list[1] = adjust_pooling_pad_if_needed(
             input_tensor.shape[2],
             kernel_size[0],
             stride[0],
             pad_size_list[1],
         )
-        pad_size_list[3] = adjust_pad_if_needed(
+        pad_size_list[3] = adjust_pooling_pad_if_needed(
             input_tensor.shape[3],
             kernel_size[1],
             stride[1],
@@ -167,13 +150,13 @@ def define_node( |
         pad_size_list = [0, 0, 0, 0]

         # Adjust the padding as necessary
-        pad_size_list[1] = adjust_pad_if_needed(
+        pad_size_list[1] = adjust_pooling_pad_if_needed(
             input_tensor.shape[2],
             kernel_size[0],
             stride[0],
             pad_size_list[1],
         )
-        pad_size_list[3] = adjust_pad_if_needed(
+        pad_size_list[3] = adjust_pooling_pad_if_needed(
             input_tensor.shape[3],
             kernel_size[1],
             stride[1],
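
Note on the padding adjustment: TOSA requires that (input_size + pad_top + pad_bottom - kernel_size) be evenly divisible by stride, while PyTorch's max_pool2d does not, so the bottom/right pad is shrunk by the leftover remainder before the TOSA operator is emitted. The minimal sketch below is for illustration only; it assumes the shared adjust_pooling_pad_if_needed helper in operator_validation_utils behaves like the local adjust_pad_if_needed removed in this diff.

def adjust_pooling_pad_if_needed(
    input_size: int, kernel_size: int, stride: int, pad: int
) -> int:
    """Shrink the bottom/right pad so the pooling window tiles the input exactly.

    Sketch only: assumed to mirror the local helper removed in this diff.
    """
    if pad == 0:
        return pad

    # Remainder left over when using PyTorch's symmetric padding (2 * pad).
    mod_remainder = (input_size + 2 * pad - kernel_size) % stride

    if mod_remainder == 0:
        return pad  # Already evenly divisible; nothing to adjust.

    # Remove the remainder from this side only; the opposite side keeps its pad,
    # so the total padding drops by exactly mod_remainder.
    return pad - mod_remainder


# Worked example: input 8, kernel 3, stride 2, symmetric pad 1.
# (8 + 2*1 - 3) % 2 == 1, so the bottom pad becomes 0 and the effective total
# padding is 1: (8 + 1 - 3) % 2 == 0, giving an output size of 6 / 2 + 1 == 4,
# which matches PyTorch's floor((8 + 2 - 3) / 2) + 1 == 4.
assert adjust_pooling_pad_if_needed(8, 3, 2, 1) == 0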