Commit b32fdcb

Revert "Back out "Support Half/BFloat16 in max_pool2d (#7829)""

This reverts commit 25395b9. We should be able to land these internally now.

ghstack-source-id: 1180c58
ghstack-comment-id: 2617009097
Pull Request resolved: #7978

1 parent dd8da0f

2 files changed: +79 -70 lines

kernels/portable/cpu/op_max_pool2d_with_indices.cpp (1 addition, 1 deletion)

@@ -70,7 +70,7 @@ std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out(
       ret_val);
 
   ScalarType in_type = in.scalar_type();
-  ET_SWITCH_REAL_TYPES(
+  ET_SWITCH_REALHBF16_TYPES(
       in_type, ctx, "max_pool2d_with_indices.out", CTYPE, [&]() {
         apply_kernel_2d_reduce_then_map_fn<CTYPE>(
             [](const CTYPE in_val,
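
The only functional change in the kernel is the dispatch macro: swapping ET_SWITCH_REAL_TYPES for ET_SWITCH_REALHBF16_TYPES compiles the same kernel lambda for Half and BFloat16 in addition to the real dtypes it already covered. The per-window reduction only needs CTYPE to support copy and ordered comparison, which is why no other kernel changes are needed. Below is a minimal, self-contained sketch of that reduce-then-map step to illustrate the point; it is not ExecuTorch's apply_kernel_2d_reduce_then_map_fn, and max_in_window is a hypothetical name used only for this sketch.

#include <cstdint>
#include <utility>
#include <vector>

// Sketch only: find the max value and its flat index within one pooling
// window, the same per-output-element reduction shape the portable kernel
// uses. Any CTYPE with copy construction and operator> works here, which
// covers Half and BFloat16 as well as the built-in real types.
template <typename CTYPE>
std::pair<CTYPE, int64_t> max_in_window(const std::vector<CTYPE>& window) {
  CTYPE best = window[0];
  int64_t best_idx = 0;
  for (int64_t i = 1; i < static_cast<int64_t>(window.size()); ++i) {
    if (window[i] > best) {
      best = window[i];
      best_idx = i;
    }
  }
  return {best, best_idx};
}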

kernels/test/op_max_pool2d_with_indices_test.cpp (78 additions, 69 deletions)

@@ -40,78 +40,87 @@ class OpMaxPool2DWithIndicesOutTest : public OperatorTest {
         out,
         indices);
   }
+
+  template <executorch::aten::ScalarType DTYPE>
+  void test_4d_dtype() {
+    torch::executor::testing::TensorFactory<DTYPE> tf;
+    torch::executor::testing::TensorFactory<executorch::aten::ScalarType::Long>
+        tfLong;
+
+    executorch::aten::Tensor self = tf.make(
+        {2, 3, 5, 5},
+        {28.75, -38.875, -7.0, -13.5, 70.75, 53.75, 69.625, 97.375,
+         25.375, 99.5, -72.125, -87.25, 79.25, 42.0, -24.75, -15.5,
+         12.5, -86.0, 85.5, -0.25, 67.125, 77.0, 53.375, -61.125,
+         50.0, 3.875, 42.25, -37.375, 51.0, -60.875, 87.0, 32.25,
+         73.5, 68.875, -84.375, -98.75, -30.125, 94.25, 1.625, -86.25,
+         -56.5, -68.0, 74.25, -51.25, 8.125, 71.375, -53.125, 4.875,
+         77.5, -89.875, 4.5, -46.5, -46.375, -92.625, -85.5, -23.0,
+         -8.875, -12.0, -46.625, -88.625, 66.75, 87.75, 90.25, -45.0,
+         -78.125, 63.25, 28.75, 28.125, -30.375, 17.75, -16.0, 5.0,
+         11.125, 88.625, -47.625, 72.25, 32.0, -7.625, 61.625, -63.125,
+         -22.75, 83.125, -40.375, -78.25, 49.5, -39.125, -89.625, 47.875,
+         -61.375, 7.75, 16.875, -96.375, -22.5, 8.5, 74.25, 12.75,
+         90.125, 73.875, -71.75, -10.0, 41.25, 1.125, 10.375, -34.625,
+         29.75, -27.5, 26.625, 81.0, -8.875, 17.625, 84.375, -23.625,
+         -53.875, -26.0, -67.375, -90.75, 16.375, 45.625, 99.5, 56.25,
+         -87.625, -65.5, -79.75, 31.875, 79.75, 6.375, 44.625, -55.25,
+         -5.5, -68.875, -38.625, 54.125, -3.125, 5.75, 29.25, -39.5,
+         26.75, 68.25, -24.625, -53.0, 51.0, 90.625, 65.375, 43.875,
+         90.875, -41.625, 99.875, 6.375, -31.25, -94.0});
+    ::std::vector<int64_t> kernel_size_vec = {2, 2};
+    executorch::aten::ArrayRef<int64_t> kernel_size =
+        executorch::aten::ArrayRef<int64_t>(
+            kernel_size_vec.data(), kernel_size_vec.size());
+    ::std::vector<int64_t> stride_vec = {1, 1};
+    executorch::aten::ArrayRef<int64_t> stride =
+        executorch::aten::ArrayRef<int64_t>(
+            stride_vec.data(), stride_vec.size());
+    ::std::vector<int64_t> padding_vec = {0, 0};
+    executorch::aten::ArrayRef<int64_t> padding =
+        executorch::aten::ArrayRef<int64_t>(
+            padding_vec.data(), padding_vec.size());
+    ::std::vector<int64_t> dilation_vec = {1, 1};
+    executorch::aten::ArrayRef<int64_t> dilation =
+        executorch::aten::ArrayRef<int64_t>(
+            dilation_vec.data(), dilation_vec.size());
+    bool ceil_mode = false;
+    executorch::aten::Tensor out = tf.zeros({2, 3, 4, 4});
+    executorch::aten::Tensor indices = tfLong.zeros({2, 3, 4, 4});
+    executorch::aten::Tensor out_expected = tf.make(
+        {2, 3, 4, 4},
+        {69.625, 97.375, 97.375, 99.5, 69.625, 97.375, 97.375, 99.5,
+         12.5, 79.25, 85.5, 85.5, 77.0, 77.0, 85.5, 85.5,
+         87.0, 73.5, 73.5, 68.875, 87.0, 94.25, 94.25, 68.875,
+         -30.125, 94.25, 94.25, 8.125, 71.375, 74.25, 77.5, 77.5,
+         4.5, -8.875, -12.0, -46.625, 87.75, 90.25, 90.25, -45.0,
+         87.75, 90.25, 90.25, 17.75, 63.25, 28.75, 88.625, 88.625,
+         83.125, 83.125, 61.625, 61.625, 83.125, 83.125, 47.875, 49.5,
+         16.875, 47.875, 47.875, 74.25, 90.125, 90.125, 73.875, 74.25,
+         41.25, 81.0, 81.0, 29.75, 84.375, 81.0, 81.0, 17.625,
+         84.375, 45.625, 99.5, 99.5, 16.375, 45.625, 99.5, 99.5,
+         54.125, 54.125, 5.75, 29.25, 54.125, 68.25, 68.25, 29.25,
+         90.625, 90.625, 68.25, 90.875, 99.875, 99.875, 65.375, 90.875});
+    executorch::aten::Tensor indices_expected = tfLong.make(
+        {2, 3, 4, 4},
+        {6, 7, 7, 9, 6, 7, 7, 9, 16, 12, 18, 18, 21, 21, 18, 18,
+         5, 7, 7, 8, 5, 12, 12, 8, 11, 12, 12, 19, 20, 17, 23, 23,
+         0, 6, 7, 8, 11, 12, 12, 13, 11, 12, 12, 19, 15, 16, 23, 23,
+         6, 6, 3, 3, 6, 6, 12, 9, 15, 12, 12, 19, 21, 21, 22, 19,
+         0, 7, 7, 4, 10, 7, 7, 9, 10, 17, 18, 18, 16, 17, 18, 18,
+         6, 6, 8, 9, 6, 12, 12, 9, 16, 16, 12, 19, 21, 21, 17, 19});
+    op_max_pool2d_with_indices_out(
+        self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
+    EXPECT_TENSOR_CLOSE(out, out_expected);
+    EXPECT_TENSOR_CLOSE(indices, indices_expected);
+  }
 };
 
 TEST_F(OpMaxPool2DWithIndicesOutTest, SanityTest4D) {
-  torch::executor::testing::TensorFactory<executorch::aten::ScalarType::Float>
-      tfFloat;
-  torch::executor::testing::TensorFactory<executorch::aten::ScalarType::Long>
-      tfLong;
-
-  executorch::aten::Tensor self = tfFloat.make(
-      {2, 3, 5, 5},
-      {28.75, -38.875, -7.0, -13.5, 70.75, 53.75, 69.625, 97.375,
-       25.375, 99.5, -72.125, -87.25, 79.25, 42.0, -24.75, -15.5,
-       12.5, -86.0, 85.5, -0.25, 67.125, 77.0, 53.375, -61.125,
-       50.0, 3.875, 42.25, -37.375, 51.0, -60.875, 87.0, 32.25,
-       73.5, 68.875, -84.375, -98.75, -30.125, 94.25, 1.625, -86.25,
-       -56.5, -68.0, 74.25, -51.25, 8.125, 71.375, -53.125, 4.875,
-       77.5, -89.875, 4.5, -46.5, -46.375, -92.625, -85.5, -23.0,
-       -8.875, -12.0, -46.625, -88.625, 66.75, 87.75, 90.25, -45.0,
-       -78.125, 63.25, 28.75, 28.125, -30.375, 17.75, -16.0, 5.0,
-       11.125, 88.625, -47.625, 72.25, 32.0, -7.625, 61.625, -63.125,
-       -22.75, 83.125, -40.375, -78.25, 49.5, -39.125, -89.625, 47.875,
-       -61.375, 7.75, 16.875, -96.375, -22.5, 8.5, 74.25, 12.75,
-       90.125, 73.875, -71.75, -10.0, 41.25, 1.125, 10.375, -34.625,
-       29.75, -27.5, 26.625, 81.0, -8.875, 17.625, 84.375, -23.625,
-       -53.875, -26.0, -67.375, -90.75, 16.375, 45.625, 99.5, 56.25,
-       -87.625, -65.5, -79.75, 31.875, 79.75, 6.375, 44.625, -55.25,
-       -5.5, -68.875, -38.625, 54.125, -3.125, 5.75, 29.25, -39.5,
-       26.75, 68.25, -24.625, -53.0, 51.0, 90.625, 65.375, 43.875,
-       90.875, -41.625, 99.875, 6.375, -31.25, -94.0});
-  ::std::vector<int64_t> kernel_size_vec = {2, 2};
-  executorch::aten::ArrayRef<int64_t> kernel_size =
-      executorch::aten::ArrayRef<int64_t>(
-          kernel_size_vec.data(), kernel_size_vec.size());
-  ::std::vector<int64_t> stride_vec = {1, 1};
-  executorch::aten::ArrayRef<int64_t> stride =
-      executorch::aten::ArrayRef<int64_t>(stride_vec.data(), stride_vec.size());
-  ::std::vector<int64_t> padding_vec = {0, 0};
-  executorch::aten::ArrayRef<int64_t> padding =
-      executorch::aten::ArrayRef<int64_t>(
-          padding_vec.data(), padding_vec.size());
-  ::std::vector<int64_t> dilation_vec = {1, 1};
-  executorch::aten::ArrayRef<int64_t> dilation =
-      executorch::aten::ArrayRef<int64_t>(
-          dilation_vec.data(), dilation_vec.size());
-  bool ceil_mode = false;
-  executorch::aten::Tensor out = tfFloat.zeros({2, 3, 4, 4});
-  executorch::aten::Tensor indices = tfLong.zeros({2, 3, 4, 4});
-  executorch::aten::Tensor out_expected = tfFloat.make(
-      {2, 3, 4, 4},
-      {69.625, 97.375, 97.375, 99.5, 69.625, 97.375, 97.375, 99.5, 12.5,
-       79.25, 85.5, 85.5, 77.0, 77.0, 85.5, 85.5, 87.0, 73.5,
-       73.5, 68.875, 87.0, 94.25, 94.25, 68.875, -30.125, 94.25, 94.25,
-       8.125, 71.375, 74.25, 77.5, 77.5, 4.5, -8.875, -12.0, -46.625,
-       87.75, 90.25, 90.25, -45.0, 87.75, 90.25, 90.25, 17.75, 63.25,
-       28.75, 88.625, 88.625, 83.125, 83.125, 61.625, 61.625, 83.125, 83.125,
-       47.875, 49.5, 16.875, 47.875, 47.875, 74.25, 90.125, 90.125, 73.875,
-       74.25, 41.25, 81.0, 81.0, 29.75, 84.375, 81.0, 81.0, 17.625,
-       84.375, 45.625, 99.5, 99.5, 16.375, 45.625, 99.5, 99.5, 54.125,
-       54.125, 5.75, 29.25, 54.125, 68.25, 68.25, 29.25, 90.625, 90.625,
-       68.25, 90.875, 99.875, 99.875, 65.375, 90.875});
-  executorch::aten::Tensor indices_expected = tfLong.make(
-      {2, 3, 4, 4},
-      {6, 7, 7, 9, 6, 7, 7, 9, 16, 12, 18, 18, 21, 21, 18, 18,
-       5, 7, 7, 8, 5, 12, 12, 8, 11, 12, 12, 19, 20, 17, 23, 23,
-       0, 6, 7, 8, 11, 12, 12, 13, 11, 12, 12, 19, 15, 16, 23, 23,
-       6, 6, 3, 3, 6, 6, 12, 9, 15, 12, 12, 19, 21, 21, 22, 19,
-       0, 7, 7, 4, 10, 7, 7, 9, 10, 17, 18, 18, 16, 17, 18, 18,
-       6, 6, 8, 9, 6, 12, 12, 9, 16, 16, 12, 19, 21, 21, 17, 19});
-  op_max_pool2d_with_indices_out(
-      self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
-  EXPECT_TENSOR_CLOSE(out, out_expected);
-  EXPECT_TENSOR_CLOSE(indices, indices_expected);
+#define TEST_ENTRY(ctype, dtype) \
+  test_4d_dtype<executorch::aten::ScalarType::dtype>();
+  ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
+#undef TEST_ENTRY
 }
 
 TEST_F(OpMaxPool2DWithIndicesOutTest, SanityTest4D_2) {
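
On the test side, the Float-only SanityTest4D body moves into the templated test_4d_dtype fixture method, and the TEST_ENTRY / ET_FORALL_FLOATHBF16_TYPES pair instantiates it once per floating-point dtype. Roughly, the macro invocation expands to something like the following (illustrative only; the exact dtype list is defined by ExecuTorch's ET_FORALL_FLOATHBF16_TYPES and is assumed here to be Float, Double, Half, and BFloat16):

// Assumed expansion of ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY) inside the test body:
test_4d_dtype<executorch::aten::ScalarType::Float>();
test_4d_dtype<executorch::aten::ScalarType::Double>();
test_4d_dtype<executorch::aten::ScalarType::Half>();
test_4d_dtype<executorch::aten::ScalarType::BFloat16>();

Because max pooling only selects input values rather than computing new ones, the Half/BFloat16 outputs are just the rounded inputs, and the expected tensor built by the same TensorFactory rounds identically, so EXPECT_TENSOR_CLOSE holds without loosening tolerances.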
