Commit 109ac29

Revert "Back out "Support Half/BFloat16 in max_pool2d (#7829)""
This reverts commit 25395b9. We should be able to land these internally now.

ghstack-source-id: 6609cc7
ghstack-comment-id: 2617009097
Pull Request resolved: #7978
1 parent: 16889b0

File tree: 2 files changed (+73, -70 lines)

kernels/portable/cpu/op_max_pool2d_with_indices.cpp

Lines changed: 1 addition & 1 deletion

@@ -70,7 +70,7 @@ std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out(
       ret_val);
 
   ScalarType in_type = in.scalar_type();
-  ET_SWITCH_REAL_TYPES(
+  ET_SWITCH_REALHBF16_TYPES(
       in_type, ctx, "max_pool2d_with_indices.out", CTYPE, [&]() {
         apply_kernel_2d_reduce_then_map_fn<CTYPE>(
             [](const CTYPE in_val,
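The only kernel change is the dtype dispatch: ET_SWITCH_REAL_TYPES covers the standard real scalar types, while ET_SWITCH_REALHBF16_TYPES also instantiates the pooling lambda for Half and BFloat16. As rough orientation, the sketch below shows how such a switch-style dispatch macro can be structured; all SKETCH_-prefixed names, the simplified ScalarType enum, and the placeholder Half/BFloat16 structs are hypothetical, and the real ExecuTorch macro differs in detail (for example, it reports unsupported dtypes through the kernel context).

// Minimal sketch, for illustration only, of a switch-over-scalar-types
// dispatch. The real ET_SWITCH_REALHBF16_TYPES in ExecuTorch differs.
#include <cstdio>

enum class ScalarType { Float, Double, Half, BFloat16 /* , ... */ };

// Stand-ins for ExecuTorch's half-precision element types.
struct Half { unsigned short bits; };
struct BFloat16 { unsigned short bits; };

// One case: bind the alias to the matching C++ type and run the body.
#define SKETCH_CASE(ENUM, CTYPE, ALIAS, ...) \
  case ScalarType::ENUM: {                   \
    using ALIAS = CTYPE;                     \
    __VA_ARGS__();                           \
    break;                                   \
  }

// "REALHBF16" = the real dtypes plus Half and BFloat16; a plain REAL
// variant would simply omit the last two cases.
#define SKETCH_SWITCH_REALHBF16_TYPES(TYPE, ALIAS, ...) \
  switch (TYPE) {                                       \
    SKETCH_CASE(Float, float, ALIAS, __VA_ARGS__)       \
    SKETCH_CASE(Double, double, ALIAS, __VA_ARGS__)     \
    SKETCH_CASE(Half, Half, ALIAS, __VA_ARGS__)         \
    SKETCH_CASE(BFloat16, BFloat16, ALIAS, __VA_ARGS__) \
    default:                                            \
      std::printf("unsupported dtype\n");               \
      break;                                            \
  }

int main() {
  ScalarType in_type = ScalarType::Half;
  SKETCH_SWITCH_REALHBF16_TYPES(in_type, CTYPE, [&]() {
    // In the kernel, the pooling lambda is instantiated with CTYPE bound
    // to the element type that matches in_type at runtime.
    std::printf("dispatched, sizeof(CTYPE) = %zu\n", sizeof(CTYPE));
  });
  return 0;
}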

kernels/test/op_max_pool2d_with_indices_test.cpp

Lines changed: 72 additions & 69 deletions

@@ -40,78 +40,81 @@ class OpMaxPool2DWithIndicesOutTest : public OperatorTest {
         out,
         indices);
   }
+
+  template <executorch::aten::ScalarType DTYPE>
+  void test_4d_dtype() {
+    torch::executor::testing::TensorFactory<DTYPE> tf;
+    torch::executor::testing::TensorFactory<executorch::aten::ScalarType::Long> tfLong;
+
+    executorch::aten::Tensor self = tf.make(
+        {2, 3, 5, 5},
+        {28.75, -38.875, -7.0, -13.5, 70.75, 53.75, 69.625, 97.375,
+         25.375, 99.5, -72.125, -87.25, 79.25, 42.0, -24.75, -15.5,
+         12.5, -86.0, 85.5, -0.25, 67.125, 77.0, 53.375, -61.125,
+         50.0, 3.875, 42.25, -37.375, 51.0, -60.875, 87.0, 32.25,
+         73.5, 68.875, -84.375, -98.75, -30.125, 94.25, 1.625, -86.25,
+         -56.5, -68.0, 74.25, -51.25, 8.125, 71.375, -53.125, 4.875,
+         77.5, -89.875, 4.5, -46.5, -46.375, -92.625, -85.5, -23.0,
+         -8.875, -12.0, -46.625, -88.625, 66.75, 87.75, 90.25, -45.0,
+         -78.125, 63.25, 28.75, 28.125, -30.375, 17.75, -16.0, 5.0,
+         11.125, 88.625, -47.625, 72.25, 32.0, -7.625, 61.625, -63.125,
+         -22.75, 83.125, -40.375, -78.25, 49.5, -39.125, -89.625, 47.875,
+         -61.375, 7.75, 16.875, -96.375, -22.5, 8.5, 74.25, 12.75,
+         90.125, 73.875, -71.75, -10.0, 41.25, 1.125, 10.375, -34.625,
+         29.75, -27.5, 26.625, 81.0, -8.875, 17.625, 84.375, -23.625,
+         -53.875, -26.0, -67.375, -90.75, 16.375, 45.625, 99.5, 56.25,
+         -87.625, -65.5, -79.75, 31.875, 79.75, 6.375, 44.625, -55.25,
+         -5.5, -68.875, -38.625, 54.125, -3.125, 5.75, 29.25, -39.5,
+         26.75, 68.25, -24.625, -53.0, 51.0, 90.625, 65.375, 43.875,
+         90.875, -41.625, 99.875, 6.375, -31.25, -94.0});
+    ::std::vector<int64_t> kernel_size_vec = {2, 2};
+    executorch::aten::ArrayRef<int64_t> kernel_size = executorch::aten::ArrayRef<int64_t>(
+        kernel_size_vec.data(), kernel_size_vec.size());
+    ::std::vector<int64_t> stride_vec = {1, 1};
+    executorch::aten::ArrayRef<int64_t> stride =
+        executorch::aten::ArrayRef<int64_t>(stride_vec.data(), stride_vec.size());
+    ::std::vector<int64_t> padding_vec = {0, 0};
+    executorch::aten::ArrayRef<int64_t> padding =
+        executorch::aten::ArrayRef<int64_t>(padding_vec.data(), padding_vec.size());
+    ::std::vector<int64_t> dilation_vec = {1, 1};
+    executorch::aten::ArrayRef<int64_t> dilation =
+        executorch::aten::ArrayRef<int64_t>(dilation_vec.data(), dilation_vec.size());
+    bool ceil_mode = false;
+    executorch::aten::Tensor out = tf.zeros({2, 3, 4, 4});
+    executorch::aten::Tensor indices = tfLong.zeros({2, 3, 4, 4});
+    executorch::aten::Tensor out_expected = tf.make(
+        {2, 3, 4, 4},
+        {69.625, 97.375, 97.375, 99.5, 69.625, 97.375, 97.375, 99.5,
+         12.5, 79.25, 85.5, 85.5, 77.0, 77.0, 85.5, 85.5,
+         87.0, 73.5, 73.5, 68.875, 87.0, 94.25, 94.25, 68.875,
+         -30.125, 94.25, 94.25, 8.125, 71.375, 74.25, 77.5, 77.5,
+         4.5, -8.875, -12.0, -46.625, 87.75, 90.25, 90.25, -45.0,
+         87.75, 90.25, 90.25, 17.75, 63.25, 28.75, 88.625, 88.625,
+         83.125, 83.125, 61.625, 61.625, 83.125, 83.125, 47.875, 49.5,
+         16.875, 47.875, 47.875, 74.25, 90.125, 90.125, 73.875, 74.25,
+         41.25, 81.0, 81.0, 29.75, 84.375, 81.0, 81.0, 17.625,
+         84.375, 45.625, 99.5, 99.5, 16.375, 45.625, 99.5, 99.5,
+         54.125, 54.125, 5.75, 29.25, 54.125, 68.25, 68.25, 29.25,
+         90.625, 90.625, 68.25, 90.875, 99.875, 99.875, 65.375, 90.875});
+    executorch::aten::Tensor indices_expected = tfLong.make(
+        {2, 3, 4, 4},
+        {6, 7, 7, 9, 6, 7, 7, 9, 16, 12, 18, 18, 21, 21, 18, 18,
+         5, 7, 7, 8, 5, 12, 12, 8, 11, 12, 12, 19, 20, 17, 23, 23,
+         0, 6, 7, 8, 11, 12, 12, 13, 11, 12, 12, 19, 15, 16, 23, 23,
+         6, 6, 3, 3, 6, 6, 12, 9, 15, 12, 12, 19, 21, 21, 22, 19,
+         0, 7, 7, 4, 10, 7, 7, 9, 10, 17, 18, 18, 16, 17, 18, 18,
+         6, 6, 8, 9, 6, 12, 12, 9, 16, 16, 12, 19, 21, 21, 17, 19});
+    op_max_pool2d_with_indices_out(
+        self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
+    EXPECT_TENSOR_CLOSE(out, out_expected);
+    EXPECT_TENSOR_CLOSE(indices, indices_expected);
+  }
 };
 
 TEST_F(OpMaxPool2DWithIndicesOutTest, SanityTest4D) {
-  torch::executor::testing::TensorFactory<executorch::aten::ScalarType::Float>
-      tfFloat;
-  torch::executor::testing::TensorFactory<executorch::aten::ScalarType::Long>
-      tfLong;
-
-  executorch::aten::Tensor self = tfFloat.make(
-      {2, 3, 5, 5},
-      {28.75, -38.875, -7.0, -13.5, 70.75, 53.75, 69.625, 97.375,
-       25.375, 99.5, -72.125, -87.25, 79.25, 42.0, -24.75, -15.5,
-       12.5, -86.0, 85.5, -0.25, 67.125, 77.0, 53.375, -61.125,
-       50.0, 3.875, 42.25, -37.375, 51.0, -60.875, 87.0, 32.25,
-       73.5, 68.875, -84.375, -98.75, -30.125, 94.25, 1.625, -86.25,
-       -56.5, -68.0, 74.25, -51.25, 8.125, 71.375, -53.125, 4.875,
-       77.5, -89.875, 4.5, -46.5, -46.375, -92.625, -85.5, -23.0,
-       -8.875, -12.0, -46.625, -88.625, 66.75, 87.75, 90.25, -45.0,
-       -78.125, 63.25, 28.75, 28.125, -30.375, 17.75, -16.0, 5.0,
-       11.125, 88.625, -47.625, 72.25, 32.0, -7.625, 61.625, -63.125,
-       -22.75, 83.125, -40.375, -78.25, 49.5, -39.125, -89.625, 47.875,
-       -61.375, 7.75, 16.875, -96.375, -22.5, 8.5, 74.25, 12.75,
-       90.125, 73.875, -71.75, -10.0, 41.25, 1.125, 10.375, -34.625,
-       29.75, -27.5, 26.625, 81.0, -8.875, 17.625, 84.375, -23.625,
-       -53.875, -26.0, -67.375, -90.75, 16.375, 45.625, 99.5, 56.25,
-       -87.625, -65.5, -79.75, 31.875, 79.75, 6.375, 44.625, -55.25,
-       -5.5, -68.875, -38.625, 54.125, -3.125, 5.75, 29.25, -39.5,
-       26.75, 68.25, -24.625, -53.0, 51.0, 90.625, 65.375, 43.875,
-       90.875, -41.625, 99.875, 6.375, -31.25, -94.0});
-  ::std::vector<int64_t> kernel_size_vec = {2, 2};
-  executorch::aten::ArrayRef<int64_t> kernel_size =
-      executorch::aten::ArrayRef<int64_t>(
-          kernel_size_vec.data(), kernel_size_vec.size());
-  ::std::vector<int64_t> stride_vec = {1, 1};
-  executorch::aten::ArrayRef<int64_t> stride =
-      executorch::aten::ArrayRef<int64_t>(stride_vec.data(), stride_vec.size());
-  ::std::vector<int64_t> padding_vec = {0, 0};
-  executorch::aten::ArrayRef<int64_t> padding =
-      executorch::aten::ArrayRef<int64_t>(
-          padding_vec.data(), padding_vec.size());
-  ::std::vector<int64_t> dilation_vec = {1, 1};
-  executorch::aten::ArrayRef<int64_t> dilation =
-      executorch::aten::ArrayRef<int64_t>(
-          dilation_vec.data(), dilation_vec.size());
-  bool ceil_mode = false;
-  executorch::aten::Tensor out = tfFloat.zeros({2, 3, 4, 4});
-  executorch::aten::Tensor indices = tfLong.zeros({2, 3, 4, 4});
-  executorch::aten::Tensor out_expected = tfFloat.make(
-      {2, 3, 4, 4},
-      {69.625, 97.375, 97.375, 99.5, 69.625, 97.375, 97.375, 99.5, 12.5,
-       79.25, 85.5, 85.5, 77.0, 77.0, 85.5, 85.5, 87.0, 73.5,
-       73.5, 68.875, 87.0, 94.25, 94.25, 68.875, -30.125, 94.25, 94.25,
-       8.125, 71.375, 74.25, 77.5, 77.5, 4.5, -8.875, -12.0, -46.625,
-       87.75, 90.25, 90.25, -45.0, 87.75, 90.25, 90.25, 17.75, 63.25,
-       28.75, 88.625, 88.625, 83.125, 83.125, 61.625, 61.625, 83.125, 83.125,
-       47.875, 49.5, 16.875, 47.875, 47.875, 74.25, 90.125, 90.125, 73.875,
-       74.25, 41.25, 81.0, 81.0, 29.75, 84.375, 81.0, 81.0, 17.625,
-       84.375, 45.625, 99.5, 99.5, 16.375, 45.625, 99.5, 99.5, 54.125,
-       54.125, 5.75, 29.25, 54.125, 68.25, 68.25, 29.25, 90.625, 90.625,
-       68.25, 90.875, 99.875, 99.875, 65.375, 90.875});
-  executorch::aten::Tensor indices_expected = tfLong.make(
-      {2, 3, 4, 4},
-      {6, 7, 7, 9, 6, 7, 7, 9, 16, 12, 18, 18, 21, 21, 18, 18,
-       5, 7, 7, 8, 5, 12, 12, 8, 11, 12, 12, 19, 20, 17, 23, 23,
-       0, 6, 7, 8, 11, 12, 12, 13, 11, 12, 12, 19, 15, 16, 23, 23,
-       6, 6, 3, 3, 6, 6, 12, 9, 15, 12, 12, 19, 21, 21, 22, 19,
-       0, 7, 7, 4, 10, 7, 7, 9, 10, 17, 18, 18, 16, 17, 18, 18,
-       6, 6, 8, 9, 6, 12, 12, 9, 16, 16, 12, 19, 21, 21, 17, 19});
-  op_max_pool2d_with_indices_out(
-      self, kernel_size, stride, padding, dilation, ceil_mode, out, indices);
-  EXPECT_TENSOR_CLOSE(out, out_expected);
-  EXPECT_TENSOR_CLOSE(indices, indices_expected);
+#define TEST_ENTRY(ctype, dtype) test_4d_dtype<executorch::aten::ScalarType::dtype>();
+  ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
+#undef TEST_ENTRY
 }
 
 TEST_F(OpMaxPool2DWithIndicesOutTest, SanityTest4D_2) {
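On the test side, the previously Float-only body of SanityTest4D moves into the dtype-templated helper test_4d_dtype<DTYPE>(), and the test instantiates it once per floating-point dtype, including Half and BFloat16, via ET_FORALL_FLOATHBF16_TYPES. Conceptually, a FORALL-style macro applies a caller-supplied macro to each supported (ctype, dtype) pair. The sketch below is a hypothetical illustration: the SKETCH_ name and the exact type list are assumptions, and the real ExecuTorch macro differs in detail.

// Hypothetical sketch of the FORALL pattern driving the test; the real
// ET_FORALL_FLOATHBF16_TYPES macro in ExecuTorch differs in detail.
#define SKETCH_FORALL_FLOATHBF16_TYPES(_) \
  _(float, Float)                         \
  _(double, Double)                       \
  _(Half, Half)                           \
  _(BFloat16, BFloat16)

// The test defines TEST_ENTRY to call the templated body for one dtype,
// lets the FORALL macro stamp out one call per (ctype, dtype) pair, and
// then undefines it:
//
//   #define TEST_ENTRY(ctype, dtype) \
//     test_4d_dtype<executorch::aten::ScalarType::dtype>();
//   ET_FORALL_FLOATHBF16_TYPES(TEST_ENTRY);
//   #undef TEST_ENTRY
//
// which expands to test_4d_dtype<ScalarType::Float>(),
// test_4d_dtype<ScalarType::Double>(), and so on. EXPECT_TENSOR_CLOSE
// compares with a tolerance rather than exact bit equality, which also
// accommodates the reduced precision of the Half/BFloat16 runs.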
