
Commit 9dfc0d6

Fix crash in op_upsample_bilinear2d_aa

Differential Revision: D81686230
Pull Request resolved: #13945

1 parent 4628f89 · commit 9dfc0d6

3 files changed: +118 −0 lines changed

kernels/portable/cpu/op_upsample_bilinear2d_aa.cpp

Lines changed: 20 additions & 0 deletions
@@ -61,6 +61,20 @@ void compute_aa_weights_for_pixel(

  *num_contributors = std::min(xmax - xmin, static_cast<int64_t>(4));

+  // Ensure we have at least one contributor
+  if (*num_contributors <= 0) {
+    *num_contributors = 1;
+    indices[0] = std::max(
+        static_cast<int64_t>(0),
+        std::min(static_cast<int64_t>(center), input_size - 1));
+    weights[0] = static_cast<T>(1.0);
+    // Clear unused weight slots
+    for (int64_t j = 1; j < 4; ++j) {
+      weights[j] = static_cast<T>(0.0);
+    }
+    return;
+  }
+
  // PyTorch's weight computation
  T total_weight = static_cast<T>(0.0);
  const T invscale = (scale >= static_cast<T>(1.0))
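To see why this guard is needed: when the provided scale hint is wildly inconsistent with the actual tensor sizes (as in the 0.01 / 10.0 case exercised by the tests below), the sampling window for an output pixel can land entirely past the input extent, `xmax - xmin` comes out zero or negative, and `*num_contributors` was previously left non-positive while `indices`/`weights` could remain unset for downstream interpolation. The standalone sketch below reproduces that arithmetic with a simplified window computation; the helper name `toy_aa_window`, the support formula, the equal-weight placeholder, and the assumption that a scales value of 0.01 behaves like a kernel-side ratio of 1 / 0.01 = 100 are all illustrative guesses, not the kernel's exact code.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Simplified stand-in for the contributor-window computation; the real
// kernel also builds triangle-filter weights and normalizes them.
void toy_aa_window(
    int64_t out_idx,
    int64_t input_size,
    double scale, // kernel-style ratio, roughly input_size / output_size (assumption)
    int64_t* num_contributors,
    int64_t indices[4],
    double weights[4]) {
  const double support = (scale >= 1.0) ? scale : 1.0;
  const double center = scale * (out_idx + 0.5);
  const int64_t xmin =
      std::max(static_cast<int64_t>(center - support + 0.5), int64_t{0});
  const int64_t xmax =
      std::min(static_cast<int64_t>(center + support + 0.5), input_size);
  *num_contributors = std::min(xmax - xmin, int64_t{4});

  // Mirrors the fix: if the window collapsed (xmax <= xmin), fall back to a
  // single clamped nearest contributor with weight 1 instead of leaving the
  // arrays unset.
  if (*num_contributors <= 0) {
    *num_contributors = 1;
    indices[0] = std::max(
        int64_t{0}, std::min(static_cast<int64_t>(center), input_size - 1));
    weights[0] = 1.0;
    for (int64_t j = 1; j < 4; ++j) {
      weights[j] = 0.0;
    }
    return;
  }

  // Placeholder weighting so the sketch stays self-contained.
  for (int64_t j = 0; j < *num_contributors; ++j) {
    indices[j] = xmin + j;
    weights[j] = 1.0 / static_cast<double>(*num_contributors);
  }
}

int main() {
  int64_t n = 0;
  int64_t idx[4] = {0};
  double w[4] = {0.0};
  // With an input extent of 7 and an assumed ratio of 100, the window for
  // out_idx = 3 lies far beyond the input, so the fallback path is taken.
  toy_aa_window(/*out_idx=*/3, /*input_size=*/7, /*scale=*/100.0, &n, idx, w);
  std::printf(
      "contributors=%lld index0=%lld weight0=%.1f\n",
      static_cast<long long>(n),
      static_cast<long long>(idx[0]),
      w[0]);
  return 0;
}

With these inputs the sketch prints `contributors=1 index0=6 weight0=1.0`, i.e. a clamped nearest-neighbor fallback rather than a read of uninitialized contributor data.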
@@ -84,6 +98,12 @@ void compute_aa_weights_for_pixel(

    for (int64_t j = 0; j < *num_contributors; ++j) {
      weights[j] /= total_weight;
    }
+  } else {
+    // Fallback: if total weight is 0, set equal weights
+    T equal_weight = static_cast<T>(1.0) / static_cast<T>(*num_contributors);
+    for (int64_t j = 0; j < *num_contributors; ++j) {
+      weights[j] = equal_weight;
+    }
  }

  // Clear unused weight slots
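This second hunk covers the related edge case where the accumulated filter weight is zero, so dividing by `total_weight` would produce NaN or Inf. A minimal sketch of that normalization step with the equal-weight fallback follows; `normalize_or_fallback` is a hypothetical helper for illustration, not a function in the kernel.

#include <cstdio>

// Hypothetical helper: divide by the accumulated weight when it is positive,
// otherwise spread the weight evenly across the contributors.
void normalize_or_fallback(float* weights, int num_contributors) {
  float total_weight = 0.0f;
  for (int j = 0; j < num_contributors; ++j) {
    total_weight += weights[j];
  }
  if (total_weight > 0.0f) {
    for (int j = 0; j < num_contributors; ++j) {
      weights[j] /= total_weight;
    }
  } else {
    // Fallback: avoids 0/0 -> NaN when every contribution is zero.
    const float equal_weight = 1.0f / static_cast<float>(num_contributors);
    for (int j = 0; j < num_contributors; ++j) {
      weights[j] = equal_weight;
    }
  }
}

int main() {
  float w[4] = {0.0f, 0.0f, 0.0f, 0.0f}; // degenerate all-zero weights
  normalize_or_fallback(w, 4);
  std::printf("%.2f %.2f %.2f %.2f\n", w[0], w[1], w[2], w[3]); // 0.25 each
  return 0;
}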

kernels/portable/test/op_upsample_bilinear2d_aa_test.py

Lines changed: 52 additions & 0 deletions
@@ -289,6 +289,58 @@ def test_upsample_bilinear2d_aa_scale_factors_vs_output_size(self):
            # Skip this test if et_test namespace setup issues persist
            print(f"Skipping scale factors test due to: {e}")

+    def test_upsample_bilinear2d_aa_extreme_scale_factors(self):
+        """Test the specific case that exposed the segfault bug with extreme scale factors."""
+        # Create input tensor with same data as C++ test to ensure consistency
+        input_tensor = torch.zeros(8, 2, 7, 1, dtype=torch.float32)
+        for i in range(8 * 2 * 7 * 1):
+            input_tensor.view(-1)[i] = i * 0.1
+
+        # Test the specific case that caused segfault before the fix
+        self.run_upsample_aa_test(
+            input_tensor,
+            output_size=[7, 2],
+            align_corners=False,
+            scale_factors=None,  # Use explicit scale factors via direct call
+            atol=1e-2,  # Relaxed tolerance for extreme scale factors
+        )
+
+        # Also test with direct ExecuTorch call using the extreme scale factors
+        try:
+            et_result = torch.zeros(8, 2, 7, 2, dtype=torch.float32)
+            et_result = torch.ops.et_test._upsample_bilinear2d_aa(
+                input_tensor,
+                [7, 2],  # output_size
+                False,  # align_corners
+                0.010000000000000002,  # scales_h (very small)
+                10.0,  # scales_w (very large)
+                out=et_result,
+            )
+
+            # Verify no NaN or Inf values (the bug would cause these)
+            self.assertFalse(
+                torch.isnan(et_result).any().item(),
+                "Output should not contain NaN values after bug fix",
+            )
+            self.assertFalse(
+                torch.isinf(et_result).any().item(),
+                "Output should not contain Inf values after bug fix",
+            )
+
+            # Verify reasonable output values
+            self.assertTrue(
+                et_result.min().item() >= -100.0,
+                "Output values should be reasonable (not extremely negative)",
+            )
+            self.assertTrue(
+                et_result.max().item() <= 100.0,
+                "Output values should be reasonable (not extremely positive)",
+            )
+
+        except RuntimeError as e:
+            # Skip the direct test if et_test namespace setup issues persist
+            print(f"Skipping direct extreme scale factors test due to: {e}")
+

if __name__ == "__main__":
    unittest.main()

kernels/test/op_upsample_bilinear2d_aa_test.cpp

Lines changed: 46 additions & 0 deletions
@@ -625,3 +625,49 @@ TEST_F(OpUpsampleBilinear2dAAOutTest, TestPrecisionConsistency) {
    EXPECT_EQ(out1_data[i], out2_data[i]);
  }
}
+
+TEST_F(OpUpsampleBilinear2dAAOutTest, TestSpecificInputCase) {
+  TensorFactory<ScalarType::Float> tf;
+
+  // Test case with specific inputs:
+  // Input shape: [8, 2, 7, 1]
+  // Output size: [7, 2]
+  // align_corners: false
+  // scales_h: 0.010000000000000002
+  // scales_w: 10.0
+  Tensor input = tf.zeros({8, 2, 7, 1});
+  auto in_data = input.mutable_data_ptr<float>();
+
+  // Fill with some test data
+  for (int i = 0; i < 8 * 2 * 7 * 1; i++) {
+    in_data[i] = static_cast<float>(i) * 0.1f;
+  }
+
+  // Output shape will be [8, 2, 7, 2]
+  Tensor out = tf.zeros({8, 2, 7, 2});
+
+  int64_t output_size_data[2] = {7, 2};
+  ArrayRef<int64_t> output_size(output_size_data, 2);
+
+  op_upsample_bilinear2d_aa_out(
+      input,
+      output_size,
+      /*align_corners=*/false,
+      0.010000000000000002,
+      10.0,
+      out);
+
+  // Verify output dimensions
+  EXPECT_EQ(out.size(0), 8);
+  EXPECT_EQ(out.size(1), 2);
+  EXPECT_EQ(out.size(2), 7);
+  EXPECT_EQ(out.size(3), 2);
+
+  // Verify that output has reasonable values
+  auto out_data = out.const_data_ptr<float>();
+  for (int i = 0; i < 8 * 2 * 7 * 2; i++) {
+    // Check for NaN or Inf
+    EXPECT_FALSE(std::isnan(out_data[i]));
+    EXPECT_FALSE(std::isinf(out_data[i]));
+  }
+}
