Skip to content

Commit 94eaeb9

Browse files
thenumberouscode authored and pytorchmergebot committed
[Conv1d] Check overflow before we compute padding size. (pytorch#162363)
Fixes pytorch#161877; also fixes pytorch#161875. Pull Request resolved: pytorch#162363. Approved by: https://github.com/jbschlosser
1 parent 753d9bd commit 94eaeb9

File tree

3 files changed

+73
-0
lines changed

3 files changed

+73
-0
lines changed

aten/src/ATen/native/Convolution.cpp

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -689,6 +689,10 @@ static void check_shape_forward(const at::Tensor& input,
689689
", but got bias of size ", at::symint::sizes<T>(bias), " instead");
690690

691691
for (const auto i : c10::irange(2, k)) {
692+
// T could be int64_t or SymInt; numeric_limits<SymInt> is specialized in c10/core/SymInt.h
693+
TORCH_CHECK(padding[i-2] <= (std::numeric_limits<T>::max() - padding[i-2]),
694+
"Given padding=", padding[i-2], " at dimension ", i-2, " , expected padding to be at most ",
695+
(std::numeric_limits<T>::max() / 2));
692696
input_shape.push_back(at::symint::size<T>(input, i) + 2 * padding[i-2]);
693697
// log new kernel size considering dilation
694698
kernel_shape.push_back(dilation[i-2] * (weight_sizes[i]-1) + 1);
@@ -715,6 +719,11 @@ static void check_shape_forward(const at::Tensor& input,
715719
"Kernel size: (", kernel_ss.str(), "). Kernel size can't be greater than actual input size");
716720
}
717721
} else { // transposed
722+
for (const auto i : c10::irange(2, k)) {
723+
TORCH_CHECK(padding[i-2] <= (std::numeric_limits<T>::max() - padding[i-2]),
724+
"Given padding=", padding[i-2], " at dimension ", i-2, " , expected padding to be at most ",
725+
(std::numeric_limits<T>::max() / 2));
726+
}
718727
TORCH_CHECK(at::symint::size<T>(input, 1) == weight_sizes[0],
719728
"Given transposed=", transposed, ", weight of size ", weight_sizes,
720729
", expected input", at::symint::sizes<T>(input), " to have ", weight_sizes[0],

c10/core/SymInt.h

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -556,3 +556,26 @@ inline SymBool sym_ge(const SymInt& a, const SymInt& b) {
556556
}
557557

558558
} // namespace c10
559+
560+
#include <limits>
561+
562+
namespace std {
563+
564+
template <>
565+
class numeric_limits<c10::SymInt> {
566+
public:
567+
static constexpr bool is_specialized = true;
568+
569+
static constexpr int64_t max() noexcept {
570+
return std::numeric_limits<int64_t>::max();
571+
}
572+
573+
static constexpr int64_t min() noexcept {
574+
return std::numeric_limits<int64_t>::min();
575+
}
576+
577+
static constexpr bool is_signed = true;
578+
static constexpr bool is_integer = true;
579+
};
580+
581+
} // namespace std

test/nn/test_convolution.py

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,47 @@ def test_conv_backcompat(self):
9393
input = torch.randn((1, 1, 1, 1), dtype=torch.float)
9494
self.assertEqual(m(input).size(), (1, 1, 1, 1))
9595

96+
def test_huge_padding(self):
97+
class Conv1dModule(torch.nn.Module):
98+
def __init__(self):
99+
super().__init__()
100+
self.conv1 = nn.Conv1d(
101+
in_channels=16,
102+
out_channels=32,
103+
kernel_size=3,
104+
stride=1,
105+
padding=9223372036854775803,
106+
)
107+
self.add_module(name="conv1", module=self.conv1)
108+
109+
input_data = torch.randn(1, 16, 100)
110+
model = Conv1dModule()
111+
with self.assertRaisesRegex(
112+
RuntimeError,
113+
r"Given padding=9223372036854775803 at dimension 0 , expected padding to be at most",
114+
):
115+
model.conv1(input_data)
116+
117+
class ConvTransposed1dModule(torch.nn.Module):
118+
def __init__(self):
119+
super().__init__()
120+
self.conv_transposed1d = nn.ConvTranspose1d(
121+
in_channels=16,
122+
out_channels=32,
123+
kernel_size=3,
124+
stride=2,
125+
padding=9223372036854775803,
126+
)
127+
self.add_module(name="conv_transposed1d", module=self.conv_transposed1d)
128+
129+
input_data = torch.randn(1, 16, 100)
130+
model = ConvTransposed1dModule()
131+
with self.assertRaisesRegex(
132+
RuntimeError,
133+
r"Given padding=9223372036854775803 at dimension 0 , expected padding to be at most",
134+
):
135+
model.conv_transposed1d(input_data)
136+
96137
def test_invalid_conv1d(self):
97138
for dtype in [
98139
torch.half,

0 commit comments

Comments (0)