Skip to content

Commit 5640ebe

Browse files
lucylqfacebook-github-bot
authored and committed
Fix write-heap-buffer-overflow in et_copy_index (pytorch#15605)
Summary: The crash is a write-heap-buffer-overflow that occurs in the `et_copy_index` function. The root cause is the lack of proper validation of the `index` argument, which can lead to an out-of-bounds write when `index` is negative or exceeds the bounds of the `copy_to` tensor. The patch fixes the crash by adding two checks: `ET_CHECK_MSG(index >= 0, "Index must be non-negative");` and `ET_CHECK_MSG(index < copy_to.sizes()[0], "Index out of bounds");`. These checks ensure that `index` is within the valid range for the `copy_to` tensor, preventing the out-of-bounds write. Other considerations that reviewers should take into account when validating the patch include verifying that the added checks do not introduce any performance regressions and that they correctly handle edge cases, such as when `index` is equal to `copy_to.sizes()[0] - 1`. Reviewers should also check that the patch does not alter the existing functionality of the `et_copy_index` function and that it is consistent with the surrounding code. Additionally, reviewers may want to consider testing the patch with various inputs, including negative `index` values, `index` values that exceed the bounds of `copy_to`, and valid `index` values, to ensure that the patch correctly prevents the write-heap-buffer-overflow crash. Here is the commit message: ``` Fix write-heap-buffer-overflow crash in et_copy_index The crash is a write-heap-buffer-overflow that occurs in the `et_copy_index` function. The root cause is the lack of proper validation of the `index` argument, which can lead to an out-of-bounds write when `index` is negative or exceeds the bounds of the `copy_to` tensor. The patch fixes the crash by adding two checks: ```cpp ET_CHECK_MSG(index >= 0, "Index must be non-negative"); ET_CHECK_MSG(index < copy_to.sizes()[0], "Index out of bounds"); ``` These checks ensure that `index` is within the valid range for the `copy_to` tensor, preventing the out-of-bounds write. 
Other considerations that reviewers should take into account when validating the patch include verifying that the added checks do not introduce any performance regressions and that they correctly handle edge cases, such as when `index` is equal to `copy_to.sizes()[0] - 1`. Reviewers should also check that the patch does not alter the existing functionality of the `et_copy_index` function and that it is consistent with the surrounding code. ``` NOTE: This diff is entirely auto-generated by an LLM-based patch generator. Reviewer should carefully examine this diff as Lionhead does not guarantee the correctness of the patch beyond fixing the crash and passing existing tests. Please commandeer this diff and revise as needed. Our bot does not respond to comments or revision requests (yet). Differential Revision: D80399111
1 parent 149e23d commit 5640ebe

File tree

1 file changed

+45
-10
lines changed

1 file changed

+45
-10
lines changed

kernels/prim_ops/et_copy_index.cpp

Lines changed: 45 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ constexpr size_t kTensorDimensionLimit = 16;
5959
// torch.ops.executorch.prim.add.int(iteration_index, 1, iteration_index)
6060
// done_bool = torch.ops.executorch.prim.eq.int(iteration_index,
6161
// sym_size, done_bool) # Emitter inserts a instruction here, if
62-
// done_bool == False jump to selcect_copy op # if not continue. return
62+
// done_bool == False jump to select_copy op # if not continue. return
6363
// add_tensor
6464
//
6565
// The output of each iteration (copy_from) is copied into the copy_to tensor at
@@ -79,12 +79,27 @@ void et_copy_index(KernelRuntimeContext& context, Span<EValue*> stack) {
7979
auto copy_from = (*stack[1]).toTensor();
8080
auto index = (*stack[2]).toInt();
8181

82+
ET_KERNEL_CHECK_MSG(
83+
context,
84+
index >= 0 && index < copy_to.sizes()[0],
85+
InvalidArgument,
86+
/* void */,
87+
"Expected index to be non-negative and < %" ET_PRI_TENSOR_SIZE
88+
" got %" ET_PRI_TENSOR_SIZE,
89+
static_cast<int64_t>(copy_to.sizes()[0]),
90+
static_cast<int64_t>(index));
91+
8292
// Number of bytes we need to copy over from copy_from tensor.
8393
size_t size_copy_from = (copy_from.element_size()) * (copy_from.numel());
8494

85-
ET_CHECK_MSG(
95+
ET_KERNEL_CHECK_MSG(
96+
context,
8697
(copy_to.sizes().size() - copy_from.sizes().size()) == 1,
87-
"Ranks of copy_to and copy_from tensor should only differ by 1.");
98+
InvalidArgument,
99+
/* void */,
100+
"Ranks of copy_to %zu and copy_from tensor %zu should only differ by 1.",
101+
copy_to.sizes().size(),
102+
copy_from.sizes().size());
88103

89104
// Here we calculate the size of the out_tensor after copy_from has
90105
// been copied to it. This will be passed onto the resize call.
@@ -93,9 +108,15 @@ void et_copy_index(KernelRuntimeContext& context, Span<EValue*> stack) {
93108
// If we're copying past the first index then the shape of
94109
// copy_from and copy_to without the leading dimension should be
95110
// the same. i.e. copy_to.size[1:] == copy_from.size[:].
96-
ET_CHECK_MSG(
111+
ET_KERNEL_CHECK_MSG(
112+
context,
97113
copy_to.sizes()[i + 1] == copy_from.sizes()[i],
98-
"Mismatch in shape between copy_to and copy_from tensors");
114+
InvalidArgument,
115+
/* void */,
116+
"Mismatch in shape between copy_to %" ET_PRI_TENSOR_SIZE
117+
" and copy_from %" ET_PRI_TENSOR_SIZE " tensors",
118+
static_cast<int64_t>(copy_to.sizes()[i + 1]),
119+
static_cast<int64_t>(copy_from.sizes()[i]));
99120
expected_output_size[i + 1] = copy_from.sizes()[i];
100121
}
101122

@@ -105,8 +126,11 @@ void et_copy_index(KernelRuntimeContext& context, Span<EValue*> stack) {
105126
Error err =
106127
resize_tensor(copy_to, {expected_output_size, copy_to.sizes().size()});
107128
ET_CHECK(err == Error::Ok);
108-
ET_CHECK_MSG(
129+
ET_KERNEL_CHECK_MSG(
130+
context,
109131
data_ptr == copy_to.const_data_ptr(),
132+
InvalidState,
133+
/* void */,
110134
"Data ptr of copy_to tensor changed after resize which isn't allowed for static/upper-bounded tensors");
111135
}
112136

@@ -118,12 +142,23 @@ void et_copy_index(KernelRuntimeContext& context, Span<EValue*> stack) {
118142
// copy_from into the copy_to tensor.
119143

120144
// Check that the destination has enough space for the copy.
145+
ET_KERNEL_CHECK_MSG(
146+
context,
147+
size_copy_from == 0 || index <= SIZE_MAX / size_copy_from,
148+
InvalidArgument,
149+
/* void */,
150+
"Offset multiplication overflow. size_copy_from: %zu, index: %zu",
151+
size_copy_from,
152+
index);
121153
size_t offset = index * size_copy_from;
122154
size_t copy_to_size = copy_to.element_size() * copy_to.numel();
123-
ET_CHECK_MSG(
124-
offset + size_copy_from <= copy_to_size,
125-
"Buffer overflow: copy_to tensor is smaller than copy_from tensor.");
126-
155+
ET_KERNEL_CHECK_MSG(
156+
context,
157+
(offset <= SIZE_MAX - size_copy_from) &&
158+
(offset + size_copy_from <= copy_to_size),
159+
InvalidArgument,
160+
/* void */,
161+
"Buffer overflow; offset overflow or copy_to tensor is smaller than copy_from tensor.");
127162
memcpy(
128163
// NOLINTNEXTLINE(performance-no-int-to-ptr)
129164
(void*)((uintptr_t)copy_to_ptr + offset),

0 commit comments

Comments
 (0)