Skip to content
6 changes: 6 additions & 0 deletions compiler-rt/lib/sanitizer_common/sanitizer_common.h
Original file line number Diff line number Diff line change
Expand Up @@ -268,7 +268,13 @@ class ScopedErrorReportLock {
extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

// Returns true if we can read a memory range.
bool IsAccessibleMemoryRange(uptr beg, uptr size);
// Returns true if we can read a memory range starting at `src`, and copies
// content into `dest`.
bool TryMemCpy(void *dest, const void *src, uptr n);
// Copies the readable parts of the range into `dest` and zero-fills the rest.
void MemCpyAccessible(void *dest, const void *src, uptr n);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
Expand Down
26 changes: 26 additions & 0 deletions compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -219,6 +219,32 @@ static void StopStackDepotBackgroundThread() {
static void StopStackDepotBackgroundThread() {}
#endif

// Copies the readable parts of [src, src + n) into `dest` and zero-fills the
// unreadable parts. Tries one bulk TryMemCpy first (reported ~4x faster than
// page-by-page copying when the whole range is readable); only on failure
// does it fall back to probing page by page, since accessibility changes at
// page granularity.
void MemCpyAccessible(void *dest, const void *src, uptr n) {
  if (TryMemCpy(dest, src, n))
    return;

  const uptr page_size = GetPageSize();
  uptr b = reinterpret_cast<uptr>(src);
  uptr b_up = RoundUpTo(b, page_size);

  uptr e = reinterpret_cast<uptr>(src) + n;
  uptr e_down = RoundDownTo(e, page_size);

  // Offset translating a source address into the matching `dest` address.
  const uptr off = reinterpret_cast<uptr>(dest) - b;

  auto copy_or_zero = [off](uptr beg, uptr end) {
    void *d = reinterpret_cast<void *>(beg + off);
    const uptr size = end - beg;
    if (!TryMemCpy(d, reinterpret_cast<void *>(beg), size))
      internal_memset(d, 0, size);
  };

  if (b_up > e_down) {
    // The whole range lies within a single page. Without this guard the
    // "head" chunk [b, b_up) would run past `dest + n` and the "tail" chunk
    // [e_down, e) would start before `dest`, corrupting memory around the
    // destination buffer. Handle the range as one chunk instead.
    copy_or_zero(b, e);
    return;
  }

  copy_or_zero(b, b_up);  // Head: up to the first page boundary.
  for (uptr p = b_up; p < e_down; p += page_size)
    copy_or_zero(p, p + page_size);  // Whole interior pages.
  copy_or_zero(e_down, e);  // Tail: from the last page boundary.
}

} // namespace __sanitizer

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
Expand Down
5 changes: 5 additions & 0 deletions compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -444,6 +444,11 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) {
return status == ZX_OK;
}

// Not yet implemented on Fuchsia; conservatively reports every range as
// unreadable so callers fall back to their failure path.
bool TryMemCpy(void *dest, const void *src, uptr n) {
  // TODO: implement (e.g. via a Zircon memory-read primitive — TBD).
  return false;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}

Expand Down
44 changes: 44 additions & 0 deletions compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -326,6 +326,50 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) {
return true;
}

// Tries to copy `n` bytes from `src` to `dest`. Returns true on success, or
// false if any part of [src, src + n) is unreadable; on failure `dest` may
// have been partially written. Implemented by funneling the data through a
// pipe: the kernel performs the read of `src` inside write(2), so an
// unreadable page surfaces as an EFAULT error instead of faulting us.
bool TryMemCpy(void *dest, const void *src, uptr n) {
  if (!n)
    return true;
  int fds[2];
  CHECK_EQ(0, pipe(fds));

  // Close both pipe ends on every exit path.
  auto cleanup = at_scope_exit([&]() {
    internal_close(fds[0]);
    internal_close(fds[1]);
  });

  // Non-blocking, so pipe-buffer pressure produces an error return rather
  // than a hang. (The drain loop below keeps the pipe empty before each
  // write, so in practice writes should not see EAGAIN.)
  SetNonBlock(fds[0]);
  SetNonBlock(fds[1]);

  char *d = static_cast<char *>(dest);
  const char *s = static_cast<const char *>(src);

  while (n) {
    int e;
    // The kernel reads from [s, s + n) while filling the pipe. `w` may be
    // less than `n` when the remaining range exceeds the pipe capacity; the
    // outer loop then continues from where this write stopped.
    uptr w = internal_write(fds[1], s, n);
    if (internal_iserror(w, &e)) {
      if (e == EINTR)
        continue;
      // EFAULT means `s` points at unreadable memory: report failure.
      // Anything else is unexpected and fatal.
      CHECK_EQ(EFAULT, e);
      return false;
    }
    s += w;
    n -= w;

    // Drain everything just written into `dest`, emptying the pipe before
    // the next write.
    while (w) {
      uptr r = internal_read(fds[0], d, w);
      if (internal_iserror(r, &e)) {
        // `dest` is assumed writable, so only EINTR is tolerated here.
        CHECK_EQ(EINTR, e);
        continue;
      }

      d += r;
      w -= r;
    }
  }

  return true;
}

void PlatformPrepareForSandboxing(void *args) {
// Some kinds of sandboxes may forbid filesystem access, so we won't be able
// to read the file mappings from /proc/self/maps. Luckily, neither the
Expand Down
5 changes: 5 additions & 0 deletions compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -968,6 +968,11 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) {
return true;
}

// Not yet implemented on Windows; conservatively reports every range as
// unreadable so callers fall back to their failure path.
bool TryMemCpy(void *dest, const void *src, uptr n) {
  // TODO: implement (e.g. via ReadProcessMemory on the current process —
  // TBD).
  return false;
}

bool SignalContext::IsStackOverflow() const {
return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW;
}
Expand Down
86 changes: 82 additions & 4 deletions compiler-rt/lib/sanitizer_common/tests/sanitizer_posix_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,11 +13,14 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_POSIX

#include "sanitizer_common/sanitizer_common.h"
#include "gtest/gtest.h"
# include <pthread.h>
# include <sys/mman.h>

#include <pthread.h>
#include <sys/mman.h>
# include <algorithm>
# include <numeric>

# include "gtest/gtest.h"
# include "sanitizer_common/sanitizer_common.h"

namespace __sanitizer {

Expand Down Expand Up @@ -86,6 +89,81 @@ TEST(SanitizerCommon, IsAccessibleMemoryRangeLarge) {
buffer.size()));
}

TEST(SanitizerCommon, TryMemCpy) {
  std::vector<char> source(10000000);
  std::iota(source.begin(), source.end(), 123);

  // Deliberately avoid ::testing::ElementsAreArray and friends: on a mismatch
  // they dump the entire (huge) buffers, which is not helpful.
  auto copy_and_verify = [&](uptr size) {
    std::vector<char> copy(size, 0);
    EXPECT_TRUE(TryMemCpy(copy.data(), source.data(), copy.size()));
    EXPECT_TRUE(std::equal(copy.begin(), copy.end(), source.begin()));
  };

  // A spread of sizes: single byte, small unaligned sizes, exactly one page,
  // the full buffer, and one byte short of it.
  copy_and_verify(1);
  copy_and_verify(100);
  copy_and_verify(534);
  copy_and_verify(GetPageSize());
  copy_and_verify(source.size());
  copy_and_verify(source.size() - 1);
}

TEST(SanitizerCommon, TryMemCpyNull) {
  // Reading from a null source must fail gracefully rather than crash.
  std::vector<char> sink(100);
  EXPECT_FALSE(TryMemCpy(sink.data(), nullptr, sink.size()));
}

TEST(SanitizerCommon, MemCpyAccessible) {
  // Use uptr throughout: GetPageSize() returns uptr, and `int` arithmetic
  // would silently narrow / could overflow for large page counts.
  const uptr page_num = 1000;
  const uptr page_size = GetPageSize();
  InternalMmapVector<char> src(page_num * page_size);
  std::iota(src.begin(), src.end(), 123);
  std::vector<char> dst;
  std::vector<char> exp = {src.begin(), src.end()};

  // Protect a scattering of pages (7, 14, 28, ...); the expected output has
  // zeros in their place. Assert mprotect succeeds — if it silently failed,
  // the EXPECTs below would fail for a confusingly unrelated reason.
  for (uptr i = 7; i < page_num; i *= 2) {
    ASSERT_EQ(0, mprotect(src.data() + i * page_size, page_size, PROT_NONE));
    std::fill(exp.data() + i * page_size, exp.data() + (i + 1) * page_size, 0);
  }

  // Plain TryMemCpy must fail: part of the range is unreadable.
  dst.assign(src.size(), 0);
  EXPECT_FALSE(TryMemCpy(dst.data(), src.data(), dst.size()));

  // Full page-aligned range with mprotect'ed pages.
  dst.assign(src.size(), 0);
  MemCpyAccessible(dst.data(), src.data(), dst.size());
  EXPECT_TRUE(std::equal(dst.begin(), dst.end(), exp.begin()));

  // Misaligned range with mprotect'ed pages.
  uptr offb = 3;
  uptr offe = 7;
  dst.assign(src.size() - offb - offe, 0);
  MemCpyAccessible(dst.data(), src.data() + offb, dst.size());
  EXPECT_TRUE(std::equal(dst.begin(), dst.end(), exp.begin() + offb));

  // Misaligned range whose ends land inside mprotect'ed pages.
  offb = 3 + 7 * page_size;
  offe = 7 + 14 * page_size;
  dst.assign(src.size() - offb - offe, 0);
  MemCpyAccessible(dst.data(), src.data() + offb, dst.size());
  EXPECT_TRUE(std::equal(dst.begin(), dst.end(), exp.begin() + offb));
}

} // namespace __sanitizer

#endif // SANITIZER_POSIX
Loading