Skip to content

Commit 51feb00

Browse files
committed
Fix race condition in MetadataAllocator.
1 parent 93dfc16 commit 51feb00

File tree

3 files changed

+42
-22
lines changed

3 files changed

+42
-22
lines changed

red2 — binary file (8.45 KB), contents not shown.

stdlib/public/runtime/Metadata.cpp

Lines changed: 40 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -58,38 +58,58 @@ using namespace metadataimpl;
5858

5959
void *MetadataAllocator::alloc(size_t size) {
6060
#if defined(__APPLE__)
61-
const uintptr_t pagesizeMask = vm_page_mask;
61+
const uintptr_t PageSizeMask = vm_page_mask;
6262
#else
63-
static const uintptr_t pagesizeMask = sysconf(_SC_PAGESIZE) - 1;
63+
static const uintptr_t PageSizeMask = sysconf(_SC_PAGESIZE) - 1;
6464
#endif
6565
// If the requested size is a page or larger, map page(s) for it
6666
// specifically.
67-
if (LLVM_UNLIKELY(size > pagesizeMask)) {
68-
auto mem = mmap(nullptr, (size + pagesizeMask) & ~pagesizeMask,
67+
if (LLVM_UNLIKELY(size > PageSizeMask)) {
68+
auto mem = mmap(nullptr, (size + PageSizeMask) & ~PageSizeMask,
6969
PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
7070
VM_TAG_FOR_SWIFT_METADATA, 0);
7171
if (mem == MAP_FAILED)
7272
crash("unable to allocate memory for metadata cache");
7373
return mem;
7474
}
75+
76+
uintptr_t curValue = NextValue.load(std::memory_order_relaxed);
77+
while (true) {
78+
char *next = reinterpret_cast<char*>(curValue);
79+
char *end = next + size;
7580

76-
char *end = next + size;
77-
78-
// Allocate a new page if we need one.
79-
if (LLVM_UNLIKELY(((uintptr_t)next & ~pagesizeMask)
80-
!= (((uintptr_t)end & ~pagesizeMask)))){
81-
next = (char*)
82-
mmap(nullptr, pagesizeMask+1, PROT_READ|PROT_WRITE,
83-
MAP_ANON|MAP_PRIVATE, VM_TAG_FOR_SWIFT_METADATA, 0);
84-
85-
if (next == MAP_FAILED)
86-
crash("unable to allocate memory for metadata cache");
87-
end = next + size;
81+
// If we wrap over the end of the page, allocate a new page.
82+
void *allocation = nullptr;
83+
if (LLVM_UNLIKELY(((uintptr_t)next & ~PageSizeMask)
84+
!= (((uintptr_t)end & ~PageSizeMask)))) {
85+
// Allocate a new page if we haven't already.
86+
allocation = mmap(nullptr, PageSizeMask + 1,
87+
PROT_READ|PROT_WRITE,
88+
MAP_ANON|MAP_PRIVATE,
89+
VM_TAG_FOR_SWIFT_METADATA,
90+
/*offset*/ 0);
91+
92+
if (allocation == MAP_FAILED)
93+
crash("unable to allocate memory for metadata cache");
94+
95+
next = (char*) allocation;
96+
end = next + size;
97+
}
98+
99+
// Swap it into place.
100+
if (LLVM_LIKELY(std::atomic_compare_exchange_weak_explicit(
101+
&NextValue, &curValue, reinterpret_cast<uintptr_t>(end),
102+
std::memory_order_relaxed, std::memory_order_relaxed))) {
103+
return next;
104+
}
105+
106+
// If that didn't succeed, and we allocated, free the allocation.
107+
// This potentially causes us to perform multiple mmaps under contention,
108+
// but it keeps the fast path pristine.
109+
if (allocation) {
110+
munmap(allocation, PageSizeMask + 1);
111+
}
88112
}
89-
90-
char *addr = next;
91-
next = end;
92-
return addr;
93113
}
94114

95115
namespace {

stdlib/public/runtime/MetadataCache.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,10 +37,10 @@ class MetadataAllocator {
3737
///
3838
/// Initializing to -1 instead of nullptr ensures that the first allocation
3939
/// triggers a page allocation since it will always span a "page" boundary.
40-
char *next = (char*)(~(uintptr_t)0U);
40+
std::atomic<uintptr_t> NextValue;
4141

4242
public:
43-
constexpr MetadataAllocator() = default;
43+
constexpr MetadataAllocator() : NextValue(~(uintptr_t)0) {}
4444

4545
// Don't copy or move, please.
4646
MetadataAllocator(const MetadataAllocator &) = delete;

0 commit comments

Comments
 (0)