
Commit ca77fde

Merge pull request #2666 from ruby/allocator
Use `malloc` based allocator
2 parents: ffcb7e2 + f106e83

2 files changed: +80 -92 lines

include/rbs/util/rbs_allocator.h

Lines changed: 6 additions & 2 deletions
@@ -31,8 +31,12 @@
 )
 #endif

-struct rbs_allocator;
-typedef struct rbs_allocator rbs_allocator_t;
+typedef struct rbs_allocator {
+    // The head of a linked list of pages, starting with the most recently allocated page.
+    struct rbs_allocator_page *page;
+
+    size_t default_page_payload_size;
+} rbs_allocator_t;

 rbs_allocator_t *rbs_allocator_init(void);
 void rbs_allocator_free(rbs_allocator_t *);
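
With this change the allocator itself is a plain heap object rather than an opaque mapped region. A minimal lifecycle sketch from a hypothetical caller follows; it assumes the `_impl` allocation entry points defined in src/util/rbs_allocator.c below are visible to the caller (only the init/free prototypes appear in this hunk, and real call sites presumably go through wrapper macros, which is why the functions carry an `_impl` suffix).

    #include "rbs/util/rbs_allocator.h"

    void example(void) {
        rbs_allocator_t *arena = rbs_allocator_init();

        // Carve 16 ints out of the current head page. The size must be a
        // multiple of the alignment, per the assert in rbs_allocator_malloc_impl.
        int *xs = rbs_allocator_malloc_impl(arena, 16 * sizeof(int), sizeof(int));
        for (int i = 0; i < 16; i++) xs[i] = i;

        // Individual allocations are never freed; the whole arena goes at once.
        rbs_allocator_free(arena);
    }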

src/util/rbs_allocator.c

Lines changed: 74 additions & 90 deletions
@@ -3,6 +3,14 @@
  *
  * A simple arena allocator that can be freed all at once.
  *
+ * This allocator maintains a linked list of pages, which come in two flavours:
+ * 1. Small allocation pages, which are the same size as the system page size.
+ * 2. Large allocation pages, which are the exact size requested, for sizes greater than the small page size.
+ *
+ * Small allocations always fit into the unused space at the end of the "head" page. If there isn't enough room, a new
+ * page is allocated, and the small allocation is placed at its start. This approach wastes that unused slack at the
+ * end of the previous page, but it means that allocations are instant and never scan the linked list to find a gap.
+ *
  * This allocator doesn't support freeing individual allocations. Only the whole arena can be freed at once at the end.
  */
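
To make the two flavours concrete, here is a rough sketch of the classification rule the comment describes. The 4096-byte page size and the 24-byte header are illustrative assumptions; both depend on the platform.

    #include <stdbool.h>
    #include <stddef.h>

    // Illustrative only: with a 4096-byte system page and a 24-byte page header
    // (next pointer, size, used on a typical 64-bit platform), the small-page
    // payload is 4096 - 24 = 4072 bytes.
    static const size_t example_payload_size = 4096 - 24;

    static bool needs_own_page(size_t request) {
        // Anything larger than the small-page payload gets a dedicated large page.
        return request > example_payload_size;
    }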

@@ -23,34 +31,16 @@
 #include <fcntl.h>
 #endif

-struct rbs_allocator {
-    uintptr_t heap_ptr;
-    uintptr_t size;
-};
-
-static void *portable_mmap_anon(size_t size) {
-#ifdef _WIN32
-    /* Windows doesn't use this function - VirtualAlloc is used instead */
-    return NULL;
-#else
-    void *ptr;
-
-#if defined(MAP_ANONYMOUS)
-    ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-#elif defined(MAP_ANON)
-    ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
-#else
-    /* Fallback to /dev/zero for systems without anonymous mapping */
-    int fd = open("/dev/zero", O_RDWR);
-    rbs_assert(fd != -1, "open('/dev/zero') failed");
+typedef struct rbs_allocator_page {
+    // The previously allocated page, or NULL if this is the first page.
+    struct rbs_allocator_page *next;

-    ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
-    close(fd); /* Can close fd after mapping */
-#endif
+    // The size of the payload in bytes.
+    size_t size;

-    return ptr;
-#endif
-}
+    // The offset of the next available byte.
+    size_t used;
+} rbs_allocator_page_t;

 static size_t get_system_page_size(void) {
 #ifdef _WIN32
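
Each page is a single malloc block: the rbs_allocator_page_t header sits at the front and the payload follows immediately after it, so the allocation code can find the payload with plain pointer arithmetic, and free(page) releases the header plus every allocation carved from that page in one call. A rough sketch of the layout (field widths illustrative):

    [ next | size | used ][ payload: page->size bytes ............ ]
    ^ page                ^ (char *) page + sizeof(rbs_allocator_page_t)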
@@ -64,73 +54,37 @@ static size_t get_system_page_size(void) {
 #endif
 }

-static void *map_memory(size_t size) {
-#ifdef _WIN32
-    LPVOID result = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
-    rbs_assert(result != NULL, "VirtualAlloc failed");
-#else
-    void *result = portable_mmap_anon(size);
-    rbs_assert(result != MAP_FAILED, "mmap failed");
-#endif
-    return result;
-}
+static rbs_allocator_page_t *rbs_allocator_page_new(size_t payload_size) {
+    const size_t page_header_size = sizeof(rbs_allocator_page_t);

-static void destroy_memory(void *memory, size_t size) {
-#ifdef _WIN32
-    VirtualFree(memory, 0, MEM_RELEASE);
-#else
-    munmap(memory, size);
-#endif
-}
+    rbs_allocator_page_t *page = malloc(page_header_size + payload_size);
+    page->size = payload_size;
+    page->used = 0;

-static void guard_page(void *memory, size_t page_size) {
-#ifdef _WIN32
-    DWORD old_protect_;
-    BOOL result = VirtualProtect(memory, page_size, PAGE_NOACCESS, &old_protect_);
-    rbs_assert(result != 0, "VirtualProtect failed");
-#else
-    int result = mprotect(memory, page_size, PROT_NONE);
-    rbs_assert(result == 0, "mprotect failed");
-#endif
+    return page;
 }

-static size_t rbs_allocator_default_mem(void) {
-    size_t kib = 1024;
-    size_t mib = kib * 1024;
-    size_t gib = mib * 1024;
-    return 4 * gib;
-}
+rbs_allocator_t *rbs_allocator_init() {
+    rbs_allocator_t *allocator = malloc(sizeof(rbs_allocator_t));

-static inline bool is_power_of_two(uintptr_t value) {
-    return value > 0 && (value & (value - 1)) == 0;
-}
+    const size_t system_page_size = get_system_page_size();

-// Align `val' to nearest multiple of `alignment'.
-static uintptr_t align(uintptr_t size, uintptr_t alignment) {
-    rbs_assert(is_power_of_two(alignment), "alignment is not a power of two");
-    return (size + alignment - 1) & ~(alignment - 1);
-}
+    allocator->default_page_payload_size = system_page_size - sizeof(rbs_allocator_page_t);
+
+    allocator->page = rbs_allocator_page_new(allocator->default_page_payload_size);
+    allocator->page->next = NULL;

-rbs_allocator_t *rbs_allocator_init(void) {
-    size_t size = rbs_allocator_default_mem();
-    size_t page_size = get_system_page_size();
-    size = align(size, page_size);
-    void *mem = map_memory(size + page_size);
-    // Guard page; remove range checks in alloc fast path and hard fail if we
-    // consume all memory
-    void *last_page = (char *) mem + size;
-    guard_page(last_page, page_size);
-    uintptr_t start = (uintptr_t) mem;
-    rbs_allocator_t header = (rbs_allocator_t) {
-        .heap_ptr = start + sizeof header,
-        .size = size + page_size,
-    };
-    memcpy(mem, &header, sizeof header);
-    return (rbs_allocator_t *) mem;
+    return allocator;
 }

 void rbs_allocator_free(rbs_allocator_t *allocator) {
-    destroy_memory((void *) allocator, allocator->size);
+    rbs_allocator_page_t *page = allocator->page;
+    while (page) {
+        rbs_allocator_page_t *next = page->next;
+        free(page);
+        page = next;
+    }
+    free(allocator);
 }

 // Allocates `new_size` bytes from `allocator`, aligned to an `alignment`-byte boundary.
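
A small sketch of the invariants that hold right after rbs_allocator_init() returns, as they could be checked from inside src/util/rbs_allocator.c (where the page struct is visible). The assertions are illustrative and not part of this change.

    #include <assert.h>

    static void check_fresh_arena(void) {
        rbs_allocator_t *arena = rbs_allocator_init();

        assert(arena->page != NULL);        // exactly one small page so far...
        assert(arena->page->next == NULL);  // ...and it is also the end of the list
        assert(arena->page->used == 0);     // nothing carved out of it yet
        assert(arena->page->size == arena->default_page_payload_size);

        rbs_allocator_free(arena);          // walks page->next, then frees the arena struct
    }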
@@ -145,20 +99,50 @@ void *rbs_allocator_realloc_impl(rbs_allocator_t *allocator, void *ptr, size_t o
 // Allocates `size` bytes from `allocator`, aligned to an `alignment`-byte boundary.
 void *rbs_allocator_malloc_impl(rbs_allocator_t *allocator, size_t size, size_t alignment) {
     rbs_assert(size % alignment == 0, "size must be a multiple of the alignment. size: %zu, alignment: %zu", size, alignment);
-    uintptr_t aligned = align(allocator->heap_ptr, alignment);
-    allocator->heap_ptr = aligned + size;
-    return (void *) aligned;
+
+    if (allocator->default_page_payload_size < size) { // Big allocation, give it its own page.
+        rbs_allocator_page_t *new_page = rbs_allocator_page_new(size);
+
+        // This simple allocator can only put small allocations into the head page.
+        // Naively prepending this large allocation page to the head of the allocator before the previous head page
+        // would waste the remaining space in the head page.
+        // So instead, we'll splice in the large page *after* the head page.
+        //
+        //   +-------+     +-----------+      +-----------+
+        //   | arena |     | head page |      | new_page  |
+        //   |-------|     |-----------+      |-----------+
+        //   | *page |---->| size      |  +-->| size      |  +--> ... previous tail
+        //   +-------+     | offset    |  |   | offset    |  |
+        //                 | *next ----+--+   | *next ----+--+
+        //                 | ...       |      | ...       |
+        //                 +-----------+      +-----------+
+        //
+        new_page->next = allocator->page->next;
+        allocator->page->next = new_page;
+
+        uintptr_t pointer = (uintptr_t) new_page + sizeof(rbs_allocator_page_t);
+        return (void *) pointer;
+    }
+
+    rbs_allocator_page_t *page = allocator->page;
+    if (page->used + size > page->size) {
+        // Not enough space. Allocate a new small page and prepend it to the allocator's linked list.
+        rbs_allocator_page_t *new_page = rbs_allocator_page_new(allocator->default_page_payload_size);
+        new_page->next = allocator->page;
+        allocator->page = new_page;
+        page = new_page;
+    }
+
+    uintptr_t pointer = (uintptr_t) page + sizeof(rbs_allocator_page_t) + page->used;
+    page->used += size;
+    return (void *) pointer;
 }

 // Note: This will eagerly fill with zeroes, unlike `calloc()` which can map a page in a page to be zeroed lazily.
 // It's assumed that callers to this function will immediately write to the allocated memory, anyway.
 void *rbs_allocator_calloc_impl(rbs_allocator_t *allocator, size_t count, size_t size, size_t alignment) {
     void *p = rbs_allocator_malloc_many_impl(allocator, count, size, alignment);
-#if defined(__linux__)
-    // mmap with MAP_ANONYMOUS gives zero-filled pages.
-#else
     memset(p, 0, count * size);
-#endif
     return p;
 }
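
A hedged smoke-test sketch of the two allocation paths plus the calloc change, again written as if it sat inside src/util/rbs_allocator.c; the 1 MiB request is an arbitrary size chosen to exceed any realistic small-page payload.

    static void exercise_both_paths(void) {
        rbs_allocator_t *arena = rbs_allocator_init();

        // Small path: fits the head page and just bumps page->used.
        void *small = rbs_allocator_malloc_impl(arena, 64, 8);

        // Large path: bigger than default_page_payload_size, so it gets its own
        // page, spliced in *after* the head page to preserve the head's slack.
        void *large = rbs_allocator_malloc_impl(arena, 1 << 20, 8);

        // calloc path: allocate as above, then eagerly memset the block to zero.
        void *zeroed = rbs_allocator_calloc_impl(arena, 4, 32, 8);

        (void) small; (void) large; (void) zeroed;
        rbs_allocator_free(arena); // releases every page in one pass
    }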
