Skip to content

Commit e7b5843

Browse files
committed
runtime: add a StackAllocator utility
A StackAllocator performs fast allocation and deallocation of memory by implementing a bump-pointer allocation strategy. In contrast to a pure bump-pointer allocator, it's possible to free memory. Allocations and deallocations must follow a strict stack discipline. In general, slabs which become unused are _not_ freed, but reused for subsequent allocations. The first slab can be placed into pre-allocated memory.
1 parent c55b9cc commit e7b5843

File tree

3 files changed

+397
-0
lines changed

3 files changed

+397
-0
lines changed
Lines changed: 280 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,280 @@
1+
//===--- StackAllocator.h - A stack allocator -----------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// A bump-pointer allocator that obeys a stack discipline.
//
//===----------------------------------------------------------------------===//

// Include guard: this header defines a class template; a second inclusion in
// the same translation unit would otherwise be a redefinition error.
#ifndef SWIFT_RUNTIME_STACK_ALLOCATOR_H
#define SWIFT_RUNTIME_STACK_ALLOCATOR_H

#include "swift/Runtime/Debug.h"
#include "llvm/Support/Alignment.h"

// Include what we use instead of relying on transitive includes.
#include <algorithm> // std::max
#include <cassert>   // assert
#include <cstddef>   // size_t, std::max_align_t
#include <cstdint>   // uint32_t, uintptr_t
#include <cstdlib>   // malloc, free
#include <new>       // placement new

namespace swift {

/// A bump-pointer allocator that obeys a stack discipline.
///
/// StackAllocator performs fast allocation and deallocation of memory by
/// implementing a bump-pointer allocation strategy.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// In contrast to a pure bump-pointer allocator, it's possible to free memory.
/// Allocations and deallocations must follow a strict stack discipline. In
/// general, slabs which become unused are _not_ freed, but reused for
/// subsequent allocations.
///
/// It's possible to place the first slab into pre-allocated memory.
///
/// The SlabCapacity specifies the capacity for newly allocated slabs.
template <size_t SlabCapacity>
class StackAllocator {
private:

  struct Allocation;
  struct Slab;

  /// The last active allocation.
  ///
  /// A deallocate() must free this allocation.
  Allocation *lastAllocation = nullptr;

  /// The first slab.
  Slab *firstSlab;

  /// Used for unit testing.
  int32_t numAllocatedSlabs = 0;

  /// True if the first slab is pre-allocated.
  bool firstSlabIsPreallocated;

  /// If set to true, memory allocations are checked for buffer overflows and
  /// use-after-free, similar to guard-malloc.
  static constexpr bool guardAllocations =
#ifdef NDEBUG
      false;
#else
      true;
#endif

  /// Fill pattern written over freed memory (use-after-free detection).
  static constexpr uintptr_t magicUninitialized =
      (uintptr_t)0xcdcdcdcdcdcdcdcdull;
  /// Sentinel written just past each allocation (overflow detection).
  static constexpr uintptr_t magicEndOfAllocation =
      (uintptr_t)0xdeadbeafdeadbeafull;

  /// A memory slab holding multiple allocations.
  ///
  /// This struct is actually just the slab header. The slab buffer is tail
  /// allocated after Slab.
  struct Slab {
    /// A single linked list of all allocated slabs.
    Slab *next = nullptr;

    // Capacity and offset do not include these header fields.
    uint32_t capacity;
    uint32_t currentOffset = 0;

    // Here starts the tail allocated memory buffer of the slab.

    Slab(size_t newCapacity) : capacity(newCapacity) {
      // capacity is stored in 32 bits; guard against silent truncation.
      assert((size_t)capacity == newCapacity && "capacity overflow");
    }

    /// Return the payload buffer address at \p atOffset.
    ///
    /// Note: it's valid to call this function on a not-yet-constructed slab.
    char *getAddr(size_t atOffset) {
      // The payload starts immediately after the header fields.
      return (char *)(this + 1) + atOffset;
    }

    /// Return true if this slab can fit an allocation of \p size.
    ///
    /// \p size does not include the allocation header, but must include the
    /// overhead for guardAllocations (if enabled).
    inline bool canAllocate(size_t size) const {
      return currentOffset + Allocation::includingHeader(size) <= capacity;
    }

    /// Return true, if no memory is allocated in this slab.
    bool isEmpty() const { return currentOffset == 0; }

    /// Allocate \p alignedSize of bytes in this slab.
    ///
    /// \p alignedSize does not include the allocation header, but must include
    /// the overhead for guardAllocations (if enabled).
    ///
    /// Precondition: \p alignedSize must be aligned up to
    /// StackAllocator::alignment.
    /// Precondition: there must be enough space in this slab to fit the
    /// allocation.
    Allocation *allocate(size_t alignedSize, Allocation *lastAllocation) {
      assert(llvm::isAligned(llvm::Align(alignment), alignedSize));
      assert(canAllocate(alignedSize));
      void *buffer = getAddr(currentOffset);
      auto *allocation = new (buffer) Allocation(lastAllocation, this);
      currentOffset += Allocation::includingHeader(alignedSize);
      if (guardAllocations) {
        // Write the overflow sentinel into the last word of the allocation.
        uintptr_t *endOfCurrentAllocation = (uintptr_t *)getAddr(currentOffset);
        endOfCurrentAllocation[-1] = magicEndOfAllocation;
      }
      return allocation;
    }

    /// Deallocate \p allocation.
    ///
    /// Precondition: \p allocation must be an allocation in this slab.
    void deallocate(Allocation *allocation) {
      assert(allocation->slab == this);
      if (guardAllocations) {
        auto *endOfAllocation = (uintptr_t *)getAddr(currentOffset);
        if (endOfAllocation[-1] != magicEndOfAllocation)
          fatalError(0, "Buffer overflow in StackAllocator");
        // Poison the freed range to catch use-after-free.
        for (auto *p = (uintptr_t *)allocation; p < endOfAllocation; ++p)
          *p = magicUninitialized;
      }
      // Bump the offset back to the start of the freed allocation's header.
      currentOffset = (char *)allocation - getAddr(0);
    }
  };

  /// A single memory allocation.
  ///
  /// This struct is actually just the allocation header. The allocated
  /// memory buffer is located after Allocation.
  struct Allocation {
    /// A single linked list of previous allocations.
    Allocation *previous;
    /// The containing slab.
    Slab *slab;

    // Here starts the tail allocated memory.

    Allocation(Allocation *previous, Slab *slab) :
      previous(previous), slab(slab) {}

    void *getAllocatedMemory() {
      // The user-visible buffer starts immediately after this header.
      return (void *)(this + 1);
    }

    /// Return \p size with the added overhead of the allocation header.
    static size_t includingHeader(size_t size) {
      return size + sizeof(Allocation);
    }
  };

  /// Alignment guaranteed for every returned buffer.
  static constexpr size_t alignment = alignof(std::max_align_t);

  // Both headers must preserve the max alignment of the tail-allocated
  // payload that follows them.
  static_assert(sizeof(Slab) % StackAllocator::alignment == 0,
    "Slab size must be a multiple of the max allocation alignment");

  static_assert(sizeof(Allocation) % StackAllocator::alignment == 0,
    "Allocation size must be a multiple of the max allocation alignment");

  // Return a slab which is suitable to allocate \p size memory.
  Slab *getSlabForAllocation(size_t size) {
    Slab *slab = (lastAllocation ? lastAllocation->slab : firstSlab);
    if (slab) {
      // Is there enough space in the current slab?
      if (slab->canAllocate(size))
        return slab;

      // Is there a successor slab, which we allocated before (and became free
      // in the meantime)?
      if (Slab *nextSlab = slab->next) {
        assert(nextSlab->isEmpty());
        if (nextSlab->canAllocate(size))
          return nextSlab;

        // No space in the next slab. Although it's empty, the size exceeds its
        // capacity.
        // As we have to allocate a new slab anyway, free all successor slabs
        // and allocate a new one with the accumulated capacity.
        size_t alreadyAllocatedCapacity = freeAllSlabs(slab->next);
        size = std::max(size, alreadyAllocatedCapacity);
      }
    }
    size_t capacity = std::max(SlabCapacity,
                               Allocation::includingHeader(size));
    void *slabBuffer = malloc(sizeof(Slab) + capacity);
    // Placement-new into a null buffer would be undefined behavior; fail
    // loudly instead of crashing on a later dereference.
    if (!slabBuffer)
      fatalError(0, "Out of memory in StackAllocator");
    Slab *newSlab = new (slabBuffer) Slab(capacity);
    if (slab)
      slab->next = newSlab;
    else
      firstSlab = newSlab;
    numAllocatedSlabs++;
    return newSlab;
  }

  /// Deallocate all slabs after \p first and set \p first to null.
  ///
  /// Returns the accumulated capacity of the freed slabs.
  size_t freeAllSlabs(Slab *&first) {
    size_t freedCapacity = 0;
    Slab *slab = first;
    first = nullptr;
    while (slab) {
      Slab *next = slab->next;
      freedCapacity += slab->capacity;
      free(slab);
      numAllocatedSlabs--;
      slab = next;
    }
    return freedCapacity;
  }

public:
  /// Construct a StackAllocator without a pre-allocated first slab.
  StackAllocator() : firstSlab(nullptr), firstSlabIsPreallocated(false) { }

  /// Construct a StackAllocator with a pre-allocated first slab.
  StackAllocator(void *firstSlabBuffer, size_t bufferCapacity) {
    // The caller's buffer may be arbitrarily aligned; round the slab header
    // up to the required alignment and give the remainder to the payload.
    char *start = (char *)llvm::alignAddr(firstSlabBuffer,
                                          llvm::Align(alignment));
    char *end = (char *)firstSlabBuffer + bufferCapacity;
    assert(start + sizeof(Slab) <= end && "buffer for first slab too small");
    firstSlab = new (start) Slab(end - start - sizeof(Slab));
    firstSlabIsPreallocated = true;
  }

  ~StackAllocator() {
    if (lastAllocation)
      fatalError(0, "not all allocations are deallocated");
    // A pre-allocated first slab is owned by the caller; never free() it.
    (void)freeAllSlabs(firstSlabIsPreallocated ? firstSlab->next : firstSlab);
    assert(getNumAllocatedSlabs() == 0);
  }

  /// Allocate a memory buffer of \p size.
  void *alloc(size_t size) {
    if (guardAllocations)
      size += sizeof(uintptr_t);
    size_t alignedSize = llvm::alignTo(size, llvm::Align(alignment));
    Slab *slab = getSlabForAllocation(alignedSize);
    Allocation *allocation = slab->allocate(alignedSize, lastAllocation);
    lastAllocation = allocation;
    assert(llvm::isAddrAligned(llvm::Align(alignment),
                               allocation->getAllocatedMemory()));
    return allocation->getAllocatedMemory();
  }

  /// Deallocate memory \p ptr.
  ///
  /// \p ptr must be the most recent result of alloc() (stack discipline).
  void dealloc(void *ptr) {
    if (!lastAllocation || lastAllocation->getAllocatedMemory() != ptr)
      fatalError(0, "freed pointer was not the last allocation");

    Allocation *prev = lastAllocation->previous;
    lastAllocation->slab->deallocate(lastAllocation);
    lastAllocation = prev;
  }

  /// For unit testing.
  int getNumAllocatedSlabs() { return numAllocatedSlabs; }
};

} // namespace swift

#endif // SWIFT_RUNTIME_STACK_ALLOCATOR_H

unittests/runtime/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -79,6 +79,7 @@ if(("${SWIFT_HOST_VARIANT_SDK}" STREQUAL "${SWIFT_PRIMARY_VARIANT_SDK}") AND
7979
Enum.cpp
8080
Refcounting.cpp
8181
Stdlib.cpp
82+
StackAllocator.cpp
8283
${PLATFORM_SOURCES}
8384

8485
# The runtime tests link to internal runtime symbols, which aren't exported

0 commit comments

Comments
 (0)