Commit 9e8393b

Initial commit

7 files changed (+491, -0 lines changed)

.gitignore

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
/target
/Cargo.lock

Cargo.toml

Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
[package]
name = "space_alloc"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
thiserror = "1.0.38"

[dev-dependencies]
nonzero_ext = "0.3.0"

README.md

Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
# space_alloc

General purpose space allocators.
These don't actually allocate any memory; they only provide bookkeeping. Allocation methods return [Allocation]s with an offset and size that the user can interpret in whichever way they need, for instance, GPU space division.
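Not part of the commit: a minimal usage sketch based on the tests in src/buddy.rs, assuming the crate root (src/lib.rs, which is part of this commit but not shown in this excerpt) exposes the `Allocator` and `Allocation` traits and the `buddy` module; the import paths below are hypothetical.

```rust
use std::num::NonZeroU64;

// Hypothetical paths: adjust to wherever lib.rs actually exposes these items.
use space_alloc::buddy::BuddyAllocator;
use space_alloc::{Allocation, Allocator};

fn main() {
    // 2^4 order-0 blocks of 256 bytes each, i.e. 4 KiB of bookkept space.
    let mut allocator = BuddyAllocator::new(4, 256);

    let size = NonZeroU64::new(300).unwrap();
    let alignment = NonZeroU64::new(256).unwrap();

    // No memory is touched here; we only get back an offset and a size.
    let allocation = allocator.allocate(size, alignment).expect("out of space");
    println!("offset = {}, size = {}", allocation.offset(), allocation.size());
}
```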

src/align.rs

Lines changed: 17 additions & 0 deletions
@@ -0,0 +1,17 @@
use std::num::NonZeroU64;

/// Returns `size` rounded up to a multiple of `alignment`. See [align].
pub fn align_nonzero(alignment: NonZeroU64, size: NonZeroU64) -> NonZeroU64 {
    let alignment = alignment.get();
    let size = size.get();

    // TODO: Check if this will ever be zero, and if so, in which circumstances
    NonZeroU64::new((size + alignment - 1) & !(alignment - 1)).unwrap()
}

/// Returns `size` rounded up to a multiple of `alignment`. Same as `u64::next_multiple_of`
/// for power-of-two alignments, which is what the bit trick below assumes.
// TODO: Replace this with `next_multiple_of` once it is stabilized.
// https://github.com/rust-lang/rust/issues/88581
pub fn align(alignment: u64, size: u64) -> u64 {
    (size + alignment - 1) & !(alignment - 1)
}
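Not part of the commit: a small illustration of the rounding these helpers perform, assuming a public path to `align` (the crate root is not shown here, so the import is hypothetical).

```rust
// Hypothetical import path; adjust to how lib.rs exposes the module.
use space_alloc::align::align;

fn main() {
    // 33 rounded up to a 16-byte boundary: (33 + 15) & !15 = 48 & !15 = 48.
    assert_eq!(align(16, 33), 48);
    // Already-aligned sizes are returned unchanged.
    assert_eq!(align(16, 48), 48);
    // An alignment of 1 is a no-op.
    assert_eq!(align(1, 7), 7);
    println!("alignment helpers behave as expected");
}
```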

src/buddy.rs

Lines changed: 330 additions & 0 deletions
@@ -0,0 +1,330 @@
use std::num::NonZeroU64;

use crate::align::align_nonzero;

use super::OutOfMemory;

/// Allocation information from a `BuddyAllocator` allocation.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct BuddyAllocation {
    offset: u64,
    size: NonZeroU64,
    index: usize,
}

impl super::Allocation for BuddyAllocation {
    fn offset(&self) -> u64 {
        self.offset
    }

    fn size(&self) -> u64 {
        self.size.get()
    }
}

/// Bitmap-tree-based [buddy allocator], loosely based on <https://github.com/Restioson/buddy-allocator-workshop>.
///
/// [buddy allocator]: https://en.wikipedia.org/wiki/Buddy_memory_allocation
#[derive(Clone)]
pub struct BuddyAllocator {
    blocks: Vec<Block>,
    o0_size: u64,
    max_order: u8,
}

impl std::fmt::Debug for BuddyAllocator {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut string = String::new();
        let mut index = 0;
        for level in 0..=self.max_order {
            let order = self.max_order - level;
            let start_spacing = 2usize.pow(order as u32) - 1;
            let item_spacing = 2 * start_spacing + 1;
            string.extend(std::iter::repeat(' ').take(start_spacing));
            for _block in 0..2usize.pow(level as u32) {
                string.push_str(format!("{:x}", self.read_block(index)).as_str());
                string.extend(std::iter::repeat(' ').take(item_spacing));
                index += 1;
            }
            string.push('\n');
        }

        f.write_str(&string)
    }
}
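// Illustrative note (not part of the commit): for a fresh `BuddyAllocator::new(2, 1)` the Debug
// output above prints the raw `Block::order` values (order + 1 while free, 0 once allocated) as
// a pyramid, roughly:
//
//    3
//  2   2
// 1 1 1 1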

impl BuddyAllocator {
    /// Creates a new buddy allocator.
    ///
    /// - `max_order` specifies the max allocatable order.
    /// - `o0_size` specifies the physical size of a block of order 0.
    ///
    /// The resulting allocator manages `o0_size * 2^max_order` bytes in total.
    pub fn new(max_order: u8, o0_size: u64) -> Self {
        let max_level = max_order + 1;
        let mut blocks = vec![Block::new(0); Self::blocks_for_level(max_level)];
        let mut i = 0;
        for o in 0..max_level {
            let n = 1 << o;
            blocks[i..(i + n)].fill(Block::new(max_order - o));
            i += n;
        }
        BuddyAllocator {
            blocks,
            o0_size,
            max_order,
        }
    }

    fn read_block(&self, i: usize) -> u8 {
        self.blocks[i].order
    }

    fn write_block(&mut self, i: usize, new_order: u8) {
        self.blocks[i].order = new_order;
    }

    fn update(&mut self, mut i: usize, max_level: u8) {
        // Traverse upwards, recomputing each parent from its children. The starting block
        // sits `max_level` levels below the root, so its order is `max_order - max_level`.
        let mut order = self.max_order - max_level;
        for _ in 0..max_level {
            // ensure we start from the right child (we don't know if i is left or right)
            let i_right = (i + 1) & !1;
            i = Self::parent(i);
            let left = self.read_block(i_right - 1);
            let right = self.read_block(i_right);
            let fully_free = order + 1;
            if left == fully_free && right == fully_free {
                // Both buddies are completely free: merge them back into a free block
                // of the parent's order, matching the invariant set up in `new`.
                self.write_block(i, order + 2);
            } else {
                // Otherwise the parent can serve at most the best of its children.
                self.write_block(i, left.max(right));
            }
            order += 1;
        }
    }

    const fn blocks_for_level(level: u8) -> usize {
        ((1 << level) - 1) as usize
    }

    const fn left_child(i: usize) -> usize {
        ((i + 1) << 1) - 1
    }

    #[allow(dead_code)]
    const fn right_child(i: usize) -> usize {
        (((i + 1) << 1) + 1) - 1
    }

    const fn parent(i: usize) -> usize {
        ((i + 1) >> 1) - 1
    }

    /// Returns the smallest order whose block size (`o0_size * 2^order`) fits `size`.
    pub fn order_of(size: NonZeroU64, o0_size: u64) -> u8 {
        // SAFETY: the ceiling division of a non-zero number is always greater than zero.
        let o0_blocks_needed = unsafe { NonZeroU64::new_unchecked(div_ceil(size.get(), o0_size)) };
        log2_ceil(o0_blocks_needed) as u8
    }
}

fn div_ceil(a: u64, b: u64) -> u64 {
    (a + b - 1) / b
}

pub const fn log2_ceil(x: NonZeroU64) -> u32 {
    u64::BITS - (x.get() - 1).leading_zeros()
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct Block {
    // >0: one plus the greatest order available to allocate from this subtree
    //     (including from this block itself)
    // =0: allocated
    order: u8,
}

impl Block {
    pub fn new(order: u8) -> Self {
        Block { order: order + 1 }
    }
}

impl super::Allocator for BuddyAllocator {
    type Allocation = BuddyAllocation;

    fn allocate(
        &mut self,
        size: NonZeroU64,
        alignment: NonZeroU64,
    ) -> Result<Self::Allocation, OutOfMemory> {
        // If the alignment is a multiple of the minimum block size, then we're good to go since
        // the offset can be aligned.
        // If the minimum block size is a multiple of the alignment, then we're also good to go
        // and we don't need to make any adjustments.
        // Otherwise, we theoretically *could* align the result, but we're leaving it
        // unimplemented for now.
        assert!(
            self.o0_size % alignment.get() == 0 || alignment.get() % self.o0_size == 0,
            "buddy allocator cannot provide allocations when the alignment given is not a multiple of the minimum block size or vice versa"
        );

        // Our current strategy for alignment is to allocate a block as big as a multiple of the
        // alignment. This will guarantee alignment but will waste space.
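        // Illustrative example (not part of the commit): with `o0_size = 8`, a request of
        // size = 33 with alignment = 32 is padded to aligned = 64 and served from an order-3
        // block (8 * 2^3 = 64 bytes), even though only 33 bytes were asked for.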
        let aligned = align_nonzero(alignment, size);
        // Use the padded size here so that the order we allocate matches the order that
        // `deallocate` will later recompute from `alloc.size`.
        let order = Self::order_of(aligned, self.o0_size);

        let root = self.read_block(0);
        if root == 0 || root - 1 < order {
            // root == 0: root is allocated, i.e., there is 0 memory left.
            // root - 1 < order: greatest available order cannot satisfy the requested allocation.
            return Err(OutOfMemory);
        }
        // past this point we know that there is enough memory for the requested allocation.

        // start from the top.
        // keep going down until we hit a block that matches the order.
        let max_level = self.max_order - order;
        let mut offset = 0; // physical offset, in order-0 blocks
        let mut i = 0; // current index
        for level in 0..max_level {
            let i_parent = i;
            let i_left = Self::left_child(i_parent);
            let left = self.read_block(i_left);

            // check if the left child can satisfy the requested allocation
            if left != 0 && left - 1 >= order {
                i = i_left;
            } else {
                // otherwise, we know for certain that the right child must be able to,
                // because the parent's order said it could (which is the max of left/right)
                i = i_left + 1;
                offset += 1 << ((self.max_order - level - 1) as u64);
            }
        }

        self.write_block(i, 0);
        self.update(i, max_level);
        Ok(BuddyAllocation {
            offset: self.o0_size * offset,
            size: aligned,
            index: i,
        })
    }

    fn from_properties(min_alloc: u64, capacity: NonZeroU64) -> Self {
        Self::new(Self::order_of(capacity, min_alloc), min_alloc)
    }
}

impl super::Deallocator for BuddyAllocator {
    fn deallocate(&mut self, alloc: &Self::Allocation) {
        // the deallocation routine is very simple because we have
        // the luxury of storing the index with the allocation
        let order = Self::order_of(alloc.size, self.o0_size);
        self.write_block(alloc.index, order + 1);
        self.update(alloc.index, self.max_order - order);
    }
}

#[cfg(test)]
mod test {
    use crate::Allocator;

    use super::BuddyAllocator;
    use nonzero_ext::nonzero;

    #[test]
    fn empty() {
        let allocator = BuddyAllocator::new(4, 1);
        println!("{:?}", allocator);
        assert_eq!(
            allocator.blocks,
            vec![
                5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1
            ]
            .into_iter()
            .map(|order| super::Block { order })
            .collect::<Vec<_>>()
        )
    }

    #[test]
    fn order() {
        assert_eq!(BuddyAllocator::order_of(nonzero!(1u64), 1), 0);
        assert_eq!(
            BuddyAllocator::order_of(nonzero!(100u64), 1),
            // Needs at least 100 o0 blocks -> 128 -> 2^7
            7
        );
        assert_eq!(
            BuddyAllocator::order_of(nonzero!(50u64), 10),
            // Needs at least 5 o0 blocks -> 8 -> 2^3
            3
        );
    }

    #[test]
    fn allocate_single() {
        // 2
        // 1 2
        // 0 1 1 1
        let mut allocator = BuddyAllocator::new(2, 1);
        allocator.allocate(nonzero!(1u64), nonzero!(1u64)).unwrap();
        println!("{:?}", allocator);
        assert_eq!(
            allocator.blocks,
            vec![2, 1, 2, 0, 1, 1, 1]
                .into_iter()
                .map(|order| super::Block { order })
                .collect::<Vec<_>>()
        )
    }

    #[test]
    fn allocate_small() {
        // 1
        // 0 1
        // 0 0 0 1
        let mut allocator = BuddyAllocator::new(2, 1);
        allocator.allocate(nonzero!(1u64), nonzero!(1u64)).unwrap();
        allocator.allocate(nonzero!(1u64), nonzero!(1u64)).unwrap();
        allocator.allocate(nonzero!(1u64), nonzero!(1u64)).unwrap();
        println!("{:?}", allocator);
        assert_eq!(
            allocator.blocks,
            vec![1, 0, 1, 0, 0, 0, 1]
                .into_iter()
                .map(|order| super::Block { order })
                .collect::<Vec<_>>()
        )
    }

    #[test]
    fn allocate_mixed() {
        // 1
        // 1 0
        // 0 1 1 1
        let mut allocator = BuddyAllocator::new(2, 1);
        allocator.allocate(nonzero!(1u64), nonzero!(1u64)).unwrap();
        allocator.allocate(nonzero!(2u64), nonzero!(1u64)).unwrap();
        println!("{:?}", allocator);
        assert_eq!(
            allocator.blocks,
            vec![1, 1, 0, 0, 1, 1, 1]
                .into_iter()
                .map(|order| super::Block { order })
                .collect::<Vec<_>>()
        )
    }

    #[test]
    fn allocate_aligned() {
        let mut allocator = BuddyAllocator::new(5, 8);
        let a1 = allocator.allocate(nonzero!(1u64), nonzero!(16u64)).unwrap();
        assert_eq!(a1.offset % 16, 0);
        let a2 = allocator
            .allocate(nonzero!(32u64), nonzero!(32u64))
            .unwrap();
        assert_eq!(a2.offset % 32, 0);
        let a3 = allocator
            .allocate(nonzero!(33u64), nonzero!(32u64))
            .unwrap();
        assert_eq!(a3.offset % 32, 0);
        println!("{:?}", allocator);
    }

    // TODO: Test deallocation
}
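
Not part of the commit: a sketch of the deallocation test the trailing TODO asks for, meant to sit inside the `mod test` module above. It assumes the merge behaviour of `update` (freed buddies coalesce back into their parent) and needs `crate::Deallocator` imported alongside the module's existing test imports.

```rust
    use crate::Deallocator;

    #[test]
    fn deallocate_restores_tree() {
        let mut allocator = BuddyAllocator::new(2, 1);
        let a = allocator.allocate(nonzero!(1u64), nonzero!(1u64)).unwrap();
        allocator.deallocate(&a);
        // Freeing the only allocation should merge the freed buddies back together,
        // leaving the block tree exactly as it was when freshly created.
        assert_eq!(allocator.blocks, BuddyAllocator::new(2, 1).blocks);
    }
```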
