Skip to content

Commit 2666f84

Browse files
committed
Add a multiple region example.
1 parent 15e5a84 commit 2666f84

File tree

1 file changed

+183
-0
lines changed

1 file changed

+183
-0
lines changed
Lines changed: 183 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,183 @@
1+
#include "test/setup.h"
2+
3+
#include <iostream>
4+
#include <snmalloc/backend/fixedglobalconfig.h>
5+
#include <snmalloc/backend/standard_range.h>
6+
#include <snmalloc/backend_helpers/backend_helpers.h>
7+
#include <snmalloc/snmalloc.h>
8+
9+
#ifdef assert
10+
# undef assert
11+
#endif
12+
#define assert please_use_SNMALLOC_ASSERT
13+
14+
using namespace snmalloc;
15+
16+
/**
 * An allocator configuration that supports multiple fixed address range
 * regions.  Each Region owns a disjoint address range via its own
 * LocalState, while all regions share a single global pagemap.
 */
19+
template<SNMALLOC_CONCEPT(IsPAL) PAL = DefaultPal>
class MultiRegionConfig final : public CommonConfig
{
public:
  // Pagemap entry type shared by every region.
  using PagemapEntry = DefaultPagemapEntry;

private:
  using ConcretePagemap = FlatPagemap<MIN_CHUNK_BITS, PagemapEntry, PAL, false>;

  // One communal pagemap covers all regions (see LocalState constructor).
  using Pagemap = BasicPagemap<PAL, ConcretePagemap, PagemapEntry, false>;

  // Guards one-time initialisation of the shared pagemap in
  // ensure_pagemap_init().
  static inline FlagWord pagemap_init_lock{};

public:
  /**
   * Per-region allocator state.  Each instance services allocations
   * from the single address range handed to its constructor.
   */
  class LocalState
  {
  public:
    // bits::BITS - 1 sizes the buddy allocator to cover any range that
    // fits in the address space.
    using ObjectRange = Pipe<
      EmptyRange<>,
      LargeBuddyRange<bits::BITS - 1, bits::BITS - 1, Pagemap>,
      SmallBuddyRange>;

    // Dummy impl to keep concept happy.
    using Stats = Pipe<EmptyRange<>, StatsRange>;

  private:
    // Sole source of memory for this region; seeded in the constructor.
    ObjectRange object_range;

    // Initialise the communal pagemap exactly once across all regions.
    // Double-checked locking: cheap unlocked check first, then re-check
    // under pagemap_init_lock before calling init().
    void ensure_pagemap_init()
    {
      auto& pagemap = Pagemap::concretePagemap;
      if (pagemap.is_initialised())
        return;

      FlagLock lock(pagemap_init_lock);

      if (pagemap.is_initialised())
        return;

      pagemap.init();
    }

  public:
    // This should not be called.
    using GlobalMetaRange = EmptyRange<>;

    // Where we get user allocations from.
    ObjectRange* get_object_range()
    {
      return &object_range;
    }

    // Where we get meta-data allocations from.
    ObjectRange& get_meta_range()
    {
      // Use the object range to service meta-data requests.
      return object_range;
    }

    // Takes ownership of [base, base + size): registers it with the
    // shared pagemap and donates it to this region's object range.
    LocalState(void* base, size_t size) : object_range()
    {
      // Ensure the communal pagemap is initialised.
      ensure_pagemap_init();

      // Notify that pagemap requires committed memory for this range.
      Pagemap::register_range(address_cast(base), size);

      // Fill the range owned by this region with memory.
      object_range.dealloc_range(capptr::Arena<void>::unsafe_from(base), size);
    }
  };

  using Backend = BackendAllocator<PAL, PagemapEntry, Pagemap, LocalState>;
  using Pal = PAL;

private:
public:
  // Regions own their LocalState and construct allocators explicitly,
  // so the usual pool/lazy-init machinery is disabled.
  static constexpr Flags Options{
    .CoreAllocOwnsLocalState = false,
    .CoreAllocIsPoolAllocated = false,
    .LocalAllocSupportsLazyInit = false};

  static void register_clean_up() {}
};
103+
104+
// Instantiate the configuration for the default platform abstraction layer.
using CustomConfig = MultiRegionConfig<DefaultPal>;
// Front-end allocator handed to user code.
using FixedAlloc = LocalAllocator<CustomConfig>;
// Core allocator bound to a single region's LocalState (see Region below).
using CoreAlloc = CoreAllocator<CustomConfig>;
107+
108+
/**
 * Bundles one address range with a complete allocator stack: a
 * LocalState owning the range, a core allocator drawing from it, and a
 * local allocator (`alloc`) for user allocations.
 *
 * NOTE: member declaration order is load-bearing.  Members are
 * constructed in declaration order, so `alloc` must be declared first:
 * `core_alloc`'s mem-initialiser below reads `alloc.get_local_cache()`.
 */
class Region
{
public:
  // Public handle callers use to allocate from this region.
  FixedAlloc alloc;

private:
  // Owns [base, base + size) passed to the constructor.
  CustomConfig::LocalState region_state;

  // Constructed after both members above; wired to them in the ctor.
  CoreAlloc core_alloc;

public:
  Region(void* base, size_t size)
  : region_state(base, size),
    core_alloc(&alloc.get_local_cache(), &region_state)
  {
    // Bind the core_alloc into the region local allocator
    alloc.init(&core_alloc);
  }
};
127+
128+
int main()
129+
{
130+
#ifndef SNMALLOC_PASS_THROUGH // Depends on snmalloc specific features
131+
setup();
132+
133+
// 28 is large enough to produce a nested allocator.
134+
// It is also large enough for the example to run in.
135+
// For 1MiB superslabs, SUPERSLAB_BITS + 4 is not big enough for the example.
136+
auto size = bits::one_at_bit(28);
137+
auto base = DefaultPal::reserve(size);
138+
DefaultPal::notify_using<NoZero>(base, size);
139+
auto end = pointer_offset(base, size);
140+
std::cout << "Allocated region " << base << " - "
141+
<< pointer_offset(base, size) << std::endl;
142+
143+
Region r(base, size);
144+
auto& a = r.alloc;
145+
146+
size_t object_size = 128;
147+
size_t count = 0;
148+
size_t i = 0;
149+
while (true)
150+
{
151+
auto r1 = a.alloc(object_size);
152+
count += object_size;
153+
i++;
154+
155+
if (i == 1024)
156+
{
157+
i = 0;
158+
std::cout << ".";
159+
}
160+
// Run until we exhaust the fixed region.
161+
// This should return null.
162+
if (r1 == nullptr)
163+
break;
164+
165+
if (base > r1)
166+
{
167+
std::cout << "Allocated: " << r1 << std::endl;
168+
abort();
169+
}
170+
if (end < r1)
171+
{
172+
std::cout << "Allocated: " << r1 << std::endl;
173+
abort();
174+
}
175+
}
176+
177+
std::cout << "Total allocated: " << count << " out of " << size << std::endl;
178+
std::cout << "Overhead: 1/" << (double)size / (double)(size - count)
179+
<< std::endl;
180+
181+
a.teardown();
182+
#endif
183+
}

0 commit comments

Comments
 (0)