
Commit 077dba8

Add stacked pool benchmark
1 parent a26b734 commit 077dba8


7 files changed: +246, -20 lines

benchmark/benchmark.cpp

Lines changed: 33 additions & 0 deletions
@@ -220,6 +220,39 @@ UMF_BENCHMARK_REGISTER_F(peak_alloc_benchmark, scalable_pool_uniform)
 
 #endif
 
+// stacked benchmarks
+
+UMF_BENCHMARK_TEMPLATE_DEFINE(multiple_malloc_free_benchmark,
+                              disjoint_pool_stack_fix, fixed_alloc_size,
+                              pool_stacked_allocator<os_provider>);
+
+UMF_BENCHMARK_REGISTER_F(multiple_malloc_free_benchmark,
+                         disjoint_pool_stack_fix)
+    ->Apply(&default_multiple_alloc_fix_size)
+    ->Apply(&multithreaded);
+
+UMF_BENCHMARK_TEMPLATE_DEFINE(multiple_malloc_free_benchmark,
+                              disjoint_pool_stack_uniform, uniform_alloc_size,
+                              pool_stacked_allocator<os_provider>);
+UMF_BENCHMARK_REGISTER_F(multiple_malloc_free_benchmark,
+                         disjoint_pool_stack_uniform)
+    ->Apply(&default_multiple_alloc_uniform_size)
+    ->Apply(&multithreaded);
+
+UMF_BENCHMARK_TEMPLATE_DEFINE(peak_alloc_benchmark, disjoint_pool_stack_fix,
+                              fixed_alloc_size,
+                              pool_stacked_allocator<os_provider>);
+UMF_BENCHMARK_REGISTER_F(peak_alloc_benchmark, disjoint_pool_stack_fix)
+    ->Apply(&default_multiple_alloc_fix_size)
+    ->Apply(&multithreaded);
+
+UMF_BENCHMARK_TEMPLATE_DEFINE(peak_alloc_benchmark, disjoint_pool_stack_uniform,
+                              uniform_alloc_size,
+                              pool_stacked_allocator<os_provider>);
+UMF_BENCHMARK_REGISTER_F(peak_alloc_benchmark, disjoint_pool_stack_uniform)
+    ->Apply(&default_multiple_alloc_uniform_size)
+    ->Apply(&multithreaded);
+
 //BENCHMARK_MAIN();
 int main(int argc, char **argv) {
     if (initAffinityMask()) {
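The registrations above follow the same pattern as the existing disjoint_pool benchmarks: a fixture template is instantiated for a size distribution and an allocator, then registered with argument generators via ->Apply(). Below is a minimal sketch of that pattern in plain Google Benchmark terms, assuming UMF_BENCHMARK_TEMPLATE_DEFINE / UMF_BENCHMARK_REGISTER_F are thin wrappers over the fixture macros; the fixture, benchmark body, and argument generator are hypothetical stand-ins, not part of the commit.

#include <cstdlib>

#include <benchmark/benchmark.h>

// Hypothetical stand-in for a UMF benchmark fixture.
class demo_fixture : public ::benchmark::Fixture {};

// Argument generator in the spirit of `multithreaded`: passed to ->Apply(),
// it configures the registration with the thread counts to run.
static void demo_multithreaded(benchmark::internal::Benchmark *b) {
    b->Threads(1)->Threads(4);
}

BENCHMARK_DEFINE_F(demo_fixture, demo_case)(benchmark::State &state) {
    for (auto _ : state) {
        void *p = std::malloc(64);
        benchmark::DoNotOptimize(p);
        std::free(p);
    }
}
BENCHMARK_REGISTER_F(demo_fixture, demo_case)->Apply(&demo_multithreaded);

BENCHMARK_MAIN();

Each generator passed to ->Apply() configures the registration (argument sets, thread counts), so the stacked-pool benchmarks pick up the same size and threading variants as the existing pool benchmarks.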

benchmark/benchmark.hpp

Lines changed: 47 additions & 7 deletions
@@ -73,6 +73,7 @@
 #include <list>
 #include <malloc.h>
 #include <random>
+#include <stdexcept>
 
 #include <benchmark/benchmark.h>
 #include <umf/memory_pool.h>
@@ -209,32 +210,71 @@ class provider_allocator : public allocator_interface {
 // TODO: assert Pool to be a pool_interface<provider_interface>.
 template <typename Pool> class pool_allocator : public allocator_interface {
   public:
-    unsigned SetUp(::benchmark::State &state, unsigned argPos) override {
+    virtual unsigned SetUp(::benchmark::State &state,
+                           unsigned argPos) override {
         pool.SetUp(state);
         return argPos;
     }
 
-    void preBench(::benchmark::State &state) override { pool.preBench(state); }
-    void postBench(::benchmark::State &state) override {
+    virtual void preBench(::benchmark::State &state) override {
+        pool.preBench(state);
+    }
+    virtual void postBench(::benchmark::State &state) override {
         pool.postBench(state);
     }
 
-    void TearDown(::benchmark::State &state) override { pool.TearDown(state); }
+    virtual void TearDown(::benchmark::State &state) override {
+        pool.TearDown(state);
+    }
 
-    void *benchAlloc(size_t size) override {
+    virtual void *benchAlloc(size_t size) override {
         return umfPoolMalloc(pool.pool, size);
     }
 
-    void benchFree(void *ptr, [[maybe_unused]] size_t size) override {
+    virtual void benchFree(void *ptr, [[maybe_unused]] size_t size) override {
         umfPoolFree(pool.pool, ptr);
     }
 
     static std::string name() { return Pool::name(); }
 
-  private:
+  protected:
     Pool pool;
 };
 
+template <typename Provider>
+class pool_stacked_allocator
+    : public pool_allocator<disjoint_pool_stack<Provider>> {
+    using base = pool_allocator<disjoint_pool_stack<Provider>>;
+
+  public:
+    virtual void preBench([[maybe_unused]] ::benchmark::State &state) override {
+        // we do not measure fragmentation for stacked pools
+    }
+    virtual void
+    postBench([[maybe_unused]] ::benchmark::State &state) override {
+        // we do not measure fragmentation for stacked pools
+    }
+    void *benchAlloc(size_t size) override {
+        static thread_local int counter = 0;
+        static auto pool_number = base::pool.pools.size();
+        // stacked pools have limited space, so we might need a few
+        // tries to find one with free space
+        auto retry = pool_number;
+        while (retry--) {
+            void *ptr = umfPoolMalloc(
+                base::pool.pools[(++counter % pool_number)], size);
+            if (ptr != NULL) {
+                return ptr;
+            }
+        }
+        return NULL;
+    }
+
+    void benchFree(void *ptr, [[maybe_unused]] size_t size) override {
+        umfFree(ptr);
+    }
+};
+
 template <typename Size, typename Allocator>
 struct benchmark_interface : public benchmark::Fixture {
     int parseArgs(::benchmark::State &state, int argPos) {
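The benchAlloc override above is the interesting part: because each stacked pool sits on a fixed-size region carved out of its parent, any single pool can run out of space, so the allocator walks the pools round-robin and gives up only after trying each one once. Here is a self-contained sketch of that retry loop, assuming an already-populated std::vector<umf_memory_pool_handle_t>; the function name and vector are illustrative, not part of the commit.

#include <cstddef>
#include <vector>

#include <umf/memory_pool.h>

// Round-robin allocation with retries over fixed-capacity pools, mirroring
// pool_stacked_allocator::benchAlloc.
void *alloc_from_any(const std::vector<umf_memory_pool_handle_t> &pools,
                     size_t size) {
    static thread_local size_t counter = 0;
    size_t retries = pools.size();
    while (retries--) {
        // Advance to the next pool; one full loop visits every pool once.
        void *ptr = umfPoolMalloc(pools[++counter % pools.size()], size);
        if (ptr != nullptr) {
            return ptr; // this pool still has room for the request
        }
    }
    return nullptr; // every pool rejected the request
}

Keeping the counter thread_local avoids contention between benchmark threads while still spreading allocations across the whole pool set.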

benchmark/benchmark_umf.hpp

Lines changed: 134 additions & 0 deletions
@@ -316,6 +316,140 @@ struct disjoint_pool : public pool_interface<Provider> {
         return "disjoint_pool<" + Provider::name() + ">";
     }
 };
+template <typename Provider>
+struct disjoint_pool_stack : public disjoint_pool<Provider> {
+    using base = disjoint_pool<Provider>;
+
+    std::vector<umf_memory_provider_handle_t> providers;
+    std::vector<umf_memory_pool_handle_t> pools;
+    std::vector<void *> pool_ptrs;
+
+    static constexpr size_t firstPoolSize = 2ull * 1024 * 1024 * 1024; // 2GB
+    static constexpr size_t levels = 7;
+
+    void SetUp(::benchmark::State &state) {
+        base::provider.SetUp(state);
+        if (state.thread_index() != 0) {
+            return;
+        }
+
+        providers.push_back(base::provider.provider);
+        base::provider.provider = NULL;
+
+        auto params = base::getParams(state);
+        umf_memory_pool_handle_t rootPool = nullptr;
+        auto umf_result = umfPoolCreate(base::getOps(state), providers[0],
+                                        params.get(), 0, &rootPool);
+        if (umf_result != UMF_RESULT_SUCCESS) {
+            state.SkipWithError("umfPoolCreate() failed");
+            return;
+        }
+
+        pools.push_back(rootPool); // root pool
+
+        umf_fixed_memory_provider_params_handle_t params_fixed;
+        umf_result = umfFixedMemoryProviderParamsCreate(
+            &params_fixed, (void *)0x1, 0x1); // dummy
+
+        size_t poolSize = firstPoolSize;
+        size_t level_start = 0;
+        size_t level_pools = 1;
+
+        for (size_t level = 1; level < levels; ++level) {
+            // split each pool into 3 parts - two for the children and the
+            // third for other allocations from this pool
+            poolSize /= 3;
+            size_t new_level_pools = level_pools * 2;
+
+            for (size_t parent_idx = 0; parent_idx < level_pools;
+                 ++parent_idx) {
+                umf_memory_pool_handle_t parent_pool =
+                    pools[level_start + parent_idx];
+
+                for (int child = 0; child < 2; ++child) {
+                    void *ptr = umfPoolMalloc(parent_pool, poolSize);
+                    if (!ptr) {
+                        state.SkipWithError("umfPoolMalloc() failed");
+                        return;
+                    }
+                    pool_ptrs.push_back(ptr);
+
+                    umf_result = umfFixedMemoryProviderParamsSetMemory(
+                        params_fixed, ptr, poolSize);
+                    umf_memory_provider_handle_t prov;
+                    umf_result = umfMemoryProviderCreate(
+                        umfFixedMemoryProviderOps(), params_fixed, &prov);
+                    if (umf_result != UMF_RESULT_SUCCESS) {
+                        state.SkipWithError("umfMemoryProviderCreate() failed");
+                        return;
+                    }
+                    providers.push_back(prov);
+
+                    umf_memory_pool_handle_t newPool;
+                    umf_result = umfPoolCreate(base::getOps(state), prov,
+                                               params.get(), 0, &newPool);
+                    if (umf_result != UMF_RESULT_SUCCESS) {
+                        state.SkipWithError("umfPoolCreate() failed");
+                        return;
+                    }
+
+                    pools.push_back(newPool);
+                }
+            }
+
+            level_start += level_pools;
+            level_pools = new_level_pools;
+        }
+
+        umfFixedMemoryProviderParamsDestroy(params_fixed);
+    }
+
+    void TearDown(::benchmark::State &state) {
+        if (state.thread_index() != 0) {
+            return;
+        }
+
+        size_t pool_index = pools.size();
+        size_t provider_index = providers.size();
+        size_t ptr_index = pool_ptrs.size();
+
+        // Go from the last level to the first (excluding level 0, the root)
+        for (int level = levels - 1; level > 0; --level) {
+            size_t level_pools = 1ull << level; // 2^level pools
+
+            // Destroy pools
+            for (size_t i = 0; i < level_pools; ++i) {
+                --pool_index;
+                umfPoolDestroy(pools[pool_index]);
+            }
+
+            // Destroy providers and free pointers
+            for (size_t i = 0; i < level_pools; ++i) {
+                --provider_index;
+                umfMemoryProviderDestroy(providers[provider_index]);
+
+                --ptr_index;
+                void *ptr = pool_ptrs[ptr_index];
+                if (ptr) {
+                    umfFree(ptr);
+                }
+            }
+        }
+
+        // Root pool and provider
+        umfPoolDestroy(pools[0]);
+        umfMemoryProviderDestroy(providers[0]);
+
+        pools.clear();
+        providers.clear();
+        pool_ptrs.clear();
+
+        base::TearDown(state);
+    }
+
+    static std::string name() {
+        return "disjoint_pool_stacked<" + Provider::name() + ">";
+    }
+};
 
 #ifdef UMF_POOL_JEMALLOC_ENABLED
 template <typename Provider>
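For scale: the SetUp above builds a complete binary tree of pools. The 2 GB root is level 0, each pool hands a third of its size to each of two children on the next level (keeping the remaining third for its own allocations), and with levels = 7 this yields 1 + 2 + 4 + ... + 64 = 127 pools. A small sketch of that arithmetic, using the constants from the commit:

#include <cstddef>
#include <cstdio>

int main() {
    constexpr size_t levels = 7;
    constexpr size_t firstPoolSize = 2ull * 1024 * 1024 * 1024; // 2 GB root

    size_t poolSize = firstPoolSize;
    size_t level_pools = 1;
    size_t total_pools = 0;
    for (size_t level = 0; level < levels; ++level) {
        total_pools += level_pools;
        std::printf("level %zu: %zu pool(s) of %zu bytes each\n", level,
                    level_pools, poolSize);
        poolSize /= 3;    // each child pool gets a third of its parent
        level_pools *= 2; // every pool spawns two children
    }
    std::printf("total pools: %zu\n", total_pools); // 127
    return 0;
}

At the deepest level each pool is only about 2.9 MB (2 GB divided by 3 six times), which is why pool_stacked_allocator::benchAlloc needs its retry loop over the pool set.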

src/libumf.def

Lines changed: 1 addition & 0 deletions
@@ -133,6 +133,7 @@ EXPORTS
     umfFixedMemoryProviderOps
     umfFixedMemoryProviderParamsCreate
     umfFixedMemoryProviderParamsDestroy
+    umfFixedMemoryProviderParamsSetMemory
     umfLevelZeroMemoryProviderParamsSetFreePolicy
     umfLevelZeroMemoryProviderParamsSetDeviceOrdinal
 ; Added in UMF_0.12

src/libumf.map

Lines changed: 1 addition & 0 deletions
@@ -131,6 +131,7 @@ UMF_0.11 {
     umfFixedMemoryProviderOps;
     umfFixedMemoryProviderParamsCreate;
     umfFixedMemoryProviderParamsDestroy;
+    umfFixedMemoryProviderParamsSetMemory;
     umfLevelZeroMemoryProviderParamsSetFreePolicy;
     umfLevelZeroMemoryProviderParamsSetDeviceOrdinal;
 } UMF_0.10;
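These two export-list entries make umfFixedMemoryProviderParamsSetMemory visible outside the library; the benchmark relies on it to reuse a single params object, retargeting it at a different buffer before creating each fixed-memory provider (see disjoint_pool_stack::SetUp above). A hedged sketch of that call sequence as a standalone helper; the function name is illustrative and the include paths are assumed to be the usual UMF public headers.

#include <stddef.h>

#include <umf/memory_provider.h>
#include <umf/providers/provider_fixed_memory.h> // assumed header location

// Create a fixed-memory provider over an already-allocated buffer, using the
// same calls as disjoint_pool_stack::SetUp.
umf_result_t make_fixed_provider(void *buffer, size_t size,
                                 umf_memory_provider_handle_t *out) {
    umf_fixed_memory_provider_params_handle_t params = NULL;
    umf_result_t res =
        umfFixedMemoryProviderParamsCreate(&params, buffer, size);
    if (res != UMF_RESULT_SUCCESS) {
        return res;
    }
    // The params object can be retargeted at another buffer and reused for
    // further providers before it is destroyed.
    res = umfFixedMemoryProviderParamsSetMemory(params, buffer, size);
    if (res == UMF_RESULT_SUCCESS) {
        res = umfMemoryProviderCreate(umfFixedMemoryProviderOps(), params, out);
    }
    umfFixedMemoryProviderParamsDestroy(params);
    return res;
}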
