forked from rapidsai/rmm
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrandom_allocations.cpp
More file actions
343 lines (288 loc) · 11.7 KB
/
random_allocations.cpp
File metadata and controls
343 lines (288 loc) · 11.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2026, NVIDIA CORPORATION.
* SPDX-License-Identifier: Apache-2.0
*/
#include <rmm/cuda_device.hpp>
#include <rmm/mr/arena_memory_resource.hpp>
#include <rmm/mr/binning_memory_resource.hpp>
#include <rmm/mr/cuda_async_memory_resource.hpp>
#include <rmm/mr/cuda_memory_resource.hpp>
#include <rmm/mr/per_device_resource.hpp>
#include <rmm/mr/pool_memory_resource.hpp>
#include <rmm/resource_ref.hpp>

#include <benchmark/benchmark.h>

#include <benchmarks/utilities/cxxopts.hpp>

#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <map>
#include <random>
#include <string>
#include <utility>
#include <vector>
#define VERBOSE 0
namespace {
constexpr std::size_t size_mb{1 << 20};
// A single outstanding allocation: the returned pointer and its requested size.
struct allocation {
  void* ptr{nullptr};
  std::size_t size{0};
  allocation() = default;
  allocation(void* ptr, std::size_t size) : ptr{ptr}, size{size} {}
};
using allocation_vector = std::vector<allocation>;

// Removes and returns the allocation at `index` in O(1) by swapping it with
// the last element and popping the back (element order is not preserved).
allocation remove_at(allocation_vector& allocs, std::size_t index)
{
  assert(index < allocs.size());
  allocation removed = allocs[index];
  auto const last = allocs.size() - 1;
  if (index != last) { std::swap(allocs[index], allocs[last]); }
  allocs.pop_back();
  return removed;
}
// Exercises a memory resource with a randomized mix of allocations and frees.
//
// Runs 2 * num_allocations iterations. Each iteration draws a size from
// `size_distribution` and then either allocates (with `allocation_probability`%
// chance, while below both the allocation-count target and the usage cap) or
// frees a uniformly random live allocation. An allocation failure
// (rmm::bad_alloc) falls through to the free branch instead of aborting.
//
// @param mr                Memory resource under test.
// @param size_distribution Distribution (invoked with the RNG engine) yielding
//                          allocation sizes in bytes.
// @param num_allocations   Target total number of allocations; the loop runs
//                          twice this many iterations so frees can keep pace.
// @param max_usage         Cap on total outstanding bytes, given in MiB.
// @param stream            Stream on which to allocate and deallocate.
template <typename SizeDistribution>
void random_allocation_free(rmm::device_async_resource_ref mr,
                            SizeDistribution size_distribution,
                            std::size_t num_allocations,
                            std::size_t max_usage,  // in MiB
                            rmm::cuda_stream_view stream = {})
{
  // Default-seeded engine: every run replays the same allocate/free sequence.
  std::default_random_engine generator;
  max_usage *= size_mb;  // convert to bytes
  constexpr int allocation_probability{73};  // percent
  constexpr int max_op_chance{99};
  std::uniform_int_distribution<int> op_distribution(0, max_op_chance);
  std::uniform_int_distribution<std::size_t> index_distribution(0, num_allocations - 1);
  // Running counters: live allocations, total allocations made so far, and
  // total outstanding bytes.
  std::size_t active_allocations{0};
  std::size_t allocation_count{0};
  allocation_vector allocations{};
  std::size_t allocation_size{0};
  for (std::size_t i = 0; i < num_allocations * 2; ++i) {
    bool do_alloc = true;
    auto size = static_cast<std::size_t>(size_distribution(generator));
    if (active_allocations > 0) {
      // Once something is live, choose allocate vs. free probabilistically,
      // but never exceed the count target or the memory-usage cap.
      int chance = op_distribution(generator);
      do_alloc = (chance < allocation_probability) && (allocation_count < num_allocations) &&
                 (allocation_size + size < max_usage);
    }
    void* ptr = nullptr;
    if (do_alloc) {  // try to allocate
      try {
        ptr = mr.allocate(stream, size);
      } catch (rmm::bad_alloc const&) {
        // Out of memory: treat this iteration as a free instead.
        do_alloc = false;
#if VERBOSE
        std::cout << "FAILED to allocate " << size << "\n";
#endif
      }
    }
    if (do_alloc) {  // alloc succeeded
      allocations.emplace_back(ptr, size);
      active_allocations++;
      allocation_count++;
      allocation_size += size;
#if VERBOSE
      std::cout << active_allocations << " | " << allocation_count << " Allocating: " << size
                << " | total: " << allocation_size << "\n";
#endif
    } else {  // dealloc, or alloc failed
      if (active_allocations > 0) {
        // Free a uniformly random live allocation (swap-and-pop via remove_at).
        std::size_t index = index_distribution(generator) % active_allocations;
        active_allocations--;
        allocation to_free = remove_at(allocations, index);
        mr.deallocate(stream, to_free.ptr, to_free.size);
        allocation_size -= to_free.size;
#if VERBOSE
        std::cout << active_allocations << " | " << allocation_count
                  << " Deallocating: " << to_free.size << " at " << index
                  << " | total: " << allocation_size << "\n";
#endif
      }
    }
  }
  // std::cout << "TOTAL ALLOCATIONS: " << allocation_count << "\n";
  // The run is expected to have freed everything it allocated by this point.
  assert(active_allocations == 0);
  assert(allocations.size() == 0);
}
} // namespace
// Runs the random allocate/free workload with allocation sizes drawn uniformly
// from [1, max_allocation_size MiB].
void uniform_random_allocations(
  rmm::device_async_resource_ref mr,
  std::size_t num_allocations,  // NOLINT(bugprone-easily-swappable-parameters)
  std::size_t max_allocation_size,  // size in MiB
  std::size_t max_usage,
  rmm::cuda_stream_view stream = {})
{
  auto const upper_bound_bytes = max_allocation_size * size_mb;
  std::uniform_int_distribution<std::size_t> size_distribution(1, upper_bound_bytes);
  random_allocation_free(mr, size_distribution, num_allocations, max_usage, stream);
}
// TODO: map a normal distribution onto integers in [1, max_allocation_size].
// Note std::normal_distribution requires a floating-point result type, so the
// sketch below is ill-formed as written; sizes would need to be sampled as
// double, then rounded and clamped to the valid range.
/*void normal_random_allocations(rmm::device_async_resource_ref mr,
std::size_t num_allocations = 1000,
std::size_t mean_allocation_size = 500, // in MiB
std::size_t stddev_allocation_size = 500, // in MiB
std::size_t max_usage = 8 << 20,
cuda_stream_view stream) {
std::normal_distribution<std::size_t> size_distribution(, max_allocation_size * size_mb);
}*/
/// MR factory functions
// Type-erased owning handle for any device-accessible memory resource.
using any_device_resource = cuda::mr::any_resource<cuda::mr::device_accessible>;
// Plain cudaMalloc/cudaFree-backed resource.
inline any_device_resource make_cuda() { return rmm::mr::cuda_memory_resource{}; }
// cudaMallocAsync-backed resource (stream-ordered allocator).
inline any_device_resource make_cuda_async() { return rmm::mr::cuda_async_memory_resource{}; }
// Creates a pool resource sized to 50% of currently free device memory.
//
// Uses the (static-lifetime) current device resource as upstream rather than a
// function-local cuda_memory_resource: the pool stores only a reference to its
// upstream, so referencing a local here would dangle once this function
// returns. This also matches make_arena below.
inline any_device_resource make_pool()
{
  return rmm::mr::pool_memory_resource{rmm::mr::get_current_device_resource_ref(),
                                       rmm::percent_of_free_device_memory(50)};
}
// Creates an arena resource over (nearly) all currently free device memory.
inline any_device_resource make_arena()
{
  auto const free = rmm::available_device_memory().first;
  constexpr auto reserve{64UL << 20};  // Leave some space for CUDA overhead.
  // Guard against unsigned underflow of `free - reserve` when less than 64MiB
  // is free; in that case just hand the arena whatever is available.
  auto const arena_size = (free > reserve) ? free - reserve : free;
  return rmm::mr::arena_memory_resource{rmm::mr::get_current_device_resource_ref(), arena_size};
}
// Creates a binning resource: fixed-size bins of 2^18..2^22 bytes
// (256KiB..4MiB), with larger allocations falling through to a pool resource.
inline any_device_resource make_binning()
{
  // Add a binning_memory_resource with fixed-size bins of sizes 256, 512, 1024, 2048 and 4096KiB
  // Larger allocations will use the pool resource
  constexpr auto min_bin_pow2{18};
  constexpr auto max_bin_pow2{22};
  auto pool = make_pool();
  // NOTE(review): `pool` is a function-local. If binning_memory_resource holds
  // only a reference to its upstream, the resource returned here dangles once
  // this function returns — verify the ownership semantics of
  // binning_memory_resource / cuda::mr::any_resource for this constructor.
  return rmm::mr::binning_memory_resource{pool, min_bin_pow2, max_bin_pow2};
}
using MRFactoryFunc = std::function<any_device_resource()>;
constexpr std::size_t max_usage = 16000;
// Google Benchmark driver: repeatedly runs the uniform random allocation
// workload against the resource produced by `factory`.
//
// Args: state.range(0) = number of allocations, state.range(1) = max
// allocation size in MiB.
static void BM_RandomAllocations(benchmark::State& state, MRFactoryFunc const& factory)
{
  auto mr = factory();
  std::size_t num_allocations = static_cast<std::size_t>(state.range(0));
  std::size_t max_size = static_cast<std::size_t>(state.range(1));
  try {
    for (auto _ : state) {  // NOLINT(clang-analyzer-deadcode.DeadStores)
      uniform_random_allocations(mr, num_allocations, max_size, max_usage);
    }
  } catch (std::exception const& e) {
    // Mark the benchmark as failed rather than printing and reporting
    // misleading timings for a partially-completed run.
    state.SkipWithError(e.what());
  }
}
// Registers one benchmark argument pair per allocation count, with a fixed
// maximum allocation size.
static void num_range(benchmark::internal::Benchmark* bench, int size)
{
  static constexpr std::array<int, 3> counts{1000, 10000, 100000};
  for (auto const count : counts) {
    bench->Args({count, size})->Unit(benchmark::kMillisecond);
  }
}
// Registers one benchmark argument pair per maximum allocation size (MiB),
// with a fixed allocation count.
static void size_range(benchmark::internal::Benchmark* bench, int num)
{
  static constexpr std::array<int, 6> sizes{1, 4, 64, 256, 1024, 4096};
  for (auto const size : sizes) {
    bench->Args({num, size})->Unit(benchmark::kMillisecond);
  }
}
// Registers the full cross product of allocation counts and maximum sizes.
static void num_size_range(benchmark::internal::Benchmark* bench)
{
  static constexpr std::array<int, 3> counts{1000, 10000, 100000};
  for (auto const count : counts) {
    size_range(bench, count);
  }
}
int num_allocations = -1; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)
int max_size = -1; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)
// Chooses the benchmark argument space from the command-line globals:
// a fixed pair, a sweep over one dimension, or the full cross product.
void benchmark_range(benchmark::internal::Benchmark* bench)
{
  bool const fixed_num = num_allocations > 0;
  bool const fixed_size = max_size > 0;
  if (fixed_num && fixed_size) {
    bench->Args({num_allocations, max_size})->Unit(benchmark::kMillisecond);
  } else if (fixed_num) {
    size_range(bench, num_allocations);
  } else if (fixed_size) {
    num_range(bench, max_size);
  } else {
    num_size_range(bench);
  }
}
// Registers the benchmark for the memory resource named `name` ("cuda",
// "cuda_async", "binning", "pool" or "arena"); prints an error for any other
// name. Registration must use the BENCHMARK_CAPTURE macro per resource, hence
// the if/else chain rather than a lookup table.
void declare_benchmark(std::string const& name)
{
  if (name == "cuda") {
    BENCHMARK_CAPTURE(BM_RandomAllocations, cuda_mr, &make_cuda)  // NOLINT
      ->Apply(benchmark_range);
  } else if (name == "cuda_async") {
    BENCHMARK_CAPTURE(BM_RandomAllocations, cuda_async_mr, &make_cuda_async)  // NOLINT
      ->Apply(benchmark_range);
  } else if (name == "binning") {
    BENCHMARK_CAPTURE(BM_RandomAllocations, binning_mr, &make_binning)  // NOLINT
      ->Apply(benchmark_range);
  } else if (name == "pool") {
    BENCHMARK_CAPTURE(BM_RandomAllocations, pool_mr, &make_pool)  // NOLINT
      ->Apply(benchmark_range);
  } else if (name == "arena") {
    BENCHMARK_CAPTURE(BM_RandomAllocations, arena_mr, &make_arena)  // NOLINT
      ->Apply(benchmark_range);
  } else {
    std::cout << "Error: invalid memory_resource name: " << name << "\n";
  }
}
static void profile_random_allocations(MRFactoryFunc const& factory,
std::size_t num_allocations,
std::size_t max_size)
{
auto mr = factory();
try {
uniform_random_allocations(mr, num_allocations, max_size, max_usage);
} catch (std::exception const& e) {
std::cout << "Error: " << e.what() << "\n";
}
}
// Entry point: parses Google Benchmark flags plus this binary's own options,
// then either runs the workload once ("--profile" mode) or registers and runs
// the requested benchmarks.
int main(int argc, char** argv)
{
  try {
    // benchmark::Initialize will remove GBench command line arguments it
    // recognizes and leave any remaining arguments
    ::benchmark::Initialize(&argc, argv);
    // Parse for replay arguments:
    cxxopts::Options options("RMM Random Allocations Benchmark",
                             "Benchmarks random allocations within a size range.");
    options.add_options()(
      "p,profile", "Profiling mode: run once", cxxopts::value<bool>()->default_value("false"));
    options.add_options()("r,resource",
                          "Type of memory resource",
                          cxxopts::value<std::string>()->default_value("pool"));
    // NOTE(review): the help text below says "default of 0 tests a range" but
    // the cxxopts defaults are 1000 / 4096, and range mode is actually selected
    // by omitting the option (see the count() checks further down) — confirm
    // which is intended.
    options.add_options()("n,numallocs",
                          "Number of allocations (default of 0 tests a range)",
                          cxxopts::value<int>()->default_value("1000"));
    options.add_options()("m,maxsize",
                          "Maximum allocation size (default of 0 tests a range)",
                          cxxopts::value<int>()->default_value("4096"));
    auto args = options.parse(argc, argv);
    num_allocations = args["numallocs"].as<int>();
    max_size = args["maxsize"].as<int>();
    if (args.count("profile") > 0) {
      // Profiling mode: run the pattern once against a single resource.
      std::map<std::string, MRFactoryFunc> const funcs({{"arena", &make_arena},
                                                        {"binning", &make_binning},
                                                        {"cuda", &make_cuda},
                                                        {"cuda_async", &make_cuda_async},
                                                        {"pool", &make_pool}});
      auto resource = args["resource"].as<std::string>();
      std::cout << "Profiling " << resource << " with " << num_allocations << " allocations of max "
                << max_size << "B\n";
      profile_random_allocations(funcs.at(resource),
                                 static_cast<std::size_t>(num_allocations),
                                 static_cast<std::size_t>(max_size));
      std::cout << "Finished\n";
    } else {
      // Benchmark mode: an option not given on the command line resets its
      // global to -1, which makes benchmark_range sweep that dimension.
      if (args.count("numallocs") == 0) {
        num_allocations = -1;
      }
      if (args.count("maxsize") == 0) {
        max_size = -1;
      }
      if (args.count("resource") > 0) {
        std::string mr_name = args["resource"].as<std::string>();
        declare_benchmark(mr_name);
      } else {
        // No resource specified: benchmark all of them.
        std::vector<std::string> mrs{"pool", "binning", "arena", "cuda_async", "cuda"};
        std::for_each(
          std::cbegin(mrs), std::cend(mrs), [](auto const& mr) { declare_benchmark(mr); });
      }
      ::benchmark::RunSpecifiedBenchmarks();
    }
  } catch (std::exception const& e) {
    std::cout << "Exception caught: " << e.what() << std::endl;
  }
  return 0;
}