forked from rapidsai/rmm
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathreplay.cpp
More file actions
435 lines (383 loc) · 15.6 KB
/
replay.cpp
File metadata and controls
435 lines (383 loc) · 15.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2026, NVIDIA CORPORATION.
* SPDX-License-Identifier: Apache-2.0
*/
#include <rmm/aligned.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/logger.hpp>
#include <rmm/mr/arena_memory_resource.hpp>
#include <rmm/mr/binning_memory_resource.hpp>
#include <rmm/mr/cuda_memory_resource.hpp>
#include <rmm/mr/managed_memory_resource.hpp>
#include <rmm/mr/per_device_resource.hpp>
#include <rmm/mr/pool_memory_resource.hpp>
#include <rmm/resource_ref.hpp>

#include <cuda/iterator>

#include <thrust/execution_policy.h>
#include <thrust/reduce.h>

#include <benchmark/benchmark.h>

#include <benchmarks/utilities/cxxopts.hpp>
#include <benchmarks/utilities/log_parser.hpp>
#include <benchmarks/utilities/simulated_memory_resource.hpp>

#include <algorithm>
#include <array>
#include <atomic>
#include <barrier>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <iostream>
#include <iterator>
#include <memory>
#include <mutex>
#include <numeric>
#include <optional>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>
// Type-erased owning handle to any device-accessible memory resource;
// lets every factory below return a different concrete MR type.
using any_device_resource = cuda::mr::any_resource<cuda::mr::device_accessible>;
/// MR factory functions
// Factory for a plain (unpooled) CUDA device memory resource.
// The size parameter is ignored; it exists only so every factory matches
// the common MRFactoryFunc signature.
any_device_resource make_cuda(std::size_t = 0)
{
  return rmm::mr::cuda_memory_resource{};
}
// Factory for a CUDA managed (unified) memory resource.
// The size parameter is ignored; it exists only so every factory matches
// the common MRFactoryFunc signature.
any_device_resource make_managed(std::size_t = 0)
{
  return rmm::mr::managed_memory_resource{};
}
// Factory for a pool memory resource. When `simulated_size` is non-zero the
// pool sits on a simulated upstream and both its initial and maximum size are
// pinned to the simulated capacity; otherwise it grows on demand (initial
// size 0) from a plain CUDA resource.
inline any_device_resource make_pool(std::size_t simulated_size)
{
  if (simulated_size > 0) {
    rmm::mr::simulated_memory_resource sim{simulated_size};
    // NOTE(review): `sim` is a local. Confirm that pool_memory_resource /
    // any_resource copies or takes ownership of the upstream rather than
    // holding a reference — otherwise the returned pool dangles.
    return rmm::mr::pool_memory_resource{sim, simulated_size, simulated_size};
  }
  // NOTE(review): same upstream-lifetime question applies to `cuda` below.
  rmm::mr::cuda_memory_resource cuda{};
  return rmm::mr::pool_memory_resource{cuda, 0};
}
// Factory for an arena memory resource layered on the current device resource.
// A non-zero `simulated_size` caps the arena at that many bytes; zero uses the
// arena's default sizing.
inline any_device_resource make_arena(std::size_t simulated_size)
{
  auto upstream = rmm::mr::get_current_device_resource_ref();
  if (simulated_size == 0) { return rmm::mr::arena_memory_resource{upstream}; }
  return rmm::mr::arena_memory_resource{upstream, simulated_size};
}
// Factory for a binning memory resource on top of a pool (which may itself be
// simulated, see make_pool). Registers fixed-size bins for every power of two
// from 2^18 (256 KiB) through 2^22 (4 MiB).
inline any_device_resource make_binning(std::size_t simulated_size)
{
  auto pool = make_pool(simulated_size);
  auto mr   = rmm::mr::binning_memory_resource{pool};
  for (std::size_t exponent = 18; exponent <= 22; ++exponent) {
    mr.add_bin(1 << exponent);
  }
  return mr;
}
// Common signature of all MR factories above: takes the simulated GPU memory
// size in bytes (0 means "use the real GPU") and returns a type-erased MR.
using MRFactoryFunc = std::function<any_device_resource(std::size_t)>;
/**
* @brief Represents an allocation made during the replay
*
*/
struct allocation {
  void* ptr{};         ///< Pointer returned by the allocator (nullptr when empty)
  std::size_t size{};  ///< Size of the allocation in bytes (0 when empty)

  /// Empty allocation: null pointer, zero size.
  allocation() = default;
  /// Record an allocation of `size` bytes at `ptr`.
  allocation(void* ptr, std::size_t size) : ptr{ptr}, size{size} {}
};
/**
* @brief Function object for running a replay benchmark with the specified
* memory resource.
*
* @tparam MR The type of the memory resource to use for allocation
* replay
*/
struct replay_benchmark {
  MRFactoryFunc factory_;       // factory used by SetUp() to (re)create the MR under test
  std::size_t simulated_size_;  // simulated GPU memory size in bytes (0 = real GPU)
  std::optional<any_device_resource> mr_{};  // created in SetUp() by thread 0; destroyed in TearDown()
  std::vector<std::vector<rmm::detail::event>> const& events_{};  // one event vector per replay thread
  // Maps a pointer from the event log to an active allocation
  std::unordered_map<uintptr_t, allocation> allocation_map;
  std::condition_variable cv;  // to ensure in-order playback
  std::mutex event_mutex;      // to make event_index and allocation_map thread-safe
  std::size_t event_index{0};  // playback index
  std::barrier<> barrier_;     // barrier to sequence resetting of event_index
  /**
   * @brief Construct a `replay_benchmark` from a list of events and
   * set of arguments forwarded to the MR constructor.
   *
   * @param factory A factory function to create the memory resource
   * @param simulated_size Simulated GPU memory size in bytes (0 disables simulation)
   * @param events The set of allocation events to replay, one vector per thread
   */
  replay_benchmark(MRFactoryFunc factory,
                   std::size_t simulated_size,
                   std::vector<std::vector<rmm::detail::event>> const& events)
    : factory_{std::move(factory)},
      simulated_size_{simulated_size},
      events_{events},
      allocation_map{events.size()},  // size is a bucket-count hint only
      barrier_{static_cast<std::ptrdiff_t>(events_.size())}  // one participant per replay thread
  {
  }
  /**
   * @brief Move construct a replay_benchmark (needed by RegisterBenchmark)
   *
   * Does not copy the mutex or the map
   */
  replay_benchmark(replay_benchmark&& other) noexcept
    : factory_{std::move(other.factory_)},
      simulated_size_{other.simulated_size_},
      mr_{std::move(other.mr_)},
      events_{other.events_},
      allocation_map{std::move(other.allocation_map)},
      barrier_{static_cast<std::ptrdiff_t>(events_.size())}
  {
  }
  ~replay_benchmark() = default;
  // Copying is meaningless (mutex/cv are not copyable); move-assign is unneeded.
  replay_benchmark(replay_benchmark const&) = delete;
  replay_benchmark& operator=(replay_benchmark const&) = delete;
  replay_benchmark& operator=(replay_benchmark&& other) noexcept = delete;
  /// Add an allocation to the map (NOT thread safe — callers in operator()
  /// hold event_mutex; TearDown runs single-threaded)
  void set_allocation(uintptr_t ptr, allocation alloc) { allocation_map.insert({ptr, alloc}); }
  /// Remove an allocation from the map (NOT thread safe — see set_allocation).
  /// Returns an empty allocation{} if `ptr` was never recorded.
  allocation remove_allocation(uintptr_t ptr)
  {
    auto iter = allocation_map.find(ptr);
    if (iter != allocation_map.end()) {
      allocation alloc = iter->second;
      allocation_map.erase(iter);
      return alloc;
    }
    return allocation{};
  }
  /// Create the memory resource shared by all threads before the benchmark runs
  void SetUp(const ::benchmark::State& state)
  {
    if (state.thread_index() == 0) {
      RMM_LOG_INFO("------ Start of Benchmark -----");
      mr_.emplace(factory_(simulated_size_));
    }
    // Can't release threads until MR is set up.
    barrier_.arrive_and_wait();
  }
  /// Destroy the memory resource and count any unallocated memory
  void TearDown(const ::benchmark::State& state)
  {
    // Can't tear down the MR until every thread is done.
    barrier_.arrive_and_wait();
    if (state.thread_index() == 0) {
      RMM_LOG_INFO("------ End of Benchmark -----");
      // clean up any leaked allocations
      std::size_t total_leaked{0};
      std::size_t num_leaked{0};
      for (auto const& ptr_alloc : allocation_map) {
        auto alloc = ptr_alloc.second;
        num_leaked++;
        total_leaked += alloc.size;
        mr_->deallocate_sync(alloc.ptr, alloc.size, rmm::CUDA_ALLOCATION_ALIGNMENT);
      }
      if (num_leaked > 0) {
        std::cout << "LOG shows leak of " << num_leaked << " allocations of " << total_leaked
                  << " total bytes\n";
      }
      allocation_map.clear();
      mr_.reset();
    }
  }
  /// Run the replay benchmark. Events carry a global sequence number
  /// (event.index); threads use event_index + cv to replay them in exactly
  /// that global order, each thread handling only its own events.
  void operator()(::benchmark::State& state)
  {
    SetUp(state);
    auto const& my_events = events_.at(static_cast<std::size_t>(state.thread_index()));
    for (auto _ : state) {  // NOLINT(clang-analyzer-deadcode.DeadStores)
      // At start of each iteration event_index must be reset.
      // Any thread could do this, but this is easy
      if (state.thread_index() == 0) { event_index = 0; }
      // And everyone waits for the reset.
      barrier_.arrive_and_wait();
      std::for_each(my_events.begin(), my_events.end(), [this](auto event) {
        // ensure correct ordering between threads
        std::unique_lock<std::mutex> lock{event_mutex};
        if (event_index != event.index) {
          cv.wait(lock, [&]() { return event_index == event.index; });
        }
        // rmm::detail::action::ALLOCATE_FAILURE is ignored.
        if (rmm::detail::action::ALLOCATE == event.act) {
          auto ptr = mr_->allocate_sync(event.size, rmm::CUDA_ALLOCATION_ALIGNMENT);
          set_allocation(event.pointer, allocation{ptr, event.size});
        } else if (rmm::detail::action::FREE == event.act) {
          auto alloc = remove_allocation(event.pointer);
          // NOTE(review): frees with the logged size (event.size), not
          // alloc.size — these should agree for a well-formed log; confirm.
          mr_->deallocate_sync(alloc.ptr, event.size, rmm::CUDA_ALLOCATION_ALIGNMENT);
        }
        event_index++;
        cv.notify_all();
      });
      // Everyone waits to be done (so that the reset of the next
      // iteration doesn't proceed until we're finished)
      barrier_.arrive_and_wait();
    }
    TearDown(state);
  }
};
/**
* @brief Processes a log file into a set of per-thread vectors of events
*
* @param filename Name of log file
* @return A vector of events for each thread in the log
*/
std::vector<std::vector<rmm::detail::event>> parse_per_thread_events(std::string const& filename)
{
  using rmm::detail::event;
  std::vector<event> all_events = rmm::detail::parse_csv(filename);

  // Replay only supports the default / per-thread-default stream; reject logs
  // that used any other stream. (Lambdas take `event const&` — the previous
  // by-value parameters copied every event per check/comparison.)
  RMM_EXPECTS(std::all_of(all_events.begin(),
                          all_events.end(),
                          [](event const& evt) {
                            cudaStream_t custream;
                            memcpy(&custream, &evt.stream, sizeof(cudaStream_t));
                            auto stream = rmm::cuda_stream_view{custream};
                            return stream.is_default() or stream.is_per_thread_default();
                          }),
              "Non-default streams not currently supported.");

  // Sort events by thread id (stable, so each thread's events keep log order)
  std::stable_sort(all_events.begin(), all_events.end(), [](event const& lhs, event const& rhs) {
    return lhs.thread_id < rhs.thread_id;
  });

  // Count the number of events per thread. After the stable sort all events
  // with equal thread_id are consecutive, so a single run-length pass suffices
  // (replaces a host-side thrust::reduce_by_key over constant/discard
  // iterators, which did the same thing with far more machinery).
  std::vector<std::size_t> events_per_thread{};
  for (std::size_t pos = 0; pos < all_events.size();) {
    std::size_t run_length{1};
    while (pos + run_length < all_events.size() &&
           all_events[pos + run_length].thread_id == all_events[pos].thread_id) {
      ++run_length;
    }
    events_per_thread.push_back(run_length);
    pos += run_length;
  }
  auto const num_threads = events_per_thread.size();

  // Copy each thread's events into its own vector
  std::vector<std::vector<event>> per_thread_events(num_threads);
  std::transform(events_per_thread.begin(),
                 events_per_thread.end(),
                 per_thread_events.begin(),
                 [&all_events, offset = std::ptrdiff_t{0}](auto num_events) mutable {
                   auto begin = offset;
                   offset += static_cast<std::ptrdiff_t>(num_events);
                   auto end = offset;
                   std::vector<event> thread_events(all_events.cbegin() + begin,
                                                    all_events.cbegin() + end);
                   // sort into original (global sequence) order
                   std::sort(thread_events.begin(),
                             thread_events.end(),
                             [](event const& lhs, event const& rhs) { return lhs.index < rhs.index; });
                   return thread_events;
                 });
  return per_thread_events;
}
void declare_benchmark(std::string const& name,
std::size_t simulated_size,
std::vector<std::vector<rmm::detail::event>> const& per_thread_events,
std::size_t num_threads)
{
if (name == "cuda") {
benchmark::RegisterBenchmark("CUDA Resource",
replay_benchmark(&make_cuda, simulated_size, per_thread_events))
->Unit(benchmark::kMillisecond)
->Threads(static_cast<int>(num_threads));
} else if (name == "binning") {
benchmark::RegisterBenchmark("Binning Resource",
replay_benchmark(&make_binning, simulated_size, per_thread_events))
->Unit(benchmark::kMillisecond)
->Threads(static_cast<int>(num_threads));
} else if (name == "pool") {
benchmark::RegisterBenchmark("Pool Resource",
replay_benchmark(&make_pool, simulated_size, per_thread_events))
->Unit(benchmark::kMillisecond)
->Threads(static_cast<int>(num_threads));
} else if (name == "arena") {
benchmark::RegisterBenchmark("Arena Resource",
replay_benchmark(&make_arena, simulated_size, per_thread_events))
->Unit(benchmark::kMillisecond)
->Threads(static_cast<int>(num_threads));
} else if (name == "managed") {
benchmark::RegisterBenchmark("Managed Resource",
replay_benchmark(&make_managed, simulated_size, per_thread_events))
->Unit(benchmark::kMillisecond)
->Threads(static_cast<int>(num_threads));
} else {
std::cout << "Error: invalid memory_resource name: " << name << "\n";
}
}
// Usage: REPLAY_BENCHMARK -f "path/to/log/file"
int main(int argc, char** argv)
{
  try {
    // benchmark::Initialize will remove GBench command line arguments it
    // recognizes and leave any remaining arguments
    ::benchmark::Initialize(&argc, argv);

    // Parse for replay arguments:
    auto args = [&argc, &argv]() {
      cxxopts::Options options(
        "RMM Replay Benchmark",
        "Replays and benchmarks allocation activity captured from RMM logging.");
      options.add_options()("f,file", "Name of RMM log file.", cxxopts::value<std::string>());
      options.add_options()("r,resource",
                            "Type of memory resource",
                            cxxopts::value<std::string>()->default_value("pool"));
      options.add_options()(
        "s,size",
        "Size of simulated GPU memory in GiB. Not supported for the cuda memory "
        "resource.",
        cxxopts::value<float>()->default_value("0"));
      options.add_options()("v,verbose",
                            "Enable verbose printing of log events",
                            cxxopts::value<bool>()->default_value("false"));
      auto args = options.parse(argc, argv);
      // The log file is mandatory: print usage and exit cleanly if absent.
      if (args.count("file") == 0) {
        std::cout << options.help() << std::endl;
        exit(0);
      }
      return args;
    }();

    auto filename = args["file"].as<std::string>();
    // Parse failures are reported but non-fatal: replay proceeds with an
    // empty event set (registering zero-work benchmarks).
    auto per_thread_events = [filename]() {
      try {
        auto events = parse_per_thread_events(filename);
        return events;
      } catch (std::exception const& e) {
        std::cout << "Failed to parse events: " << e.what() << std::endl;
        return std::vector<std::vector<rmm::detail::event>>{};
      }
    }();

#ifdef CUDA_API_PER_THREAD_DEFAULT_STREAM
    std::cout << "Using CUDA per-thread default stream.\n";
#endif

    // Convert GiB (float, from the command line) to bytes.
    auto const simulated_size =
      static_cast<std::size_t>(args["size"].as<float>() * static_cast<float>(1U << 30U));
    if (simulated_size != 0 && args["resource"].as<std::string>() != "cuda") {
      std::cout << "Simulating GPU with memory size of " << simulated_size << " bytes.\n";
    }

    // Seed std::accumulate with std::size_t{0}: the previous literal `0` made
    // the accumulator an int, which could overflow on very large logs.
    std::cout << "Total Events: "
              << std::accumulate(
                   per_thread_events.begin(),
                   per_thread_events.end(),
                   std::size_t{0},
                   [](std::size_t accum, auto const& events) { return accum + events.size(); })
              << std::endl;

    for (std::size_t thread = 0; thread < per_thread_events.size(); ++thread) {
      std::cout << "Thread " << thread << ": " << per_thread_events[thread].size() << " events\n";
      if (args["verbose"].as<bool>()) {
        for (auto const& event : per_thread_events[thread]) {
          std::cout << event << std::endl;
        }
      }
    }

    auto const num_threads = per_thread_events.size();

    // Uncomment to enable / change default log level
    // rmm::logger().set_level(rapids_logger::level_enum::trace);

    if (args.count("resource") > 0) {
      // Resource named explicitly on the command line: benchmark only that one.
      std::string mr_name = args["resource"].as<std::string>();
      declare_benchmark(mr_name, simulated_size, per_thread_events, num_threads);
    } else {
      // No explicit resource: register a benchmark for each supported one.
      std::array<std::string, 5> mrs{"pool", "arena", "binning", "cuda", "managed"};
      std::for_each(std::cbegin(mrs),
                    std::cend(mrs),
                    [&simulated_size, &per_thread_events, &num_threads](auto const& mr) {
                      declare_benchmark(mr, simulated_size, per_thread_events, num_threads);
                    });
    }

    ::benchmark::RunSpecifiedBenchmarks();
  } catch (std::exception const& e) {
    std::cout << "Exception caught: " << e.what() << std::endl;
  }
  return 0;
}