// Copyright 2025, DragonflyDB authors. All rights reserved.
//
// See LICENSE for licensing terms.
//

#include "facade/disk_connection_backpressure.h"

#include <absl/strings/str_cat.h>

#include <string>

#include "base/flags.h"
#include "base/logging.h"
#include "facade/facade_types.h"
#include "io/io.h"
#include "util/fibers/uring_file.h"

using facade::operator""_MB;

ABSL_FLAG(std::string, disk_backpressure_folder, "/tmp/",
          "Folder in which disk-backed connection backpressure files are stored.");

ABSL_FLAG(size_t, disk_backpressure_file_max_bytes, 50_MB,
          "Maximum size of the backing file. Once this limit is reached, the connection "
          "stops offloading backpressure to disk and blocks on client reads instead.");

ABSL_FLAG(size_t, disk_backpressure_load_size, 30,
          "Maximum number of items loaded from the disk-backed file into the dispatch "
          "queue per call.");

namespace facade {

DiskBackedBackpressureQueue::DiskBackedBackpressureQueue(uint32_t conn_id)
    : max_backing_size_(absl::GetFlag(FLAGS_disk_backpressure_file_max_bytes)),
      max_queue_load_size_(absl::GetFlag(FLAGS_disk_backpressure_load_size)),
      id_(conn_id) {
}

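// Creates the backing file and opens separate write and read handles to it.
// Init() must succeed before any other method is called.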
std::error_code DiskBackedBackpressureQueue::Init() {
  std::string backing_name = absl::StrCat(absl::GetFlag(FLAGS_disk_backpressure_folder), id_);
  {
    // Kernel transparently handles buffering via the page cache.
    auto res = util::fb2::OpenWrite(backing_name, {} /* overwrite mode + non direct io */);
    if (!res) {
      return res.error();
    }
    writer_.reset(*res);
  }

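  // Open a second, read-only handle on the same file. Reads below are positional
  // (offset-based), so they do not disturb the writer's append position.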
  auto res = util::fb2::OpenRead(backing_name);
  if (!res) {
    return res.error();
  }
  reader_.reset(*res);

  VLOG(3) << "Created backing for connection " << this << " " << backing_name;

  return {};
}

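// Best-effort cleanup: Close() errors are logged and otherwise ignored. Handles
// may be null if Init() was never called or did not succeed.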
DiskBackedBackpressureQueue::~DiskBackedBackpressureQueue() {
  if (writer_) {
    auto ec = writer_->Close();
    LOG_IF(WARNING, ec) << ec.message();
  }
  if (reader_) {
    auto ec = reader_->Close();
    LOG_IF(WARNING, ec) << ec.message();
  }
}

// Returns true if the backing file holds no queued bytes.
bool DiskBackedBackpressureQueue::Empty() const {
  return total_backing_bytes_ == 0;
}

bool DiskBackedBackpressureQueue::HasEnoughBackingSpaceFor(size_t bytes) const {
  return (bytes + total_backing_bytes_) < max_backing_size_;
}

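// Memory overhead of the in-RAM offset index; the queued payloads themselves
// live only in the backing file.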
size_t DiskBackedBackpressureQueue::TotalInMemoryBytes() const {
  return offsets_.size() * sizeof(ItemOffset);
}

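// Appends `blob` to the backing file and records its (offset, length) in the
// in-memory index so it can later be replayed in FIFO order. The file acts as
// an append-only log; see the TODO below about reclaiming space via truncation.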
void DiskBackedBackpressureQueue::OffloadToBacking(std::string_view blob) {
  ItemOffset item;
  item.offset = next_offset_;
  item.total_bytes = blob.size();

  // TODO: truncate the file as entries are consumed so it does not grow
  // unbounded on disk.
  auto ec = writer_->Write(blob);
  if (ec) {
    LOG(ERROR) << "Failed to offload connection " << this << " backpressure with offset "
               << item.offset << " of size " << item.total_bytes
               << " to backing with error: " << ec;
    return;
  }

  total_backing_bytes_ += blob.size();
  offsets_.push_back(item);
  next_offset_ += item.total_bytes;

  VLOG(2) << "Offloaded connection " << this << " backpressure of " << item.total_bytes
          << " bytes to disk at offset " << item.offset;
  VLOG(3) << "Command offloaded: " << blob;
}

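// Pops up to max_queue_load_size_ items from the head of the on-disk queue and
// invokes `f` on each payload in FIFO order. `bytes` points into a buffer that
// is reused on the next iteration, so `f` must consume the data synchronously.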
void DiskBackedBackpressureQueue::LoadFromDiskToQueue(std::function<void(io::MutableBytes)> f) {
  std::string buffer;
  size_t up_to = max_queue_load_size_;

  while (!offsets_.empty() && up_to--) {
    ItemOffset item = offsets_.front();

    buffer.resize(item.total_bytes);

    io::MutableBytes bytes{reinterpret_cast<uint8_t*>(buffer.data()), item.total_bytes};
    auto result = reader_->Read(item.offset, bytes);
    if (!result) {
      LOG(ERROR) << "Could not load item at offset " << item.offset << " of size "
                 << item.total_bytes << " from disk with error: " << result.error().value() << " "
                 << result.error().message();
      return;
    }

    // Guard against a short read so we never dispatch a partially filled buffer.
    if (*result != item.total_bytes) {
      LOG(ERROR) << "Short read at offset " << item.offset << ": got " << *result
                 << " bytes, expected " << item.total_bytes;
      return;
    }

    VLOG(2) << "Loaded item with offset " << item.offset << " of size " << item.total_bytes
            << " for connection " << this;

    f(bytes);

    offsets_.pop_front();
    total_backing_bytes_ -= item.total_bytes;
  }
}

}  // namespace facade