|
13 | 13 | #include <numeric> |
14 | 14 | #include <variant> |
15 | 15 |
|
| 16 | +#include "absl/strings/str_split.h" |
| 17 | +#include "absl/time/clock.h" |
16 | 18 | #include "base/cycle_clock.h" |
17 | 19 | #include "base/flag_utils.h" |
18 | 20 | #include "base/flags.h" |
|
27 | 29 | #include "facade/redis_parser.h" |
28 | 30 | #include "facade/service_interface.h" |
29 | 31 | #include "facade/socket_utils.h" |
30 | | -#include "io/file.h" |
| 32 | +#include "io/io.h" |
31 | 33 | #include "util/fibers/fibers.h" |
32 | 34 | #include "util/fibers/proactor_base.h" |
33 | 35 |
|
@@ -112,6 +114,16 @@ ABSL_FLAG(uint32_t, pipeline_wait_batch_usec, 0, |
112 | 114 | "If non-zero, waits for this time for more I/O " |
113 | 115 | " events to come for the connection in case there is only one command in the pipeline. "); |
114 | 116 |
|
| 117 | +ABSL_FLAG(size_t, connection_disk_backpressure_watermark, 0, |
| 118 | +          "Offload dispatch queue backpressure to disk when the queue crosses this watermark (0 to disable)."); |
| 119 | + |
| 120 | +ABSL_FLAG(size_t, connection_disk_backpressure_file_max_bytes, 50_MB, |
| 121 | +          "Maximum size of the backing file. When this limit is reached, the connection will " |
| 122 | +          "stop offloading backpressure to disk."); |
| 123 | + |
| 124 | +ABSL_FLAG(size_t, connection_disk_backpressure_load_size, 30, |
| 125 | +          "How many backpressure items to load from disk each time the dispatch queue is drained."); |
| 126 | + |
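Taken together, the three flags gate the feature as follows: a zero watermark disables disk offloading entirely (no backing queue is constructed); once the dispatch queue grows past the watermark, or the backing file already holds pending items, new pipeline messages go to disk instead of memory, provided the backing file stays under its size cap. A minimal sketch of that decision predicate, distilled from `OffloadBackpressureToDiskIfNeeded()` and `HasEnoughBackingSpace()` below (the function and parameter names here are illustrative, not the actual members):

```cpp
#include <cstddef>

// Sketch of the assumed gating logic; not the literal implementation.
bool ShouldOffload(size_t watermark, size_t dispatch_q_size, size_t backing_bytes,
                   size_t record_bytes, size_t max_file_bytes) {
  if (watermark == 0)  // connection_disk_backpressure_watermark=0 disables the feature
    return false;
  bool queue_over_watermark = dispatch_q_size > watermark;
  bool backing_has_pending = backing_bytes > 0;  // keep draining via disk to preserve FIFO order
  bool fits_on_disk = record_bytes + backing_bytes < max_file_bytes;
  return (queue_over_watermark || backing_has_pending) && fits_on_disk;
}
```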
115 | 127 | using namespace util; |
116 | 128 | using namespace std; |
117 | 129 | using absl::GetFlag; |
@@ -434,6 +446,10 @@ size_t Connection::PipelineMessage::StorageCapacity() const { |
434 | 446 | return storage.capacity() + args.capacity(); |
435 | 447 | } |
436 | 448 |
|
| 449 | +size_t Connection::PipelineMessage::StorageBytes() const { |
| 450 | + return storage.size(); |
| 451 | +} |
| 452 | + |
437 | 453 | size_t Connection::MessageHandle::UsedMemory() const { |
438 | 454 | struct MessageSize { |
439 | 455 | size_t operator()(const PubMessagePtr& msg) { |
@@ -676,6 +692,11 @@ Connection::Connection(Protocol protocol, util::HttpListenerBase* http_listener, |
676 | 692 | #endif |
677 | 693 |
|
678 | 694 | UpdateLibNameVerMap(lib_name_, lib_ver_, +1); |
| 695 | + |
| 696 | + const size_t disk_watermark = absl::GetFlag(FLAGS_connection_disk_backpressure_watermark); |
| 697 | + if (disk_watermark) { |
| 698 | + backing_queue_ = std::make_unique<DiskBackedBackpressureQueue>(); |
| 699 | + } |
679 | 700 | } |
680 | 701 |
|
681 | 702 | Connection::~Connection() { |
@@ -1162,8 +1183,12 @@ void Connection::ConnectionFlow() { |
1162 | 1183 |
|
1163 | 1184 | void Connection::DispatchSingle(bool has_more, absl::FunctionRef<void()> invoke_cb, |
1164 | 1185 | absl::FunctionRef<MessageHandle()> cmd_msg_cb) { |
1165 | | - bool optimize_for_async = has_more; |
1166 | 1186 | QueueBackpressure& qbp = GetQueueBackpressure(); |
| 1187 | + if (OffloadBackpressureToDiskIfNeeded(cmd_msg_cb)) { |
| 1188 | + return; |
| 1189 | + } |
| 1190 | + |
| 1191 | + bool optimize_for_async = has_more; |
1167 | 1192 | if (optimize_for_async && |
1168 | 1193 | qbp.IsPipelineBufferOverLimit(stats_->dispatch_queue_bytes, dispatch_q_.size())) { |
1169 | 1194 | stats_->pipeline_throttle_count++; |
@@ -1683,12 +1708,23 @@ void Connection::AsyncFiber() { |
1683 | 1708 | QueueBackpressure& qbp = GetQueueBackpressure(); |
1684 | 1709 | while (!reply_builder_->GetError()) { |
1685 | 1710 | DCHECK_EQ(socket()->proactor(), ProactorBase::me()); |
| 1711 | + |
| 1712 | + LoadBackpressureFromDiskIfNeeded(); |
| 1713 | + |
1686 | 1714 | cnd_.wait(noop_lk, [this] { |
1687 | | - return cc_->conn_closing || (!dispatch_q_.empty() && !cc_->sync_dispatch); |
| 1715 | + return cc_->conn_closing || (!dispatch_q_.empty() && !cc_->sync_dispatch) || |
| 1716 | + disk_backpressure_available_; |
1688 | 1717 | }); |
| 1718 | + |
1689 | 1719 | if (cc_->conn_closing) |
1690 | 1720 | break; |
1691 | 1721 |
|
| 1722 | + if (disk_backpressure_available_) { |
| 1723 | + LoadBackpressureFromDiskIfNeeded(); |
| 1724 | +      DCHECK(!dispatch_q_.empty()); |
| 1725 | + disk_backpressure_available_ = false; |
| 1726 | + } |
| 1727 | + |
1692 | 1728 | // We really want to have batching in the builder if possible. This is especially |
1693 | 1729 | // critical in situations where Nagle's algorithm can introduce unwanted high |
1694 | 1730 | // latencies. However we can only batch if we're sure that there are more commands |
@@ -2254,4 +2290,173 @@ void ResetStats() { |
2254 | 2290 | io_req_size_hist->Clear(); |
2255 | 2291 | } |
2256 | 2292 |
|
| 2293 | +bool Connection::OffloadBackpressureToDiskIfNeeded(absl::FunctionRef<MessageHandle()> handle) { |
| 2294 | +  // Offload only when dispatch_q_ crosses the watermark or when the backing queue |
| 2295 | +  // already has pending items. |
| 2296 | + if (backing_queue_ && |
| 2297 | + ((dispatch_q_.size() > backing_queue_->Watermark()) || !backing_queue_->Empty())) { |
| 2298 | + auto ec = backing_queue_->Init(); |
| 2299 | + LOG_IF(ERROR, ec) << "Failed to init disk backed backpressure with error " << ec.message(); |
| 2300 | + |
| 2301 | + MessageHandle msg; |
| 2302 | + if (!ec) { |
| 2303 | + msg = handle(); |
| 2304 | + PipelineMessage* pmsg = std::get<Connection::PipelineMessagePtr>(msg.handle).get(); |
| 2305 | + if (backing_queue_->HasEnoughBackingSpace(pmsg)) { |
| 2306 | + backing_queue_->OffloadToBacking(pmsg); |
| 2307 | +      if (dispatch_q_.empty()) { |
| 2308 | + disk_backpressure_available_ = true; |
| 2309 | + cnd_.notify_one(); |
| 2310 | + } |
| 2311 | +      // Recycle the message's buffers back into the pipeline pool. |
| 2312 | + QueueBackpressure& qbp = GetQueueBackpressure(); |
| 2313 | + if (stats_->pipeline_cmd_cache_bytes < qbp.pipeline_cache_limit) { |
| 2314 | + stats_->pipeline_cmd_cache_bytes += pmsg->StorageCapacity(); |
| 2315 | + pipeline_req_pool_.push_back( |
| 2316 | + std::move(std::get<Connection::PipelineMessagePtr>(msg.handle))); |
| 2317 | + } |
| 2318 | +      // Item offloaded to disk without errors; unblock the connection fiber. |
| 2319 | + return true; |
| 2320 | + } |
| 2321 | + LOG(WARNING) << "Disk backpressure file size limit reached. Could not offload backpressure."; |
| 2322 | + } |
| 2323 | + } |
| 2324 | + return false; |
| 2325 | +} |
| 2326 | + |
| 2327 | +void Connection::LoadBackpressureFromDiskIfNeeded() { |
| 2328 | + if (HasDiskBacked()) { |
| 2329 | + auto q_insert_cb = [this](io::MutableBytes bytes) { |
| 2330 | +      PipelineMessagePtr ptr = GetFromPipelinePool(); |
| 2331 | +      if (!ptr) { |
| 2332 | +        ptr = make_unique<PipelineMessage>(1, 1); |
| 2333 | +      } |
| 2334 | +      ptr->storage.resize(bytes.size()); |
| 2337 | + |
| 2338 | +      memcpy(ptr->storage.data(), bytes.data(), bytes.size()); |
| 2339 | + std::string_view read{reinterpret_cast<char*>(ptr->storage.data()), bytes.size()}; |
| 2340 | + ptr->args = absl::StrSplit(read, '\0', absl::SkipEmpty()); |
| 2341 | + |
| 2342 | + SendAsync({.handle = std::move(ptr)}); |
| 2343 | + }; |
| 2344 | + |
| 2345 | + backing_queue_->LoadFromDiskToQueue(q_insert_cb); |
| 2346 | + } |
| 2347 | +} |
| 2348 | + |
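The load path above implies the on-disk record format: each record is the raw `storage` payload of a pipeline message with arguments separated by `'\0'`, which is presumably what `FullCommand()` produces on the write side (the serializer itself is not part of this diff). A self-contained sketch of the assumed round trip:

```cpp
#include <string>
#include <vector>

#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"

int main() {
  // Write side: FullCommand() is assumed to return the arguments NUL-joined.
  std::vector<std::string> args = {"SET", "key", "value"};
  std::string record = absl::StrJoin(args, std::string(1, '\0'));

  // Read side: mirrors q_insert_cb above -- split the raw bytes back into args.
  std::vector<std::string> restored = absl::StrSplit(record, '\0', absl::SkipEmpty());
  return restored == args ? 0 : 1;  // the round trip restores the original arguments
}
```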
| 2349 | +size_t Connection::DiskBackedBackpressureQueue::unique_id = 0; |
| 2350 | + |
| 2351 | +Connection::DiskBackedBackpressureQueue::DiskBackedBackpressureQueue() |
| 2352 | + : max_backing_size_(absl::GetFlag(FLAGS_connection_disk_backpressure_file_max_bytes)), |
| 2353 | + max_queue_load_size_(absl::GetFlag(FLAGS_connection_disk_backpressure_load_size)), |
| 2354 | + watermark_(absl::GetFlag(FLAGS_connection_disk_backpressure_watermark)) { |
| 2355 | + id_ = ++unique_id; |
| 2356 | +} |
| 2357 | + |
| 2358 | +std::error_code Connection::DiskBackedBackpressureQueue::Init() { |
| 2359 | + if (init_) { |
| 2360 | + return {}; |
| 2361 | + } |
| 2362 | + |
| 2363 | + std::string backing_name = absl::StrCat("/tmp/backing_", id_); |
| 2364 | + { |
| 2365 | + // Kernel transparently handles buffering via the page cache. |
| 2366 | + auto res = util::fb2::OpenWrite(backing_name, {} /* overwrite mode + non direct io */); |
| 2367 | + if (!res) { |
| 2368 | + return res.error(); |
| 2369 | + } |
| 2370 | + writer_.reset(*res); |
| 2371 | + } |
| 2372 | + |
| 2373 | + auto res = util::fb2::OpenRead(backing_name); |
| 2374 | + if (!res) { |
| 2375 | + return res.error(); |
| 2376 | + } |
| 2377 | + reader_.reset(*res); |
| 2378 | + |
| 2379 | + VLOG(3) << "Created backing for connection " << this << " " << backing_name; |
| 2380 | + init_ = true; |
| 2381 | + |
| 2382 | + return {}; |
| 2383 | +} |
| 2384 | + |
| 2385 | +bool Connection::DiskBackedBackpressureQueue::Empty() const { |
| 2386 | + return total_backing_bytes_ == 0; |
| 2387 | +} |
| 2388 | + |
| 2389 | +bool Connection::DiskBackedBackpressureQueue::HasEnoughBackingSpace( |
| 2390 | + const Connection::PipelineMessage* msg) const { |
| 2391 | + return (msg->StorageBytes() + total_backing_bytes_) < max_backing_size_; |
| 2392 | +} |
| 2393 | + |
| 2394 | +size_t Connection::DiskBackedBackpressureQueue::TotalInMemoryBytes() const { |
| 2395 | + return offsets_.size() * sizeof(ItemOffset); |
| 2396 | +} |
| 2397 | + |
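`ItemOffset` itself is declared in the header rather than in this diff; judging from its use in `TotalInMemoryBytes()` above and `OffloadToBacking()` below, its presumed shape is just a record's position and length within the backing file:

```cpp
// Presumed per-record bookkeeping entry (the actual declaration lives in the header):
struct ItemOffset {
  size_t offset = 0;       // byte offset of the record within the backing file
  size_t total_bytes = 0;  // record length in bytes
};
```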
| 2398 | +void Connection::DiskBackedBackpressureQueue::OffloadToBacking( |
| 2399 | + const Connection::PipelineMessage* msg) { |
| 2400 | + ItemOffset item; |
| 2401 | + item.offset = next_offset_; |
| 2402 | + item.total_bytes = msg->FullCommand().size(); |
| 2403 | + |
| 2404 | +  // TODO: we should truncate as the file grows. That way we never end up with large |
| 2405 | +  // files on disk. |
| 2406 | +  size_t start = absl::GetCurrentTimeNanos(); |
| 2407 | +  auto res = writer_->Write(msg->FullCommand()); |
| 2408 | +  if (res) { |
| 2409 | +    VLOG(2) << "Failed to offload connection " << this << " backpressure with offset " |
| 2410 | +            << item.offset << " of size " << item.total_bytes |
| 2411 | +            << " to backing with error: " << res; |
| 2412 | +    return; |
| 2413 | +  } |
| 2414 | + |
| 2415 | +  // Only update the write latency on the non-error path. |
| 2416 | +  size_t end = absl::GetCurrentTimeNanos(); |
| 2417 | +  max_io_write_latency_ = std::max(max_io_write_latency_, (end - start)); |
| 2418 | + |
| 2419 | + total_backing_bytes_ += msg->FullCommand().size(); |
| 2420 | + offsets_.push_back(item); |
| 2421 | + next_offset_ += item.total_bytes; |
| 2422 | + |
| 2423 | + VLOG(2) << "Offload connection " << this << " backpressure of " << item.total_bytes |
| 2424 | + << " bytes to disk at offset: " << item.offset; |
| 2425 | + VLOG(3) << "Command offloaded: " << msg->FullCommand(); |
| 2426 | +} |
| 2427 | + |
| 2428 | +template <typename F> void Connection::DiskBackedBackpressureQueue::LoadFromDiskToQueue(F f) { |
| 2429 | + std::string buffer; |
| 2430 | + size_t up_to = max_queue_load_size_; |
| 2431 | + |
| 2432 | + size_t start = absl::GetCurrentTimeNanos(); |
| 2433 | + |
| 2434 | + while (!offsets_.empty() && up_to--) { |
| 2435 | + ItemOffset item = offsets_.front(); |
| 2436 | + |
| 2437 | + buffer.resize(item.total_bytes); |
| 2438 | + |
| 2439 | + io::MutableBytes bytes{reinterpret_cast<uint8_t*>(buffer.data()), item.total_bytes}; |
| 2440 | + auto result = reader_->Read(item.offset, bytes); |
| 2441 | + if (!result) { |
| 2442 | + LOG(ERROR) << "Could not load item at offset " << item.offset << " of size " |
| 2443 | + << item.total_bytes << " from disk with error: " << result.error().value() << " " |
| 2444 | + << result.error().message(); |
| 2445 | + return; |
| 2446 | + } |
| 2447 | + |
| 2448 | + VLOG(2) << "Loaded item with offset " << item.offset << " of size " << item.total_bytes |
| 2449 | + << " for connection " << this; |
| 2450 | + |
| 2451 | + f(bytes); |
| 2452 | + |
| 2453 | + offsets_.pop_front(); |
| 2454 | + total_backing_bytes_ -= item.total_bytes; |
| 2455 | + } |
| 2456 | + |
| 2457 | +  // Only update the read latency on the non-error path. |
| 2458 | + size_t end = absl::GetCurrentTimeNanos(); |
| 2459 | + max_io_read_latency_ = std::max(max_io_read_latency_, (end - start)); |
| 2460 | +} |
| 2461 | + |
2257 | 2462 | } // namespace facade |