Skip to content

Commit f66d0b2

Browse files
authored
Merge pull request ceph#56744 from benhanokh/ncb_async_discard_fix
os/BlueStore: NCB fix for leaked space when bdev_async_discard is enabled. Reviewed-by: Igor Fedotov <[email protected]> Reviewed-by: Ronen Friedman <[email protected]> Reviewed-by: Joshua Baergen <[email protected]>
2 parents 2c1d0c7 + b37080c commit f66d0b2

File tree

4 files changed

+46
-14
lines changed

4 files changed

+46
-14
lines changed

src/blk/BlockDevice.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -286,7 +286,7 @@ class BlockDevice {
286286
virtual int flush() = 0;
287287
virtual bool try_discard(interval_set<uint64_t> &to_release, bool async=true) { return false; }
288288
virtual void discard_drain() { return; }
289-
289+
virtual void swap_discard_queued(interval_set<uint64_t>& other) { other.clear(); }
290290
// for managing buffered readers/writers
291291
virtual int invalidate_cache(uint64_t off, uint64_t len) = 0;
292292
virtual int open(const std::string& path) = 0;

src/blk/kernel/KernelDevice.cc

Lines changed: 24 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -591,7 +591,7 @@ void KernelDevice::discard_drain()
591591
{
592592
dout(10) << __func__ << dendl;
593593
std::unique_lock l(discard_lock);
594-
while (!discard_queued.empty() || discard_running) {
594+
while (!discard_queued.empty() || (discard_running > 0)) {
595595
discard_cond.wait(l);
596596
}
597597
}
@@ -731,6 +731,12 @@ void KernelDevice::_aio_thread()
731731
dout(10) << __func__ << " end" << dendl;
732732
}
733733

734+
void KernelDevice::swap_discard_queued(interval_set<uint64_t>& other)
735+
{
736+
std::unique_lock l(discard_lock);
737+
discard_queued.swap(other);
738+
}
739+
734740
void KernelDevice::_discard_thread(uint64_t tid)
735741
{
736742
dout(10) << __func__ << " thread " << tid << " start" << dendl;
@@ -755,13 +761,21 @@ void KernelDevice::_discard_thread(uint64_t tid)
755761
discard_cond.wait(l);
756762
dout(20) << __func__ << " wake" << dendl;
757763
} else {
758-
// Swap the queued discards for a local list we'll process here
759-
// without caring about thread fairness. This allows the current
760-
// thread to wait on the discard running while other threads pick
761-
// up the next-in-queue, and do the same, ultimately issuing more
762-
// discards in parallel, which is the goal.
763-
discard_processing.swap(discard_queued);
764-
discard_running = true;
764+
// Limit local processing to MAX_LOCAL_DISCARD items.
765+
// This will allow threads to work in parallel
766+
// instead of a single thread taking over the whole discard_queued.
767+
// It will also allow threads to finish in a timely manner.
768+
constexpr unsigned MAX_LOCAL_DISCARD = 32;
769+
unsigned count = 0;
770+
for (auto p = discard_queued.begin();
771+
p != discard_queued.end() && count < MAX_LOCAL_DISCARD;
772+
++p, ++count) {
773+
discard_processing.insert(p.get_start(), p.get_len());
774+
discard_queued.erase(p);
775+
}
776+
777+
// there are multiple active threads -> must use a counter instead of a flag
778+
discard_running ++;
765779
l.unlock();
766780
dout(20) << __func__ << " finishing" << dendl;
767781
for (auto p = discard_processing.begin(); p != discard_processing.end(); ++p) {
@@ -771,7 +785,8 @@ void KernelDevice::_discard_thread(uint64_t tid)
771785
discard_callback(discard_callback_priv, static_cast<void*>(&discard_processing));
772786
discard_processing.clear();
773787
l.lock();
774-
discard_running = false;
788+
discard_running --;
789+
ceph_assert(discard_running >= 0);
775790
}
776791
}
777792

src/blk/kernel/KernelDevice.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ class KernelDevice : public BlockDevice,
5555

5656
ceph::mutex discard_lock = ceph::make_mutex("KernelDevice::discard_lock");
5757
ceph::condition_variable discard_cond;
58-
bool discard_running = false;
58+
int discard_running = 0;
5959
interval_set<uint64_t> discard_queued;
6060

6161
struct AioCompletionThread : public Thread {
@@ -124,7 +124,7 @@ class KernelDevice : public BlockDevice,
124124

125125
void aio_submit(IOContext *ioc) override;
126126
void discard_drain() override;
127-
127+
void swap_discard_queued(interval_set<uint64_t>& other) override;
128128
int collect_metadata(const std::string& prefix, std::map<std::string,std::string> *pm) const override;
129129
int get_devname(std::string *s) const override {
130130
if (devname.empty()) {

src/os/bluestore/BlueStore.cc

Lines changed: 19 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7788,9 +7788,26 @@ void BlueStore::_close_db()
77887788
db = nullptr;
77897789

77907790
if (do_destage && fm && fm->is_null_manager()) {
7791+
if (cct->_conf->osd_fast_shutdown) {
7792+
interval_set<uint64_t> discard_queued;
7793+
bdev->swap_discard_queued(discard_queued);
7794+
dout(10) << __func__ << "::discard_drain: size=" << discard_queued.size()
7795+
<< " num_intervals=" << discard_queued.num_intervals() << dendl;
7796+
// copy discard_queued to the allocator before storing it
7797+
for (auto p = discard_queued.begin(); p != discard_queued.end(); ++p) {
7798+
dout(20) << __func__ << "::discarded-extent=[" << p.get_start()
7799+
<< ", " << p.get_len() << "]" << dendl;
7800+
alloc->init_add_free(p.get_start(), p.get_len());
7801+
}
7802+
}
7803+
7804+
// When we reach here it is either a graceful shutdown (so can drain the full discards-queue)
7805+
// or it was a fast shutdown, but we already moved the main discards-queue to the allocator
7806+
// and only need to wait for the threads local discard_processing queues to drain
7807+
bdev->discard_drain();
77917808
int ret = store_allocator(alloc);
7792-
if (ret != 0) {
7793-
derr << __func__ << "::NCB::store_allocator() failed (continue with bitmapFreelistManager)" << dendl;
7809+
if (unlikely(ret != 0)) {
7810+
derr << __func__ << "::NCB::store_allocator() failed (we will need to rebuild it on startup)" << dendl;
77947811
}
77957812
}
77967813

0 commit comments

Comments (0)