Skip to content

Commit 5d35433

Browse files
Philipp Brüschweiler authored and copybara-github committed
Add a method to drain items out of the transfer cache if it's not used.
This will be used in the future to clean up transfer caches of L3 nodes that are no longer used.

PiperOrigin-RevId: 392657424
Change-Id: Iab91fc986b515a4f7fc0f2b10d85b10cf5f694de
1 parent a2d8f3a commit 5d35433

File tree

4 files changed

+111
-1
lines changed

4 files changed

+111
-1
lines changed

tcmalloc/background.cc

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
#include "tcmalloc/internal_malloc_extension.h"
2424
#include "tcmalloc/malloc_extension.h"
2525
#include "tcmalloc/parameters.h"
26+
#include "tcmalloc/static_vars.h"
2627

2728
GOOGLE_MALLOC_SECTION_BEGIN
2829
namespace tcmalloc {
@@ -133,6 +134,7 @@ void MallocExtension_Internal_ProcessBackgroundActions() {
133134
}
134135
}
135136

137+
tcmalloc::tcmalloc_internal::Static().sharded_transfer_cache().Plunder();
136138
prev_time = now;
137139
absl::SleepFor(kSleepTime);
138140
}

tcmalloc/transfer_cache.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,18 @@ class TransferCacheManager : public StaticForwarder {
9595
}
9696
}
9797

98+
// All caches which have not been modified since the last time this method has
99+
// been called will return all objects to the freelist.
100+
void Plunder() {
101+
for (int i = 0; i < kNumClasses; ++i) {
102+
if (use_ringbuffer_) {
103+
cache_[i].rbtc.TryPlunder(i);
104+
} else {
105+
cache_[i].tc.TryPlunder(i);
106+
}
107+
}
108+
}
109+
98110
// This is not const because the underlying ring-buffer transfer cache
99111
// function requires acquiring a lock.
100112
size_t tc_length(int size_class) {
@@ -197,6 +209,7 @@ struct ShardedTransferCacheManager {
197209
static constexpr void *Pop(int cl) { return nullptr; }
198210
static constexpr void Push(int cl, void *ptr) {}
199211
static constexpr size_t TotalBytes() { return 0; }
212+
static constexpr void Plunder() {}
200213
};
201214

202215
} // namespace tcmalloc_internal

tcmalloc/transfer_cache_internals.h

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,7 @@ class TransferCache {
8585
lock_(absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY),
8686
max_capacity_(capacity.max_capacity),
8787
slot_info_(SizeInfo({0, capacity.capacity})),
88+
touched_(false),
8889
slots_(nullptr),
8990
freelist_do_not_access_directly_() {
9091
freelist().Init(cl);
@@ -150,6 +151,7 @@ class TransferCache {
150151
const int B = Manager::num_objects_to_move(size_class);
151152
ASSERT(0 < N && N <= B);
152153
auto info = slot_info_.load(std::memory_order_relaxed);
154+
touched_.store(true, std::memory_order_release);
153155
if (N == B) {
154156
if (info.used + N <= max_capacity_) {
155157
absl::base_internal::SpinLockHolder h(&lock_);
@@ -183,6 +185,7 @@ class TransferCache {
183185
ASSERT(N > 0);
184186
const int B = Manager::num_objects_to_move(size_class);
185187
auto info = slot_info_.load(std::memory_order_relaxed);
188+
touched_.store(true, std::memory_order_release);
186189
if (N == B) {
187190
if (info.used >= N) {
188191
absl::base_internal::SpinLockHolder h(&lock_);
@@ -208,6 +211,35 @@ class TransferCache {
208211
return freelist().RemoveRange(batch, N);
209212
}
210213

214+
// If this object has not been touched since the last attempt, then
215+
// return all objects to 'freelist()'.
216+
void TryPlunder(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
217+
if (max_capacity_ == 0) return;
218+
if (touched_.load(std::memory_order_acquire)) {
219+
touched_.store(false, std::memory_order_release);
220+
return;
221+
}
222+
// If the lock is being held, someone is modifying the cache.
223+
if (!lock_.TryLock()) return;
224+
const int B = Manager::num_objects_to_move(size_class);
225+
SizeInfo info = GetSlotInfo();
226+
while (info.used > 0) {
227+
const size_t num_to_move = std::min(B, info.used);
228+
void *buf[kMaxObjectsToMove];
229+
void **const entry = GetSlot(info.used - B);
230+
memcpy(buf, entry, sizeof(void *) * B);
231+
info.used -= num_to_move;
232+
SetSlotInfo(info);
233+
lock_.Unlock();
234+
freelist().InsertRange({buf, num_to_move});
235+
// If someone is starting to use the cache, stop doing this.
236+
if (touched_.load(std::memory_order_acquire)) return;
237+
if (!lock_.TryLock()) return;
238+
info = GetSlotInfo();
239+
}
240+
lock_.Unlock();
241+
}
242+
211243
// Returns the number of free objects in the transfer cache.
212244
size_t tc_length() const {
213245
return static_cast<size_t>(slot_info_.load(std::memory_order_relaxed).used);
@@ -371,6 +403,9 @@ class TransferCache {
371403
// INVARIANT: [0 <= slot_info_.used <= slot_info.capacity <= max_cache_slots_]
372404
std::atomic<SizeInfo> slot_info_;
373405

406+
// Has this transfer cache been changed since the last call to TryPlunder()?
407+
std::atomic<bool> touched_;
408+
374409
// Pointer to array of free objects. Use GetSlot() to get pointers to
375410
// entries.
376411
void **slots_ ABSL_GUARDED_BY(lock_);
@@ -448,6 +483,7 @@ class RingBufferTransferCache {
448483

449484
{
450485
absl::base_internal::SpinLockHolder h(&lock_);
486+
touched_ = true;
451487
RingBufferSizeInfo info = GetSlotInfo();
452488
if (info.used + N <= max_capacity_) {
453489
const bool cache_grown = MakeCacheSpace(size_class, N);
@@ -517,6 +553,7 @@ class RingBufferTransferCache {
517553

518554
{
519555
absl::base_internal::SpinLockHolder h(&lock_);
556+
touched_ = true;
520557
RingBufferSizeInfo info = GetSlotInfo();
521558
if (info.used > 0) {
522559
// Return up to however much we have in our local cache.
@@ -534,6 +571,34 @@ class RingBufferTransferCache {
534571
return freelist().RemoveRange(batch, N);
535572
}
536573

574+
// If this object has not been touched since the last attempt, then
// return all objects to 'freelist()'.
// The lock is dropped around each InsertRange() call so concurrent users
// are not stalled; plundering stops as soon as the cache is touched again.
void TryPlunder(int size_class) ABSL_LOCKS_EXCLUDED(lock_) {
  if (max_capacity_ == 0) return;
  // If the lock is being held, someone is modifying the cache.
  if (!lock_.TryLock()) return;
  // A recent insert/remove set 'touched_': clear it and grant one more
  // grace period before draining the cache.
  if (touched_) {
    touched_ = false;
    lock_.Unlock();
    return;
  }
  const int B = Manager::num_objects_to_move(size_class);
  while (slot_info_.used > 0) {
    // Move at most one batch per iteration; the final batch may be partial.
    const size_t num_to_move(std::min(B, slot_info_.used));
    void *buf[kMaxObjectsToMove];
    // CopyOutOfEnd presumably pops 'num_to_move' objects off the ring's
    // tail into 'buf' and updates slot_info_ — TODO confirm in its def.
    CopyOutOfEnd(buf, num_to_move, slot_info_);
    lock_.Unlock();
    freelist().InsertRange({buf, num_to_move});
    // If someone is starting to use the cache, stop doing this.
    if (!lock_.TryLock()) return;
    if (touched_) {
      lock_.Unlock();
      return;
    }
  }
  lock_.Unlock();
}
601+
537602
// Returns the number of free objects in the central cache.
538603
size_t central_length() const { return freelist().length(); }
539604

@@ -778,6 +843,9 @@ class RingBufferTransferCache {
778843
// INVARIANT: [0 <= slot_info_.used <= slot_info.capacity <= max_cache_slots_]
779844
RingBufferSizeInfo slot_info_ ABSL_GUARDED_BY(lock_);
780845

846+
// Has this transfer cache been changed since the last call to TryPlunder()?
847+
bool touched_ ABSL_GUARDED_BY(lock_) = false;
848+
781849
// Maximum size of the cache.
782850
const int32_t max_capacity_;
783851
// This is a bitmask used instead of a modulus in the ringbuffer index

tcmalloc/transfer_cache_test.cc

Lines changed: 28 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -311,6 +311,33 @@ TYPED_TEST_P(TransferCacheTest, WrappingFlex) {
311311
}
312312
}
313313

314+
// Plundering must be a no-op while the cache is actively used, and must
// drain it completely once it has sat idle for a full sweep.
TYPED_TEST_P(TransferCacheTest, Plunder) {
  TypeParam env;

  // Preload two batches so there is something to plunder at the end.
  env.Insert(TypeParam::kBatchSize);
  env.Insert(TypeParam::kBatchSize);

  // Nothing is ever fetched from the central freelist; exactly the two
  // preloaded batches end up pushed back to it.
  EXPECT_CALL(env.central_freelist(), RemoveRange).Times(0);
  EXPECT_CALL(env.central_freelist(), InsertRange).Times(2);

  void* batch[TypeParam::kBatchSize];

  // A remove marks the cache as touched, so the next plunder only clears
  // that mark and leaves the contents alone.
  (void)env.transfer_cache().RemoveRange(kSizeClass, batch,
                                         TypeParam::kBatchSize);
  env.transfer_cache().TryPlunder(kSizeClass);
  ASSERT_GT(env.transfer_cache().tc_length(), 0);

  // An insert likewise grants one more grace period; nothing drained yet.
  env.transfer_cache().InsertRange(kSizeClass, {batch, TypeParam::kBatchSize});
  env.transfer_cache().TryPlunder(kSizeClass);
  ASSERT_GT(env.transfer_cache().tc_length(), 0);

  // The cache has now been idle since the previous sweep; this plunder
  // empties it entirely.
  env.transfer_cache().TryPlunder(kSizeClass);
  ASSERT_EQ(env.transfer_cache().tc_length(), 0);
}
340+
314341
// PickCoprimeBatchSize picks a batch size in [2, max_batch_size) that is
315342
// coprime with 2^32. We choose the largest possible batch size within that
316343
// constraint to minimize the number of iterations of insert/remove required.
@@ -414,7 +441,7 @@ REGISTER_TYPED_TEST_SUITE_P(TransferCacheTest, IsolatedSmoke, ReadStats,
414441
FetchesFromFreelist, PartialFetchFromFreelist,
415442
EvictsOtherCaches, PushesToFreelist, WrappingWorks,
416443
SingleItemSmoke, EvictsOtherCachesFlex,
417-
FullCacheFlex, WrappingFlex);
444+
FullCacheFlex, WrappingFlex, Plunder);
418445
template <typename Env>
419446
using FuzzTest = ::testing::Test;
420447
TYPED_TEST_SUITE_P(FuzzTest);

0 commit comments

Comments
 (0)