Skip to content

Commit 2a9b30a

Browse files
ckennelly authored and copybara-github committed
Avoid roundtripping data through memory.
When we are representing page heap allocations with AllocationState, we can work directly with that until the final Spanify call. PiperOrigin-RevId: 768151285 Change-Id: Ib7d351e5f1fc4d6055debede0a725e2386173c41
1 parent c0855b9 commit 2a9b30a

File tree

2 files changed

+26
-11
lines changed

2 files changed

+26
-11
lines changed

tcmalloc/huge_page_aware_allocator.h

Lines changed: 24 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -406,10 +406,7 @@ class HugePageAwareAllocator final : public PageAllocatorInterface {
406406
#ifdef TCMALLOC_INTERNAL_LEGACY_LOCKING
407407
using FinalizeType = Span*;
408408
#else // !TCMALLOC_INTERNAL_LEGACY_LOCKING
409-
struct FinalizeType {
410-
Range r;
411-
bool donated = false;
412-
};
409+
using FinalizeType = AllocationState;
413410
#endif // !TCMALLOC_INTERNAL_LEGACY_LOCKING
414411

415412
FinalizeType LockAndAlloc(Length n, SpanAllocInfo span_alloc_info,
@@ -441,6 +438,7 @@ class HugePageAwareAllocator final : public PageAllocatorInterface {
441438
FinalizeType Finalize(Range r);
442439

443440
Span* Spanify(FinalizeType f);
441+
Range Unspanify(FinalizeType f);
444442

445443
// Whether this HPAA should use subrelease. This delegates to the appropriate
446444
// parameter depending whether this is for the cold heap or another heap.
@@ -674,14 +672,16 @@ inline Span* HugePageAwareAllocator<Forwarder>::New(
674672
Length n, SpanAllocInfo span_alloc_info) {
675673
TC_CHECK_GT(n, Length(0));
676674
bool from_released;
677-
Span* s = Spanify(LockAndAlloc(n, span_alloc_info, &from_released));
678-
if (s) {
675+
FinalizeType f = LockAndAlloc(n, span_alloc_info, &from_released);
676+
if (f) {
677+
Range r = Unspanify(f);
679678
// Prefetch for writing, as we anticipate using the memory soon.
680-
PrefetchW(s->start_address());
679+
PrefetchW(r.p.start_addr());
681680
if (from_released) {
682-
forwarder_.Back(Range(s->first_page(), s->num_pages()));
681+
forwarder_.Back(r);
683682
}
684683
}
684+
Span* s = Spanify(f);
685685
TC_ASSERT(!s || GetMemoryTag(s->start_address()) == tag_);
686686
return s;
687687
}
@@ -726,11 +726,14 @@ inline Span* HugePageAwareAllocator<Forwarder>::NewAligned(
726726
PageHeapSpinLockHolder l;
727727
f = AllocRawHugepages(n, span_alloc_info, &from_released);
728728
}
729-
Span* s = Spanify(f);
730-
if (s && from_released) {
731-
forwarder_.Back(Range(s->first_page(), s->num_pages()));
729+
if (f && from_released) {
730+
Range r = Unspanify(f);
731+
// Prefetch for writing, as we anticipate using the memory soon.
732+
PrefetchW(r.p.start_addr());
733+
forwarder_.Back(r);
732734
}
733735

736+
Span* s = Spanify(f);
734737
TC_ASSERT(!s || GetMemoryTag(s->start_address()) == tag_);
735738
return s;
736739
}
@@ -752,6 +755,16 @@ inline Span* HugePageAwareAllocator<Forwarder>::Spanify(FinalizeType f) {
752755
#endif
753756
}
754757

758+
template <class Forwarder>
759+
inline Range HugePageAwareAllocator<Forwarder>::Unspanify(FinalizeType f) {
760+
#ifdef TCMALLOC_INTERNAL_LEGACY_LOCKING
761+
TC_ASSERT(f);
762+
return Range(f->first_page(), f->num_pages());
763+
#else
764+
return f.r;
765+
#endif
766+
}
767+
755768
template <class Forwarder>
756769
inline void HugePageAwareAllocator<Forwarder>::DeleteFromHugepage(
757770
FillerType::Tracker* pt, Range r, bool might_abandon) {

tcmalloc/page_allocator_interface.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,8 @@ class PageAllocatorInterface {
6161
// In-flight result of a page-heap allocation: the page range obtained and
// whether it came from a donated hugepage region.
struct AllocationState {
  Range r;
  bool donated;

  // True when this state represents a successful allocation, i.e. the range
  // starts at a nonzero page. NOTE(review): presumably PageId{0} is never a
  // valid allocation start and so doubles as the "empty" sentinel — confirm
  // against the allocator's failure path.
  operator bool() const { return ABSL_PREDICT_TRUE(r.p != PageId{0}); }
};
6567

6668
virtual void Delete(AllocationState s)

0 commit comments

Comments
 (0)