Skip to content

Commit 17b5a92

Browse files
committed
Implementation of a mapped cache
1 parent 16033ea commit 17b5a92

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

61 files changed

+3445
-2693
lines changed

src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,6 @@
2626
#include "gc/z/zGlobals.hpp"
2727
#include "gc/z/zInitialize.hpp"
2828
#include "gc/z/zLargePages.inline.hpp"
29-
#include "gc/z/zPhysicalMemory.inline.hpp"
3029
#include "gc/z/zPhysicalMemoryBacking_bsd.hpp"
3130
#include "logging/log.hpp"
3231
#include "runtime/globals.hpp"

src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp

Lines changed: 7 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -627,30 +627,11 @@ bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const {
627627
return true;
628628
}
629629

630-
static int offset_to_node(zoffset offset) {
631-
const GrowableArray<int>* mapping = os::Linux::numa_nindex_to_node();
632-
const size_t nindex = (untype(offset) >> ZGranuleSizeShift) % mapping->length();
633-
return mapping->at((int)nindex);
634-
}
635-
636-
size_t ZPhysicalMemoryBacking::commit_numa_interleaved(zoffset offset, size_t length) const {
637-
size_t committed = 0;
638-
639-
// Commit one granule at a time, so that each granule
640-
// can be allocated from a different preferred node.
641-
while (committed < length) {
642-
const zoffset granule_offset = offset + committed;
643-
644-
// Setup NUMA policy to allocate memory from a preferred node
645-
os::Linux::numa_set_preferred(offset_to_node(granule_offset));
630+
size_t ZPhysicalMemoryBacking::commit_numa_preferred(zoffset offset, size_t length, int numa_id) const {
631+
// Setup NUMA policy to allocate memory from a preferred node
632+
os::Linux::numa_set_preferred(numa_id);
646633

647-
if (!commit_inner(granule_offset, ZGranuleSize)) {
648-
// Failed
649-
break;
650-
}
651-
652-
committed += ZGranuleSize;
653-
}
634+
size_t committed = commit_default(offset, length);
654635

655636
// Restore NUMA policy
656637
os::Linux::numa_set_preferred(-1);
@@ -686,11 +667,9 @@ size_t ZPhysicalMemoryBacking::commit_default(zoffset offset, size_t length) con
686667
}
687668
}
688669

689-
size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const {
690-
if (ZNUMA::is_enabled() && !ZLargePages::is_explicit()) {
691-
// To get granule-level NUMA interleaving when using non-large pages,
692-
// we must explicitly interleave the memory at commit/fallocate time.
693-
return commit_numa_interleaved(offset, length);
670+
size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length, int numa_id) const {
671+
if (ZNUMA::is_enabled()) {
672+
return commit_numa_preferred(offset, length, numa_id);
694673
}
695674

696675
return commit_default(offset, length);

src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ class ZPhysicalMemoryBacking {
5959
ZErrno fallocate(bool punch_hole, zoffset offset, size_t length) const;
6060

6161
bool commit_inner(zoffset offset, size_t length) const;
62-
size_t commit_numa_interleaved(zoffset offset, size_t length) const;
62+
size_t commit_numa_preferred(zoffset offset, size_t length, int numa_id) const;
6363
size_t commit_default(zoffset offset, size_t length) const;
6464

6565
public:
@@ -69,7 +69,7 @@ class ZPhysicalMemoryBacking {
6969

7070
void warn_commit_limits(size_t max_capacity) const;
7171

72-
size_t commit(zoffset offset, size_t length) const;
72+
size_t commit(zoffset offset, size_t length, int numa_id) const;
7373
size_t uncommit(zoffset offset, size_t length) const;
7474

7575
void map(zaddress_unsafe addr, size_t size, zoffset offset) const;

src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp renamed to src/hotspot/os/posix/gc/z/zVirtualMemoryManager_posix.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
*/
2323

2424
#include "gc/z/zAddress.inline.hpp"
25-
#include "gc/z/zVirtualMemory.hpp"
25+
#include "gc/z/zVirtualMemoryManager.hpp"
2626
#include "logging/log.hpp"
2727

2828
#include <sys/mman.h>

src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp renamed to src/hotspot/os/windows/gc/z/zVirtualMemoryManager_windows.cpp

Lines changed: 23 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,8 @@
2626
#include "gc/z/zLargePages.inline.hpp"
2727
#include "gc/z/zMapper_windows.hpp"
2828
#include "gc/z/zSyscall_windows.hpp"
29-
#include "gc/z/zVirtualMemory.inline.hpp"
29+
#include "gc/z/zValue.inline.hpp"
30+
#include "gc/z/zMemory.inline.hpp"
3031
#include "utilities/align.hpp"
3132
#include "utilities/debug.hpp"
3233

@@ -82,41 +83,41 @@ class ZVirtualMemoryManagerSmallPages : public ZVirtualMemoryManagerImpl {
8283
// Called when a memory area is returned to the memory manager but can't
8384
// be merged with an already existing area. Make sure this area is covered
8485
// by a single placeholder.
85-
static void create_callback(const ZMemory* area) {
86-
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
86+
static void create_callback(const ZMemoryRange& range) {
87+
assert(is_aligned(range.size(), ZGranuleSize), "Must be granule aligned");
8788

88-
coalesce_into_one_placeholder(area->start(), area->size());
89+
coalesce_into_one_placeholder(range.start(), range.size());
8990
}
9091

9192
// Called when a complete memory area in the memory manager is allocated.
9293
// Create granule sized placeholders for the entire area.
93-
static void destroy_callback(const ZMemory* area) {
94-
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
94+
static void destroy_callback(const ZMemoryRange& range) {
95+
assert(is_aligned(range.size(), ZGranuleSize), "Must be granule aligned");
9596

96-
split_into_granule_sized_placeholders(area->start(), area->size());
97+
split_into_granule_sized_placeholders(range.start(), range.size());
9798
}
9899

99100
// Called when a memory area is allocated at the front of an existing memory area.
100101
// Turn the first part of the memory area into granule sized placeholders.
101-
static void shrink_from_front_callback(const ZMemory* area, size_t size) {
102-
assert(area->size() > size, "Must be larger than what we try to split out");
102+
static void shrink_from_front_callback(const ZMemoryRange& range, size_t size) {
103+
assert(range.size() > size, "Must be larger than what we try to split out");
103104
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
104105

105106
// Split the area into two placeholders
106-
split_placeholder(area->start(), size);
107+
split_placeholder(range.start(), size);
107108

108109
// Split the first part into granule sized placeholders
109-
split_into_granule_sized_placeholders(area->start(), size);
110+
split_into_granule_sized_placeholders(range.start(), size);
110111
}
111112

112113
// Called when a memory area is allocated at the end of an existing memory area.
113114
// Turn the second part of the memory area into granule sized placeholders.
114-
static void shrink_from_back_callback(const ZMemory* area, size_t size) {
115-
assert(area->size() > size, "Must be larger than what we try to split out");
115+
static void shrink_from_back_callback(const ZMemoryRange& range, size_t size) {
116+
assert(range.size() > size, "Must be larger than what we try to split out");
116117
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
117118

118119
// Split the area into two placeholders
119-
const zoffset start = to_zoffset(area->end() - size);
120+
const zoffset start = to_zoffset(range.end() - size);
120121
split_placeholder(start, size);
121122

122123
// Split the second part into granule sized placeholders
@@ -125,19 +126,19 @@ class ZVirtualMemoryManagerSmallPages : public ZVirtualMemoryManagerImpl {
125126

126127
// Called when freeing a memory area and it can be merged at the start of an
127128
// existing area. Coalesce the underlying placeholders into one.
128-
static void grow_from_front_callback(const ZMemory* area, size_t size) {
129-
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
129+
static void grow_from_front_callback(const ZMemoryRange& range, size_t size) {
130+
assert(is_aligned(range.size(), ZGranuleSize), "Must be granule aligned");
130131

131-
const zoffset start = area->start() - size;
132-
coalesce_into_one_placeholder(start, area->size() + size);
132+
const zoffset start = range.start() - size;
133+
coalesce_into_one_placeholder(start, range.size() + size);
133134
}
134135

135136
// Called when freeing a memory area and it can be merged at the end of an
136137
// existing area. Coalesce the underlying placeholders into one.
137-
static void grow_from_back_callback(const ZMemory* area, size_t size) {
138-
assert(is_aligned(area->size(), ZGranuleSize), "Must be granule aligned");
138+
static void grow_from_back_callback(const ZMemoryRange& range, size_t size) {
139+
assert(is_aligned(range.size(), ZGranuleSize), "Must be granule aligned");
139140

140-
coalesce_into_one_placeholder(area->start(), area->size() + size);
141+
coalesce_into_one_placeholder(range.start(), range.size() + size);
141142
}
142143

143144
static void register_with(ZMemoryManager* manager) {
@@ -221,7 +222,7 @@ void ZVirtualMemoryManager::pd_initialize_before_reserve() {
221222
}
222223

223224
void ZVirtualMemoryManager::pd_initialize_after_reserve() {
224-
_impl->initialize_after_reserve(&_manager);
225+
_impl->initialize_after_reserve(_managers.addr(0));
225226
}
226227

227228
bool ZVirtualMemoryManager::pd_reserve(zaddress_unsafe addr, size_t size) {

src/hotspot/share/gc/z/vmStructs_z.hpp

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -83,24 +83,20 @@ typedef ZAttachedArray<ZForwarding, ZForwardingEntry> ZAttachedArrayForForwardin
8383
\
8484
nonstatic_field(ZPage, _type, const ZPageType) \
8585
volatile_nonstatic_field(ZPage, _seqnum, uint32_t) \
86-
nonstatic_field(ZPage, _virtual, const ZVirtualMemory) \
86+
nonstatic_field(ZPage, _virtual, const ZMemoryRange) \
8787
volatile_nonstatic_field(ZPage, _top, zoffset_end) \
8888
\
89-
nonstatic_field(ZPageAllocator, _max_capacity, const size_t) \
90-
volatile_nonstatic_field(ZPageAllocator, _capacity, size_t) \
91-
volatile_nonstatic_field(ZPageAllocator, _used, size_t) \
92-
\
9389
nonstatic_field(ZPageTable, _map, ZGranuleMapForPageTable) \
9490
\
9591
nonstatic_field(ZGranuleMapForPageTable, _map, ZPage** const) \
9692
nonstatic_field(ZGranuleMapForForwarding, _map, ZForwarding** const) \
9793
\
9894
nonstatic_field(ZForwardingTable, _map, ZGranuleMapForForwarding) \
9995
\
100-
nonstatic_field(ZVirtualMemory, _start, const zoffset) \
101-
nonstatic_field(ZVirtualMemory, _end, const zoffset_end) \
96+
nonstatic_field(ZMemoryRange, _start, const zoffset) \
97+
nonstatic_field(ZMemoryRange, _end, const zoffset_end) \
10298
\
103-
nonstatic_field(ZForwarding, _virtual, const ZVirtualMemory) \
99+
nonstatic_field(ZForwarding, _virtual, const ZMemoryRange) \
104100
nonstatic_field(ZForwarding, _object_alignment_shift, const size_t) \
105101
volatile_nonstatic_field(ZForwarding, _ref_count, int) \
106102
nonstatic_field(ZForwarding, _entries, const ZAttachedArrayForForwarding) \
@@ -126,6 +122,7 @@ typedef ZAttachedArray<ZForwarding, ZForwardingEntry> ZAttachedArrayForForwardin
126122
#define VM_TYPES_Z(declare_type, declare_toplevel_type, declare_integer_type) \
127123
declare_toplevel_type(zoffset) \
128124
declare_toplevel_type(zoffset_end) \
125+
declare_toplevel_type(ZMemoryRange) \
129126
declare_toplevel_type(ZGlobalsForVMStructs) \
130127
declare_type(ZCollectedHeap, CollectedHeap) \
131128
declare_toplevel_type(ZHeap) \
@@ -137,7 +134,6 @@ typedef ZAttachedArray<ZForwarding, ZForwardingEntry> ZAttachedArrayForForwardin
137134
declare_toplevel_type(ZAttachedArrayForForwarding) \
138135
declare_toplevel_type(ZGranuleMapForPageTable) \
139136
declare_toplevel_type(ZGranuleMapForForwarding) \
140-
declare_toplevel_type(ZVirtualMemory) \
141137
declare_toplevel_type(ZForwardingTable) \
142138
declare_toplevel_type(ZForwarding) \
143139
declare_toplevel_type(ZForwardingEntry) \

src/hotspot/share/gc/z/zAllocationFlags.hpp

Lines changed: 9 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -31,25 +31,22 @@
3131
// Allocation flags layout
3232
// -----------------------
3333
//
34-
// 7 2 1 0
35-
// +-----+-+-+-+
36-
// |00000|1|1|1|
37-
// +-----+-+-+-+
38-
// | | | |
39-
// | | | * 0-0 Non-Blocking Flag (1-bit)
40-
// | | |
41-
// | | * 1-1 GC Relocation Flag (1-bit)
42-
// | |
43-
// | * 2-2 Low Address Flag (1-bit)
34+
// 7 1 0
35+
// +------+-+-+
36+
// |000000|1|1|
37+
// +------+-+-+
38+
// | | |
39+
// | | * 0-0 Non-Blocking Flag (1-bit)
40+
// | |
41+
// | * 1-1 GC Relocation Flag (1-bit)
4442
// |
45-
// * 7-3 Unused (5-bits)
43+
// * 7-2 Unused (6-bits)
4644
//
4745

4846
class ZAllocationFlags {
4947
private:
5048
typedef ZBitField<uint8_t, bool, 0, 1> field_non_blocking;
5149
typedef ZBitField<uint8_t, bool, 1, 1> field_gc_relocation;
52-
typedef ZBitField<uint8_t, bool, 2, 1> field_low_address;
5350

5451
uint8_t _flags;
5552

@@ -65,21 +62,13 @@ class ZAllocationFlags {
6562
_flags |= field_gc_relocation::encode(true);
6663
}
6764

68-
void set_low_address() {
69-
_flags |= field_low_address::encode(true);
70-
}
71-
7265
bool non_blocking() const {
7366
return field_non_blocking::decode(_flags);
7467
}
7568

7669
bool gc_relocation() const {
7770
return field_gc_relocation::decode(_flags);
7871
}
79-
80-
bool low_address() const {
81-
return field_low_address::decode(_flags);
82-
}
8372
};
8473

8574
#endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP

src/hotspot/share/gc/z/zForwarding.hpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,9 +29,9 @@
2929
#include "gc/z/zForwardingEntry.hpp"
3030
#include "gc/z/zGenerationId.hpp"
3131
#include "gc/z/zLock.hpp"
32+
#include "gc/z/zMemory.hpp"
3233
#include "gc/z/zPageAge.hpp"
3334
#include "gc/z/zPageType.hpp"
34-
#include "gc/z/zVirtualMemory.hpp"
3535

3636
class ObjectClosure;
3737
class ZForwardingAllocator;
@@ -55,7 +55,7 @@ class ZForwarding {
5555
typedef ZAttachedArray<ZForwarding, ZForwardingEntry> AttachedArray;
5656
typedef ZArray<volatile zpointer*> PointerArray;
5757

58-
const ZVirtualMemory _virtual;
58+
const ZMemoryRange _virtual;
5959
const size_t _object_alignment_shift;
6060
const AttachedArray _entries;
6161
ZPage* const _page;

src/hotspot/share/gc/z/zForwarding.inline.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@
3535
#include "gc/z/zLock.inline.hpp"
3636
#include "gc/z/zPage.inline.hpp"
3737
#include "gc/z/zUtils.inline.hpp"
38-
#include "gc/z/zVirtualMemory.inline.hpp"
38+
#include "gc/z/zMemory.inline.hpp"
3939
#include "runtime/atomic.hpp"
4040
#include "utilities/debug.hpp"
4141
#include "utilities/powerOfTwo.hpp"

src/hotspot/share/gc/z/zGeneration.cpp

Lines changed: 10 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -194,17 +194,6 @@ void ZGeneration::select_relocation_set(ZGenerationId generation, bool promote_a
194194
for (ZPage* page; pt_iter.next(&page);) {
195195
if (!page->is_relocatable()) {
196196
// Not relocatable, don't register
197-
// Note that the seqnum can change under our feet here as the page
198-
// can be concurrently freed and recycled by a concurrent generation
199-
// collection. However this property is stable across such transitions.
200-
// If it was not relocatable before recycling, then it won't be
201-
// relocatable after it gets recycled either, as the seqnum atomically
202-
// becomes allocating for the given generation. The opposite property
203-
// also holds: if the page is relocatable, then it can't have been
204-
// concurrently freed; if it was re-allocated it would not be
205-
// relocatable, and if it was not re-allocated we know that it was
206-
// allocated earlier than mark start of the current generation
207-
// collection.
208197
continue;
209198
}
210199

@@ -217,15 +206,14 @@ void ZGeneration::select_relocation_set(ZGenerationId generation, bool promote_a
217206

218207
// Reclaim empty pages in bulk
219208

220-
// An active iterator blocks immediate recycle and delete of pages.
221-
// The intent is to allow the code that iterates over the pages to
222-
// safely read the properties of the pages without them being changed
223-
// by another thread. However, this function both iterates over the
224-
// pages AND frees/recycles them. We "yield" the iterator, so that we
225-
// can perform immediate recycling (as long as no other thread is
226-
// iterating over the pages). The contract is that the pages that are
227-
// about to be freed are "owned" by this thread, and no other thread
228-
// will change their states.
209+
// An active iterator blocks immediate deletion of pages. The intent is
210+
// to allow the code that iterates over pages to safely read properties
211+
// of the pages without them being freed/deleted. However, this function
212+
// both iterates over the pages AND frees them. We "yield" the iterator,
213+
// so that we can perform immediate deletion (as long as no other thread
214+
// is iterating over the pages). The contract is that the pages that are
215+
// about to be freed are "owned" by this thread, and no other thread will
216+
// change their states.
229217
pt_iter.yield([&]() {
230218
free_empty_pages(&selector, 64 /* bulk */);
231219
});
@@ -938,7 +926,7 @@ void ZGenerationYoung::flip_promote(ZPage* from_page, ZPage* to_page) {
938926
_page_table->replace(from_page, to_page);
939927

940928
// Update statistics
941-
_page_allocator->promote_used(from_page->size());
929+
_page_allocator->promote_used(from_page->virtual_memory(), to_page->virtual_memory());
942930
increase_freed(from_page->size());
943931
increase_promoted(from_page->live_bytes());
944932
}
@@ -947,7 +935,7 @@ void ZGenerationYoung::in_place_relocate_promote(ZPage* from_page, ZPage* to_pag
947935
_page_table->replace(from_page, to_page);
948936

949937
// Update statistics
950-
_page_allocator->promote_used(from_page->size());
938+
_page_allocator->promote_used(from_page->virtual_memory(), to_page->virtual_memory());
951939
}
952940

953941
void ZGenerationYoung::register_flip_promoted(const ZArray<ZPage*>& pages) {

0 commit comments

Comments
 (0)