 #include "memory/metaspaceUtils.hpp"
 #include "nmt/memBaseline.hpp"
 #include "nmt/memTracker.hpp"
-#include "runtime/javaThread.hpp"
-#include "runtime/safepoint.hpp"
+#include "nmt/regionsTree.inline.hpp"
 
 /*
  * Sizes are sorted in descending order for reporting
@@ -104,38 +103,6 @@ class MallocAllocationSiteWalker : public MallocSiteWalker {
   }
 };
 
-// Walk all virtual memory regions for baselining
-class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
- private:
-  typedef LinkedListImpl<ReservedMemoryRegion, AnyObj::C_HEAP, mtNMT,
-                         AllocFailStrategy::RETURN_NULL> EntryList;
-  EntryList _virtual_memory_regions;
-  DEBUG_ONLY(address _last_base;)
- public:
-  VirtualMemoryAllocationWalker() {
-    DEBUG_ONLY(_last_base = nullptr);
-  }
-
-  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
-    assert(rgn->base() >= _last_base, "region unordered?");
-    DEBUG_ONLY(_last_base = rgn->base());
-    if (rgn->size() > 0) {
-      if (_virtual_memory_regions.add(*rgn) != nullptr) {
-        return true;
-      } else {
-        return false;
-      }
-    } else {
-      // Ignore empty sites.
-      return true;
-    }
-  }
-
-  LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
-    return &_virtual_memory_regions;
-  }
-};
-
 void MemBaseline::baseline_summary() {
   MallocMemorySummary::snapshot(&_malloc_memory_snapshot);
   VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
@@ -158,14 +125,15 @@ bool MemBaseline::baseline_allocation_sites() {
   // The malloc sites are collected in size order
   _malloc_sites_order = by_size;
 
-  // Virtual memory allocation sites
-  VirtualMemoryAllocationWalker virtual_memory_walker;
-  if (!VirtualMemoryTracker::Instance::walk_virtual_memory(&virtual_memory_walker)) {
-    return false;
-  }
+  assert(_vma_allocations == nullptr, "must");
 
-  // Virtual memory allocations are collected in call stack order
-  _virtual_memory_allocations.move(virtual_memory_walker.virtual_memory_allocations());
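+  // Take one consistent copy of the tracker's RegionsTree while holding the
+  // NMT virtual-memory lock; the aggregation below can then walk the copy
+  // without holding the lock.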
+  {
+    MemTracker::NmtVirtualMemoryLocker locker;
+    _vma_allocations = new (mtNMT, std::nothrow) RegionsTree(*VirtualMemoryTracker::Instance::tree());
+    if (_vma_allocations == nullptr) {
+      return false;
+    }
+  }
 
   if (!aggregate_virtual_memory_allocation_sites()) {
     return false;
@@ -202,20 +170,28 @@ int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
 bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
   SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site> allocation_sites;
 
-  VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
-  const ReservedMemoryRegion* rgn;
   VirtualMemoryAllocationSite* site;
-  while ((rgn = itr.next()) != nullptr) {
-    VirtualMemoryAllocationSite tmp(*rgn->call_stack(), rgn->mem_tag());
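+  // visit_reserved_regions() hands each reserved region to the lambda;
+  // returning false from the callback stops the walk early, which is how an
+  // allocation failure below aborts the traversal.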
+  bool failed_oom = false;
+  _vma_allocations->visit_reserved_regions([&](ReservedMemoryRegion& rgn) {
+    VirtualMemoryAllocationSite tmp(*rgn.call_stack(), rgn.mem_tag());
     site = allocation_sites.find(tmp);
     if (site == nullptr) {
       LinkedListNode<VirtualMemoryAllocationSite>* node =
         allocation_sites.add(tmp);
-      if (node == nullptr) return false;
+      if (node == nullptr) {
+        failed_oom = true;
+        return false;
+      }
       site = node->data();
     }
-    site->reserve_memory(rgn->size());
-    site->commit_memory(VirtualMemoryTracker::Instance::committed_size(rgn));
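+    // Update the site totals: reserved size from the region itself, and
+    // committed size computed from the baseline's own tree copy rather than
+    // from the live tracker.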
+    site->reserve_memory(rgn.size());
+
+    site->commit_memory(_vma_allocations->committed_size(rgn));
+    return true;
+  });
+
+  if (failed_oom) {
+    return false;
   }
 
   _virtual_memory_sites.move(&allocation_sites);