@@ -27,7 +27,8 @@
 #include "memory/metaspaceUtils.hpp"
 #include "nmt/memBaseline.hpp"
 #include "nmt/memTracker.hpp"
-#include "nmt/regionsTree.inline.hpp"
+#include "runtime/javaThread.hpp"
+#include "runtime/safepoint.hpp"
 
 /*
  * Sizes are sorted in descending order for reporting
@@ -103,6 +104,38 @@ class MallocAllocationSiteWalker : public MallocSiteWalker {
   }
 };
 
+// Walk all virtual memory regions for baselining
+class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
+ private:
+  typedef LinkedListImpl<ReservedMemoryRegion, AnyObj::C_HEAP, mtNMT,
+                         AllocFailStrategy::RETURN_NULL> EntryList;
+  EntryList _virtual_memory_regions;
+  DEBUG_ONLY(address _last_base;)
+ public:
+  VirtualMemoryAllocationWalker() {
+    DEBUG_ONLY(_last_base = nullptr);
+  }
+
+  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
+    assert(rgn->base() >= _last_base, "region unordered?");
+    DEBUG_ONLY(_last_base = rgn->base());
+    if (rgn->size() > 0) {
+      if (_virtual_memory_regions.add(*rgn) != nullptr) {
+        return true;
+      } else {
+        return false;
+      }
+    } else {
+      // Ignore empty sites.
+      return true;
+    }
+  }
+
+  LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
+    return &_virtual_memory_regions;
+  }
+};
+
 void MemBaseline::baseline_summary() {
   MallocMemorySummary::snapshot(&_malloc_memory_snapshot);
   VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
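For context: `do_allocation_site` is the lone callback of the `VirtualMemoryWalker` visitor interface. The tracker invokes it once per reserved region, in ascending base-address order (the `DEBUG_ONLY` `_last_base` assert checks exactly that), and returning `false` aborts the walk. A minimal sketch of the driving loop, assuming the regions live in a `LinkedList<ReservedMemoryRegion>`; `walk_regions` and `region_list` are illustrative names, not the tracker's actual internals:

```cpp
// Sketch only: shows the walker contract, not the real
// VirtualMemoryTracker::Instance::walk_virtual_memory() implementation.
static bool walk_regions(LinkedList<ReservedMemoryRegion>* region_list, // hypothetical storage
                         VirtualMemoryWalker* walker) {
  for (LinkedListNode<ReservedMemoryRegion>* node = region_list->head();
       node != nullptr; node = node->next()) {
    // A false return from the visitor (e.g. a failed C-heap copy in
    // VirtualMemoryAllocationWalker::do_allocation_site above) stops
    // the walk and propagates the failure to the caller.
    if (!walker->do_allocation_site(node->data())) {
      return false;
    }
  }
  return true;
}
```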
@@ -125,16 +158,15 @@ bool MemBaseline::baseline_allocation_sites() {
   // The malloc sites are collected in size order
   _malloc_sites_order = by_size;
 
-  assert(_vma_allocations == nullptr, "must");
-
-  {
-    MemTracker::NmtVirtualMemoryLocker locker;
-    _vma_allocations = new (mtNMT, std::nothrow) RegionsTree(*VirtualMemoryTracker::Instance::tree());
-    if (_vma_allocations == nullptr) {
-      return false;
-    }
+  // Virtual memory allocation sites
+  VirtualMemoryAllocationWalker virtual_memory_walker;
+  if (!VirtualMemoryTracker::Instance::walk_virtual_memory(&virtual_memory_walker)) {
+    return false;
   }
 
+  // Virtual memory allocations are collected in call stack order
+  _virtual_memory_allocations.move(virtual_memory_walker.virtual_memory_allocations());
+
   if (!aggregate_virtual_memory_allocation_sites()) {
     return false;
   }
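Note the failure contract threaded through this hunk: `EntryList` was declared with `AllocFailStrategy::RETURN_NULL`, so an out-of-memory copy makes `add()` return `nullptr`, the walker returns `false`, `walk_virtual_memory()` reports it, and `baseline_allocation_sites()` bails out instead of aborting the VM. The subsequent `move()` then steals the list nodes from the stack-local walker, so the snapshot survives the walker's destruction without a second copy. The same graceful-OOM idiom in isolation (a sketch, assuming HotSpot's `AllocateHeap` overload that takes a fail-mode argument):

```cpp
// With RETURN_NULL a failed C-heap allocation yields nullptr rather
// than calling vm_exit_out_of_memory(); `len` is illustrative.
char* buf = AllocateHeap(len, mtNMT, AllocFailStrategy::RETURN_NULL);
if (buf == nullptr) {
  return false;  // the baseline fails; the VM keeps running
}
```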
@@ -170,28 +202,20 @@ int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
 bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
   SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site> allocation_sites;
 
+  VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
+  const ReservedMemoryRegion* rgn;
   VirtualMemoryAllocationSite* site;
-  bool failed_oom = false;
-  _vma_allocations->visit_reserved_regions([&](ReservedMemoryRegion& rgn) {
-    VirtualMemoryAllocationSite tmp(*rgn.call_stack(), rgn.mem_tag());
+  while ((rgn = itr.next()) != nullptr) {
+    VirtualMemoryAllocationSite tmp(*rgn->call_stack(), rgn->mem_tag());
     site = allocation_sites.find(tmp);
     if (site == nullptr) {
       LinkedListNode<VirtualMemoryAllocationSite>* node =
         allocation_sites.add(tmp);
-      if (node == nullptr) {
-        failed_oom = true;
-        return false;
-      }
+      if (node == nullptr) return false;
       site = node->data();
     }
-    site->reserve_memory(rgn.size());
-
-    site->commit_memory(_vma_allocations->committed_size(rgn));
-    return true;
-  });
-
-  if (failed_oom) {
-    return false;
+    site->reserve_memory(rgn->size());
+    site->commit_memory(VirtualMemoryTracker::Instance::committed_size(rgn));
   }
 
   _virtual_memory_sites.move(&allocation_sites);
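The rewritten loop aggregates by allocation call stack: `compare_allocation_site` orders sites by their stacks, `find()` looks up an existing site for the region's stack, and each region's reserved and committed sizes are folded into that site before the result is moved into `_virtual_memory_sites`. A worked example with made-up sizes:

```cpp
// Illustrative input (sizes invented for the example):
//   region A: call stack S1, reserved 4 MB, committed 1 MB
//   region B: call stack S1, reserved 2 MB, committed 2 MB
//   region C: call stack S2, reserved 8 MB, committed 0 MB
// After aggregate_virtual_memory_allocation_sites():
//   site(S1): reserved 6 MB, committed 3 MB   // A and B share a stack
//   site(S2): reserved 8 MB, committed 0 MB
```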