@@ -25,8 +25,9 @@ namespace dart {
 // munmap will noticeably impact performance.
 static constexpr intptr_t kPageCacheCapacity = 128 * kWordSize;
 static Mutex* page_cache_mutex = nullptr;
-static VirtualMemory* page_cache[kPageCacheCapacity] = {nullptr};
-static intptr_t page_cache_size = 0;
+static VirtualMemory* page_cache[2][kPageCacheCapacity] = {{nullptr},
+                                                           {nullptr}};
+static intptr_t page_cache_size[2] = {0, 0};
 
 void Page::Init() {
   ASSERT(page_cache_mutex == nullptr);
@@ -35,10 +36,12 @@ void Page::Init() {
 
 void Page::ClearCache() {
   MutexLocker ml(page_cache_mutex);
-  ASSERT(page_cache_size >= 0);
-  ASSERT(page_cache_size <= kPageCacheCapacity);
-  while (page_cache_size > 0) {
-    delete page_cache[--page_cache_size];
+  for (intptr_t i = 0; i < 2; i++) {
+    ASSERT(page_cache_size[i] >= 0);
+    ASSERT(page_cache_size[i] <= kPageCacheCapacity);
+    while (page_cache_size[i] > 0) {
+      delete page_cache[i][--page_cache_size[i]];
+    }
   }
 }
 
@@ -50,12 +53,19 @@ void Page::Cleanup() {
 
 intptr_t Page::CachedSize() {
   MutexLocker ml(page_cache_mutex);
-  return page_cache_size * kPageSize;
+  intptr_t pages = 0;
+  for (intptr_t i = 0; i < 2; i++) {
+    pages += page_cache_size[i];
+  }
+  return pages * kPageSize;
 }
 
 static bool CanUseCache(uword flags) {
-  return (flags & (Page::kExecutable | Page::kImage | Page::kLarge |
-                   Page::kVMIsolate)) == 0;
+  return (flags & (Page::kImage | Page::kLarge | Page::kVMIsolate)) == 0;
+}
+
+static intptr_t CacheIndex(uword flags) {
+  return (flags & Page::kExecutable) != 0 ? 1 : 0;
 }
 
 Page* Page::Allocate(intptr_t size, uword flags) {
@@ -71,10 +81,11 @@ Page* Page::Allocate(intptr_t size, uword flags) {
     // cached pages are dirty.
     ASSERT(size == kPageSize);
     MutexLocker ml(page_cache_mutex);
-    ASSERT(page_cache_size >= 0);
-    ASSERT(page_cache_size <= kPageCacheCapacity);
-    if (page_cache_size > 0) {
-      memory = page_cache[--page_cache_size];
+    intptr_t index = CacheIndex(flags);
+    ASSERT(page_cache_size[index] >= 0);
+    ASSERT(page_cache_size[index] <= kPageCacheCapacity);
+    if (page_cache_size[index] > 0) {
+      memory = page_cache[index][--page_cache_size[index]];
     }
   }
   if (memory == nullptr) {
@@ -90,15 +101,13 @@ Page* Page::Allocate(intptr_t size, uword flags) {
     MSAN_UNPOISON(memory->address(), size);
 
 #if defined(DEBUG)
+    // Allocation stubs check that the TLAB hasn't been corrupted.
     uword* cursor = reinterpret_cast<uword*>(memory->address());
     uword* end = reinterpret_cast<uword*>(memory->end());
     while (cursor < end) {
       *cursor++ = kAllocationCanary;
     }
 #endif
-  } else {
-    // We don't zap old-gen because we rely on implicit zero-initialization
-    // of large typed data arrays.
   }
 
   Page* result = reinterpret_cast<Page*>(memory->address());
@@ -148,7 +157,8 @@ void Page::Deallocate() {
 
   LSAN_UNREGISTER_ROOT_REGION(this, sizeof(*this));
 
-  if (CanUseCache(flags_)) {
+  const uword flags = flags_;
+  if (CanUseCache(flags)) {
     ASSERT(memory->size() == kPageSize);
 
     // Allow caching up to one new-space worth of pages to avoid the cost unmap
@@ -164,20 +174,28 @@ void Page::Deallocate() {
     limit = Utils::Minimum(limit, kPageCacheCapacity);
 
     MutexLocker ml(page_cache_mutex);
-    ASSERT(page_cache_size >= 0);
-    ASSERT(page_cache_size <= kPageCacheCapacity);
-    if (page_cache_size < limit) {
+    intptr_t index = CacheIndex(flags);
+    ASSERT(page_cache_size[index] >= 0);
+    ASSERT(page_cache_size[index] <= kPageCacheCapacity);
+    if (page_cache_size[index] < limit) {
       intptr_t size = memory->size();
+      if ((flags & kExecutable) != 0 && FLAG_write_protect_code) {
+        // Reset to initial protection.
+        memory->Protect(VirtualMemory::kReadWrite);
+      }
 #if defined(DEBUG)
-      if ((flags_ & kNew) != 0) {
-        memset(memory->address(), Heap::kZapByte, size);
+      if ((flags & kExecutable) != 0) {
+        uword* cursor = reinterpret_cast<uword*>(memory->address());
+        uword* end = reinterpret_cast<uword*>(memory->end());
+        while (cursor < end) {
+          *cursor++ = kBreakInstructionFiller;
+        }
       } else {
-        // We don't zap old-gen because we rely on implicit zero-initialization
-        // of large typed data arrays.
+        memset(memory->address(), Heap::kZapByte, size);
       }
 #endif
       MSAN_POISON(memory->address(), size);
-      page_cache[page_cache_size++] = memory;
+      page_cache[index][page_cache_size[index]++] = memory;
       memory = nullptr;
     }
   }
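
Note on the change: the patch generalizes the single free-page cache into two pools indexed by the page's executable bit, so executable pages (previously excluded by CanUseCache) can be recycled instead of being unmapped and remapped on every code-page turnover. The following is a minimal standalone sketch of that two-pool pattern, not the VM's actual code; Region, kCapacity, the flag bits, and the function names are placeholder assumptions for illustration.

#include <stdint.h>

#include <cassert>
#include <mutex>

struct Region {};  // Hypothetical stand-in for VirtualMemory.

constexpr intptr_t kCapacity = 16;  // Per-pool capacity (illustrative).
constexpr uintptr_t kExecutable = 1u << 0;
constexpr uintptr_t kImage = 1u << 1;
constexpr uintptr_t kLarge = 1u << 2;

static std::mutex cache_mutex;
static Region* cache[2][kCapacity] = {{nullptr}, {nullptr}};
static intptr_t cache_size[2] = {0, 0};

// Image and oversized pages are never recycled; executable pages now are,
// but from their own pool so a recycled code page never aliases a data page.
static bool CanUseCache(uintptr_t flags) {
  return (flags & (kImage | kLarge)) == 0;
}

static intptr_t CacheIndex(uintptr_t flags) {
  return (flags & kExecutable) != 0 ? 1 : 0;
}

// Pop a cached region of the matching kind, or nullptr if that pool is empty.
static Region* TakeFromCache(uintptr_t flags) {
  if (!CanUseCache(flags)) return nullptr;
  std::lock_guard<std::mutex> lock(cache_mutex);
  intptr_t index = CacheIndex(flags);
  assert(cache_size[index] >= 0 && cache_size[index] <= kCapacity);
  if (cache_size[index] == 0) return nullptr;
  return cache[index][--cache_size[index]];
}

// Push a region back into its pool; returns false when the pool is full, in
// which case the caller unmaps it (as Page::Deallocate does past its limit).
static bool ReturnToCache(Region* region, uintptr_t flags) {
  if (!CanUseCache(flags)) return false;
  std::lock_guard<std::mutex> lock(cache_mutex);
  intptr_t index = CacheIndex(flags);
  if (cache_size[index] == kCapacity) return false;
  cache[index][cache_size[index]++] = region;
  return true;
}

Keeping executable pages in a distinct pool also localizes their extra bookkeeping in the patch above: the protection reset to VirtualMemory::kReadWrite (under FLAG_write_protect_code) and the debug fill with kBreakInstructionFiller rather than Heap::kZapByte happen only on the executable path, while data pages keep the existing zap-byte behavior.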