diff --git a/common.gypi b/common.gypi
index a1cd836b1867cf..20acf954bc02d4 100644
--- a/common.gypi
+++ b/common.gypi
@@ -38,7 +38,7 @@

   # Reset this number to 0 on major V8 upgrades.
   # Increment by one for each non-official patch applied to deps/v8.
-  'v8_embedder_string': '-node.9',
+  'v8_embedder_string': '-node.10',

   ##### V8 defaults for Node.js #####
diff --git a/deps/v8/include/v8-statistics.h b/deps/v8/include/v8-statistics.h
index 82b78f5ec65729..2dc10e0fbfd304 100644
--- a/deps/v8/include/v8-statistics.h
+++ b/deps/v8/include/v8-statistics.h
@@ -154,6 +154,13 @@ class V8_EXPORT HeapStatistics {
   size_t number_of_native_contexts() { return number_of_native_contexts_; }
   size_t number_of_detached_contexts() { return number_of_detached_contexts_; }

+  /**
+   * Returns the total number of bytes allocated since the Isolate was created.
+   * This includes all heap objects allocated in any space (new, old, code,
+   * etc.).
+   */
+  uint64_t total_allocated_bytes() { return total_allocated_bytes_; }
+
   /**
    * Returns a 0/1 boolean, which signifies whether the V8 overwrite heap
    * garbage with a bit pattern.
@@ -175,6 +182,7 @@
   size_t number_of_detached_contexts_;
   size_t total_global_handles_size_;
   size_t used_global_handles_size_;
+  uint64_t total_allocated_bytes_;

   friend class V8;
   friend class Isolate;
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index d8d924ac922c8d..33a4acad5f55db 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -6557,7 +6557,8 @@ HeapStatistics::HeapStatistics()
       peak_malloced_memory_(0),
       does_zap_garbage_(false),
       number_of_native_contexts_(0),
-      number_of_detached_contexts_(0) {}
+      number_of_detached_contexts_(0),
+      total_allocated_bytes_(0) {}

 HeapSpaceStatistics::HeapSpaceStatistics()
     : space_name_(nullptr),
@@ -10411,6 +10412,7 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
   heap_statistics->number_of_native_contexts_ = heap->NumberOfNativeContexts();
   heap_statistics->number_of_detached_contexts_ =
       heap->NumberOfDetachedContexts();
+  heap_statistics->total_allocated_bytes_ = heap->GetTotalAllocatedBytes();
   heap_statistics->does_zap_garbage_ = i::heap::ShouldZapGarbage();

 #if V8_ENABLE_WEBASSEMBLY
diff --git a/deps/v8/src/heap/heap-allocator.cc b/deps/v8/src/heap/heap-allocator.cc
index d336338a4de40d..c458a20047f5fb 100644
--- a/deps/v8/src/heap/heap-allocator.cc
+++ b/deps/v8/src/heap/heap-allocator.cc
@@ -85,25 +85,42 @@ AllocationResult HeapAllocator::AllocateRawLargeInternal(
     int size_in_bytes, AllocationType allocation, AllocationOrigin origin,
     AllocationAlignment alignment, AllocationHint hint) {
   DCHECK_GT(size_in_bytes, heap_->MaxRegularHeapObjectSize(allocation));
+  AllocationResult allocation_result;
   switch (allocation) {
     case AllocationType::kYoung:
-      return new_lo_space()->AllocateRaw(local_heap_, size_in_bytes, hint);
+      allocation_result =
+          new_lo_space()->AllocateRaw(local_heap_, size_in_bytes, hint);
+      break;
     case AllocationType::kOld:
-      return lo_space()->AllocateRaw(local_heap_, size_in_bytes, hint);
+      allocation_result =
+          lo_space()->AllocateRaw(local_heap_, size_in_bytes, hint);
+      break;
     case AllocationType::kCode:
-      return code_lo_space()->AllocateRaw(local_heap_, size_in_bytes, hint);
+      allocation_result =
+          code_lo_space()->AllocateRaw(local_heap_, size_in_bytes, hint);
+      break;
     case AllocationType::kSharedOld:
-      return shared_lo_space()->AllocateRaw(local_heap_, size_in_bytes, hint);
+      allocation_result =
+          shared_lo_space()->AllocateRaw(local_heap_, size_in_bytes, hint);
+      break;
     case AllocationType::kTrusted:
-      return trusted_lo_space()->AllocateRaw(local_heap_, size_in_bytes, hint);
+      allocation_result =
+          trusted_lo_space()->AllocateRaw(local_heap_, size_in_bytes, hint);
+      break;
     case AllocationType::kSharedTrusted:
-      return shared_trusted_lo_space()->AllocateRaw(local_heap_, size_in_bytes,
-                                                    hint);
+      allocation_result = shared_trusted_lo_space()->AllocateRaw(
+          local_heap_, size_in_bytes, hint);
+      break;
     case AllocationType::kMap:
     case AllocationType::kReadOnly:
     case AllocationType::kSharedMap:
       UNREACHABLE();
   }
+  if (!allocation_result.IsFailure()) {
+    int allocated_size = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
+    heap_->AddTotalAllocatedBytes(allocated_size);
+  }
+  return allocation_result;
 }

 namespace {
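For illustration, here is a minimal embedder-side sketch (not part of the patch) of how the counter introduced above can be read through the public API. The helper name `BytesAllocatedDuring` is hypothetical; it assumes an already-initialized platform and isolate.

```cpp
#include <cstdint>
#include <functional>

#include "v8-isolate.h"
#include "v8-statistics.h"

// Measures how many bytes `work` allocates on the V8 heap by sampling the
// cumulative counter before and after. The counter is monotonic: garbage
// collection never decreases it, so the delta reflects allocation activity,
// not live memory.
uint64_t BytesAllocatedDuring(v8::Isolate* isolate,
                              const std::function<void()>& work) {
  v8::HeapStatistics before;
  isolate->GetHeapStatistics(&before);
  work();
  v8::HeapStatistics after;
  isolate->GetHeapStatistics(&after);
  return after.total_allocated_bytes() - before.total_allocated_bytes();
}
```

Any background or concurrent allocation between the two samples is counted as well, which is why the tests below bail out under `stress_concurrent_allocation`.
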
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index a4a4fc72263d87..3c0633e4bfb82b 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -7839,6 +7839,10 @@ int Heap::NextStackTraceId() {
   return last_id;
 }

+uint64_t Heap::GetTotalAllocatedBytes() {
+  return total_allocated_bytes_.load(std::memory_order_relaxed);
+}
+
 EmbedderStackStateScope::EmbedderStackStateScope(
     Heap* heap, EmbedderStackStateOrigin origin, StackState stack_state)
     : heap_(heap),
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index b08f80b9e82259..56dc93d1a400ba 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -1703,6 +1703,11 @@ class Heap final {
   bool ShouldUseBackgroundThreads() const;
   bool ShouldUseIncrementalMarking() const;

+  void AddTotalAllocatedBytes(size_t size) {
+    total_allocated_bytes_.fetch_add(size, std::memory_order_relaxed);
+  }
+  uint64_t GetTotalAllocatedBytes();
+
   HeapAllocator* allocator() { return heap_allocator_; }
   const HeapAllocator* allocator() const { return heap_allocator_; }

@@ -2498,6 +2503,8 @@
   // no value was provided this will be 0.
   uint64_t physical_memory_;

+  std::atomic<uint64_t> total_allocated_bytes_ = 0;
+
 #if defined(V8_USE_PERFETTO)
   perfetto::NamedTrack tracing_track_;
 #endif
diff --git a/deps/v8/src/heap/main-allocator.cc b/deps/v8/src/heap/main-allocator.cc
index ddfdb72e1d983f..95d40952041d9e 100644
--- a/deps/v8/src/heap/main-allocator.cc
+++ b/deps/v8/src/heap/main-allocator.cc
@@ -297,6 +297,12 @@ void MainAllocator::ResetLab(Address start, Address end, Address extended_end) {
     MemoryChunkMetadata::UpdateHighWaterMark(top());
   }

+  // This slightly overestimates the total allocated bytes, since the LAB has
+  // not been used yet. However, the unused leftover is small compared to the
+  // LAB itself, so it seems tolerable.
+  if (local_heap_) {
+    local_heap_->heap()->AddTotalAllocatedBytes(end - start);
+  }
   allocation_info().Reset(start, end);
   extended_limit_ = extended_end;
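Both paths, the large-object accounting in `AllocateRawLargeInternal` and the per-LAB accounting in `ResetLab`, feed the same relaxed atomic counter on `Heap`. Relaxed ordering is sufficient here because the counter is a pure statistic: readers only need an atomic snapshot of the value, and it is never used to synchronize access to other memory. The standalone sketch below (plain C++, not V8 code) illustrates the pattern.

```cpp
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <thread>
#include <vector>

// Cumulative statistics counter shared by all allocating threads.
std::atomic<uint64_t> total_allocated_bytes{0};

void RecordAllocation(size_t size) {
  // fetch_add is atomic even with relaxed ordering, so no increment is ever
  // lost; relaxed is enough because nothing is ordered against this counter.
  total_allocated_bytes.fetch_add(size, std::memory_order_relaxed);
}

int main() {
  std::vector<std::thread> threads;
  for (int t = 0; t < 4; ++t) {
    threads.emplace_back([] {
      for (int i = 0; i < 1000; ++i) RecordAllocation(64);
    });
  }
  for (auto& thread : threads) thread.join();
  // Prints 256000: every increment is counted exactly once despite the races.
  std::printf("%llu\n",
              static_cast<unsigned long long>(
                  total_allocated_bytes.load(std::memory_order_relaxed)));
  return 0;
}
```
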
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 49ee23ebee3f1a..024e8434dba7e5 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -17700,6 +17700,152 @@ TEST(GetHeapSpaceStatistics) {
   CHECK_EQ(total_physical_size, heap_statistics.total_physical_size());
 }

+UNINITIALIZED_TEST(GetHeapTotalAllocatedBytes) {
+  // This test is incompatible with concurrent allocation, which may occur
+  // while collecting the statistics and break the final `CHECK_EQ`s.
+  if (i::v8_flags.stress_concurrent_allocation) return;
+
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+  v8::Isolate* isolate = v8::Isolate::New(create_params);
+
+  const uint32_t number_of_elements = 1;
+  const uint32_t allocation_size = i::FixedArray::SizeFor(number_of_elements);
+  const uint32_t trusted_allocation_size =
+      i::TrustedFixedArray::SizeFor(number_of_elements);
+  const uint32_t lo_number_of_elements = 256 * 1024;
+  const uint32_t lo_allocation_size =
+      i::FixedArray::SizeFor(lo_number_of_elements);
+  const uint32_t trusted_lo_allocation_size =
+      i::TrustedFixedArray::SizeFor(lo_number_of_elements);
+  const uint32_t expected_allocation_size =
+      allocation_size * 2 + lo_allocation_size * 2 + trusted_allocation_size +
+      trusted_lo_allocation_size;
+
+  {
+    v8::Isolate::Scope isolate_scope(isolate);
+    v8::HandleScope handle_scope(isolate);
+    LocalContext env(isolate);
+    i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+    v8::HeapStatistics heap_stats_before;
+    isolate->GetHeapStatistics(&heap_stats_before);
+    size_t initial_allocated = heap_stats_before.total_allocated_bytes();
+
+    i::MaybeHandle<i::FixedArray> young_alloc =
+        i_isolate->factory()->TryNewFixedArray(number_of_elements,
+                                               i::AllocationType::kYoung);
+    USE(young_alloc);
+    i::MaybeHandle<i::FixedArray> old_alloc =
+        i_isolate->factory()->TryNewFixedArray(number_of_elements,
+                                               i::AllocationType::kOld);
+    USE(old_alloc);
+    i::Handle<i::TrustedFixedArray> trusted_alloc =
+        i_isolate->factory()->NewTrustedFixedArray(number_of_elements,
+                                                   i::AllocationType::kTrusted);
+    USE(trusted_alloc);
+    i::MaybeHandle<i::FixedArray> old_lo_alloc =
+        i_isolate->factory()->TryNewFixedArray(lo_number_of_elements,
+                                               i::AllocationType::kOld);
+    USE(old_lo_alloc);
+
+    {
+      v8::HandleScope inner_handle_scope(isolate);
+      auto young_lo_alloc = i_isolate->factory()->TryNewFixedArray(
+          lo_number_of_elements, i::AllocationType::kYoung);
+      USE(young_lo_alloc);
+    }
+
+    auto trusted_lo_alloc = i_isolate->factory()->NewTrustedFixedArray(
+        lo_number_of_elements, i::AllocationType::kTrusted);
+    USE(trusted_lo_alloc);
+
+    v8::HeapStatistics heap_stats_after;
+    isolate->GetHeapStatistics(&heap_stats_after);
+    uint64_t final_allocated = heap_stats_after.total_allocated_bytes();
+
+    CHECK_GT(final_allocated, initial_allocated);
+    uint64_t allocated_diff = final_allocated - initial_allocated;
+    CHECK_GE(allocated_diff, expected_allocation_size);
+
+    // This both tests that counting happens when a LAB is freed and validates
+    // that evacuated/promoted objects are not double-counted.
+    v8::internal::heap::InvokeAtomicMajorGC(i_isolate->heap());
+
+    v8::HeapStatistics heap_stats_after_gc;
+    isolate->GetHeapStatistics(&heap_stats_after_gc);
+    uint64_t total_allocation_after_gc =
+        heap_stats_after_gc.total_allocated_bytes();
+
+    CHECK_EQ(total_allocation_after_gc, final_allocated);
+  }
+
+  isolate->Dispose();
+}
+
+#if V8_CAN_CREATE_SHARED_HEAP_BOOL
+
+UNINITIALIZED_TEST(GetHeapTotalAllocatedBytesSharedSpaces) {
+  // This test is incompatible with concurrent allocation, which may occur
+  // while collecting the statistics and break the final `CHECK_EQ`s.
+  if (i::v8_flags.stress_concurrent_allocation) return;
+  if (COMPRESS_POINTERS_IN_MULTIPLE_CAGES_BOOL) return;
+
+  i::v8_flags.shared_heap = true;
+  i::FlagList::EnforceFlagImplications();
+
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+  v8::Isolate* isolate = v8::Isolate::New(create_params);
+
+  {
+    v8::Isolate::Scope isolate_scope(isolate);
+    v8::HandleScope handle_scope(isolate);
+    LocalContext env(isolate);
+
+    v8::HeapStatistics heap_stats_before;
+    isolate->GetHeapStatistics(&heap_stats_before);
+    size_t initial_allocated = heap_stats_before.total_allocated_bytes();
+
+    i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+    const uint32_t number_of_elements = 1;
+    const uint32_t allocation_size =
+        i::FixedArray::SizeFor(number_of_elements);
+    const uint32_t trusted_allocation_size =
+        i::TrustedFixedArray::SizeFor(number_of_elements);
+    const uint32_t lo_number_of_elements = 256 * 1024;
+    const uint32_t lo_allocation_size =
+        i::FixedArray::SizeFor(lo_number_of_elements);
+    const uint32_t expected_allocation_size =
+        allocation_size + trusted_allocation_size + lo_allocation_size;
+
+    i::MaybeHandle<i::FixedArray> shared_alloc =
+        i_isolate->factory()->TryNewFixedArray(number_of_elements,
+                                               i::AllocationType::kSharedOld);
+    USE(shared_alloc);
+    i::Handle<i::TrustedFixedArray> shared_trusted_alloc =
+        i_isolate->factory()->NewTrustedFixedArray(
+            number_of_elements, i::AllocationType::kSharedTrusted);
+    USE(shared_trusted_alloc);
+    i::MaybeHandle<i::FixedArray> shared_lo_alloc =
+        i_isolate->factory()->TryNewFixedArray(lo_number_of_elements,
+                                               i::AllocationType::kSharedOld);
+    USE(shared_lo_alloc);
+
+    v8::HeapStatistics heap_stats_after;
+    isolate->GetHeapStatistics(&heap_stats_after);
+    uint64_t final_allocated = heap_stats_after.total_allocated_bytes();
+
+    CHECK_GT(final_allocated, initial_allocated);
+    uint64_t allocated_diff = final_allocated - initial_allocated;
+    CHECK_GE(allocated_diff, expected_allocation_size);
+  }
+
+  isolate->Dispose();
+}
+
+#endif  // V8_CAN_CREATE_SHARED_HEAP_BOOL
+
 TEST(NumberOfNativeContexts) {
   static const size_t kNumTestContexts = 10;
   i::Isolate* isolate = CcTest::i_isolate();