Skip to content

Commit d4406f0

Browse files
committed
Grow GC heaps independently
[Bug #21214] If we allocate objects where one heap holds transient objects and another holds long lived objects, then the heap with transient objects will grow along with the heap with long lived objects, causing higher memory usage.

For example, we can see this issue in this script:

    def allocate_small_object = []
    def allocate_large_object = Array.new(10)

    arys = Array.new(1_000_000) do
      # Allocate 10 small transient objects
      10.times { allocate_small_object }

      # Allocate 1 large object that is persistent
      allocate_large_object
    end

    pp GC.stat
    pp GC.stat_heap

Before this change:

    heap_live_slots: 2837243

    {0 => {slot_size: 40, heap_eden_pages: 1123, heap_eden_slots: 1838807},
     2 => {slot_size: 160, heap_eden_pages: 2449, heap_eden_slots: 1001149}}

After this change:

    heap_live_slots: 1094474

    {0 => {slot_size: 40, heap_eden_pages: 58, heap_eden_slots: 94973},
     2 => {slot_size: 160, heap_eden_pages: 2449, heap_eden_slots: 1001149}}
1 parent 432e5fa commit d4406f0

File tree

2 files changed

+56
-41
lines changed

2 files changed

+56
-41
lines changed

gc/default/default.c

Lines changed: 35 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -804,8 +804,6 @@ heap_page_in_global_empty_pages_pool(rb_objspace_t *objspace, struct heap_page *
804804
#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
805805
#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
806806

807-
#define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3
808-
809807
#define RVALUE_AGE_BITMAP_INDEX(n) (NUM_IN_PAGE(n) / (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT))
810808
#define RVALUE_AGE_BITMAP_OFFSET(n) ((NUM_IN_PAGE(n) % (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT)) * RVALUE_AGE_BIT_COUNT)
811809

@@ -1771,13 +1769,7 @@ heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
17711769
static void
17721770
heap_pages_free_unused_pages(rb_objspace_t *objspace)
17731771
{
1774-
size_t pages_to_keep_count =
1775-
// Get number of pages estimated for the smallest size pool
1776-
CEILDIV(objspace->heap_pages.allocatable_slots, HEAP_PAGE_OBJ_LIMIT) *
1777-
// Estimate the average slot size multiple
1778-
(1 << (HEAP_COUNT / 2));
1779-
1780-
if (objspace->empty_pages != NULL && objspace->empty_pages_count > pages_to_keep_count) {
1772+
if (objspace->empty_pages != NULL && heap_pages_freeable_pages > 0) {
17811773
GC_ASSERT(objspace->empty_pages_count > 0);
17821774
objspace->empty_pages = NULL;
17831775
objspace->empty_pages_count = 0;
@@ -1786,15 +1778,15 @@ heap_pages_free_unused_pages(rb_objspace_t *objspace)
17861778
for (i = j = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
17871779
struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
17881780

1789-
if (heap_page_in_global_empty_pages_pool(objspace, page) && pages_to_keep_count == 0) {
1781+
if (heap_page_in_global_empty_pages_pool(objspace, page) && heap_pages_freeable_pages > 0) {
17901782
heap_page_free(objspace, page);
1783+
heap_pages_freeable_pages--;
17911784
}
17921785
else {
1793-
if (heap_page_in_global_empty_pages_pool(objspace, page) && pages_to_keep_count > 0) {
1786+
if (heap_page_in_global_empty_pages_pool(objspace, page)) {
17941787
page->free_next = objspace->empty_pages;
17951788
objspace->empty_pages = page;
17961789
objspace->empty_pages_count++;
1797-
pages_to_keep_count--;
17981790
}
17991791

18001792
if (i != j) {
@@ -2026,29 +2018,33 @@ heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
20262018
static int
20272019
heap_page_allocate_and_initialize(rb_objspace_t *objspace, rb_heap_t *heap)
20282020
{
2029-
if (objspace->heap_pages.allocatable_slots > 0) {
2030-
gc_report(1, objspace, "heap_page_allocate_and_initialize: rb_darray_size(objspace->heap_pages.sorted): %"PRIdSIZE", "
2021+
gc_report(1, objspace, "heap_page_allocate_and_initialize: rb_darray_size(objspace->heap_pages.sorted): %"PRIdSIZE", "
20312022
"allocatable_slots: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
20322023
rb_darray_size(objspace->heap_pages.sorted), objspace->heap_pages.allocatable_slots, heap->total_pages);
20332024

2034-
struct heap_page *page = heap_page_resurrect(objspace);
2035-
if (page == NULL) {
2036-
page = heap_page_allocate(objspace);
2037-
}
2025+
bool allocated = false;
2026+
struct heap_page *page = heap_page_resurrect(objspace);
2027+
2028+
if (page == NULL && objspace->heap_pages.allocatable_slots > 0) {
2029+
page = heap_page_allocate(objspace);
2030+
allocated = true;
2031+
}
2032+
2033+
if (page != NULL) {
20382034
heap_add_page(objspace, heap, page);
20392035
heap_add_freepage(heap, page);
20402036

2041-
if (objspace->heap_pages.allocatable_slots > (size_t)page->total_slots) {
2042-
objspace->heap_pages.allocatable_slots -= page->total_slots;
2043-
}
2044-
else {
2045-
objspace->heap_pages.allocatable_slots = 0;
2037+
if (allocated) {
2038+
if (objspace->heap_pages.allocatable_slots > (size_t)page->total_slots) {
2039+
objspace->heap_pages.allocatable_slots -= page->total_slots;
2040+
}
2041+
else {
2042+
objspace->heap_pages.allocatable_slots = 0;
2043+
}
20462044
}
2047-
2048-
return true;
20492045
}
20502046

2051-
return false;
2047+
return page != NULL;
20522048
}
20532049

20542050
static void
@@ -3781,7 +3777,6 @@ gc_sweep_start(rb_objspace_t *objspace)
37813777
{
37823778
gc_mode_transition(objspace, gc_mode_sweeping);
37833779
objspace->rincgc.pooled_slots = 0;
3784-
objspace->heap_pages.allocatable_slots = 0;
37853780

37863781
#if GC_CAN_COMPILE_COMPACTION
37873782
if (objspace->flags.during_compacting) {
@@ -3818,7 +3813,7 @@ gc_sweep_finish_heap(rb_objspace_t *objspace, rb_heap_t *heap)
38183813

38193814
if (swept_slots < min_free_slots &&
38203815
/* The heap is a growth heap if it freed more slots than had empty slots. */
3821-
(heap->empty_slots == 0 || heap->freed_slots > heap->empty_slots)) {
3816+
((heap->empty_slots == 0 && total_slots > 0) || heap->freed_slots > heap->empty_slots)) {
38223817
/* If we don't have enough slots and we have pages on the tomb heap, move
38233818
* pages from the tomb heap to the eden heap. This may prevent page
38243819
* creation thrashing (frequently allocating and deallocting pages) and
@@ -3834,10 +3829,12 @@ gc_sweep_finish_heap(rb_objspace_t *objspace, rb_heap_t *heap)
38343829

38353830
if (swept_slots < min_free_slots) {
38363831
/* Grow this heap if we are in a major GC or if we haven't run at least
3837-
* RVALUE_OLD_AGE minor GC since the last major GC. */
3832+
* RVALUE_OLD_AGE minor GC since the last major GC. */
38383833
if (is_full_marking(objspace) ||
38393834
objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
3840-
heap_allocatable_slots_expand(objspace, heap, swept_slots, heap->total_slots);
3835+
if (objspace->heap_pages.allocatable_slots < min_free_slots) {
3836+
heap_allocatable_slots_expand(objspace, heap, swept_slots, heap->total_slots);
3837+
}
38413838
}
38423839
else {
38433840
gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
@@ -3887,7 +3884,6 @@ static int
38873884
gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
38883885
{
38893886
struct heap_page *sweep_page = heap->sweeping_page;
3890-
int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
38913887
int swept_slots = 0;
38923888
int pooled_slots = 0;
38933889

@@ -3911,11 +3907,7 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
39113907

39123908
heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
39133909

3914-
if (free_slots == sweep_page->total_slots &&
3915-
heap_pages_freeable_pages > 0 &&
3916-
unlink_limit > 0) {
3917-
heap_pages_freeable_pages--;
3918-
unlink_limit--;
3910+
if (free_slots == sweep_page->total_slots) {
39193911
/* There are no living objects, so move this page to the global empty pages. */
39203912
heap_unlink_page(objspace, heap, sweep_page);
39213913

@@ -3994,9 +3986,7 @@ gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *sweep_heap)
39943986
for (int i = 0; i < HEAP_COUNT; i++) {
39953987
rb_heap_t *heap = &heaps[i];
39963988
if (!gc_sweep_step(objspace, heap)) {
3997-
/* sweep_heap requires a free slot but sweeping did not yield any
3998-
* and we cannot allocate a new page. */
3999-
if (heap == sweep_heap && objspace->heap_pages.allocatable_slots == 0) {
3989+
if (heap == sweep_heap && objspace->empty_pages_count == 0 && objspace->heap_pages.allocatable_slots == 0) {
40003990
/* Not allowed to create a new page so finish sweeping. */
40013991
gc_sweep_rest(objspace);
40023992
break;
@@ -5462,6 +5452,10 @@ gc_marks_finish(rb_objspace_t *objspace)
54625452
gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
54635453
}
54645454
}
5455+
5456+
if (full_marking) {
5457+
heap_allocatable_slots_expand(objspace, NULL, sweep_slots, total_slots);
5458+
}
54655459
}
54665460

54675461
if (full_marking) {
@@ -6844,7 +6838,9 @@ rb_gc_impl_prepare_heap(void *objspace_ptr)
68446838
gc_params.heap_free_slots_max_ratio = orig_max_free_slots;
68456839

68466840
objspace->heap_pages.allocatable_slots = 0;
6841+
heap_pages_freeable_pages = objspace->empty_pages_count;
68476842
heap_pages_free_unused_pages(objspace_ptr);
6843+
GC_ASSERT(heap_pages_freeable_pages == 0);
68486844
GC_ASSERT(objspace->empty_pages_count == 0);
68496845
objspace->heap_pages.allocatable_slots = orig_allocatable_slots;
68506846

test/ruby/test_gc.rb

Lines changed: 21 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -211,7 +211,7 @@ def test_stat_constraints
211211
assert_equal stat[:total_allocated_pages], stat[:heap_allocated_pages] + stat[:total_freed_pages]
212212
assert_equal stat[:heap_available_slots], stat[:heap_live_slots] + stat[:heap_free_slots] + stat[:heap_final_slots]
213213
assert_equal stat[:heap_live_slots], stat[:total_allocated_objects] - stat[:total_freed_objects] - stat[:heap_final_slots]
214-
assert_equal stat[:heap_allocated_pages], stat[:heap_eden_pages]
214+
assert_equal stat[:heap_allocated_pages], stat[:heap_eden_pages] + stat[:heap_empty_pages]
215215

216216
if use_rgengc?
217217
assert_equal stat[:count], stat[:major_gc_count] + stat[:minor_gc_count]
@@ -679,13 +679,32 @@ def test_thrashing_for_young_objects
679679
680680
# Should not be thrashing in page creation
681681
assert_equal before_stats[:heap_allocated_pages], after_stats[:heap_allocated_pages], debug_msg
682-
assert_equal 0, after_stats[:heap_empty_pages], debug_msg
683682
assert_equal 0, after_stats[:total_freed_pages], debug_msg
684683
# Only young objects, so should not trigger major GC
685684
assert_equal before_stats[:major_gc_count], after_stats[:major_gc_count], debug_msg
686685
RUBY
687686
end
688687

688+
def test_heaps_grow_independently
689+
# [Bug #21214]
690+
691+
assert_separately([], __FILE__, __LINE__, <<-'RUBY', timeout: 60)
692+
COUNT = 1_000_000
693+
694+
def allocate_small_object = []
695+
def allocate_large_object = Array.new(10)
696+
697+
@arys = Array.new(COUNT) do
698+
# Allocate 10 small transient objects
699+
10.times { allocate_small_object }
700+
# Allocate 1 large object that is persistent
701+
allocate_large_object
702+
end
703+
704+
assert_operator(GC.stat(:heap_available_slots), :<, COUNT * 2)
705+
RUBY
706+
end
707+
689708
def test_gc_internals
690709
assert_not_nil GC::INTERNAL_CONSTANTS[:HEAP_PAGE_OBJ_LIMIT]
691710
assert_not_nil GC::INTERNAL_CONSTANTS[:BASE_SLOT_SIZE]

0 commit comments

Comments
 (0)