@@ -238,8 +238,7 @@ void G1ParScanThreadState::do_partial_array(PartialArrayState* state, bool stole
 }
 
 MAYBE_INLINE_EVACUATION
-void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
-                                                  oop from_obj,
+void G1ParScanThreadState::start_partial_objarray(oop from_obj,
                                                   oop to_obj) {
   assert(from_obj->is_forwarded(), "precondition");
   assert(from_obj->forwardee() == to_obj, "precondition");
@@ -251,12 +250,7 @@ void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
   // The source array is unused when processing states.
   _partial_array_splitter.start(_task_queue, nullptr, to_array, array_length);
 
-  // Skip the card enqueue iff the object (to_array) is in survivor region.
-  // However, G1HeapRegion::is_survivor() is too expensive here.
-  // Instead, we use dest_attr.is_young() because the two values are always
-  // equal: successfully allocated young regions must be survivor regions.
-  assert(dest_attr.is_young() == _g1h->heap_region_containing(to_array)->is_survivor(), "must be");
-  G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
+  assert(_scanner.skip_card_enqueue_set(), "must be");
   // Process the initial chunk. No need to process the type in the
   // klass, as it will already be handled by processing the built-in
   // module.
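Note: after this hunk, `start_partial_objarray` no longer installs its own `G1SkipCardEnqueueSetter`; it only asserts that the caller already made the skip/enqueue decision. Below is a minimal, self-contained sketch of that RAII idiom under toy names (`Scanner`, `SkipCardEnqueueSetter` here are illustrative stand-ins, not HotSpot code):

```cpp
// Toy model of the G1SkipCardEnqueueSetter idiom: the caller decides once,
// within a scope, whether card enqueueing is skipped; callees only assert
// that a decision is in effect.
#include <cassert>

class Scanner {
  enum State { Unset, Skip, Enqueue };
  State _state = Unset;
  friend class SkipCardEnqueueSetter;
public:
  bool skip_card_enqueue_set() const { return _state != Unset; }
};

class SkipCardEnqueueSetter {
  Scanner* _scanner;
public:
  SkipCardEnqueueSetter(Scanner* s, bool skip) : _scanner(s) {
    assert(!s->skip_card_enqueue_set() && "no nesting expected");
    s->_state = skip ? Scanner::Skip : Scanner::Enqueue;
  }
  ~SkipCardEnqueueSetter() { _scanner->_state = Scanner::Unset; }
};

// After this patch, start_partial_objarray behaves like this callee:
void scan_partial_array(Scanner* s) {
  assert(s->skip_card_enqueue_set() && "caller must hold a setter");
  // ... push array chunks and scan the first one ...
}

int main() {
  Scanner s;
  {
    SkipCardEnqueueSetter x(&s, /*skip=*/true); // decision made once here
    scan_partial_array(&s);                     // precondition holds
  }                                             // destructor resets state
  assert(!s.skip_card_enqueue_set());
}
```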
@@ -422,6 +416,45 @@ void G1ParScanThreadState::update_bot_after_copying(oop obj, size_t word_sz) {
   region->update_bot_for_block(obj_start, obj_start + word_sz);
 }
 
+ALWAYSINLINE
+void G1ParScanThreadState::do_iterate_object(oop const obj,
+                                             oop const old,
+                                             Klass* const klass,
+                                             G1HeapRegionAttr const region_attr,
+                                             G1HeapRegionAttr const dest_attr,
+                                             uint age) {
+  // Most objects are not arrays, so do one array check rather than
+  // checking for each array category for each object.
+  if (klass->is_array_klass()) {
+    assert(!klass->is_stack_chunk_instance_klass(), "must be");
+
+    if (klass->is_objArray_klass()) {
+      start_partial_objarray(old, obj);
+    } else {
+      // Nothing needs to be done for typeArrays. Body doesn't contain
+      // any oops to scan, and the type in the klass will already be handled
+      // by processing the built-in module.
+      assert(klass->is_typeArray_klass(), "invariant");
+    }
+    return;
+  }
+
+  ContinuationGCSupport::transform_stack_chunk(obj);
+
+  // Check for deduplicating young Strings.
+  if (G1StringDedup::is_candidate_from_evacuation(klass,
+                                                  region_attr,
+                                                  dest_attr,
+                                                  age)) {
+    // Record old; request adds a new weak reference, which reference
+    // processing expects to refer to a from-space object.
+    _string_dedup_requests.add(old);
+  }
+
+  assert(_scanner.skip_card_enqueue_set(), "must be");
+  obj->oop_iterate_backwards(&_scanner, klass);
+}
+
 // Private inline function, for direct internal use and providing the
 // implementation of the public not-inline function.
 MAYBE_INLINE_EVACUATION
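Note: the new `do_iterate_object` gathers everything that must happen after an object is copied (or fails to copy): the single array-kind check, the stack-chunk transform, string-dedup candidacy, and the backwards oop iteration. A self-contained sketch of that dispatch shape follows; `KlassKind`, `Klass`, and `iterate_object` here are illustrative toys, not HotSpot types:

```cpp
// Sketch of the dispatch do_iterate_object factors out: one
// is_array_klass() test first, because most objects are not arrays,
// then the objArray/typeArray split.
#include <cassert>
#include <cstdio>

enum KlassKind { InstanceKlass, ObjArrayKlass, TypeArrayKlass };

struct Klass {
  KlassKind kind;
  bool is_array_klass()     const { return kind != InstanceKlass; }
  bool is_objArray_klass()  const { return kind == ObjArrayKlass; }
  bool is_typeArray_klass() const { return kind == TypeArrayKlass; }
};

void iterate_object(const Klass* klass) {
  // One array check up front instead of one per array category.
  if (klass->is_array_klass()) {
    if (klass->is_objArray_klass()) {
      std::puts("objArray: push partial-array chunks for parallel scan");
    } else {
      // typeArrays hold no oops; nothing to scan.
      assert(klass->is_typeArray_klass());
      std::puts("typeArray: nothing to do");
    }
    return;
  }
  std::puts("plain object: iterate oop fields");
}

int main() {
  Klass kinds[] = {{InstanceKlass}, {ObjArrayKlass}, {TypeArrayKlass}};
  for (const Klass& k : kinds) iterate_object(&k);
}
```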
@@ -446,7 +479,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
 
   // JNI only allows pinning of typeArrays, so we only need to keep those in place.
   if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
-    return handle_evacuation_failure_par(old, old_mark, word_sz, true /* cause_pinned */);
+    return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, true /* cause_pinned */);
  }
 
   uint age = 0;
@@ -463,7 +496,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
     if (obj_ptr == nullptr) {
       // This will either forward-to-self, or detect that someone else has
       // installed a forwarding pointer.
-      return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
+      return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
     }
   }
 
@@ -475,7 +508,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
     // Doing this after all the allocation attempts also tests the
     // undo_allocation() method too.
     undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
-    return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
+    return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
   }
 
   // We're going to allocate linearly, so might as well prefetch ahead.
@@ -507,39 +540,16 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
       update_bot_after_copying(obj, word_sz);
     }
 
-    // Most objects are not arrays, so do one array check rather than
-    // checking for each array category for each object.
-    if (klass->is_array_klass()) {
-      if (klass->is_objArray_klass()) {
-        start_partial_objarray(dest_attr, old, obj);
-      } else {
-        // Nothing needs to be done for typeArrays. Body doesn't contain
-        // any oops to scan, and the type in the klass will already be handled
-        // by processing the built-in module.
-        assert(klass->is_typeArray_klass(), "invariant");
-      }
-      return obj;
-    }
-
-    ContinuationGCSupport::transform_stack_chunk(obj);
-
-    // Check for deduplicating young Strings.
-    if (G1StringDedup::is_candidate_from_evacuation(klass,
-                                                    region_attr,
-                                                    dest_attr,
-                                                    age)) {
-      // Record old; request adds a new weak reference, which reference
-      // processing expects to refer to a from-space object.
-      _string_dedup_requests.add(old);
+    {
+      // Skip the card enqueue iff the object (obj) is in survivor region.
+      // However, G1HeapRegion::is_survivor() is too expensive here.
+      // Instead, we use dest_attr.is_young() because the two values are always
+      // equal: successfully allocated young regions must be survivor regions.
+      assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
+      G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
+      do_iterate_object(obj, old, klass, region_attr, dest_attr, age);
     }
 
-    // Skip the card enqueue iff the object (obj) is in survivor region.
-    // However, G1HeapRegion::is_survivor() is too expensive here.
-    // Instead, we use dest_attr.is_young() because the two values are always
-    // equal: successfully allocated young regions must be survivor regions.
-    assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
-    G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
-    obj->oop_iterate_backwards(&_scanner, klass);
-
     return obj;
   } else {
     _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
@@ -621,7 +631,7 @@ void G1ParScanThreadState::record_evacuation_failed_region(G1HeapRegion* r, uint
 }
 
 NOINLINE
-oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, size_t word_sz, bool cause_pinned) {
+oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, Klass* klass, G1HeapRegionAttr attr, size_t word_sz, bool cause_pinned) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
 
   oop forward_ptr = old->forward_to_self_atomic(m, memory_order_relaxed);
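Note: the widened failure-handler signature just threads through values its callers already hold, so the shared `do_iterate_object` can run on the failed object without re-deriving them. Condensed comparison (declarations only; the forward declarations stand in for the real HotSpot types):

```cpp
// Before/after of the failure-handler signature, condensed from this diff.
#include <cstddef>
using std::size_t;
class markWord;
class Klass;
class G1HeapRegionAttr;
class oopDesc;
typedef oopDesc* oop;

// Before:
oop handle_evacuation_failure_par(oop old, markWord m,
                                  size_t word_sz, bool cause_pinned);
// After: klass and the region attribute ride along for do_iterate_object.
oop handle_evacuation_failure_par(oop old, markWord m,
                                  Klass* klass, G1HeapRegionAttr attr,
                                  size_t word_sz, bool cause_pinned);
```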
@@ -635,16 +645,16 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz
     // evacuation failure recovery.
     _g1h->mark_evac_failure_object(_worker_id, old, word_sz);
 
-    ContinuationGCSupport::transform_stack_chunk(old);
-
     _evacuation_failed_info.register_copy_failure(word_sz);
 
-    // For iterating objects that failed evacuation currently we can reuse the
-    // existing closure to scan evacuated objects; since we are iterating from a
-    // collection set region (i.e. never a Survivor region), we always need to
-    // gather cards for this case.
-    G1SkipCardEnqueueSetter x(&_scanner, false /* skip_card_enqueue */);
-    old->oop_iterate_backwards(&_scanner);
+    {
+      // For iterating objects that failed evacuation currently we can reuse the
+      // existing closure to scan evacuated objects; since we are iterating from a
+      // collection set region (i.e. never a Survivor region), we always need to
+      // gather cards for this case.
+      G1SkipCardEnqueueSetter x(&_scanner, false /* skip_card_enqueue */);
+      do_iterate_object(old, old, klass, attr, attr, m.age());
+    }
 
     return old;
   } else {
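Note: with this hunk both iteration paths funnel into `do_iterate_object`; only the argument mapping differs. On failure the object is self-forwarded, so it serves as both the copy and the original, source and destination attributes coincide, and the age comes from the preserved mark word. A toy illustration of the two mappings (illustrative stand-in types, not HotSpot code):

```cpp
// Toy illustration of the two call shapes now funneling into one routine.
#include <cstdio>

struct Attr { bool young; };

void do_iterate(const char* obj, const char* old,
                Attr src, Attr dst, unsigned age) {
  std::printf("iterate %s (orig %s) src_young=%d dst_young=%d age=%u\n",
              obj, old, src.young, dst.young, age);
}

int main() {
  Attr cset{false}, survivor{true};
  // Successful copy: distinct copy/original; the destination attribute
  // decides whether card enqueueing is skipped.
  do_iterate("copy", "old", cset, survivor, /*age=*/2);
  // Evacuation failure: the self-forwarded object stays put, so it is
  // both "obj" and "old" and keeps its original region attribute.
  do_iterate("old", "old", cset, cset, /*age=*/2);
}
```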