@@ -238,8 +238,7 @@ void G1ParScanThreadState::do_partial_array(PartialArrayState* state, bool stole
 }
 
 MAYBE_INLINE_EVACUATION
-void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
-                                                  oop from_obj,
+void G1ParScanThreadState::start_partial_objarray(oop from_obj,
                                                   oop to_obj) {
   assert(from_obj->is_forwarded(), "precondition");
   assert(from_obj->forwardee() == to_obj, "precondition");
@@ -251,12 +250,6 @@ void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
   // The source array is unused when processing states.
   _partial_array_splitter.start(_task_queue, nullptr, to_array, array_length);
 
-  // Skip the card enqueue iff the object (to_array) is in survivor region.
-  // However, G1HeapRegion::is_survivor() is too expensive here.
-  // Instead, we use dest_attr.is_young() because the two values are always
-  // equal: successfully allocated young regions must be survivor regions.
-  assert(dest_attr.is_young() == _g1h->heap_region_containing(to_array)->is_survivor(), "must be");
-  G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
   // Process the initial chunk. No need to process the type in the
   // klass, as it will already be handled by processing the built-in
   // module.
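
The two hunks above are linked: the only use start_partial_objarray made of dest_attr was constructing this G1SkipCardEnqueueSetter, so once the setter moves to the caller (see the do_copy_to_survivor_space hunk further down) the parameter can be dropped. The setter is a scope-bound RAII switch on the scanner, so where it is constructed determines exactly which scanning work runs with card enqueueing suppressed. A minimal self-contained sketch of that pattern, using hypothetical stand-in types (the real setter's save/restore details may differ):

```c++
#include <cassert>

// Hypothetical stand-ins for the scanner closure and
// G1SkipCardEnqueueSetter, for illustration only.
struct Scanner {
  bool _skip_card_enqueue = false;
};

class SkipCardEnqueueSetter {
  Scanner* const _scanner;
  const bool     _saved;
public:
  SkipCardEnqueueSetter(Scanner* s, bool skip)
      : _scanner(s), _saved(s->_skip_card_enqueue) {
    _scanner->_skip_card_enqueue = skip;   // takes effect at construction
  }
  ~SkipCardEnqueueSetter() {
    _scanner->_skip_card_enqueue = _saved; // undone at end of scope
  }
};

int main() {
  Scanner scanner;
  {
    SkipCardEnqueueSetter x(&scanner, /*skip=*/true);
    assert(scanner._skip_card_enqueue);    // active only inside this scope
  }
  assert(!scanner._skip_card_enqueue);     // restored after the scope exits
  return 0;
}
```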
@@ -422,6 +415,44 @@ void G1ParScanThreadState::update_bot_after_copying(oop obj, size_t word_sz) {
   region->update_bot_for_block(obj_start, obj_start + word_sz);
 }
 
+ALWAYSINLINE
+void G1ParScanThreadState::do_iterate_object(oop const obj,
+                                             oop const old,
+                                             Klass* const klass,
+                                             G1HeapRegionAttr const region_attr,
+                                             G1HeapRegionAttr const dest_attr,
+                                             uint age) {
+  // Most objects are not arrays, so do one array check rather than
+  // checking for each array category for each object.
+  if (klass->is_array_klass()) {
+    assert(!klass->is_stack_chunk_instance_klass(), "must be");
+
+    if (klass->is_objArray_klass()) {
+      start_partial_objarray(old, obj);
+    } else {
+      // Nothing needs to be done for typeArrays. Body doesn't contain
+      // any oops to scan, and the type in the klass will already be handled
+      // by processing the built-in module.
+      assert(klass->is_typeArray_klass(), "invariant");
+    }
+    return;
+  }
+
+  ContinuationGCSupport::transform_stack_chunk(obj);
+
+  // Check for deduplicating young Strings.
+  if (G1StringDedup::is_candidate_from_evacuation(klass,
+                                                  region_attr,
+                                                  dest_attr,
+                                                  age)) {
+    // Record old; request adds a new weak reference, which reference
+    // processing expects to refer to a from-space object.
+    _string_dedup_requests.add(old);
+  }
+
+  obj->oop_iterate_backwards(&_scanner, klass);
+}
+
 // Private inline function, for direct internal use and providing the
 // implementation of the public not-inline function.
 MAYBE_INLINE_EVACUATION
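
The new do_iterate_object gathers into one place the per-object scanning steps that were previously inlined in do_copy_to_survivor_space: the single up-front array check, the stack-chunk transformation, the string-deduplication candidacy test, and the field iteration. Both the successful-copy path and the evacuation-failure path can now funnel through it, as the later hunks show. A toy sketch of that funnel shape, with hypothetical names standing in for the real types:

```c++
#include <cstdio>

// Hypothetical miniature of the refactoring shape: one shared helper is
// the single place that decides how to scan an object, and both the
// successful-copy path and the evacuation-failure path call it.
enum class Kind { ObjArray, TypeArray, Plain };

static void do_iterate_object(Kind kind) {
  // One array check up front keeps the common non-array path short.
  if (kind != Kind::Plain) {
    if (kind == Kind::ObjArray) {
      std::printf("objArray: queue partial chunks for scanning\n");
    } else {
      std::printf("typeArray: no oops in body, nothing to scan\n");
    }
    return;
  }
  std::printf("plain object: scan fields\n");
}

static void copy_succeeded(Kind k) { do_iterate_object(k); }
static void copy_failed(Kind k)    { do_iterate_object(k); }  // same logic, no duplication

int main() {
  copy_succeeded(Kind::ObjArray);
  copy_failed(Kind::Plain);
  return 0;
}
```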
@@ -446,7 +477,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
 
   // JNI only allows pinning of typeArrays, so we only need to keep those in place.
   if (region_attr.is_pinned() && klass->is_typeArray_klass()) {
-    return handle_evacuation_failure_par(old, old_mark, word_sz, true /* cause_pinned */);
+    return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, true /* cause_pinned */);
   }
 
   uint age = 0;
@@ -463,7 +494,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
     if (obj_ptr == nullptr) {
       // This will either forward-to-self, or detect that someone else has
       // installed a forwarding pointer.
-      return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
+      return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
     }
   }
 
@@ -475,7 +506,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
     // Doing this after all the allocation attempts also tests the
     // undo_allocation() method too.
     undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
-    return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
+    return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
   }
 
   // We're going to allocate linearly, so might as well prefetch ahead.
@@ -507,39 +538,17 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
       update_bot_after_copying(obj, word_sz);
     }
 
-    // Most objects are not arrays, so do one array check rather than
-    // checking for each array category for each object.
-    if (klass->is_array_klass()) {
-      if (klass->is_objArray_klass()) {
-        start_partial_objarray(dest_attr, old, obj);
-      } else {
-        // Nothing needs to be done for typeArrays. Body doesn't contain
-        // any oops to scan, and the type in the klass will already be handled
-        // by processing the built-in module.
-        assert(klass->is_typeArray_klass(), "invariant");
-      }
-      return obj;
-    }
-
-    ContinuationGCSupport::transform_stack_chunk(obj);
-
-    // Check for deduplicating young Strings.
-    if (G1StringDedup::is_candidate_from_evacuation(klass,
-                                                    region_attr,
-                                                    dest_attr,
-                                                    age)) {
-      // Record old; request adds a new weak reference, which reference
-      // processing expects to refer to a from-space object.
-      _string_dedup_requests.add(old);
+    {
+      // Skip the card enqueue iff the object (obj) is in survivor region.
+      // However, G1HeapRegion::is_survivor() is too expensive here.
+      // Instead, we use dest_attr.is_young() because the two values are always
+      // equal: successfully allocated young regions must be survivor regions.
+      assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
+      G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
+
+      do_iterate_object(obj, old, klass, region_attr, dest_attr, age);
     }
 
-    // Skip the card enqueue iff the object (obj) is in survivor region.
-    // However, G1HeapRegion::is_survivor() is too expensive here.
-    // Instead, we use dest_attr.is_young() because the two values are always
-    // equal: successfully allocated young regions must be survivor regions.
-    assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
-    G1SkipCardEnqueueSetter x(&_scanner, dest_attr.is_young());
-    obj->oop_iterate_backwards(&_scanner, klass);
     return obj;
   } else {
     _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
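
Behavior is preserved in this hunk, but the G1SkipCardEnqueueSetter is now confined to a new block around the do_iterate_object call rather than living until the end of the function. Since the setter is an RAII object, its effect ends at the closing brace, before `return obj;`. A small illustration of that scoping, with a hypothetical ScopedMode in place of the real setter:

```c++
#include <cstdio>

// Sketch (hypothetical types) of why the extra braces matter: an RAII
// object's effect ends at its closing brace, so wrapping the setter and
// the iteration in a block scopes the "skip card enqueue" mode to the
// iteration alone instead of the whole remainder of the function.
struct ScopedMode {
  ScopedMode()  { std::printf("mode on\n");  }
  ~ScopedMode() { std::printf("mode off\n"); }
};

static int copy_path() {
  {
    ScopedMode mode;                 // mode switched on
    std::printf("iterate object\n"); // runs with mode on
  }                                  // mode switched off here, at the brace
  std::printf("return obj\n");       // runs with mode already off
  return 0;
}

int main() { return copy_path(); }
```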
@@ -621,7 +630,7 @@ void G1ParScanThreadState::record_evacuation_failed_region(G1HeapRegion* r, uint
 }
 
 NOINLINE
-oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, size_t word_sz, bool cause_pinned) {
+oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, Klass* klass, G1HeapRegionAttr attr, size_t word_sz, bool cause_pinned) {
   assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
 
   oop forward_ptr = old->forward_to_self_atomic(m, memory_order_relaxed);
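
handle_evacuation_failure_par now takes the Klass* and the region attribute from its caller, which has already loaded both. One plausible motivation, not stated in the patch itself: forward_to_self_atomic overwrites the mark word on the next line, so metadata derived from the object header is best captured before the forwarding value is installed, and passing it in also avoids a reload. A made-up illustration of that ordering discipline (the bit layout here is invented):

```c++
#include <cassert>
#include <cstdint>

// Illustration only: a fictitious header encoding. The point is the
// ordering discipline, not the bit layout -- values decoded from an
// object header must be captured before a forwarding pointer is
// written over it.
struct Header { uint64_t bits; };

constexpr uint64_t kClassShift  = 32;
constexpr uint64_t kForwardMask = 0x3;

int main() {
  Header h{uint64_t{0x1234} << kClassShift}; // header encodes class info
  uint64_t klass_id = h.bits >> kClassShift; // 1. capture first...
  h.bits = 0xDEADBEF0 | kForwardMask;        // 2. ...then install forwarding
  assert(klass_id == 0x1234);                // captured copy stays valid
  return 0;
}
```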
@@ -635,16 +644,16 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz
     // evacuation failure recovery.
     _g1h->mark_evac_failure_object(_worker_id, old, word_sz);
 
-    ContinuationGCSupport::transform_stack_chunk(old);
-
     _evacuation_failed_info.register_copy_failure(word_sz);
 
-    // For iterating objects that failed evacuation currently we can reuse the
-    // existing closure to scan evacuated objects; since we are iterating from a
-    // collection set region (i.e. never a Survivor region), we always need to
-    // gather cards for this case.
-    G1SkipCardEnqueueSetter x(&_scanner, false /* skip_card_enqueue */);
-    old->oop_iterate_backwards(&_scanner);
+    {
+      // For iterating objects that failed evacuation currently we can reuse the
+      // existing closure to scan evacuated objects; since we are iterating from a
+      // collection set region (i.e. never a Survivor region), we always need to
+      // gather cards for this case.
+      G1SkipCardEnqueueSetter x(&_scanner, false /* skip_card_enqueue */);
+      do_iterate_object(old, old, klass, attr, attr, m.age());
+    }
 
     return old;
   } else {
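
On this path the object stays in place, forwarded to itself, so do_iterate_object is called with old as both the from- and to-object and with the source attribute attr doubling as the destination; the age is recovered from the preserved mark word via m.age(). This also subsumes the ContinuationGCSupport::transform_stack_chunk(old) call deleted above, since do_iterate_object performs that transformation for non-array objects. A tiny sketch of the call symmetry, with hypothetical types:

```c++
#include <cassert>

// Tiny sketch (hypothetical types) of the failure-path call symmetry:
// a self-forwarded object is its own copy, so the same oop and the same
// region attribute serve as both source and destination arguments.
struct Oop { Oop* fwd = nullptr; };

static Oop* forward_to_self(Oop* o) { o->fwd = o; return o->fwd; }

static void do_iterate_object(Oop* obj, Oop* old) {
  assert(obj == old);  // in-place: "to" and "from" coincide
}

int main() {
  Oop o;
  Oop* to = forward_to_self(&o);
  do_iterate_object(to, &o);
  return 0;
}
```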