@@ -238,8 +238,7 @@ void G1ParScanThreadState::do_partial_array(PartialArrayState* state, bool stole
238238}
239239
240240MAYBE_INLINE_EVACUATION
241- void  G1ParScanThreadState::start_partial_objarray (G1HeapRegionAttr dest_attr,
242-                                                   oop from_obj,
241+ void  G1ParScanThreadState::start_partial_objarray (oop from_obj,
243242                                                  oop to_obj) {
244243  assert(from_obj->is_forwarded(), "precondition");
245244  assert(from_obj->forwardee() == to_obj, "precondition");
@@ -251,12 +250,6 @@ void G1ParScanThreadState::start_partial_objarray(G1HeapRegionAttr dest_attr,
251250    //  The source array is unused when processing states.
252251    _partial_array_splitter.start (_task_queue, nullptr , to_array, array_length);
253252
254-   //  Skip the card enqueue iff the object (to_array) is in survivor region.
255-   //  However, G1HeapRegion::is_survivor() is too expensive here.
256-   //  Instead, we use dest_attr.is_young() because the two values are always
257-   //  equal: successfully allocated young regions must be survivor regions.
258-   assert(dest_attr.is_young() == _g1h->heap_region_containing(to_array)->is_survivor(), "must be");
259-   G1SkipCardEnqueueSetter x (&_scanner, dest_attr.is_young ());
260253  //  Process the initial chunk.  No need to process the type in the
261254  //  klass, as it will already be handled by processing the built-in
262255  //  module.
@@ -422,6 +415,44 @@ void G1ParScanThreadState::update_bot_after_copying(oop obj, size_t word_sz) {
422415  region->update_bot_for_block (obj_start, obj_start + word_sz);
423416}
424417
418+ ALWAYSINLINE
419+ void  G1ParScanThreadState::do_iterate_object (oop const  obj,
420+                                              oop const  old,
421+                                              Klass* const  klass,
422+                                              G1HeapRegionAttr const  region_attr,
423+                                              G1HeapRegionAttr const  dest_attr,
424+                                              uint age) {
425+     //  Most objects are not arrays, so do one array check rather than
426+     //  checking for each array category for each object.
427+     if  (klass->is_array_klass ()) {
428+       assert(!klass->is_stack_chunk_instance_klass(), "must be");
429+ 
430+       if  (klass->is_objArray_klass ()) {
431+         start_partial_objarray (old, obj);
432+       } else  {
433+         //  Nothing needs to be done for typeArrays.  Body doesn't contain
434+         //  any oops to scan, and the type in the klass will already be handled
435+         //  by processing the built-in module.
436+         assert(klass->is_typeArray_klass(), "invariant");
437+       }
438+       return ;
439+     }
440+ 
441+     ContinuationGCSupport::transform_stack_chunk (obj);
442+ 
443+     //  Check for deduplicating young Strings.
444+     if  (G1StringDedup::is_candidate_from_evacuation (klass,
445+                                                     region_attr,
446+                                                     dest_attr,
447+                                                     age)) {
448+       //  Record old; request adds a new weak reference, which reference
449+       //  processing expects to refer to a from-space object.
450+       _string_dedup_requests.add (old);
451+     }
452+ 
453+     obj->oop_iterate_backwards (&_scanner, klass);
454+ }
455+ 
425456//  Private inline function, for direct internal use and providing the
426457//  implementation of the public not-inline function.
427458MAYBE_INLINE_EVACUATION
@@ -446,7 +477,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
446477
447478  //  JNI only allows pinning of typeArrays, so we only need to keep those in place.
448479  if  (region_attr.is_pinned () && klass->is_typeArray_klass ()) {
449-     return handle_evacuation_failure_par(old, old_mark, word_sz, true /* cause_pinned */);
480+     return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, true /* cause_pinned */);
450481  }
451482
452483  uint age = 0 ;
@@ -463,7 +494,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
463494    if  (obj_ptr == nullptr ) {
464495      //  This will either forward-to-self, or detect that someone else has
465496      //  installed a forwarding pointer.
466-       return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
497+       return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
467498    }
468499  }
469500
@@ -475,7 +506,7 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
475506    //  Doing this after all the allocation attempts also tests the
476507    //  undo_allocation() method too.
477508    undo_allocation (dest_attr, obj_ptr, word_sz, node_index);
478-     return handle_evacuation_failure_par(old, old_mark, word_sz, false /* cause_pinned */);
509+     return handle_evacuation_failure_par(old, old_mark, klass, region_attr, word_sz, false /* cause_pinned */);
479510  }
480511
481512  //  We're going to allocate linearly, so might as well prefetch ahead.
@@ -507,39 +538,17 @@ oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const regio
507538      update_bot_after_copying (obj, word_sz);
508539    }
509540
510-     //  Most objects are not arrays, so do one array check rather than
511-     //  checking for each array category for each object.
512-     if  (klass->is_array_klass ()) {
513-       if  (klass->is_objArray_klass ()) {
514-         start_partial_objarray (dest_attr, old, obj);
515-       } else  {
516-         //  Nothing needs to be done for typeArrays.  Body doesn't contain
517-         //  any oops to scan, and the type in the klass will already be handled
518-         //  by processing the built-in module.
519-       assert(klass->is_typeArray_klass(), "invariant");
520-       }
521-       return  obj;
522-     }
523- 
524-     ContinuationGCSupport::transform_stack_chunk (obj);
525- 
526-     //  Check for deduplicating young Strings.
527-     if  (G1StringDedup::is_candidate_from_evacuation (klass,
528-                                                     region_attr,
529-                                                     dest_attr,
530-                                                     age)) {
531-       //  Record old; request adds a new weak reference, which reference
532-       //  processing expects to refer to a from-space object.
533-       _string_dedup_requests.add (old);
541+     {
542+       //  Skip the card enqueue iff the object (obj) is in survivor region.
543+       //  However, G1HeapRegion::is_survivor() is too expensive here.
544+       //  Instead, we use dest_attr.is_young() because the two values are always
545+       //  equal: successfully allocated young regions must be survivor regions.
546+       assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
547+       G1SkipCardEnqueueSetter x (&_scanner, dest_attr.is_young ());
548+ 
549+       do_iterate_object (obj, old, klass, region_attr, dest_attr, age);
534550    }
535551
536-     //  Skip the card enqueue iff the object (obj) is in survivor region.
537-     //  However, G1HeapRegion::is_survivor() is too expensive here.
538-     //  Instead, we use dest_attr.is_young() because the two values are always
539-     //  equal: successfully allocated young regions must be survivor regions.
540-     assert(dest_attr.is_young() == _g1h->heap_region_containing(obj)->is_survivor(), "must be");
541-     G1SkipCardEnqueueSetter x (&_scanner, dest_attr.is_young ());
542-     obj->oop_iterate_backwards (&_scanner, klass);
543552    return  obj;
544553  } else  {
545554    _plab_allocator->undo_allocation (dest_attr, obj_ptr, word_sz, node_index);
@@ -621,7 +630,7 @@ void G1ParScanThreadState::record_evacuation_failed_region(G1HeapRegion* r, uint
621630}
622631
623632NOINLINE
624- oop G1ParScanThreadState::handle_evacuation_failure_par (oop old, markWord m, size_t  word_sz, bool  cause_pinned) {
633+ oop G1ParScanThreadState::handle_evacuation_failure_par (oop old, markWord m, Klass* klass, G1HeapRegionAttr attr,  size_t  word_sz, bool  cause_pinned) {
625634  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
626635
627636  oop forward_ptr = old->forward_to_self_atomic (m, memory_order_relaxed);
@@ -635,16 +644,16 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz
635644    //  evacuation failure recovery.
636645    _g1h->mark_evac_failure_object (_worker_id, old, word_sz);
637646
638-     ContinuationGCSupport::transform_stack_chunk (old);
639- 
640647    _evacuation_failed_info.register_copy_failure (word_sz);
641648
642-     //  For iterating objects that failed evacuation currently we can reuse the
643-     //  existing closure to scan evacuated objects; since we are iterating from a
644-     //  collection set region (i.e. never a Survivor region), we always need to
645-     //  gather cards for this case.
646-     G1SkipCardEnqueueSetter x(&_scanner, false /* skip_card_enqueue */);
647-     old->oop_iterate_backwards (&_scanner);
649+     {
650+       //  For iterating objects that failed evacuation currently we can reuse the
651+       //  existing closure to scan evacuated objects; since we are iterating from a
652+       //  collection set region (i.e. never a Survivor region), we always need to
653+       //  gather cards for this case.
654+       G1SkipCardEnqueueSetter x(&_scanner, false /* skip_card_enqueue */);
655+       do_iterate_object (old, old, klass, attr, attr, m.age ());
656+     }
648657
649658    return  old;
650659  } else  {