diff --git a/src/hotspot/share/gc/g1/g1Allocator.hpp b/src/hotspot/share/gc/g1/g1Allocator.hpp
index 16feef2e6b1..35a757aeba9 100644
--- a/src/hotspot/share/gc/g1/g1Allocator.hpp
+++ b/src/hotspot/share/gc/g1/g1Allocator.hpp
@@ -88,9 +88,6 @@ class G1Allocator : public CHeapObj {
                                         size_t desired_word_size,
                                         size_t* actual_word_size);
 
-  // Node index of current thread.
-  inline uint current_node_index() const;
-
 public:
   G1Allocator(G1CollectedHeap* heap);
   ~G1Allocator();
@@ -110,22 +107,26 @@ class G1Allocator : public CHeapObj {
   void abandon_gc_alloc_regions();
   bool is_retained_old_region(HeapRegion* hr);
 
+  // Node index of current thread.
+  inline uint current_node_index() const;
+
   // Allocate blocks of memory during mutator time.
   // Attempt allocation in the current alloc region.
-  inline HeapWord* attempt_allocation(size_t min_word_size,
+  inline HeapWord* attempt_allocation(uint node_index,
+                                      size_t min_word_size,
                                       size_t desired_word_size,
                                       size_t* actual_word_size);
 
   // Attempt allocation, retiring the current region and allocating a new one. It is
   // assumed that attempt_allocation() has been tried and failed already first.
-  inline HeapWord* attempt_allocation_using_new_region(size_t word_size);
+  inline HeapWord* attempt_allocation_using_new_region(uint node_index, size_t word_size);
 
   // This is to be called when holding an appropriate lock. It first tries in the
   // current allocation region, and then attempts an allocation using a new region.
   inline HeapWord* attempt_allocation_locked(size_t word_size);
 
-  inline HeapWord* attempt_allocation_force(size_t word_size);
+  inline HeapWord* attempt_allocation_force(uint node_index, size_t word_size);
 
   size_t unsafe_max_tlab_alloc();
   size_t used_in_alloc_regions();
 
diff --git a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp
index 42d6a547257..71e34fa34b9 100644
--- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp
@@ -49,10 +49,10 @@ inline OldGCAllocRegion* G1Allocator::old_gc_alloc_region() {
   return &_old_gc_alloc_region;
 }
 
-inline HeapWord* G1Allocator::attempt_allocation(size_t min_word_size,
+inline HeapWord* G1Allocator::attempt_allocation(uint node_index,
+                                                 size_t min_word_size,
                                                  size_t desired_word_size,
                                                  size_t* actual_word_size) {
-  uint node_index = current_node_index();
 
   HeapWord* result = mutator_alloc_region(node_index)->attempt_retained_allocation(min_word_size, desired_word_size, actual_word_size);
   if (result != NULL) {
@@ -62,8 +62,8 @@ inline HeapWord* G1Allocator::attempt_allocation(size_t min_word_size,
   return mutator_alloc_region(node_index)->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
 }
 
-inline HeapWord* G1Allocator::attempt_allocation_using_new_region(size_t word_size) {
-  uint node_index = current_node_index();
+inline HeapWord* G1Allocator::attempt_allocation_using_new_region(uint node_index, size_t word_size) {
+  size_t temp;
   HeapWord* result = mutator_alloc_region(node_index)->attempt_allocation_using_new_region(word_size, word_size, &temp);
   assert(result != NULL || mutator_alloc_region(node_index)->get() == NULL,
@@ -81,8 +81,7 @@ inline HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size) {
   return result;
 }
 
-inline HeapWord* G1Allocator::attempt_allocation_force(size_t word_size) {
-  uint node_index = current_node_index();
-
+inline HeapWord* G1Allocator::attempt_allocation_force(uint node_index,
+                                                       size_t word_size) {
   return mutator_alloc_region(node_index)->attempt_allocation_force(word_size);
 }
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 8acbaae9bb5..63bd7bf7db4 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -375,7 +375,7 @@ G1CollectedHeap::mem_allocate(size_t word_size,
   return attempt_allocation(word_size, word_size, &dummy);
 }
 
-HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
+HeapWord* G1CollectedHeap::attempt_allocation_slow(uint node_index, size_t word_size) {
   ResourceMark rm; // For retrieving the thread names in log messages.
 
   // Make sure you read the note in attempt_allocation_humongous().
@@ -403,7 +403,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
       // Now that we have the lock, we first retry the allocation in case another
      // thread changed the region while we were waiting to acquire the lock.
       size_t actual_size;
-      result = _allocator->attempt_allocation(word_size, word_size, &actual_size);
+      result = _allocator->attempt_allocation(node_index, word_size, word_size, &actual_size);
       if (result != NULL) {
         return result;
       }
@@ -412,7 +412,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
       if (!preventive_collection_required) {
         // We've already attempted a lock-free allocation above, so we don't want to
         // do it again. Let's jump straight to replacing the active region.
-        result = _allocator->attempt_allocation_using_new_region(word_size);
+        result = _allocator->attempt_allocation_using_new_region(node_index, word_size);
         if (result != NULL) {
           return result;
         }
@@ -423,7 +423,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
         if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
           // No need for an ergo message here, can_expand_young_list() does this when
           // it returns true.
-          result = _allocator->attempt_allocation_force(word_size);
+          result = _allocator->attempt_allocation_force(node_index, word_size);
           if (result != NULL) {
             return result;
           }
@@ -483,7 +483,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
    // follow-on attempt will be at the start of the next loop
    // iteration (after taking the Heap_lock).
    size_t dummy = 0;
-    result = _allocator->attempt_allocation(word_size, word_size, &dummy);
+    result = _allocator->attempt_allocation(node_index, word_size, word_size, &dummy);
     if (result != NULL) {
       return result;
     }
@@ -711,11 +711,14 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
   assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
 
-  HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
+  // Fix NUMA node association for the duration of this allocation
+  const uint node_index = _allocator->current_node_index();
+
+  HeapWord* result = _allocator->attempt_allocation(node_index, min_word_size, desired_word_size, actual_word_size);
 
   if (result == NULL) {
     *actual_word_size = desired_word_size;
-    result = attempt_allocation_slow(desired_word_size);
+    result = attempt_allocation_slow(node_index, desired_word_size);
   }
 
   assert_heap_not_locked();
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index 50aaac4cd53..6a705c1dbdc 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -469,7 +469,7 @@ class G1CollectedHeap : public CollectedHeap {
   // Second-level mutator allocation attempt: take the Heap_lock and
   // retry the allocation attempt, potentially scheduling a GC
   // pause. This should only be used for non-humongous allocations.
-  HeapWord* attempt_allocation_slow(size_t word_size);
+  HeapWord* attempt_allocation_slow(uint node_index, size_t word_size);
 
   // Takes the Heap_lock and attempts a humongous allocation. It can
   // potentially schedule a GC pause.