8267834: Refactor G1CardSetAllocator and BufferNode::Allocator to use a common base class

Reviewed-by: kbarrett, tschatzl
Ivan Walulya committed Mar 1, 2022
1 parent fcce24c commit 341c8bd7f2744d1899e78da34e613817b43b44e5
Showing 10 changed files with 436 additions and 335 deletions.
@@ -35,11 +35,7 @@ G1CardSetAllocator<Slot>::G1CardSetAllocator(const char* name,
const G1CardSetAllocOptions* alloc_options,
G1CardSetFreeList* free_segment_list) :
_segmented_array(alloc_options, free_segment_list),
- _transfer_lock(false),
- _free_slots_list(),
- _pending_slots_list(),
- _num_pending_slots(0),
- _num_free_slots(0)
+ _free_slots_list(name, &_segmented_array)
{
uint slot_size = _segmented_array.slot_size();
assert(slot_size >= sizeof(G1CardSetContainer), "Slot instance size %u for allocator %s too small", slot_size, name);
@@ -50,74 +46,16 @@ G1CardSetAllocator<Slot>::~G1CardSetAllocator() {
drop_all();
}

- template <class Slot>
- bool G1CardSetAllocator<Slot>::try_transfer_pending() {
- // Attempt to claim the lock.
- if (Atomic::load_acquire(&_transfer_lock) || // Skip CAS if likely to fail.
- Atomic::cmpxchg(&_transfer_lock, false, true)) {
- return false;
- }
- // Have the lock; perform the transfer.

- // Claim all the pending slots.
- G1CardSetContainer* first = _pending_slots_list.pop_all();

- if (first != nullptr) {
- // Prepare to add the claimed slots, and update _num_pending_slots.
- G1CardSetContainer* last = first;
- Atomic::load_acquire(&_num_pending_slots);

- uint count = 1;
- for (G1CardSetContainer* next = first->next(); next != nullptr; next = next->next()) {
- last = next;
- ++count;
- }

- Atomic::sub(&_num_pending_slots, count);

- // Wait for any in-progress pops to avoid ABA for them.
- GlobalCounter::write_synchronize();
- // Add synchronized slots to _free_slots_list.
- // Update count first so there can be no underflow in allocate().
- Atomic::add(&_num_free_slots, count);
- _free_slots_list.prepend(*first, *last);
- }
- Atomic::release_store(&_transfer_lock, false);
- return true;
- }
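The function removed above is the lock-free "pending list to free list" transfer that FreeListAllocator now provides for all of its users. The following is a minimal standalone sketch of the same idea, using std::atomic in place of HotSpot's Atomic class and a comment where GlobalCounter::write_synchronize() would go; all names in it are illustrative and it is not the FreeListAllocator implementation.

```cpp
#include <atomic>
#include <cstdio>

struct Node { Node* next = nullptr; };

// Illustrative sketch of the pending -> free transfer pattern.
class TransferSketch {
  std::atomic<bool>     _transfer_lock{false};
  std::atomic<Node*>    _pending_head{nullptr};  // lock-free LIFO of recently freed nodes
  std::atomic<Node*>    _free_head{nullptr};     // nodes ready for re-allocation
  std::atomic<unsigned> _num_pending{0};
  std::atomic<unsigned> _num_free{0};

public:
  void push_pending(Node* n) {
    _num_pending.fetch_add(1, std::memory_order_relaxed);
    Node* old_head = _pending_head.load(std::memory_order_relaxed);
    do {
      n->next = old_head;
    } while (!_pending_head.compare_exchange_weak(old_head, n));
  }

  bool try_transfer_pending() {
    // Claim the lock cheaply; skip the CAS if it is likely to fail.
    bool expected = false;
    if (_transfer_lock.load(std::memory_order_acquire) ||
        !_transfer_lock.compare_exchange_strong(expected, true)) {
      return false;  // another thread is already transferring
    }
    // Detach the whole pending list at once.
    Node* first = _pending_head.exchange(nullptr);
    if (first != nullptr) {
      Node* last = first;
      unsigned count = 1;
      for (Node* n = first->next; n != nullptr; n = n->next) {
        last = n;
        ++count;
      }
      _num_pending.fetch_sub(count);
      // The real code calls GlobalCounter::write_synchronize() here so that
      // in-progress pops from the free list finish before these nodes can be
      // recycled (the ABA hazard the original comments mention).
      _num_free.fetch_add(count);  // update the count first: no underflow in allocate()
      Node* old_head = _free_head.load(std::memory_order_relaxed);
      do {
        last->next = old_head;
      } while (!_free_head.compare_exchange_weak(old_head, first));
    }
    _transfer_lock.store(false, std::memory_order_release);
    return true;
  }

  unsigned num_free() const { return _num_free.load(); }
};

int main() {
  TransferSketch sketch;
  Node a, b, c;
  sketch.push_pending(&a);
  sketch.push_pending(&b);
  sketch.push_pending(&c);
  sketch.try_transfer_pending();
  std::printf("free nodes after transfer: %u\n", sketch.num_free());
}
```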

template <class Slot>
void G1CardSetAllocator<Slot>::free(Slot* slot) {
assert(slot != nullptr, "precondition");
- // Desired minimum transfer batch size. There is relatively little
- // importance to the specific number. It shouldn't be too big, else
- // we're wasting space when the release rate is low. If the release
- // rate is high, we might accumulate more than this before being
- // able to start a new transfer, but that's okay. Also note that
- // the allocation rate and the release rate are going to be fairly
- // similar, due to how the slots are used. - kbarret
- uint const trigger_transfer = 10;

- uint pending_count = Atomic::add(&_num_pending_slots, 1u, memory_order_relaxed);

- G1CardSetContainer* container = reinterpret_cast<G1CardSetContainer*>(reinterpret_cast<char*>(slot));

- container->set_next(nullptr);
- assert(container->next() == nullptr, "precondition");

- _pending_slots_list.push(*container);

- if (pending_count > trigger_transfer) {
- try_transfer_pending();
- }
+ slot->~Slot();
+ _free_slots_list.release(slot);
}

template <class Slot>
void G1CardSetAllocator<Slot>::drop_all() {
- _free_slots_list.pop_all();
- _pending_slots_list.pop_all();
- _num_pending_slots = 0;
- _num_free_slots = 0;
+ _free_slots_list.reset();
_segmented_array.drop_all();
}
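The long comment removed from free() explains the batching heuristic that now sits behind _free_slots_list.release(): pend released slots and only occasionally attempt a transfer. A compact, self-contained sketch of just that trigger follows; the threshold value and all names are illustrative, and the transfer itself is stubbed out.

```cpp
#include <atomic>
#include <cstdio>

// Sketch of the batching heuristic: every release bumps a relaxed pending
// counter, and once a small batch has accumulated, one transfer is attempted.
class ReleaseBatchSketch {
  static constexpr unsigned trigger_transfer = 10;  // small batch; the exact value matters little
  std::atomic<unsigned> _num_pending{0};

  bool try_transfer_pending() {
    unsigned claimed = _num_pending.exchange(0);
    std::printf("transferring %u pending slots to the free list\n", claimed);
    return true;
  }

public:
  void release() {
    // Atomic::add returns the new value; fetch_add returns the old one, hence the +1.
    unsigned pending = _num_pending.fetch_add(1, std::memory_order_relaxed) + 1;
    if (pending > trigger_transfer) {
      try_transfer_pending();
    }
  }
};

int main() {
  ReleaseBatchSketch sketch;
  for (int i = 0; i < 15; ++i) {
    sketch.release();  // prints once the pending count exceeds the trigger threshold
  }
}
```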

@@ -129,12 +67,13 @@ void G1CardSetAllocator<Slot>::print(outputStream* os) {
? _segmented_array.first_array_segment()->num_slots()
: 0;
uint num_segments = _segmented_array.num_segments();
+ uint num_pending_slots = (uint)_free_slots_list.pending_count();
os->print("MA " PTR_FORMAT ": %u slots pending (allocated %u available %u) used %.3f highest %u segments %u size %zu ",
p2i(this),
- _num_pending_slots,
+ num_pending_slots,
num_allocated_slots,
num_available_slots,
- percent_of(num_allocated_slots - _num_pending_slots, num_available_slots),
+ percent_of(num_allocated_slots - num_pending_slots, num_available_slots),
highest,
num_segments,
mem_size());
@@ -29,9 +29,9 @@
#include "gc/g1/g1CardSetContainers.hpp"
#include "gc/g1/g1SegmentedArray.hpp"
#include "gc/g1/g1SegmentedArrayFreePool.hpp"
#include "gc/shared/freeListAllocator.hpp"
#include "memory/allocation.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/lockFreeStack.hpp"

class G1CardSetConfiguration;
class outputStream;
@@ -91,23 +91,9 @@ class G1CardSetAllocator {
typedef G1SegmentedArray<Slot, mtGCCardSet> SegmentedArray;
// G1CardSetContainer slot management within the G1CardSetSegments allocated
// by this allocator.
- static G1CardSetContainer* volatile* next_ptr(G1CardSetContainer& slot);
- typedef LockFreeStack<G1CardSetContainer, &G1CardSetAllocator::next_ptr> SlotStack;

SegmentedArray _segmented_array;
- volatile bool _transfer_lock;
- SlotStack _free_slots_list;
- SlotStack _pending_slots_list;

- volatile uint _num_pending_slots; // Number of slots in the pending list.
- volatile uint _num_free_slots; // Number of slots in the free list.

- // Try to transfer slots from _pending_slots_list to _free_slots_list, with a
- // synchronization delay for any in-progress pops from the _free_slots_list
- // to solve ABA here.
- bool try_transfer_pending();

- uint num_free_slots() const;
+ FreeListAllocator _free_slots_list;

public:
G1CardSetAllocator(const char* name,
@@ -124,13 +110,15 @@ class G1CardSetAllocator {

size_t mem_size() const {
return sizeof(*this) +
- _segmented_array.num_segments() * sizeof(G1CardSetSegment) + _segmented_array.num_available_slots() *
- _segmented_array.slot_size();
+ _segmented_array.num_segments() * sizeof(G1CardSetSegment) +
+ _segmented_array.num_available_slots() * _segmented_array.slot_size();
}

size_t wasted_mem_size() const {
- return (_segmented_array.num_available_slots() - (_segmented_array.num_allocated_slots() - _num_pending_slots)) *
- _segmented_array.slot_size();
+ uint num_wasted_slots = _segmented_array.num_available_slots() -
+ _segmented_array.num_allocated_slots() -
+ (uint)_free_slots_list.pending_count();
+ return num_wasted_slots * _segmented_array.slot_size();
}

inline uint num_segments() { return _segmented_array.num_segments(); }
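As a worked example with made-up numbers (none of these come from a real heap): with 4 segments, 1024 available slots, 900 allocated slots, 40 slots on the free-list allocator's pending list, and an 8-byte slot size, wasted_mem_size() reports (1024 - 900 - 40) * 8 = 672 bytes, while mem_size() reports sizeof(*this) + 4 * sizeof(G1CardSetSegment) + 1024 * 8 bytes.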
@@ -33,30 +33,9 @@
#include "gc/g1/g1CardSetContainers.inline.hpp"
#include "utilities/globalCounter.inline.hpp"

- template <class Slot>
- G1CardSetContainer* volatile* G1CardSetAllocator<Slot>::next_ptr(G1CardSetContainer& slot) {
- return slot.next_addr();
- }

template <class Slot>
Slot* G1CardSetAllocator<Slot>::allocate() {
assert(_segmented_array.slot_size() > 0, "instance size not set.");

- if (num_free_slots() > 0) {
- // Pop under critical section to deal with ABA problem
- // Other solutions to the same problem are more complicated (ref counting, HP)
- GlobalCounter::CriticalSection cs(Thread::current());

- G1CardSetContainer* container = _free_slots_list.pop();
- if (container != nullptr) {
- Slot* slot = reinterpret_cast<Slot*>(reinterpret_cast<char*>(container));
- Atomic::sub(&_num_free_slots, 1u);
- guarantee(is_aligned(slot, 8), "result " PTR_FORMAT " not aligned", p2i(slot));
- return slot;
- }
- }

- Slot* slot = _segmented_array.allocate();
+ Slot* slot = ::new (_free_slots_list.allocate()) Slot();
assert(slot != nullptr, "must be");
return slot;
}
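The new allocate() runs the Slot constructor with placement new over raw memory handed out by the free-list allocator, and the matching free() runs the destructor explicitly before returning the memory for reuse. A minimal self-contained illustration of that construct/destruct-over-recycled-memory pattern follows; raw_allocate, raw_release, allocate_slot, and free_slot are illustrative names, not HotSpot APIs, and error handling is omitted.

```cpp
#include <new>      // placement new
#include <cstdio>
#include <cstdlib>

struct Slot {
  int value = 0;
  Slot()  { std::puts("Slot constructed"); }
  ~Slot() { std::puts("Slot destructed"); }
};

// Stand-in for the raw-memory source (the role of FreeListAllocator::allocate()).
void* raw_allocate()            { return std::malloc(sizeof(Slot)); }
void  raw_release(void* memory) { std::free(memory); }

Slot* allocate_slot() {
  // Construct in place over raw memory, as the new allocate() above does.
  return ::new (raw_allocate()) Slot();
}

void free_slot(Slot* slot) {
  slot->~Slot();      // run the destructor explicitly ...
  raw_release(slot);  // ... then hand the raw memory back for reuse
}

int main() {
  Slot* s = allocate_slot();
  s->value = 42;
  std::printf("value = %d\n", s->value);
  free_slot(s);
}
```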
@@ -74,9 +53,4 @@ inline void G1CardSetMemoryManager::free_node(void* value) {
free(0, value);
}

- template <class Slot>
- inline uint G1CardSetAllocator<Slot>::num_free_slots() const {
- return Atomic::load(&_num_free_slots);
- }

#endif // SHARE_GC_G1_G1CARDSETMEMORY_INLINE_HPP
@@ -26,6 +26,7 @@
#ifndef SHARE_GC_G1_G1SEGMENTEDARRAY_HPP
#define SHARE_GC_G1_G1SEGMENTEDARRAY_HPP

#include "gc/shared/freeListAllocator.hpp"
#include "memory/allocation.hpp"
#include "utilities/lockFreeStack.hpp"

@@ -181,7 +182,7 @@ class G1SegmentedArrayAllocOptions {
// Their values are only consistent within each other with extra global
// synchronization.
template <class Slot, MEMFLAGS flag>
- class G1SegmentedArray {
+ class G1SegmentedArray : public FreeListConfig {
// G1SegmentedArrayAllocOptions provides parameters for allocation segment
// sizing and expansion.
const G1SegmentedArrayAllocOptions* _alloc_options;
@@ -222,7 +223,10 @@ class G1SegmentedArray {
// be called in a globally synchronized area.
void drop_all();

- inline Slot* allocate();
+ inline void* allocate() override;

+ // We do not deallocate individual slots
+ inline void deallocate(void* node) override { ShouldNotReachHere(); }

inline uint num_segments() const;
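G1SegmentedArray now plays the FreeListConfig role: the free-list allocator asks it for fresh memory, and individual slots are never returned, so deallocate() is a programming error. Only the two overridden methods are visible in this diff, so the base class below is a simplified stand-in for that interface, not the HotSpot declaration; FreeListConfigSketch and BumpSegmentConfig are illustrative names.

```cpp
#include <cstdlib>
#include <cassert>
#include <cstdio>

// Simplified stand-in for the FreeListConfig role: a backing store that the
// free-list allocator uses to obtain (and optionally return) raw memory.
class FreeListConfigSketch {
public:
  virtual void* allocate() = 0;
  virtual void  deallocate(void* node) = 0;
  virtual ~FreeListConfigSketch() = default;
};

// Analogue of G1SegmentedArray: hands out fixed-size slots carved from larger
// segments and never frees individual slots, so deallocate() is unreachable
// (ShouldNotReachHere() in HotSpot). Segments are intentionally never freed
// here; error handling is omitted for brevity.
class BumpSegmentConfig : public FreeListConfigSketch {
  static constexpr size_t slot_size = 16;
  static constexpr size_t slots_per_segment = 64;
  char*  _segment = nullptr;
  size_t _used    = 0;

public:
  void* allocate() override {
    if (_segment == nullptr || _used == slots_per_segment) {
      _segment = static_cast<char*>(std::malloc(slot_size * slots_per_segment));
      _used = 0;
    }
    return _segment + slot_size * _used++;
  }
  void deallocate(void*) override {
    assert(false && "individual slots are never deallocated");
  }
};

int main() {
  BumpSegmentConfig config;
  void* a = config.allocate();
  void* b = config.allocate();
  std::printf("two slots: %p %p\n", a, b);
}
```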

@@ -210,7 +210,7 @@ void G1SegmentedArray<Slot, flag>::drop_all() {
}

template <class Slot, MEMFLAGS flag>
- Slot* G1SegmentedArray<Slot, flag>::allocate() {
+ void* G1SegmentedArray<Slot, flag>::allocate() {
assert(slot_size() > 0, "instance size not set.");

G1SegmentedArraySegment<flag>* cur = Atomic::load_acquire(&_first);
