Skip to content
This repository has been archived by the owner. It is now read-only.
Permalink
Browse files
8240873: Shenandoah: Short-cut arraycopy barriers
Reviewed-by: shade
  • Loading branch information
rkennke committed Mar 12, 2020
1 parent 70e730f commit e5ab701157a2ba6810587484dc971745cb4567d9
@@ -124,6 +124,7 @@ class ShenandoahBarrierSet: public BarrierSet {
template <class T>
oop load_reference_barrier_native_impl(oop obj, T* load_addr);

inline bool skip_bulk_update(HeapWord* dst);
public:
// Callbacks for runtime accesses.
template <DecoratorSet decorators, typename BarrierSetT = ShenandoahBarrierSet>
@@ -293,15 +293,18 @@ void ShenandoahBarrierSet::arraycopy_work(T* src, size_t count) {

template <class T>
void ShenandoahBarrierSet::arraycopy_pre_work(T* src, T* dst, size_t count) {
if (_heap->is_concurrent_mark_in_progress()) {
if (_heap->is_concurrent_mark_in_progress() &&
!_heap->marking_context()->allocated_after_mark_start(reinterpret_cast<HeapWord*>(dst))) {
if (_heap->has_forwarded_objects()) {
arraycopy_work<T, true, false, true>(dst, count);
} else {
arraycopy_work<T, false, false, true>(dst, count);
}
}

arraycopy_update_impl(src, count);
if (_heap->has_forwarded_objects()) {
arraycopy_update_impl(src, count);
}
}

void ShenandoahBarrierSet::arraycopy_pre(oop* src, oop* dst, size_t count) {
@@ -312,8 +315,13 @@ void ShenandoahBarrierSet::arraycopy_pre(narrowOop* src, narrowOop* dst, size_t
arraycopy_pre_work(src, dst, count);
}

// Returns true when a bulk reference-update pass over memory starting at dst
// can be skipped: addresses at or above the owning region's update watermark
// have not been scanned for stale references yet, so they hold no forwarded
// pointers that would need fixing.
inline bool ShenandoahBarrierSet::skip_bulk_update(HeapWord* dst) {
  HeapWord* watermark = _heap->heap_region_containing(dst)->get_update_watermark();
  return dst >= watermark;
}

template <class T>
void ShenandoahBarrierSet::arraycopy_update_impl(T* src, size_t count) {
if (skip_bulk_update(reinterpret_cast<HeapWord*>(src))) return;
if (_heap->is_evacuation_in_progress()) {
ShenandoahEvacOOMScope oom_evac;
arraycopy_work<T, true, true, false>(src, count);
@@ -82,6 +82,7 @@ void ShenandoahBarrierSet::clone_barrier(oop obj) {
// that potentially need to be updated.

shenandoah_assert_correct(NULL, obj);
if (skip_bulk_update(cast_from_oop<HeapWord*>(obj))) return;
if (_heap->is_evacuation_in_progress()) {
ShenandoahEvacOOMScope evac_scope;
ShenandoahUpdateRefsForOopClosure</* evac = */ true, /* enqueue */ false> cl;
@@ -2403,6 +2403,7 @@ class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
if (r->is_active() && !r->is_cset()) {
_heap->marked_object_oop_iterate(r, &cl, update_watermark);
}
r->set_update_watermark(r->bottom());
if (ShenandoahPacing) {
_heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
}
@@ -261,7 +261,7 @@ class ShenandoahHeapRegion : public ContiguousSpace {
volatile size_t _live_data;
volatile size_t _critical_pins;

HeapWord* _update_watermark;
HeapWord* volatile _update_watermark;

// Claim some space at the end to protect next region
DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
@@ -432,12 +432,12 @@ class ShenandoahHeapRegion : public ContiguousSpace {

// Reads the update-refs watermark for this region. The acquire load pairs with
// the release store in set_update_watermark(), so a reader that observes the
// new watermark also observes all heap updates published before it was set.
HeapWord* get_update_watermark() const {
  // Plain read in the assert is fine: we only sanity-check bounds here;
  // the synchronizing read is the acquire load below.
  assert(bottom() <= _update_watermark && _update_watermark <= top(), "within bounds");
  return Atomic::load_acquire(&_update_watermark);
}

// Publishes a new update-refs watermark. The release store pairs with the
// acquire load in get_update_watermark(); the stale plain store that preceded
// it has been removed — it raced with concurrent readers and provided no
// ordering guarantee.
void set_update_watermark(HeapWord* w) {
  assert(bottom() <= w && w <= top(), "within bounds");
  Atomic::release_store(&_update_watermark, w);
}

private:
@@ -56,6 +56,7 @@ class ShenandoahMarkingContext : public CHeapObj<mtGC> {
inline bool is_marked(oop obj) const;

inline bool allocated_after_mark_start(oop obj) const;
inline bool allocated_after_mark_start(HeapWord* addr) const;

inline MarkBitMap* mark_bit_map();

@@ -42,6 +42,10 @@ inline bool ShenandoahMarkingContext::is_marked(oop obj) const {

// Convenience overload: an object is "allocated after mark start" iff its
// address is. Delegates to the HeapWord* overload, which compares against the
// region's top-at-mark-start.
inline bool ShenandoahMarkingContext::allocated_after_mark_start(oop obj) const {
  return allocated_after_mark_start(cast_from_oop<HeapWord*>(obj));
}

inline bool ShenandoahMarkingContext::allocated_after_mark_start(HeapWord* addr) const {
uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
HeapWord* top_at_mark_start = _top_at_mark_starts[index];
bool alloc_after_mark_start = addr >= top_at_mark_start;
@@ -314,6 +314,7 @@ void ShenandoahTraversalGC::prepare_regions() {
ShenandoahMarkingContext* const ctx = _heap->marking_context();
for (size_t i = 0; i < num_regions; i++) {
ShenandoahHeapRegion* region = _heap->get_region(i);
region->set_update_watermark(region->top());
if (_heap->is_bitmap_slice_committed(region)) {
if (_traversal_set.is_in(i)) {
ctx->capture_top_at_mark_start(region);

0 comments on commit e5ab701

Please sign in to comment.