8260941: Remove the conc_scan parameter for CardTable
Reviewed-by: ayang, kbarrett
Thomas Schatzl committed Feb 12, 2021
1 parent da9895a commit 9c0ec8d8485e1f1e44aee9bbde711143508759aa
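Context for the diff below: after this change, whether a card mark must be ordered with respect to the preceding reference store is answered by the barrier set itself (G1 overrides card_mark_must_follow_store() to return true; the CardTableBarrierSet default becomes false) instead of by a per-card-table flag. The scanned_concurrently() checks removed below all sit on shared paths that only the stop-the-world card-table collectors execute, where the flag was always false, so the shared post-barrier collapses to a plain byte store. A minimal self-contained sketch of that resulting barrier, with names and the card size modeled on cardTable.hpp (an illustration, not the HotSpot source):

#include <cstdint>

typedef uint8_t CardValue;              // models CardTable::CardValue
static const int card_shift = 9;        // 512-byte cards, as in HotSpot
static const CardValue dirty_card = 0;  // CardTable::dirty_card_val() == 0, asserted in the diff

// The post-barrier after this change: index the card table and dirty the
// card with a plain, unordered store -- no StoreStore membar, no release.
inline void write_ref_field_post(CardValue* byte_map_base, const void* field) {
  CardValue* byte = byte_map_base + (reinterpret_cast<uintptr_t>(field) >> card_shift);
  *byte = dirty_card;
}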
src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp
@@ -38,9 +38,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
   BarrierSet* bs = BarrierSet::barrier_set();
   assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind");
 
-  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
-  CardTable* ct = ctbs->card_table();
-
   __ lsr(obj, obj, CardTable::card_shift);
 
   assert(CardTable::dirty_card_val() == 0, "must be");
@@ -55,19 +52,12 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
     __ strb(zr, Address(obj, rscratch1));
     __ bind(L_already_dirty);
   } else {
-    if (ct->scanned_concurrently()) {
-      __ membar(Assembler::StoreStore);
-    }
     __ strb(zr, Address(obj, rscratch1));
   }
 }
 
 void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                                     Register start, Register count, Register scratch, RegSet saved_regs) {
-  BarrierSet* bs = BarrierSet::barrier_set();
-  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
-  CardTable* ct = ctbs->card_table();
-
   Label L_loop, L_done;
   const Register end = count;
 
@@ -81,9 +71,6 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
 
   __ load_byte_map_base(scratch);
   __ add(start, start, scratch);
-  if (ct->scanned_concurrently()) {
-    __ membar(__ StoreStore);
-  }
   __ bind(L_loop);
   __ strb(zr, Address(start, count));
   __ subs(count, count, 1);
src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
@@ -366,9 +366,6 @@ void LIRGenerator::set_card(LIR_Opr value, LIR_Address* card_addr) {
 void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) {
   assert(addr->is_register(), "must be a register at this point");
 
-  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
-  CardTable* ct = ctbs->card_table();
-
   LIR_Opr tmp = FrameMap::LR_ptr_opr;
 
   bool load_card_table_base_const = VM_Version::supports_movw();
@@ -382,9 +379,6 @@ void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LI
   // byte instruction does not support the addressing mode we need.
   LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTable::card_shift, 0, T_BOOLEAN);
   if (UseCondCardMark) {
-    if (ct->scanned_concurrently()) {
-      __ membar_storeload();
-    }
     LIR_Opr cur_value = new_register(T_INT);
     __ move(card_addr, cur_value);
 
@@ -394,9 +388,6 @@ void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LI
     set_card(tmp, card_addr);
     __ branch_destination(L_already_dirty->label());
   } else {
-    if (ct->scanned_concurrently()) {
-      __ membar_storestore();
-    }
     set_card(tmp, card_addr);
   }
 }
src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp
@@ -128,16 +128,10 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
   assert(bs->kind() == BarrierSet::CardTableBarrierSet,
          "Wrong barrier set kind");
 
-  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
-  CardTable* ct = ctbs->card_table();
-
   assert(CardTable::dirty_card_val() == 0, "Dirty card value must be 0 due to optimizations.");
   Address card_table_addr(card_table_base, obj, lsr, CardTable::card_shift);
 
   if (UseCondCardMark) {
-    if (ct->scanned_concurrently()) {
-      __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), noreg);
-    }
     Label already_dirty;
 
     __ ldrb(tmp, card_table_addr);
@@ -147,9 +141,6 @@ void CardTableBarrierSetAssembler::store_check_part2(MacroAssembler* masm, Regis
     __ bind(already_dirty);
 
   } else {
-    if (ct->scanned_concurrently()) {
-      __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreStore), noreg);
-    }
     set_card(masm, card_table_base, card_table_addr, tmp);
   }
 }
src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp
@@ -43,14 +43,10 @@
 
 void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr,
                                                                     Register count, Register preserve) {
-  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
-  CardTable* ct = ctbs->card_table();
   assert_different_registers(addr, count, R0);
 
   Label Lskip_loop, Lstore_loop;
 
-  if (ct->scanned_concurrently()) { __ membar(Assembler::StoreStore); }
-
   __ sldi_(count, count, LogBytesPerHeapOop);
   __ beq(CCR0, Lskip_loop); // zero length
   __ addi(count, count, -BytesPerHeapOop);
@@ -74,13 +70,10 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
 void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
                                                     CardTable::CardValue* byte_map_base,
                                                     Register tmp, Register obj) {
-  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
-  CardTable* ct = ctbs->card_table();
   assert_different_registers(obj, tmp, R0);
   __ load_const_optimized(tmp, (address)byte_map_base, R0);
   __ srdi(obj, obj, CardTable::card_shift);
   __ li(R0, CardTable::dirty_card_val());
-  if (ct->scanned_concurrently()) { __ membar(Assembler::StoreStore); }
   __ stbx(R0, tmp, obj);
 }
 
src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp
@@ -118,9 +118,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
   int dirty = CardTable::dirty_card_val();
   if (UseCondCardMark) {
     Label L_already_dirty;
-    if (ct->scanned_concurrently()) {
-      __ membar(Assembler::StoreLoad);
-    }
     __ cmpb(card_addr, dirty);
     __ jcc(Assembler::equal, L_already_dirty);
     __ movb(card_addr, dirty);
src/hotspot/share/gc/g1/g1BarrierSet.hpp
@@ -53,6 +53,10 @@ class G1BarrierSet: public CardTableBarrierSet {
   G1BarrierSet(G1CardTable* table);
   ~G1BarrierSet() { }
 
+  virtual bool card_mark_must_follow_store() const {
+    return true;
+  }
+
   // Add "pre_val" to a set of objects that may have been disconnected from the
   // pre-marking object graph.
   static void enqueue(oop pre_val);
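This override preserves G1's behavior once the flag is gone: previously CardTableBarrierSet::card_mark_must_follow_store() returned _card_table->scanned_concurrently(), and G1CardTable was the only card table constructed with that flag set. Now G1 states the requirement directly and the base class returns false (see the cardTableBarrierSet.cpp hunk further down). A hedged sketch of how such a predicate is typically consumed (a hypothetical caller for illustration, not code from this patch):

// Hypothetical consumer of card_mark_must_follow_store() (illustration only).
struct BarrierSetLike {
  virtual bool card_mark_must_follow_store() const { return false; }
  virtual ~BarrierSetLike() {}
};

void flush_card_mark(BarrierSetLike* bs /*, ... card to dirty ... */) {
  if (bs->card_mark_must_follow_store()) {
    // Order the preceding object-field store before the card-mark store,
    // e.g. with a StoreStore barrier, before a concurrent scanner can
    // observe the dirty card.
  }
  // ... dirty the card ...
}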
src/hotspot/share/gc/g1/g1CardTable.hpp
@@ -79,7 +79,7 @@ class G1CardTable : public CardTable {
   STATIC_ASSERT(BitsPerByte == 8);
   static const size_t WordAlreadyScanned = (SIZE_MAX / 255) * g1_card_already_scanned;
 
-  G1CardTable(MemRegion whole_heap): CardTable(whole_heap, /* scanned concurrently */ true), _listener() {
+  G1CardTable(MemRegion whole_heap): CardTable(whole_heap), _listener() {
     _listener.set_card_table(this);
   }
 
src/hotspot/share/gc/parallel/psCardTable.hpp
@@ -51,7 +51,7 @@ class PSCardTable: public CardTable {
   };
 
  public:
-  PSCardTable(MemRegion whole_heap) : CardTable(whole_heap, /* scanned_concurrently */ false) {}
+  PSCardTable(MemRegion whole_heap) : CardTable(whole_heap) {}
 
   static CardValue youngergen_card_val() { return youngergen_card; }
   static CardValue verify_card_val()     { return verify_card; }
src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp
@@ -83,9 +83,6 @@ void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, L
   LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
   if (UseCondCardMark) {
     LIR_Opr cur_value = gen->new_register(T_INT);
-    if (ct->scanned_concurrently()) {
-      __ membar_storeload();
-    }
     __ move(card_addr, cur_value);
 
     LabelObj* L_already_dirty = new LabelObj();
@@ -94,9 +91,6 @@ void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, L
     __ move(dirty, card_addr);
     __ branch_destination(L_already_dirty->label());
   } else {
-    if (ct->scanned_concurrently()) {
-      __ membar_storestore();
-    }
     __ move(dirty, card_addr);
   }
 #endif
src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp
@@ -58,8 +58,6 @@ void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
                                          Node* val,
                                          BasicType bt,
                                          bool use_precise) const {
-  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
-  CardTable* ct = ctbs->card_table();
   // No store check needed if we're storing a NULL or an old object
   // (latter case is probably a string constant). The concurrent
   // mark sweep garbage collector, however, needs to have all nonNull
@@ -105,10 +103,6 @@ void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
   Node* zero = __ ConI(0); // Dirty card value
 
   if (UseCondCardMark) {
-    if (ct->scanned_concurrently()) {
-      kit->insert_mem_bar(Op_MemBarVolatile, oop_store);
-      __ sync_kit(kit);
-    }
     // The classic GC reference write barrier is typically implemented
     // as a store into the global card mark table. Unfortunately
     // unconditional stores can result in false sharing and excessive
@@ -121,12 +115,7 @@ void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
   }
 
   // Smash zero into card
-  if (!ct->scanned_concurrently()) {
-    __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered);
-  } else {
-    // Specialized path for CM store barrier
-    __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, T_BYTE, adr_type);
-  }
+  __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered);
 
   if (UseCondCardMark) {
     __ end_if();
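In the C2 hunks above, the storeCM node was the specialized "card mark" store that kept the card store ordered after the oop store for a concurrently scanned table, and the MemBarVolatile ahead of the conditional card-mark load served the same purpose. Both are gone, leaving an unconditional, unordered byte store. Under UseCondCardMark the barrier thus reduces to a plain test-then-store; a self-contained sketch of that shape (names assumed, illustration only):

#include <cstdint>

// Conditional card marking (UseCondCardMark): check the card first and skip
// the store when it is already dirty, to avoid false sharing on hot cards.
inline void cond_card_mark(volatile uint8_t* card) {
  const uint8_t dirty = 0;   // CardTable::dirty_card_val()
  if (*card != dirty) {      // the StoreLoad membar that used to precede
    *card = dirty;           // this load in the concurrent case is gone
  }
}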
src/hotspot/share/gc/shared/cardTable.cpp
@@ -41,8 +41,7 @@ size_t CardTable::compute_byte_map_size() {
   return align_up(_guard_index + 1, MAX2(_page_size, granularity));
 }
 
-CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
-  _scanned_concurrently(conc_scan),
+CardTable::CardTable(MemRegion whole_heap) :
   _whole_heap(whole_heap),
   _guard_index(0),
   _last_valid_index(0),
src/hotspot/share/gc/shared/cardTable.hpp
@@ -43,7 +43,6 @@ class CardTable: public CHeapObj<mtGC> {
 protected:
   // The declaration order of these const fields is important; see the
   // constructor before changing.
-  const bool      _scanned_concurrently;
   const MemRegion _whole_heap;    // the region covered by the card table
   size_t          _guard_index;   // index of very last element in the card
                                   // table; it is set to a guard value
@@ -113,7 +112,7 @@ class CardTable: public CHeapObj<mtGC> {
   static const intptr_t clean_card_row = (intptr_t)(-1);
 
 public:
-  CardTable(MemRegion whole_heap, bool conc_scan);
+  CardTable(MemRegion whole_heap);
   virtual ~CardTable();
   virtual void initialize();
 
@@ -245,7 +244,6 @@ class CardTable: public CHeapObj<mtGC> {
   // But since the heap starts at some higher address, this points to somewhere
   // before the beginning of the actual _byte_map.
   CardValue* byte_map_base() const  { return _byte_map_base; }
-  bool scanned_concurrently() const { return _scanned_concurrently; }
 
   virtual bool is_in_young(oop obj) const = 0;
 
src/hotspot/share/gc/shared/cardTableBarrierSet.cpp
@@ -192,5 +192,5 @@ void CardTableBarrierSet::on_thread_detach(Thread* thread) {
 }
 
 bool CardTableBarrierSet::card_mark_must_follow_store() const {
-  return _card_table->scanned_concurrently();
+  return false;
 }
src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp
@@ -32,12 +32,7 @@
 template <DecoratorSet decorators, typename T>
 inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) {
   volatile CardValue* byte = _card_table->byte_for(field);
-  if (_card_table->scanned_concurrently()) {
-    // Perform a releasing store if the card table is scanned concurrently
-    Atomic::release_store(byte, CardTable::dirty_card_val());
-  } else {
-    *byte = CardTable::dirty_card_val();
-  }
+  *byte = CardTable::dirty_card_val();
 }
 
 #endif // SHARE_GC_SHARED_CARDTABLEBARRIERSET_INLINE_HPP
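The runtime write barrier above loses its release store for the same reason: release ordering guaranteed that a concurrent scanner observing the dirty card would also observe the reference store that preceded it, and the remaining card-table collectors scan only at safepoints. The ordering argument, restated with std::atomic for illustration (a sketch, not HotSpot code; 0xff models clean_card_val() as a byte):

#include <atomic>
#include <cstdint>

std::atomic<void*>   field{nullptr};   // stands in for an oop slot
std::atomic<uint8_t> card{0xff};       // 0xff = clean (illustrative), 0 = dirty

// Old behavior: release-store the card so a scanner that reads the dirty
// card with acquire semantics is guaranteed to see the new reference.
void post_barrier_concurrent(void* new_val) {
  field.store(new_val, std::memory_order_relaxed);  // reference store
  card.store(0, std::memory_order_release);         // was Atomic::release_store
}

// New behavior: scanning happens only at safepoints, so a plain store suffices.
void post_barrier_stw(void* new_val) {
  field.store(new_val, std::memory_order_relaxed);
  card.store(0, std::memory_order_relaxed);         // was *byte = dirty_card_val()
}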
src/hotspot/share/gc/shared/cardTableRS.cpp
@@ -434,8 +434,8 @@ void CardTableRS::verify() {
   CardTable::verify();
 }
 
-CardTableRS::CardTableRS(MemRegion whole_heap, bool scanned_concurrently) :
-  CardTable(whole_heap, scanned_concurrently) { }
+CardTableRS::CardTableRS(MemRegion whole_heap) :
+  CardTable(whole_heap) { }
 
 void CardTableRS::initialize() {
   CardTable::initialize();
src/hotspot/share/gc/shared/cardTableRS.hpp
@@ -45,7 +45,7 @@ class CardTableRS : public CardTable {
   void verify_space(Space* s, HeapWord* gen_start);
 
 public:
-  CardTableRS(MemRegion whole_heap, bool scanned_concurrently);
+  CardTableRS(MemRegion whole_heap);
 
   void younger_refs_in_space_iterate(Space* sp, HeapWord* gen_boundary, OopIterateClosure* cl);
 
src/hotspot/share/gc/shared/genCollectedHeap.cpp
@@ -142,7 +142,7 @@ jint GenCollectedHeap::initialize() {
 }
 
 CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
-  return new CardTableRS(reserved_region, false /* scan_concurrently */);
+  return new CardTableRS(reserved_region);
 }
 
 void GenCollectedHeap::initialize_size_policy(size_t init_eden_size,
