47 commits
a03671e  WIP: MMTkUnlogBitBarrierSetRuntime (wks, Sep 19, 2025)
6cd0fdd  Use one fewer scratch register (wks, Sep 19, 2025)
153a459  No. We can't. We can't use either tmp1 or tmp2. (wks, Sep 19, 2025)
b826a07  Extract assembler fast path (wks, Sep 19, 2025)
43f1ac5  Apply unlog bit runtime barrier fast path to SATB (wks, Sep 19, 2025)
3f3c193  Apply unlog bit assembler barrier fast path to SATB (wks, Sep 19, 2025)
9be6ae1  Extract common part of pre and post from assembler (wks, Sep 19, 2025)
efc92a6  Define __ separately for each function impl (wks, Sep 20, 2025)
5d49e81  Extract c1 pre/post write barrier runtime stub (wks, Sep 20, 2025)
0b0915f  Hoist C1 runtime stubs up to BarrierSetAssembler (wks, Sep 20, 2025)
19df558  Adjust visibility (wks, Sep 20, 2025)
a6cb14d  Object barrier C1 no use slot or new_val (wks, Sep 20, 2025)
344f546  MMTkC1PreBarrierStub: only src (wks, Sep 20, 2025)
236b578  No needs_patching (wks, Sep 20, 2025)
4c3847e  No touching new_val (wks, Sep 20, 2025)
7d72ed3  No touching slot operand in SATB C1 barrier (wks, Sep 21, 2025)
822c58e  C1 pre/post barrier method only access arg (wks, Sep 21, 2025)
cd81d20  Extract C1 unlog bit check fast paths (wks, Sep 21, 2025)
53d68a0  Extract C1 pre/post barrier to UnlogBarrierSet (wks, Sep 21, 2025)
fbdba71  Comments (wks, Sep 21, 2025)
24b82ab  Revert Cargo.toml to online mmtk-core repo (wks, Sep 22, 2025)
5969878  Remove commented-out hack (wks, Sep 22, 2025)
684bfa3  Update comments to link to bug tracker (wks, Sep 22, 2025)
2443252  Comments and spacing (wks, Sep 22, 2025)
4c2eddb  Ensure all cpp files include precompiled.hpp (wks, Sep 22, 2025)
e0ae0bc  One runtime code blob per function (wks, Sep 22, 2025)
4abf58a  Why do we push registers when we can save_live_registers_no_oop_map? (wks, Sep 22, 2025)
0ebac36  Name (wks, Sep 22, 2025)
8bc5eab  Rewrite some assembly in SATB barrier (wks, Sep 22, 2025)
de68de8  Remove irrelevant comment. (wks, Sep 22, 2025)
43de746  MMTkC1ReferenceLoadBarrierStub no code emit info (wks, Sep 22, 2025)
f6e0659  Comment and minor fix for code stubs (wks, Sep 22, 2025)
cdc6455  Merged C2 pre/post barriers for unlog barriers (wks, Sep 23, 2025)
3af9665  Remove unused macros. (wks, Sep 23, 2025)
2e80f07  Replace WEAK_REFERENCE_LOAD_BARRIER with a runtime flag (wks, Sep 23, 2025)
e507195  Remove unused macros (wks, Sep 24, 2025)
e9fa0d0  Revert changes to C1 write barrier signatures. (wks, Sep 24, 2025)
73fa6c4  Section separators and file header comments (wks, Sep 24, 2025)
75e467a  Merge branch 'master' into fix/mmtk-openjdk-barrier-refactoring (wks, Sep 24, 2025)
1c6990b  Bump mmtk-core revision (wks, Sep 24, 2025)
1770104  Remove new-idea comment (wks, Sep 24, 2025)
3ee9112  Don't disrupt the code order too much for no reason (wks, Sep 24, 2025)
85c61b3  Cite the source of reference_load_barrier_for_unknown_load (wks, Sep 25, 2025)
63bffdd  Fix comment (wks, Sep 25, 2025)
940c691  Shouldn't remove SATB barrier if val is null (wks, Oct 16, 2025)
9f5efe0  Bump mmtk-core revision (wks, Oct 17, 2025)
a31a3ba  Add missing /* pre = */ comment (wks, Sep 25, 2025)
28 changes: 14 additions & 14 deletions mmtk/Cargo.lock

(Generated file; the diff is not rendered.)

2 changes: 1 addition & 1 deletion mmtk/Cargo.toml
@@ -36,7 +36,7 @@ probe = "0.5"
# - change branch
# - change repo name
# But other changes including adding/removing whitespaces in commented lines may break the CI.
mmtk = { git = "https://github.com/mmtk/mmtk-core.git", rev = "a4dd70cb70a116a32b1bbb20501c48f77f49181b" }
mmtk = { git = "https://github.com/mmtk/mmtk-core.git", rev = "1ffa5b325a07d4fe99fe7e7008b295f8febea407" }
# Uncomment the following to build locally
# mmtk = { path = "../repos/mmtk-core" }

203 changes: 12 additions & 191 deletions openjdk/barriers/mmtkObjectBarrier.cpp
@@ -2,15 +2,13 @@
#include "mmtkObjectBarrier.hpp"
#include "runtime/interfaceSupport.inline.hpp"

//////////////////// Runtime ////////////////////

void MMTkObjectBarrierSetRuntime::object_probable_write(oop new_obj) const {
if (mmtk_enable_barrier_fastpath) {
// Do fast-path check before entering mmtk rust code, to improve mutator performance.
// This is identical to calling `mmtk_object_probable_write` directly without a fast-path.
intptr_t addr = (intptr_t) (void*) new_obj;
uint8_t* meta_addr = (uint8_t*) (SIDE_METADATA_BASE_ADDRESS + (addr >> 6));
intptr_t shift = (addr >> 3) & 0b111;
uint8_t byte_val = *meta_addr;
if (((byte_val >> shift) & 1) == 1) {
if (is_unlog_bit_set(new_obj)) {
// Only promoted objects will reach here.
// The duplicated unlog bit check inside slow-path still remains correct.
mmtk_object_probable_write((MMTk_Mutator) &Thread::current()->third_party_heap_mutator, (void*) new_obj);
@@ -23,70 +23,21 @@ void MMTkObjectBarrierSetRuntime::object_probable_write(oop new_obj) const {

void MMTkObjectBarrierSetRuntime::object_reference_write_post(oop src, oop* slot, oop target) const {
if (mmtk_enable_barrier_fastpath) {
intptr_t addr = (intptr_t) (void*) src;
uint8_t* meta_addr = (uint8_t*) (SIDE_METADATA_BASE_ADDRESS + (addr >> 6));
intptr_t shift = (addr >> 3) & 0b111;
uint8_t byte_val = *meta_addr;
if (((byte_val >> shift) & 1) == 1) {
// MMTkObjectBarrierSetRuntime::object_reference_write_pre_slow()((void*) src);
if (is_unlog_bit_set(src)) {
object_reference_write_slow_call((void*) src, (void*) slot, (void*) target);
}
} else {
object_reference_write_post_call((void*) src, (void*) slot, (void*) target);
}
}
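
Note: the open-coded side-metadata probe removed above is now hidden behind `is_unlog_bit_set`, whose definition is not part of this diff. A minimal sketch of what it presumably does, reconstructed from the removed inline fast path (the real helper may differ in naming and qualifiers):

// Sketch only: body reconstructed from the inline check removed above.
static inline bool is_unlog_bit_set(oop obj) {
  intptr_t addr = (intptr_t) (void*) obj;
  // One unlog bit per 8-byte heap word; one metadata byte covers 64 bytes of heap.
  uint8_t* meta_addr = (uint8_t*) (SIDE_METADATA_BASE_ADDRESS + (addr >> 6));
  intptr_t shift = (addr >> 3) & 0b111;
  uint8_t byte_val = *meta_addr;
  // 1 means the object is still unlogged, so the caller must take the slow path.
  return ((byte_val >> shift) & 1) == 1;
}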

//////////////////// Assembler ////////////////////

#define __ masm->

void MMTkObjectBarrierSetAssembler::object_reference_write_post(MacroAssembler* masm, DecoratorSet decorators, Address dst, Register val, Register tmp1, Register tmp2, bool compensate_val_reg) const {
if (can_remove_barrier(decorators, val, /* skip_const_null */ true)) return;

bool is_not_null = (decorators & IS_NOT_NULL) != 0;

Label done;
Register obj = dst.base();
if (mmtk_enable_barrier_fastpath) {
Register tmp3 = rscratch1;
Register tmp4 = rscratch2;
assert_different_registers(obj, tmp2, tmp3);
assert_different_registers(tmp4, rcx);

// tmp2 = load-byte (SIDE_METADATA_BASE_ADDRESS + (obj >> 6));
__ movptr(tmp3, obj);
__ shrptr(tmp3, 6);
__ movptr(tmp2, SIDE_METADATA_BASE_ADDRESS);
__ movb(tmp2, Address(tmp2, tmp3));
// tmp3 = (obj >> 3) & 7
__ movptr(tmp3, obj);
__ shrptr(tmp3, 3);
__ andptr(tmp3, 7);
// tmp2 = tmp2 >> tmp3
__ movptr(tmp4, rcx);
__ movl(rcx, tmp3);
__ shrptr(tmp2);
__ movptr(rcx, tmp4);
// if ((tmp2 & 1) == 1) goto slowpath;
__ andptr(tmp2, 1);
__ cmpptr(tmp2, 1);
__ jcc(Assembler::notEqual, done);
}

__ movptr(c_rarg0, obj);
__ xorptr(c_rarg1, c_rarg1);
// Note: If `compensate_val_reg == true && UseCompressedOops == true`, the `val` register will be
// holding a compressed pointer to the target object. If the write barrier needs to know the
// target, we will need to decompress it before passing it to the barrier slow path. However,
// since we know the semantics of `mmtk::plan::barriers::ObjectBarrier`, i.e. it logs the object
// without looking at the `slot` or the `target` parameter at all, we simply pass nullptr to both
// parameters.
__ xorptr(c_rarg2, c_rarg2);

if (mmtk_enable_barrier_fastpath) {
__ call_VM_leaf_base(FN_ADDR(MMTkBarrierSetRuntime::object_reference_write_slow_call), 3);
__ bind(done);
} else {
__ call_VM_leaf_base(FN_ADDR(MMTkBarrierSetRuntime::object_reference_write_post_call), 3);
}
object_reference_write_pre_or_post(masm, decorators, dst, val, /* pre = */ false);
}
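
Note: the x86 fast path deleted above now lives in the shared `object_reference_write_pre_or_post` helper ("Extract common part of pre and post from assembler"). Roughly what that helper still has to emit, reconstructed from the removed assembly and annotated; register assignments in the real helper may differ:

// Sketch only: reconstructed from the assembly removed above.
Label done;
Register obj  = dst.base();
Register tmp3 = rscratch1;   // scratch for address arithmetic
Register tmp4 = rscratch2;   // temporary home for rcx
// tmp2 = *(uint8_t*) (SIDE_METADATA_BASE_ADDRESS + (obj >> 6)), the metadata byte for obj
__ movptr(tmp3, obj);
__ shrptr(tmp3, 6);
__ movptr(tmp2, SIDE_METADATA_BASE_ADDRESS);
__ movb(tmp2, Address(tmp2, tmp3));
// tmp3 = (obj >> 3) & 7, the index of obj's unlog bit within that byte
__ movptr(tmp3, obj);
__ shrptr(tmp3, 3);
__ andptr(tmp3, 7);
// tmp2 >>= tmp3. x86 variable shifts take their count in CL, hence the rcx spill.
__ movptr(tmp4, rcx);
__ movl(rcx, tmp3);
__ shrptr(tmp2);
__ movptr(rcx, tmp4);
// Unlog bit clear means the object is already logged: skip the runtime call.
__ andptr(tmp2, 1);
__ cmpptr(tmp2, 1);
__ jcc(Assembler::notEqual, done);
// ... out-of-line call to the slow path with (obj, nullptr, nullptr), then:
__ bind(done);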

void MMTkObjectBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register src, Register dst, Register count) {
@@ -135,56 +84,7 @@ void MMTkObjectBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, Dec

#undef __

#define __ sasm->

void MMTkObjectBarrierSetAssembler::generate_c1_post_write_barrier_runtime_stub(StubAssembler* sasm) const {
__ prologue("mmtk_object_barrier", false);

Label done, runtime;

__ push(c_rarg0);
__ push(c_rarg1);
__ push(c_rarg2);
__ push(rax);

__ load_parameter(0, c_rarg0);
__ load_parameter(1, c_rarg1);
__ load_parameter(2, c_rarg2);

__ bind(runtime);

__ save_live_registers_no_oop_map(true);

if (mmtk_enable_barrier_fastpath) {
__ call_VM_leaf_base(FN_ADDR(MMTkBarrierSetRuntime::object_reference_write_slow_call), 3);
} else {
__ call_VM_leaf_base(FN_ADDR(MMTkBarrierSetRuntime::object_reference_write_post_call), 3);
}

__ restore_live_registers(true);

__ bind(done);
__ pop(rax);
__ pop(c_rarg2);
__ pop(c_rarg1);
__ pop(c_rarg0);

__ epilogue();
}

#undef __
#define __ ce->masm()->

void MMTkObjectBarrierSetAssembler::generate_c1_post_write_barrier_stub(LIR_Assembler* ce, MMTkC1PostBarrierStub* stub) const {
MMTkBarrierSetC1* bs = (MMTkBarrierSetC1*) BarrierSet::barrier_set()->barrier_set_c1();
__ bind(*stub->entry());
ce->store_parameter(stub->src->as_pointer_register(), 0);
ce->store_parameter(stub->slot->as_pointer_register(), 1);
ce->store_parameter(stub->new_val->as_pointer_register(), 2);
__ call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
__ jmp(*stub->continuation());
}
#undef __
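
Note: the two stub generators removed here were hoisted into the shared assembler layer ("Extract c1 pre/post write barrier runtime stub", "Hoist C1 runtime stubs up to BarrierSetAssembler", "One runtime code blob per function"). The hoisted generator is not shown in this diff; a hypothetical sketch, assuming it is simply the removed stub parameterized over the runtime entry point, with the redundant pushes dropped (cf. "Why do we push registers when we can save_live_registers_no_oop_map?"):

#define __ sasm->
// Hypothetical: name, signature and argument count are assumptions; the body
// mirrors the runtime stub removed above.
void MMTkBarrierSetAssembler::generate_c1_write_barrier_runtime_stub(StubAssembler* sasm, address entry, const char* name) const {
  __ prologue(name, false);
  // Fetch the arguments the code stub passed via store_parameter.
  __ load_parameter(0, c_rarg0);
  __ load_parameter(1, c_rarg1);
  __ load_parameter(2, c_rarg2);
  // Spill caller state around the leaf call into the MMTk runtime barrier.
  __ save_live_registers_no_oop_map(true);
  __ call_VM_leaf_base(entry, 3);
  __ restore_live_registers(true);
  __ epilogue();
}
#undef __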
//////////////////// C1 ////////////////////

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
@@ -193,100 +93,21 @@
#endif

void MMTkObjectBarrierSetC1::object_reference_write_post(LIRAccess& access, LIR_Opr src, LIR_Opr slot, LIR_Opr new_val) const {
LIRGenerator* gen = access.gen();
DecoratorSet decorators = access.decorators();
if ((decorators & IN_HEAP) == 0) return;
if (!src->is_register()) {
LIR_Opr reg = gen->new_pointer_register();
if (src->is_constant()) {
__ move(src, reg);
} else {
__ leal(src, reg);
}
src = reg;
}
assert(src->is_register(), "must be a register at this point");
if (!slot->is_register()) {
LIR_Opr reg = gen->new_pointer_register();
if (slot->is_constant()) {
__ move(slot, reg);
} else {
__ leal(slot, reg);
}
slot = reg;
}
assert(slot->is_register(), "must be a register at this point");
if (!new_val->is_register()) {
LIR_Opr new_val_reg = gen->new_register(T_OBJECT);
if (new_val->is_constant()) {
__ move(new_val, new_val_reg);
} else {
__ leal(new_val, new_val_reg);
}
new_val = new_val_reg;
}
assert(new_val->is_register(), "must be a register at this point");
CodeStub* slow = new MMTkC1PostBarrierStub(src, slot, new_val);

if (mmtk_enable_barrier_fastpath) {
LIR_Opr addr = src;
// uint8_t* meta_addr = (uint8_t*) (SIDE_METADATA_BASE_ADDRESS + (addr >> 6));
LIR_Opr offset = gen->new_pointer_register();
__ move(addr, offset);
__ unsigned_shift_right(offset, 6, offset);
LIR_Opr base = gen->new_pointer_register();
__ move(LIR_OprFact::longConst(SIDE_METADATA_BASE_ADDRESS), base);
LIR_Address* meta_addr = new LIR_Address(base, offset, T_BYTE);
// uint8_t byte_val = *meta_addr;
LIR_Opr byte_val = gen->new_register(T_INT);
__ move(meta_addr, byte_val);
// intptr_t shift = (addr >> 3) & 0b111;
LIR_Opr shift = gen->new_register(T_INT);
__ move(addr, shift);
__ unsigned_shift_right(shift, 3, shift);
__ logical_and(shift, LIR_OprFact::intConst(0b111), shift);
// if (((byte_val >> shift) & 1) == 1) slow;
LIR_Opr result = byte_val;
__ unsigned_shift_right(result, shift, result, LIR_OprFact::illegalOpr);
__ logical_and(result, LIR_OprFact::intConst(1), result);
__ cmp(lir_cond_equal, result, LIR_OprFact::intConst(1));
__ branch(lir_cond_equal, T_BYTE, slow);
} else {
__ jump(slow);
}

__ branch_destination(slow->continuation());
object_reference_write_pre_or_post(access, src, /* pre = */ false);
}

#undef __
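
Note: the LIR fast path and the register coercion removed above were extracted into `object_reference_write_pre_or_post` on the unlog-bit barrier layer ("Extract C1 unlog bit check fast paths", "Extract C1 pre/post barrier to UnlogBarrierSet"), and the code stubs now only carry `src` ("MMTkC1PreBarrierStub: only src"). A rough sketch of what that shared helper plausibly looks like; the class name and stub constructors are assumptions:

// Sketch only: reconstructed from the LIR code removed above.
// `__` is the usual LIR shorthand (gen->lir()->) defined earlier in this file.
void MMTkUnlogBitBarrierSetC1::object_reference_write_pre_or_post(LIRAccess& access, LIR_Opr src, bool pre) const {
  LIRGenerator* gen = access.gen();
  if ((access.decorators() & IN_HEAP) == 0) return;
  // Coerce `src` into a pointer register, as the removed code did for src/slot/new_val.
  if (!src->is_register()) {
    LIR_Opr reg = gen->new_pointer_register();
    if (src->is_constant()) {
      __ move(src, reg);
    } else {
      __ leal(src, reg);
    }
    src = reg;
  }
  // The stubs only need the source object now.
  CodeStub* slow = pre ? (CodeStub*) new MMTkC1PreBarrierStub(src)
                       : (CodeStub*) new MMTkC1PostBarrierStub(src);
  if (mmtk_enable_barrier_fastpath) {
    // byte_val = *(uint8_t*) (SIDE_METADATA_BASE_ADDRESS + (src >> 6))
    LIR_Opr offset = gen->new_pointer_register();
    __ move(src, offset);
    __ unsigned_shift_right(offset, 6, offset);
    LIR_Opr base = gen->new_pointer_register();
    __ move(LIR_OprFact::longConst(SIDE_METADATA_BASE_ADDRESS), base);
    LIR_Opr byte_val = gen->new_register(T_INT);
    __ move(new LIR_Address(base, offset, T_BYTE), byte_val);
    // if (((byte_val >> ((src >> 3) & 7)) & 1) == 1) goto slow;
    LIR_Opr shift = gen->new_register(T_INT);
    __ move(src, shift);
    __ unsigned_shift_right(shift, 3, shift);
    __ logical_and(shift, LIR_OprFact::intConst(0b111), shift);
    __ unsigned_shift_right(byte_val, shift, byte_val, LIR_OprFact::illegalOpr);
    __ logical_and(byte_val, LIR_OprFact::intConst(1), byte_val);
    __ cmp(lir_cond_equal, byte_val, LIR_OprFact::intConst(1));
    __ branch(lir_cond_equal, T_BYTE, slow);
  } else {
    __ jump(slow);
  }
  __ branch_destination(slow->continuation());
}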

//////////////////// C2 ////////////////////

#define __ ideal.

void MMTkObjectBarrierSetC2::object_reference_write_post(GraphKit* kit, Node* src, Node* slot, Node* val) const {
if (can_remove_barrier(kit, &kit->gvn(), src, slot, val, /* skip_const_null */ true)) return;

MMTkIdealKit ideal(kit, true);

if (mmtk_enable_barrier_fastpath) {
Node* no_base = __ top();
float unlikely = PROB_UNLIKELY(0.999);

Node* zero = __ ConI(0);
Node* addr = __ CastPX(__ ctrl(), src);
Node* meta_addr = __ AddP(no_base, __ ConP(SIDE_METADATA_BASE_ADDRESS), __ URShiftX(addr, __ ConI(6)));
Node* byte = __ load(__ ctrl(), meta_addr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
Node* shift = __ URShiftX(addr, __ ConI(3));
shift = __ AndI(__ ConvL2I(shift), __ ConI(7));
Node* result = __ AndI(__ URShiftI(byte, shift), __ ConI(1));

__ if_then(result, BoolTest::ne, zero, unlikely); {
const TypeFunc* tf = __ func_type(TypeOopPtr::BOTTOM, TypeOopPtr::BOTTOM, TypeOopPtr::BOTTOM);
Node* x = __ make_leaf_call(tf, FN_ADDR(MMTkBarrierSetRuntime::object_reference_write_slow_call), "mmtk_barrier_call", src, slot, val);
} __ end_if();
} else {
const TypeFunc* tf = __ func_type(TypeOopPtr::BOTTOM, TypeOopPtr::BOTTOM, TypeOopPtr::BOTTOM);
Node* x = __ make_leaf_call(tf, FN_ADDR(MMTkBarrierSetRuntime::object_reference_write_post_call), "mmtk_barrier_call", src, slot, val);
}
object_reference_write_pre_or_post(ideal, src, /* pre = */ false);

kit->final_sync(ideal); // Final sync IdealKit and GraphKit.
}
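
Note: the C2 consolidation mirrors the C1 one ("Merged C2 pre/post barriers for unlog barriers"): the IdealKit fast path removed above moves into the shared `object_reference_write_pre_or_post` helper, and the SATB pre-barrier presumably flows through the same helper with `pre = true`. Only the post-barrier variant below actually appears in this diff; the SATB class name and body are inferred and may not match the real code:

// Object barrier (post): check the unlog bit of the source object after the store.
void MMTkObjectBarrierSetC2::object_reference_write_post(GraphKit* kit, Node* src, Node* slot, Node* val) const {
  if (can_remove_barrier(kit, &kit->gvn(), src, slot, val, /* skip_const_null */ true)) return;
  MMTkIdealKit ideal(kit, true);
  object_reference_write_pre_or_post(ideal, src, /* pre = */ false);
  kit->final_sync(ideal);  // Final sync IdealKit and GraphKit.
}

// SATB barrier (pre): the same check, but before the store, and deliberately not
// elided for null values ("Shouldn't remove SATB barrier if val is null").
void MMTkSATBBarrierSetC2::object_reference_write_pre(GraphKit* kit, Node* src, Node* slot, Node* val) const {
  MMTkIdealKit ideal(kit, true);
  object_reference_write_pre_or_post(ideal, src, /* pre = */ true);
  kit->final_sync(ideal);
}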