8293007: riscv: failed to build after JDK-8290025
Reviewed-by: fyang, fjiang, shade
Yadong Wang authored and RealFYang committed Aug 30, 2022
1 parent 9424d6d commit e016363
Showing 16 changed files with 256 additions and 73 deletions.
2 changes: 1 addition & 1 deletion src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
@@ -310,7 +310,7 @@ void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {

// Insert nmethod entry barrier into frame.
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(this);
bs->nmethod_entry_barrier(this, NULL /* slow_path */, NULL /* continuation */, NULL /* guard */);
}

void C1_MacroAssembler::remove_frame(int framesize) {
30 changes: 30 additions & 0 deletions src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
@@ -28,6 +28,7 @@
#include "asm/assembler.inline.hpp"
#include "opto/c2_MacroAssembler.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/output.hpp"
#include "opto/subnode.hpp"
#include "runtime/stubRoutines.hpp"

@@ -241,6 +242,35 @@ void C2_MacroAssembler::string_indexof_char(Register str1, Register cnt1,

typedef void (MacroAssembler::* load_chr_insn)(Register rd, const Address &adr, Register temp);

void C2_MacroAssembler::emit_entry_barrier_stub(C2EntryBarrierStub* stub) {
// make guard value 4-byte aligned so that it can be accessed by atomic instructions on riscv
int alignment_bytes = align(4);

bind(stub->slow_path());

int32_t _offset = 0;
movptr_with_offset(t0, StubRoutines::riscv::method_entry_barrier(), _offset);
jalr(ra, t0, _offset);
j(stub->continuation());

bind(stub->guard());
relocate(entry_guard_Relocation::spec());
assert(offset() % 4 == 0, "bad alignment");
emit_int32(0); // nmethod guard value
// make sure the stub has a fixed code size
if (alignment_bytes == 2) {
assert(UseRVC, "bad alignment");
c_nop();
} else {
assert(alignment_bytes == 0, "bad alignment");
nop();
}
}

int C2_MacroAssembler::entry_barrier_stub_size() {
return 8 * 4 + 4; // 4 bytes for alignment margin
}

// Search for needle in haystack and return index or -1
// x10: result
// x11: haystack
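A note on the fixed stub size used above: entry_barrier_stub_size() returns 8 * 4 + 4 bytes, which matches the emitted code if movptr_with_offset expands to five 4-byte instructions (an assumption based on its use elsewhere in the riscv port), followed by jalr, j and the 4-byte guard word; the remaining 4 bytes are the alignment margin. The padding inserted by align(4) at the top and the trailing nop always sum to 4 bytes, which is what keeps the stub size constant. A minimal standalone sketch of that padding rule (illustrative only, not HotSpot code):

#include <cassert>

// Model of the padding rule in emit_entry_barrier_stub(): align(4) is assumed
// to return the number of padding bytes it inserted (0, or 2 when RVC
// compressed instructions are in use), and the stub ends with a nop of the
// complementary size, so leading + trailing padding is always 4 bytes.
static int trailing_nop_bytes(int alignment_bytes) {
  return alignment_bytes == 2 ? 2   // c.nop
                              : 4;  // full-size nop
}

int main() {
  assert(0 + trailing_nop_bytes(0) == 4); // already aligned: pad with a 4-byte nop
  assert(2 + trailing_nop_bytes(2) == 4); // RVC case: pad with a 2-byte c.nop
  return 0;
}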
4 changes: 2 additions & 2 deletions src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.hpp
@@ -36,8 +36,8 @@
VectorRegister vrs,
bool is_latin, Label& DONE);
public:
void emit_entry_barrier_stub(C2EntryBarrierStub* stub) {}
static int entry_barrier_stub_size() { return 0; }
void emit_entry_barrier_stub(C2EntryBarrierStub* stub);
static int entry_barrier_stub_size();

void string_compare(Register str1, Register str2,
Register cnt1, Register cnt2, Register result,
8 changes: 2 additions & 6 deletions src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp
@@ -157,12 +157,8 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
__ j(done);

__ bind(runtime);
// save the live input values
RegSet saved = RegSet::of(pre_val);
if (tosca_live) { saved += RegSet::of(x10); }
if (obj != noreg) { saved += RegSet::of(obj); }

__ push_reg(saved, sp);
__ push_call_clobbered_registers();

if (expand_call) {
assert(pre_val != c_rarg1, "smashed arg");
@@ -171,7 +167,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
__ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
}

__ pop_reg(saved, sp);
__ pop_call_clobbered_registers();

__ bind(done);

104 changes: 85 additions & 19 deletions src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp
@@ -178,38 +178,104 @@ void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
__ sd(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset())));
}

void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
static volatile uint32_t _patching_epoch = 0;

address BarrierSetAssembler::patching_epoch_addr() {
return (address)&_patching_epoch;
}

void BarrierSetAssembler::increment_patching_epoch() {
Atomic::inc(&_patching_epoch);
}

void BarrierSetAssembler::clear_patching_epoch() {
_patching_epoch = 0;
}

void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

if (bs_nm == NULL) {
return;
}

// RISCV atomic operations require that the memory address be naturally aligned.
__ align(4);
Label local_guard;
NMethodPatchingType patching_type = nmethod_patching_type();

Label skip, guard;
Address thread_disarmed_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset()));
if (slow_path == NULL) {
guard = &local_guard;

__ lwu(t0, guard);
// RISCV atomic operations require that the memory address be naturally aligned.
__ align(4);
}

// Subsequent loads of oops must occur after load of guard value.
// BarrierSetNMethod::disarm sets guard with release semantics.
__ membar(MacroAssembler::LoadLoad);
__ lwu(t1, thread_disarmed_addr);
__ beq(t0, t1, skip);
__ lwu(t0, *guard);

switch (patching_type) {
case NMethodPatchingType::conc_data_patch:
// Subsequent loads of oops must occur after load of guard value.
// BarrierSetNMethod::disarm sets guard with release semantics.
__ membar(MacroAssembler::LoadLoad); // fall through to stw_instruction_and_data_patch
case NMethodPatchingType::stw_instruction_and_data_patch:
{
// With STW patching, no data or instructions are updated concurrently,
// which means there isn't really any need for fencing of either
// data or instruction modifications happening concurrently. The
// instruction patching is synchronized with global icache_flush() by
// the write hart on riscv. So here we can do a plain conditional
// branch with no fencing.
Address thread_disarmed_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset()));
__ lwu(t1, thread_disarmed_addr);
break;
}
case NMethodPatchingType::conc_instruction_and_data_patch:
{
// If we patch code we need both a code patching and a loadload
// fence. It's not super cheap, so we use a global epoch mechanism
// to hide them in a slow path.
// The high level idea of the global epoch mechanism is to detect
// when any thread has performed the required fencing, after the
// last nmethod was disarmed. This implies that the required
// fencing has been performed for all preceding nmethod disarms
// as well. Therefore, we do not need any further fencing.
__ la(t1, ExternalAddress((address)&_patching_epoch));
// Embed an artificial data dependency to order the guard load
// before the epoch load.
__ srli(ra, t0, 32);
__ orr(t1, t1, ra);
// Read the global epoch value.
__ lwu(t1, t1);
// Combine the guard value (low order) with the epoch value (high order).
__ slli(t1, t1, 32);
__ orr(t0, t0, t1);
// Compare the global values with the thread-local values
Address thread_disarmed_and_epoch_addr(xthread, in_bytes(bs_nm->thread_disarmed_offset()));
__ ld(t1, thread_disarmed_and_epoch_addr);
break;
}
default:
ShouldNotReachHere();
}

int32_t offset = 0;
__ movptr_with_offset(t0, StubRoutines::riscv::method_entry_barrier(), offset);
__ jalr(ra, t0, offset);
__ j(skip);
if (slow_path == NULL) {
Label skip_barrier;
__ beq(t0, t1, skip_barrier);

__ bind(guard);
int32_t offset = 0;
__ movptr_with_offset(t0, StubRoutines::riscv::method_entry_barrier(), offset);
__ jalr(ra, t0, offset);
__ j(skip_barrier);

assert(__ offset() % 4 == 0, "bad alignment");
__ emit_int32(0); // nmethod guard value. Skipped over in common case.
__ bind(local_guard);

__ bind(skip);
assert(__ offset() % 4 == 0, "bad alignment");
__ emit_int32(0); // nmethod guard value. Skipped over in common case.
__ bind(skip_barrier);
} else {
__ beq(t0, t1, *continuation);
__ j(*slow_path);
__ bind(*continuation);
}
}

void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
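For the conc_instruction_and_data_patch case above, the emitted code packs the 32-bit nmethod guard into the low half of a register and the global patching epoch into the high half, then checks both against a single 64-bit thread-local word with one ld/beq. A standalone model of that value layout (names are illustrative, not HotSpot's):

#include <cassert>
#include <cstdint>

// Model (not HotSpot code) of the combined guard/epoch compare: the fast path
// is taken only when the nmethod's guard matches the thread's disarmed value
// AND the thread has already fenced at the current patching epoch.
static uint64_t pack(uint32_t guard, uint32_t epoch) {
  return (static_cast<uint64_t>(epoch) << 32) | guard;
}

int main() {
  const uint32_t disarmed_guard = 0;  // assumed disarmed value
  const uint32_t epoch = 5;           // current global patching epoch
  const uint64_t thread_word = pack(disarmed_guard, epoch);

  assert(pack(disarmed_guard, epoch)     == thread_word); // fast path
  assert(pack(disarmed_guard, epoch + 1) != thread_word); // stale epoch: take slow path
  assert(pack(1 /* armed */,  epoch)     != thread_word); // armed nmethod: take slow path
  return 0;
}

On a mismatch, the slow path (the StubRoutines::riscv::method_entry_barrier() call) presumably performs the instruction-stream fence and refreshes the thread-local word, which is where increment_patching_epoch() and friends would come into play; that wiring is not visible in the excerpted hunks.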
15 changes: 13 additions & 2 deletions src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.hpp
@@ -32,6 +32,12 @@
#include "memory/allocation.hpp"
#include "oops/access.hpp"

enum class NMethodPatchingType {
stw_instruction_and_data_patch,
conc_instruction_and_data_patch,
conc_data_patch
};

class BarrierSetAssembler: public CHeapObj<mtGC> {
private:
void incr_allocated_bytes(MacroAssembler* masm,
@@ -63,9 +69,14 @@ class BarrierSetAssembler: public CHeapObj<mtGC> {

virtual void barrier_stubs_init() {}

virtual void nmethod_entry_barrier(MacroAssembler* masm);
virtual NMethodPatchingType nmethod_patching_type() { return NMethodPatchingType::stw_instruction_and_data_patch; }

virtual void nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard);
virtual void c2i_entry_barrier(MacroAssembler* masm);
virtual ~BarrierSetAssembler() {}

static address patching_epoch_addr();
static void clear_patching_epoch();
static void increment_patching_epoch();
};

#endif // CPU_RISCV_GC_SHARED_BARRIERSETASSEMBLER_RISCV_HPP
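The new NMethodPatchingType enum and the nmethod_patching_type() virtual let each collector's BarrierSetAssembler pick which of the three barrier flavours nmethod_entry_barrier() emits; the base class defaults to stw_instruction_and_data_patch. Purely for illustration (no such class is part of this change, and the class name is made up), a subclass that patches oop constants concurrently but stops the world for instruction patching might override it like this:

// Hypothetical subclass, shown only to illustrate how the enum is consumed;
// the class name and the choice of conc_data_patch are assumptions.
class MyConcDataBarrierSetAssembler : public BarrierSetAssembler {
public:
  virtual NMethodPatchingType nmethod_patching_type() {
    // Selects the variant of nmethod_entry_barrier() that emits a LoadLoad
    // fence after the guard load, ordering subsequent oop loads.
    return NMethodPatchingType::conc_data_patch;
  }
};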
