
Commit

8260372: [PPC64] Add support for JDK-8210498 and JDK-8222841
Reviewed-by: mdoerr, goetz
Quaffel authored and TheRealMDoerr committed Feb 9, 2021
1 parent b38d5be commit 906faca
Showing 10 changed files with 264 additions and 10 deletions.
3 changes: 3 additions & 0 deletions src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
@@ -81,6 +81,9 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_by

std(return_pc, _abi0(lr), R1_SP); // SP->lr = return_pc
push_frame(frame_size_in_bytes, R0); // SP -= frame_size_in_bytes

BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(this, R20);
}


70 changes: 70 additions & 0 deletions src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp
@@ -23,12 +23,16 @@
*
*/

#include "nativeInst_ppc.hpp"
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "interpreter/interp_masm.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"

#define __ masm->

@@ -125,3 +129,69 @@ void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Re
__ clrrdi(dst, obj, JNIHandles::weak_tag_size);
__ ld(dst, 0, dst); // Resolve (untagged) jobject.
}

void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Register tmp) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm == nullptr) {
return;
}

assert_different_registers(tmp, R0);

// Load stub address using toc (fixed instruction size, unlike load_const_optimized)
__ calculate_address_from_global_toc(tmp, StubRoutines::ppc::nmethod_entry_barrier(),
true, true, false); // 2 instructions
__ mtctr(tmp);

// This is a compound instruction. Patching support is provided by NativeMovRegMem.
// Actual patching is done in (platform-specific part of) BarrierSetNMethod.
__ load_const32(tmp, 0 /* Value is patched */); // 2 instructions

__ lwz(R0, in_bytes(bs_nm->thread_disarmed_offset()), R16_thread);
__ cmpw(CCR0, R0, tmp);

__ bnectrl(CCR0);

// Oops may have been changed; exploiting isync semantics (used as acquire) to make those updates observable.
__ isync();
}

void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler *masm, Register tmp1, Register tmp2, Register tmp3) {
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm == nullptr) {
return;
}

assert_different_registers(tmp1, tmp2, tmp3);

Register tmp1_class_loader_data = tmp1;

Label bad_call, skip_barrier;

// Fast path: If no method is given, the call is definitely bad.
__ cmpdi(CCR0, R19_method, 0);
__ beq(CCR0, bad_call);

// Load class loader data to determine whether the method's holder is concurrently unloading.
__ load_method_holder(tmp1, R19_method);
__ ld(tmp1_class_loader_data, in_bytes(InstanceKlass::class_loader_data_offset()), tmp1);

// Fast path: If class loader is strong, the holder cannot be unloaded.
__ ld(tmp2, in_bytes(ClassLoaderData::keep_alive_offset()), tmp1_class_loader_data);
__ cmpdi(CCR0, tmp2, 0);
__ bne(CCR0, skip_barrier);

// Class loader is weak. Determine whether the holder is still alive.
__ ld(tmp2, in_bytes(ClassLoaderData::holder_offset()), tmp1_class_loader_data);
__ resolve_weak_handle(tmp2, tmp1, tmp3, MacroAssembler::PreservationLevel::PRESERVATION_FRAME_LR_GP_FP_REGS);
__ cmpdi(CCR0, tmp2, 0);
__ bne(CCR0, skip_barrier);

__ bind(bad_call);

__ calculate_address_from_global_toc(tmp1, SharedRuntime::get_handle_wrong_method_stub(), true, true, false);
__ mtctr(tmp1);
__ bctr();

__ bind(skip_barrier);
}
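For orientation, the nmethod_entry_barrier() sequence above reduces to a simple guard comparison: the patchable load_const32 holds the nmethod's guard value, and the stub is only entered when the value loaded from the thread's disarmed field differs from it. A minimal C++ sketch of that check follows, using hypothetical names (call_entry_barrier_stub stands in for the stub reached via bnectrl); it is not HotSpot code.

// Minimal sketch, not HotSpot code: the logic behind the emitted barrier sequence.
extern void call_entry_barrier_stub();  // hypothetical stand-in for the stub at StubRoutines::ppc::nmethod_entry_barrier()

static inline void conceptual_nmethod_entry_barrier(int guard_value,             // patched load_const32 immediate
                                                    int thread_disarmed_value) { // lwz from R16_thread
  if (thread_disarmed_value != guard_value) {
    call_entry_barrier_stub();  // barrier is armed: let the stub heal oops or request deoptimization
  }
  // The trailing isync provides acquire semantics so oops healed while the barrier was armed become visible here.
}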
3 changes: 3 additions & 0 deletions src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.hpp
@@ -57,6 +57,9 @@ class BarrierSetAssembler: public CHeapObj<mtGC> {
Register obj, Register tmp, Label& slowpath);

virtual void barrier_stubs_init() {}

virtual void nmethod_entry_barrier(MacroAssembler* masm, Register tmp);
virtual void c2i_entry_barrier(MacroAssembler* masm, Register tmp1, Register tmp2, Register tmp3);
};

#endif // CPU_PPC_GC_SHARED_BARRIERSETASSEMBLER_PPC_HPP
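Both hooks are declared virtual, so a collector-specific assembler can refine them. A hypothetical subclass sketch (not part of the commit) illustrates the extension point:

// Hypothetical subclass, sketch only: how a GC-specific assembler could hook in.
class MyGCBarrierSetAssembler : public BarrierSetAssembler {
public:
  virtual void nmethod_entry_barrier(MacroAssembler* masm, Register tmp) {
    // Emit the default guard-value check first ...
    BarrierSetAssembler::nmethod_entry_barrier(masm, tmp);
    // ... then any collector-specific instrumentation could follow here.
  }
};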
99 changes: 95 additions & 4 deletions src/hotspot/cpu/ppc/gc/shared/barrierSetNMethod_ppc.cpp
@@ -23,18 +23,109 @@
*/

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/nmethod.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "utilities/debug.hpp"

class NativeNMethodBarrier: public NativeInstruction {

address get_barrier_start_address() const {
return NativeInstruction::addr_at(0);
}

NativeMovRegMem* get_patchable_instruction_handle() const {
// Endianness is handled by NativeMovRegMem
return reinterpret_cast<NativeMovRegMem*>(get_barrier_start_address() + 3 * 4);
}

public:
int get_guard_value() const {
// Retrieve the guard value (naming of 'offset' function is misleading).
return get_patchable_instruction_handle()->offset();
}

void release_set_guard_value(int value) {
// Patching is not atomic.
// Stale observations of the "armed" state are okay, as invoking the barrier stub in that case has no
// unwanted side effects. Disarming is thus a non-critical operation.
// The visibility of the "armed" state must be ensured by safepoint/handshake.

OrderAccess::release(); // Release modified oops

// Set the guard value (naming of 'offset' function is misleading).
get_patchable_instruction_handle()->set_offset(value);
}

void verify() const {
// Although it's possible to just validate the to-be-patched instruction,
// all instructions are validated to ensure that the barrier is hit properly - especially since
// the pattern used in load_const32 is quite a common one.

uint* current_instruction = reinterpret_cast<uint*>(get_barrier_start_address());

// calculate_address_from_global_toc (compound instruction)
verify_op_code_manually(current_instruction, MacroAssembler::is_addis(*current_instruction));
verify_op_code_manually(current_instruction, MacroAssembler::is_addi(*current_instruction));

verify_op_code_manually(current_instruction, MacroAssembler::is_mtctr(*current_instruction));

get_patchable_instruction_handle()->verify();
current_instruction += 2;

verify_op_code(current_instruction, Assembler::LWZ_OPCODE);

// cmpw (mnemonic)
verify_op_code(current_instruction, Assembler::CMP_OPCODE);

// bnectrl (mnemonic) (weak check; not checking the exact type)
verify_op_code(current_instruction, Assembler::BCCTR_OPCODE);

verify_op_code(current_instruction, Assembler::ISYNC_OPCODE);
}

private:
static void verify_op_code_manually(uint*& current_instruction, bool result) {
assert(result, "illegal instruction sequence for nmethod entry barrier");
current_instruction++;
}
static void verify_op_code(uint*& current_instruction, uint expected,
unsigned int mask = 63u << Assembler::OPCODE_SHIFT) {
// Masking both the current instruction and the opcode, as some opcodes in Assembler contain additional information
// to uniquely identify simplified mnemonics.
// As long as the caller doesn't provide a custom mask, that additional information is discarded.
verify_op_code_manually(current_instruction, (*current_instruction & mask) == (expected & mask));
}
};

static NativeNMethodBarrier* get_nmethod_barrier(nmethod* nm) {
address barrier_address = nm->code_begin() + nm->frame_complete_offset() + (-9 * 4);

auto barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
debug_only(barrier->verify());
return barrier;
}

void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
ShouldNotReachHere();
// As PPC64 always has a valid back chain (unlike other platforms), the stub can simply pop the frame.
// Thus, there's nothing to do here.
}

void BarrierSetNMethod::disarm(nmethod* nm) {
ShouldNotReachHere();
if (!supports_entry_barrier(nm)) {
return;
}

NativeNMethodBarrier* barrier = get_nmethod_barrier(nm);
barrier->release_set_guard_value(disarmed_value());
}

bool BarrierSetNMethod::is_armed(nmethod* nm) {
ShouldNotReachHere();
return false;
if (!supports_entry_barrier(nm)) {
return false;
}

NativeNMethodBarrier* barrier = get_nmethod_barrier(nm);
return barrier->get_guard_value() != disarmed_value();
}
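For orientation, the verify() walk above and the fixed -9 * 4 offset in get_nmethod_barrier() both describe the same nine-instruction sequence, which ends at the frame-complete point. The following sketch of that layout is an editorial aid, not part of the commit; the instruction counts follow the "2 instructions" comments in barrierSetAssembler_ppc.cpp, and the lis/ori split of load_const32 is an assumption.

// Sketch of the barrier layout implied by verify(); each PPC64 instruction is 4 bytes.
//   addis, addi  calculate_address_from_global_toc(stub address)                  2 instructions
//   mtctr        stub address -> CTR                                              1 instruction
//   lis, ori     load_const32(guard value), patched via NativeMovRegMem at 3 * 4  2 instructions
//   lwz          thread_disarmed value from R16_thread                            1 instruction
//   cmpw         guard vs. thread_disarmed value                                  1 instruction
//   bnectrl      call the stub if they differ                                     1 instruction
//   isync        acquire barrier                                                  1 instruction
static const int nmethod_barrier_instructions = 9;
static const int nmethod_barrier_size_in_bytes = nmethod_barrier_instructions * 4;  // matches the -9 * 4 above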
2 changes: 1 addition & 1 deletion src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
@@ -3379,7 +3379,7 @@ int MacroAssembler::crc32_table_columns(Register table, Register tc0, Register t
assert(!VM_Version::has_vpmsumb(), "Vector version should be used instead!");

// Point to 4 byte folding tables (byte-reversed version for Big Endian)
// Layout: See StubRoutines::generate_crc_constants.
// Layout: See StubRoutines::ppc::generate_crc_constants.
#ifdef VM_LITTLE_ENDIAN
const int ix0 = 3 * CRC32_TABLE_SIZE;
const int ix1 = 2 * CRC32_TABLE_SIZE;
9 changes: 9 additions & 0 deletions src/hotspot/cpu/ppc/ppc.ad
@@ -1376,6 +1376,10 @@ void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
st->print("save return pc\n\t");
st->print("push frame %ld\n\t", -framesize);
}

if (C->stub_function() == NULL) {
st->print("nmethod entry barrier\n\t");
}
}
#endif

@@ -1529,6 +1533,11 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
__ std(return_pc, _abi0(lr), callers_sp);
}

if (C->stub_function() == NULL) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(&_masm, push_frame_temp);
}

C->output()->set_frame_complete(cbuf.insts_size());
}

10 changes: 9 additions & 1 deletion src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -1301,9 +1301,13 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
c2i_no_clinit_check_entry = __ pc();
}

BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);

gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);

return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry,
c2i_no_clinit_check_entry);
}

// An oop arg. Must pass a handle not the oop itself.
@@ -1973,6 +1977,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
__ mr(r_callers_sp, R1_SP); // Remember frame pointer.
__ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame.

BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
bs->nmethod_entry_barrier(masm, r_temp_1);

frame_done_pc = (intptr_t)__ pc();

__ verify_thread();
59 changes: 57 additions & 2 deletions src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
@@ -27,6 +27,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/instanceOop.hpp"
@@ -3548,6 +3549,54 @@
return start;
}

address generate_nmethod_entry_barrier() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier");

address stub_address = __ pc();

int nbytes_save = MacroAssembler::num_volatile_regs * BytesPerWord;
__ save_volatile_gprs(R1_SP, -nbytes_save, true);

// The link register points to an instruction in the prologue of the guarded nmethod.
// As the stub requires one layer of indirection (the argument is of type address* and not address),
// passing the link register's value directly doesn't work.
// Since we have to save the link register on the stack anyway, we calculate the corresponding stack address
// and pass that one instead.
__ add(R3_ARG1, _abi0(lr), R1_SP);

__ save_LR_CR(R0);
__ push_frame_reg_args(nbytes_save, R0);

__ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetNMethod::nmethod_stub_entry_barrier));
__ mr(R0, R3_RET);

__ pop_frame();
__ restore_LR_CR(R3_RET /* used as tmp register */);
__ restore_volatile_gprs(R1_SP, -nbytes_save, true);

__ cmpdi(CCR0, R0, 0);

// Return to prologue if no deoptimization is required (beqlr)
__ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintIsTaken);

// Deoptimization required.
// For actually handling the deoptimization, the 'wrong method stub' is invoked.
__ load_const_optimized(R0, SharedRuntime::get_handle_wrong_method_stub());
__ mtctr(R0);

// Pop the frame built in the prologue.
__ pop_frame();

// Restore link register. Required as the 'wrong method stub' needs the caller's frame
// to properly deoptimize this method (e.g. by re-resolving the call site for compiled methods).
// This method's prologue is aborted.
__ restore_LR_CR(R0);

__ bctr();
return stub_address;
}

#ifdef VM_LITTLE_ENDIAN
// The following Base64 decode intrinsic is based on an algorithm outlined
// in here:
@@ -4462,13 +4511,13 @@

// CRC32 Intrinsics.
if (UseCRC32Intrinsics) {
StubRoutines::_crc_table_adr = StubRoutines::generate_crc_constants(REVERSE_CRC32_POLY);
StubRoutines::_crc_table_adr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32_POLY);
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes(false);
}

// CRC32C Intrinsics.
if (UseCRC32CIntrinsics) {
StubRoutines::_crc32c_table_addr = StubRoutines::generate_crc_constants(REVERSE_CRC32C_POLY);
StubRoutines::_crc32c_table_addr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32C_POLY);
StubRoutines::_updateBytesCRC32C = generate_CRC32_updateBytes(true);
}

@@ -4494,6 +4543,12 @@
// support for verify_oop (must happen after universe_init)
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

// nmethod entry barriers for concurrent class unloading
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) {
StubRoutines::ppc::_nmethod_entry_barrier = generate_nmethod_entry_barrier();
}

// arraycopy stubs used by compilers
generate_arraycopy_stubs();

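The runtime call inside generate_nmethod_entry_barrier() goes to the shared BarrierSetNMethod::nmethod_stub_entry_barrier, which takes an address* so it can inspect (and, if needed, redirect) the return address saved by the prologue; that is the indirection the link-register comment refers to. A minimal sketch of the control flow around that call follows; the helpers continue_in_prologue and tail_call are hypothetical stand-ins for the beqlr and the pop_frame/restore_LR_CR/bctr path, and the int return convention (0 means no deoptimization) is inferred from the cmpdi against zero.

// Minimal sketch, not HotSpot code: the decision made after the runtime call in the entry barrier stub.
extern void continue_in_prologue();   // hypothetical: return into the guarded nmethod (beqlr)
extern void tail_call(address stub);  // hypothetical: pop_frame, restore LR, bctr

static void entry_barrier_stub_logic(address* return_address_ptr) {
  // return_address_ptr is the stack slot holding the saved LR of the guarded nmethod's prologue.
  int deoptimize = BarrierSetNMethod::nmethod_stub_entry_barrier(return_address_ptr);
  if (deoptimize == 0) {
    continue_in_prologue();  // barrier healed, resume the nmethod
  } else {
    // The nmethod must not be entered; re-resolve the call using the caller's frame.
    tail_call(SharedRuntime::get_handle_wrong_method_stub());
  }
}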
12 changes: 11 additions & 1 deletion src/hotspot/cpu/ppc/stubRoutines_ppc.hpp
@@ -46,6 +46,16 @@ enum platform_dependent_constants {
#define CRC32_UNROLL_FACTOR 2048
#define CRC32_UNROLL_FACTOR2 8

static address generate_crc_constants(juint reverse_poly);
class ppc {
friend class StubGenerator;

private:
static address _nmethod_entry_barrier;

public:
static address nmethod_entry_barrier();

static address generate_crc_constants(juint reverse_poly);
};

#endif // CPU_PPC_STUBROUTINES_PPC_HPP
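The tenth changed file, presumably src/hotspot/cpu/ppc/stubRoutines_ppc.cpp, is not reproduced in this excerpt. Based on the declarations above, its addition is likely limited to defining the new field and accessor; the following is an assumed sketch, not the commit's actual contents (only the names come from the header above).

// Assumed counterpart in stubRoutines_ppc.cpp (sketch only).
address StubRoutines::ppc::_nmethod_entry_barrier = nullptr;

address StubRoutines::ppc::nmethod_entry_barrier() {
  return _nmethod_entry_barrier;
}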
