|
23 | 23 | */ |
24 | 24 |
|
25 | 25 | #include "precompiled.hpp" |
| 26 | +#include "code/nativeInst.hpp" |
| 27 | +#include "gc/shared/barrierSetAssembler.hpp" |
26 | 28 | #include "gc/shared/barrierSetNMethod.hpp" |
| 29 | +#include "logging/log.hpp" |
| 30 | +#include "memory/resourceArea.hpp" |
| 31 | +#include "runtime/frame.inline.hpp" |
| 32 | +#include "runtime/javaThread.hpp" |
| 33 | +#include "runtime/sharedRuntime.hpp" |
| 34 | +#include "runtime/registerMap.hpp" |
| 35 | +#include "utilities/align.hpp" |
27 | 36 | #include "utilities/debug.hpp" |
28 | 37 |
|
| 38 | +// The constant below reflects the size of the barrier |
| 39 | +// in barrierSetAssembler_arm.cpp |
| 40 | +static const int entry_barrier_bytes = 9 * NativeInstruction::size(); |
| 41 | + |
| 42 | +class NativeNMethodBarrier: public NativeInstruction { |
| 43 | + address instruction_address() const { return addr_at(0); } |
| 44 | + |
| 45 | + int *guard_addr() const { |
| 46 | + // Last instruction in a barrier |
| 47 | + return reinterpret_cast<int*>(instruction_address() + entry_barrier_bytes - wordSize); |
| 48 | + } |
| 49 | + |
| 50 | +public: |
| 51 | + int get_value() { |
| 52 | + return Atomic::load_acquire(guard_addr()); |
| 53 | + } |
| 54 | + |
| 55 | + void set_value(int value) { |
| 56 | + Atomic::release_store(guard_addr(), value); |
| 57 | + } |
| 58 | + |
| 59 | + void verify() const; |
| 60 | +}; |
| 61 | + |
| 62 | +// Check the first instruction of the nmethod entry barrier |
| 63 | +// to make sure that the offsets are not skewed. |
| 64 | +void NativeNMethodBarrier::verify() const { |
| 65 | + NativeInstruction *ni = (NativeInstruction *) instruction_address(); |
| 66 | + if (!ni->is_ldr()) { |
| 67 | + uint32_t *addr = (uint32_t *) ni; |
| 68 | + tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", (intptr_t) addr, (uint32_t) *addr); |
| 69 | + fatal("not an ldr instruction."); |
| 70 | + } |
| 71 | +} |
| 72 | + |
| 73 | +static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) { |
| 74 | + address barrier_address = nm->code_begin() + nm->frame_complete_offset() - entry_barrier_bytes; |
| 75 | + NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address); |
| 76 | + debug_only(barrier->verify()); |
| 77 | + return barrier; |
| 78 | +} |
| 79 | + |
/* We're called from an nmethod when we need to deoptimize it. We do
   this by throwing away the nmethod's frame and jumping to the
   ic_miss stub. This looks like there has been an IC miss at the
   entry of the nmethod, so we resolve the call, which will fall back
   to the interpreter if the nmethod has been unloaded. */
void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {

  // Saved register area we rewrite so the barrier stub returns into the
  // ic_miss stub with the caller's frame restored.
  typedef struct {
    intptr_t *sp; intptr_t *fp; address lr; address pc;
  } frame_pointers_t;

  // NOTE(review): the -5 offset assumes return_address_ptr points 5 words
  // above the saved {sp, fp, lr, pc} quad pushed by the barrier stub —
  // must match the stub's frame layout in the ARM assembler code; confirm
  // against BarrierSetAssembler/stub generator.
  frame_pointers_t *new_frame = (frame_pointers_t *)(return_address_ptr - 5);

  JavaThread *thread = JavaThread::current();
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame frame = thread->last_frame();

  // The top frame must be the nmethod whose barrier fired; walk up to its
  // caller, whose state we restore below.
  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
  assert(frame.cb() == nm, "must be");
  frame = frame.sender(&reg_map);

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

  // Discard the nmethod's frame: resume in the caller (sp/fp/lr) but with
  // pc redirected to the handle-wrong-method (ic_miss) stub, which
  // re-resolves the call.
  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}
32 | 118 |
|
33 | 119 | void BarrierSetNMethod::disarm(nmethod* nm) { |
34 | | - ShouldNotReachHere(); |
| 120 | + if (!supports_entry_barrier(nm)) { |
| 121 | + return; |
| 122 | + } |
| 123 | + |
| 124 | + // Disarms the nmethod guard emitted by BarrierSetAssembler::nmethod_entry_barrier. |
| 125 | + // Symmetric "LDR; DMB ISHLD" is in the nmethod barrier. |
| 126 | + NativeNMethodBarrier* barrier = native_nmethod_barrier(nm); |
| 127 | + barrier->set_value(disarmed_value()); |
| 128 | +} |
| 129 | + |
| 130 | +void BarrierSetNMethod::arm(nmethod* nm, int arm_value) { |
| 131 | + if (!supports_entry_barrier(nm)) { |
| 132 | + return; |
| 133 | + } |
| 134 | + |
| 135 | + NativeNMethodBarrier* barrier = native_nmethod_barrier(nm); |
| 136 | + barrier->set_value(arm_value); |
35 | 137 | } |
36 | 138 |
|
37 | 139 | bool BarrierSetNMethod::is_armed(nmethod* nm) { |
38 | | - ShouldNotReachHere(); |
39 | | - return false; |
| 140 | + if (!supports_entry_barrier(nm)) { |
| 141 | + return false; |
| 142 | + } |
| 143 | + |
| 144 | + NativeNMethodBarrier* barrier = native_nmethod_barrier(nm); |
| 145 | + return barrier->get_value() != disarmed_value(); |
40 | 146 | } |
0 commit comments