Skip to content

Commit f0374a0

Browse files
committed
8337987: Relocate jfr and throw_exception stubs from StubGenerator to SharedRuntime
Reviewed-by: fyang, kvn, yzheng
1 parent 15b20cb commit f0374a0

Some content is hidden

Large commits have some of their content hidden by default. Use the search box below to find content that may be hidden.

43 files changed

+1392
-1545
lines changed

src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -744,7 +744,7 @@ void MacroAssembler::reserved_stack_check() {
744744
// We have already removed our own frame.
745745
// throw_delayed_StackOverflowError will think that it's been
746746
// called by our caller.
747-
lea(rscratch1, RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry()));
747+
lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
748748
br(rscratch1);
749749
should_not_reach_here();
750750

src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -120,7 +120,7 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
120120
__ ldr(rscratch1,Address(method, entry_offset));
121121
__ br(rscratch1);
122122
__ bind(L_no_such_method);
123-
__ far_jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
123+
__ far_jump(RuntimeAddress(SharedRuntime::throw_AbstractMethodError_entry()));
124124
}
125125

126126
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
@@ -451,7 +451,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
451451
jump_from_method_handle(_masm, rmethod, temp1, for_compiler_entry);
452452
if (iid == vmIntrinsics::_linkToInterface) {
453453
__ bind(L_incompatible_class_change_error);
454-
__ far_jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
454+
__ far_jump(RuntimeAddress(SharedRuntime::throw_IncompatibleClassChangeError_entry()));
455455
}
456456
}
457457
}

src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

Lines changed: 196 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,12 @@
6666

6767
#define __ masm->
6868

69+
#ifdef PRODUCT
70+
#define BLOCK_COMMENT(str) /* nothing */
71+
#else
72+
#define BLOCK_COMMENT(str) __ block_comment(str)
73+
#endif
74+
6975
const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
7076

7177
// FIXME -- this is used by C1
@@ -2764,3 +2770,193 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const cha
27642770
// frame_size_words or bytes??
27652771
return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
27662772
}
2773+
2774+
// Continuation point for throwing of implicit exceptions that are
2775+
// not handled in the current activation. Fabricates an exception
2776+
// oop and initiates normal exception dispatching in this
2777+
// frame. Since we need to preserve callee-saved values (currently
2778+
// only for C2, but done for C1 as well) we need a callee-saved oop
2779+
// map and therefore have to make these stubs into RuntimeStubs
2780+
// rather than BufferBlobs. If the compiler needs all registers to
2781+
// be preserved between the fault point and the exception handler
2782+
// then it must assume responsibility for that in
2783+
// AbstractCompiler::continuation_for_implicit_null_exception or
2784+
// continuation_for_implicit_division_by_zero_exception. All other
2785+
// implicit exceptions (e.g., NullPointerException or
2786+
// AbstractMethodError on entry) are either at call sites or
2787+
// otherwise assume that stack unwinding will be initiated, so
2788+
// caller saved registers were assumed volatile in the compiler.
2789+
2790+
// Generates a RuntimeStub that fabricates an exception by calling runtime_entry
// with the current thread as its only argument, then dispatches to the shared
// forward_exception stub. Returns the new RuntimeStub (never returns to the caller
// frame at runtime; the stub ends in a jump).
RuntimeStub* SharedRuntime::generate_throw_exception(const char* name, address runtime_entry) {
  // Information about frame layout at time of blocking runtime call.
  // Note that we only have to preserve callee-saved registers since
  // the compilers are responsible for supplying a continuation point
  // if they expect all registers to be preserved.
  // n.b. aarch64 asserts that frame::arg_reg_save_area_bytes == 0
  enum layout {
    rfp_off = 0,
    rfp_off2,
    return_off,
    return_off2,
    framesize // inclusive of return address
  };

  // Sizes in bytes for the stub's instruction and relocation buffers.
  int insts_size = 512;
  int locs_size = 64;

  ResourceMark rm;
  const char* timer_msg = "SharedRuntime generate_throw_exception";
  TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));

  CodeBuffer code(name, insts_size, locs_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address start = __ pc();

  // This is an inlined and slightly modified version of call_VM
  // which has the ability to fetch the return PC out of
  // thread-local storage and also sets up last_Java_sp slightly
  // differently than the real call_VM

  __ enter(); // Save FP and LR before call

  // framesize is counted in 32-bit slots; the frame must be 16-byte aligned.
  assert(is_even(framesize/2), "sp not 16-byte aligned");

  // lr and fp are already in place
  // Drop sp below rfp by the frame size minus the two pairs enter() pushed.
  __ sub(sp, rfp, ((uint64_t)framesize-4) << LogBytesPerInt); // prolog

  int frame_complete = __ pc() - start;

  // Set up last_Java_sp and last_Java_fp
  address the_pc = __ pc();
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);

  // Runtime entries take the current thread as their single argument.
  __ mov(c_rarg0, rthread);
  BLOCK_COMMENT("call runtime_entry");
  __ mov(rscratch1, runtime_entry);
  __ blr(rscratch1);

  // Generate oop map
  // No oops live in the frame; the map records only the frame shape at the call pc.
  OopMap* map = new OopMap(framesize, 0);

  oop_maps->add_gc_map(the_pc - start, map);

  __ reset_last_Java_frame(true);

  // Reinitialize the ptrue predicate register, in case the external runtime
  // call clobbers ptrue reg, as we may return to SVE compiled code.
  __ reinitialize_ptrue();

  __ leave();

  // check for pending exceptions
#ifdef ASSERT
  Label L;
  // The runtime call must have installed a pending exception on the thread.
  __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
  __ cbnz(rscratch1, L);
  __ should_not_reach_here();
  __ bind(L);
#endif // ASSERT
  // Hand off to the common exception-forwarding stub; does not return here.
  __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // codeBlob framesize is in words (not VMRegImpl::slot_size)
  RuntimeStub* stub =
    RuntimeStub::new_runtime_stub(name,
                                  &code,
                                  frame_complete,
                                  (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                  oop_maps, false);
  return stub;
}
2872+
2873+
#if INCLUDE_JFR
2874+
2875+
// Shared prologue for the JFR stubs: publish the current frame as the
// last Java frame and pass the thread as the runtime call's first argument.
static void jfr_prologue(address the_pc, MacroAssembler* masm, Register thread) {
  __ set_last_Java_frame(sp, rfp, the_pc, rscratch1);
  __ mov(c_rarg0, thread);
}
2879+
2880+
// The handle is dereferenced through a load barrier.
2881+
// Shared epilogue for the JFR stubs: clear the last Java frame
// (true => also clear the saved frame pointer).
static void jfr_epilogue(MacroAssembler* masm) {
  __ reset_last_Java_frame(true);
}
2884+
2885+
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
2886+
// It returns a jobject handle to the event writer.
2887+
// The handle is dereferenced and the return value is the event writer oop.
2888+
// For c2: c_rarg0 is junk, call to runtime to write a checkpoint.
// It returns a jobject handle to the event writer.
// The handle is dereferenced and the return value is the event writer oop.
RuntimeStub* SharedRuntime::generate_jfr_write_checkpoint() {
  // Minimal frame: the rfp pair and the return-address pair pushed by enter().
  enum layout {
    rfp_off,
    rfp_off2,
    lr_off,
    lr_off2,
    framesize // inclusive of return address
  };

  const int code_size = 1024;  // instruction buffer bytes
  const int reloc_size = 64;   // relocation buffer bytes
  CodeBuffer code("jfr_write_checkpoint", code_size, reloc_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address stub_begin = __ pc();
  __ enter();
  int frame_complete = __ pc() - stub_begin;

  // Record the call pc so the oop map matches the runtime-call site.
  address frame_pc = __ pc();
  jfr_prologue(frame_pc, masm, rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::write_checkpoint), 1);
  jfr_epilogue(masm);
  // Turn the returned jobject handle into the event writer oop.
  __ resolve_global_jobject(r0, rscratch1, rscratch2);
  __ leave();
  __ ret(lr);

  OopMap* frame_map = new OopMap(framesize, 1); // rfp
  oop_maps->add_gc_map(frame_pc - stub_begin, frame_map);

  // codeBlob framesize is in words (not VMRegImpl::slot_size)
  return RuntimeStub::new_runtime_stub("jfr_write_checkpoint", &code, frame_complete,
                                       (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                       oop_maps, false);
}
2923+
2924+
// For c2: call to return a leased buffer.
2925+
// For c2: call to return a leased buffer.
RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
  // Minimal frame: the rfp pair and the return-address pair pushed by enter().
  enum layout {
    rfp_off,
    rfp_off2,
    lr_off,
    lr_off2,
    framesize // inclusive of return address
  };

  const int code_size = 1024;  // instruction buffer bytes
  const int reloc_size = 64;   // relocation buffer bytes

  CodeBuffer code("jfr_return_lease", code_size, reloc_size);
  OopMapSet* oop_maps = new OopMapSet();
  MacroAssembler* masm = new MacroAssembler(&code);

  address stub_begin = __ pc();
  __ enter();
  int frame_complete = __ pc() - stub_begin;

  // Record the call pc so the oop map matches the runtime-call site.
  address frame_pc = __ pc();
  jfr_prologue(frame_pc, masm, rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, JfrIntrinsicSupport::return_lease), 1);
  jfr_epilogue(masm);

  __ leave();
  __ ret(lr);

  OopMap* frame_map = new OopMap(framesize, 1); // rfp
  oop_maps->add_gc_map(frame_pc - stub_begin, frame_map);

  // codeBlob framesize is in words (not VMRegImpl::slot_size)
  return RuntimeStub::new_runtime_stub("jfr_return_lease", &code, frame_complete,
                                       (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                       oop_maps, false);
}
2961+
2962+
#endif // INCLUDE_JFR

0 commit comments

Comments (0)