8253717: Relocate stack overflow code out of thread.hpp/cpp
Reviewed-by: rehn, dcubed, dholmes, stuefe
coleenp committed Oct 8, 2020
1 parent 782d45b commit 6bc4931
Showing 50 changed files with 773 additions and 661 deletions.
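The change repeated across these hunks is mechanical: zone-size accessors and guard-state constants that previously lived on JavaThread move to a dedicated StackOverflow class, and per-thread overflow state is reached through thread->stack_overflow_state(). As a reading aid, here is a minimal sketch of the interface the call sites below imply; it is a reconstruction, not the actual runtime header, and any member not visible in a hunk is an assumption.

// Sketch only: the StackOverflow surface implied by the call sites below.
#include <cstddef>
typedef unsigned char* address; // HotSpot-style alias, for the sketch

class StackOverflow {
 public:
  // Guard-state values referenced by the generated code below.
  enum StackGuardState {
    stack_guard_unused,                   // assumed member
    stack_guard_reserved_disabled,        // assumed member
    stack_guard_yellow_reserved_disabled, // visible in the hunks
    stack_guard_enabled                   // visible in the hunks
  };

  // Static zone sizes, formerly JavaThread::stack_*_zone_size().
  static size_t stack_guard_zone_size();
  static size_t stack_shadow_zone_size();

  // Per-thread state, formerly on JavaThread, now reached through
  // thread->stack_overflow_state().
  address stack_reserved_zone_base() const;
  size_t  stack_available(address cur_sp) const;
  bool    stack_guard_zone_unused() const;
  bool    stack_guards_enabled() const;
  void    disable_stack_yellow_reserved_zone();
  void    disable_stack_red_zone();
};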
@@ -4392,7 +4392,7 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
// was post-decremented.) Skip this address by starting at i=1, and
// touch a few more pages below. N.B. It is important to touch all
// the way down to and including i=StackShadowPages.
-for (int i = 0; i < (int)(JavaThread::stack_shadow_zone_size() / os::vm_page_size()) - 1; i++) {
+for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / os::vm_page_size()) - 1; i++) {
// this could be any sized move, but this can be a debugging crumb,
// so the bigger the better.
lea(tmp, Address(tmp, -os::vm_page_size()));
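As a standalone model of what this banging loop does: one store per page, walking down through the shadow zone so every page a future frame could occupy gets touched. Sizes are placeholders, and a heap buffer stands in for the stack; this is not VM code.

// Model of the shadow-zone bang; placeholder sizes, not os::vm_page_size().
#include <cstddef>
#include <vector>

const std::size_t kPageSize    = 4096; // placeholder
const std::size_t kShadowPages = 8;    // placeholder shadow-zone depth

void bang_shadow(char* top) {
  // Start at 1: the page at 'top' itself was already touched by the caller.
  for (std::size_t i = 1; i <= kShadowPages; i++) {
    top[-(std::ptrdiff_t)(i * kPageSize)] = 0; // one store per page
  }
}

int main() {
  std::vector<char> fake_stack((kShadowPages + 1) * kPageSize);
  bang_shadow(fake_stack.data() + fake_stack.size());
  return 0;
}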
@@ -1524,7 +1524,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

// Generate stack overflow check
if (UseStackBanging) {
-__ bang_stack_with_offset(JavaThread::stack_shadow_zone_size());
+__ bang_stack_with_offset(StackOverflow::stack_shadow_zone_size());
} else {
Unimplemented();
}
@@ -1893,7 +1893,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
Label reguard;
Label reguard_done;
__ ldrb(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
-__ cmpw(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
+__ cmpw(rscratch1, StackOverflow::stack_guard_yellow_reserved_disabled);
__ br(Assembler::EQ, reguard);
__ bind(reguard_done);

@@ -1120,7 +1120,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// an interpreter frame with greater than a page of locals, so each page
// needs to be checked. Only true for non-native.
if (UseStackBanging) {
-const int n_shadow_pages = JavaThread::stack_shadow_zone_size() / os::vm_page_size();
+const int n_shadow_pages = StackOverflow::stack_shadow_zone_size() / os::vm_page_size();
const int start_page = native_call ? n_shadow_pages : 1;
const int page_size = os::vm_page_size();
for (int pages = start_page; pages <= n_shadow_pages ; pages++) {
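The start_page choice above encodes the reasoning in the comment; modeled directly (illustrative names, not the VM's):

// Native frames have a fixed size, so only the farthest shadow page needs
// a bang; interpreted frames can hold more than a page of locals, so every
// page from 1..n_shadow_pages must be touched.
int first_page_to_bang(bool native_call, int n_shadow_pages) {
  return native_call ? n_shadow_pages : 1;
}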
@@ -1445,7 +1445,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
Label no_reguard;
__ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
__ ldrw(rscratch1, Address(rscratch1));
-__ cmp(rscratch1, (u1)JavaThread::stack_guard_yellow_reserved_disabled);
+__ cmp(rscratch1, (u1)StackOverflow::stack_guard_yellow_reserved_disabled);
__ br(Assembler::NE, no_reguard);

__ pusha(); // XXX only save smashed registers
@@ -994,7 +994,7 @@ void MacroAssembler::arm_stack_overflow_check(int frame_size_in_bytes, Register
if (UseStackBanging) {
const int page_size = os::vm_page_size();

-sub_slow(tmp, SP, JavaThread::stack_shadow_zone_size());
+sub_slow(tmp, SP, StackOverflow::stack_shadow_zone_size());
strb(R0, Address(tmp));
for (; frame_size_in_bytes >= page_size; frame_size_in_bytes -= 0xff0) {
strb(R0, Address(tmp, -0xff0, pre_indexed));
@@ -1007,7 +1007,7 @@ void MacroAssembler::arm_stack_overflow_check(Register Rsize, Register tmp) {
Label loop;

mov(tmp, SP);
-add_slow(Rsize, Rsize, JavaThread::stack_shadow_zone_size() - os::vm_page_size());
+add_slow(Rsize, Rsize, StackOverflow::stack_shadow_zone_size() - os::vm_page_size());
bind(loop);
subs(Rsize, Rsize, 0xff0);
strb(R0, Address(tmp, -0xff0, pre_indexed));
@@ -1238,7 +1238,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ ldr_s32(R2, Address(Rthread, JavaThread::stack_guard_state_offset()));
__ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

-__ cmp(R2, JavaThread::stack_guard_yellow_reserved_disabled);
+__ cmp(R2, StackOverflow::stack_guard_yellow_reserved_disabled);
__ b(reguard, eq);
__ bind(reguard_done);

@@ -485,10 +485,10 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
const int overhead_size = (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset)*wordSize + entry_size;

// Pages reserved for VM runtime calls and subsequent Java calls.
-const int reserved_pages = JavaThread::stack_shadow_zone_size();
+const int reserved_pages = StackOverflow::stack_shadow_zone_size();

// Thread::stack_size() includes guard pages, and they should not be touched.
-const int guard_pages = JavaThread::stack_guard_zone_size();
+const int guard_pages = StackOverflow::stack_guard_zone_size();

__ ldr(R0, Address(Rthread, Thread::stack_base_offset()));
__ ldr(R1, Address(Rthread, Thread::stack_size_offset()));
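The check being generated from the base and size just loaded boils down to simple arithmetic: the lowest SP a new frame may reach is the stack limit plus the guard and shadow zones (note that despite the local names reserved_pages/guard_pages, the accessors return byte sizes). A hedged model with illustrative names:

// Model of the generated overflow check, not the VM's code.
#include <cstdint>
#include <cstddef>

bool frame_fits(uintptr_t stack_base, std::size_t stack_size,
                std::size_t guard_bytes, std::size_t shadow_bytes,
                uintptr_t prospective_sp) {
  // Lowest address a frame may legally occupy.
  uintptr_t limit = stack_base - stack_size + guard_bytes + shadow_bytes;
  return prospective_sp >= limit;
}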
@@ -1016,7 +1016,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// reguard stack if StackOverflow exception happened while in native.
{
__ ldr_u32(Rtemp, Address(Rthread, JavaThread::stack_guard_state_offset()));
-__ cmp_32(Rtemp, JavaThread::stack_guard_yellow_reserved_disabled);
+__ cmp_32(Rtemp, StackOverflow::stack_guard_yellow_reserved_disabled);
__ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none, eq);
#if R9_IS_SCRATCHED
__ restore_method();
@@ -1427,7 +1427,7 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// insert the code of generate_stack_overflow_check(), see
// assembler.cpp for some illuminative comments.
const int page_size = os::vm_page_size();
-int bang_end = JavaThread::stack_shadow_zone_size();
+int bang_end = StackOverflow::stack_shadow_zone_size();

// This is how far the previous frame's stack banging extended.
const int bang_end_safe = bang_end;
@@ -2507,7 +2507,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,

Label no_reguard;
__ lwz(r_temp_1, thread_(stack_guard_state));
-__ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_reserved_disabled);
+__ cmpwi(CCR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled);
__ bne(CCR0, no_reguard);

save_native_result(masm, ret_type, workspace_slot_offset);
@@ -1186,7 +1186,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// needs to be checked. Only true for non-native.
if (UseStackBanging) {
const int page_size = os::vm_page_size();
-const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
+const int n_shadow_pages = ((int)StackOverflow::stack_shadow_zone_size()) / page_size;
const int start_page = native_call ? n_shadow_pages : 1;
BLOCK_COMMENT("bang_stack_shadow_pages:");
for (int pages = start_page; pages <= n_shadow_pages; pages++) {
@@ -2209,8 +2209,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,

Label no_reguard;

-__ z_cli(Address(Z_thread, JavaThread::stack_guard_state_offset() + in_ByteSize(sizeof(JavaThread::StackGuardState) - 1)),
-         JavaThread::stack_guard_yellow_reserved_disabled);
+__ z_cli(Address(Z_thread, JavaThread::stack_guard_state_offset() + in_ByteSize(sizeof(StackOverflow::StackGuardState) - 1)),
+         StackOverflow::stack_guard_yellow_reserved_disabled);

__ z_bre(no_reguard);

@@ -2067,7 +2067,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// needs to be checked. Only true for non-native. For native, we only bang the last page.
if (UseStackBanging) {
const int page_size = os::vm_page_size();
-const int n_shadow_pages = (int)(JavaThread::stack_shadow_zone_size()/page_size);
+const int n_shadow_pages = (int)(StackOverflow::stack_shadow_zone_size()/page_size);
const int start_page_num = native_call ? n_shadow_pages : 1;
for (int pages = start_page_num; pages <= n_shadow_pages; pages++) {
__ bang_stack_with_offset(pages*page_size);
@@ -1130,7 +1130,7 @@ void InterpreterMacroAssembler::remove_activation(

NOT_LP64(get_thread(rthread);)

-cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_enabled);
+cmpl(Address(rthread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_enabled);
jcc(Assembler::equal, no_reserved_zone_enabling);

cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset()));
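The two compares above form a cheap method-exit test for re-enabling the reserved zone. A sketch of the logic; the direction of the second comparison is inferred from the jcc(equal, no_reserved_zone_enabling) branch, so treat it as an assumption:

// Sketch of the remove_activation test, illustrative names only.
#include <cstdint>

bool should_reenable_reserved_zone(bool guards_fully_enabled,
                                   uintptr_t popped_frame_sp,
                                   uintptr_t reserved_activation) {
  // If guards are fully enabled, nothing was disabled, so nothing to restore.
  return !guards_fully_enabled && popped_frame_sp > reserved_activation;
}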
@@ -1058,7 +1058,7 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
// was post-decremented.) Skip this address by starting at i=1, and
// touch a few more pages below. N.B. It is important to touch all
// the way down including all pages in the shadow zone.
-for (int i = 1; i < ((int)JavaThread::stack_shadow_zone_size() / os::vm_page_size()); i++) {
+for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / os::vm_page_size()); i++) {
// this could be any sized move, but this can be a debugging crumb,
// so the bigger the better.
movptr(Address(tmp, (-i*os::vm_page_size())), size );
@@ -1868,7 +1868,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Generate stack overflow check

if (UseStackBanging) {
-__ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
+__ bang_stack_with_offset((int)StackOverflow::stack_shadow_zone_size());
} else {
// need a 5 byte instruction to allow MT safe patching to non-entrant
__ fat_nop();
@@ -2279,7 +2279,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

Label reguard;
Label reguard_done;
-__ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
+__ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
__ jcc(Assembler::equal, reguard);

// slow path reguard re-enters here
@@ -2174,7 +2174,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Generate stack overflow check

if (UseStackBanging) {
-__ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
+__ bang_stack_with_offset((int)StackOverflow::stack_shadow_zone_size());
} else {
// need a 5 byte instruction to allow MT safe patching to non-entrant
__ fat_nop();
@@ -2638,7 +2638,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

Label reguard;
Label reguard_done;
-__ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
+__ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
__ jcc(Assembler::equal, reguard);
__ bind(reguard_done);

@@ -771,7 +771,7 @@ void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
// needs to be checked. Only true for non-native.
if (UseStackBanging) {
const int page_size = os::vm_page_size();
-const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
+const int n_shadow_pages = ((int)StackOverflow::stack_shadow_zone_size()) / page_size;
const int start_page = native_call ? n_shadow_pages : 1;
for (int pages = start_page; pages <= n_shadow_pages; pages++) {
__ bang_stack_with_offset(pages*page_size);
@@ -1180,7 +1180,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
{
Label no_reguard;
__ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
-        JavaThread::stack_guard_yellow_reserved_disabled);
+        StackOverflow::stack_guard_yellow_reserved_disabled);
__ jcc(Assembler::notEqual, no_reguard);

__ pusha(); // XXX only save smashed registers
@@ -35,7 +35,7 @@
// Inlined causes circular inclusion with thread.hpp
ZeroStack::ZeroStack()
: _base(NULL), _top(NULL), _sp(NULL) {
-_shadow_pages_size = JavaThread::stack_shadow_zone_size();
+_shadow_pages_size = StackOverflow::stack_shadow_zone_size();
}

int ZeroStack::suggest_size(Thread *thread) const {
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -49,7 +49,7 @@ inline void ZeroStack::overflow_check(int required_words, TRAPS) {
inline int ZeroStack::abi_stack_available(Thread *thread) const {
guarantee(Thread::current() == thread, "should run in the same thread");
int stack_used = thread->stack_base() - (address) &stack_used
-  + (JavaThread::stack_guard_zone_size() + JavaThread::stack_shadow_zone_size());
+  + (StackOverflow::stack_guard_zone_size() + StackOverflow::stack_shadow_zone_size());
int stack_free = thread->stack_size() - stack_used;
return stack_free;
}
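The computation above leans on a small trick: the address of a local variable approximates the current stack pointer, so used = base - &local, with the guard and shadow zones counted as already spent. A standalone model (the pointer subtraction between the stack base and a local is, strictly speaking, implementation-defined, exactly as in the original):

// Model of abi_stack_available; illustrative names, not the VM's.
#include <cstddef>

std::ptrdiff_t stack_available_model(char* stack_base, std::size_t stack_size,
                                     std::size_t guard_bytes,
                                     std::size_t shadow_bytes) {
  char probe; // lives at (roughly) the current stack pointer
  std::ptrdiff_t used = (stack_base - &probe)
                      + (std::ptrdiff_t)(guard_bytes + shadow_bytes);
  return (std::ptrdiff_t)stack_size - used;
}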
@@ -935,9 +935,10 @@ bool os::create_attached_thread(JavaThread* thread) {
// enabling yellow zone first will crash JVM on SuSE Linux), so there
// is no gap between the last two virtual memory regions.

-address addr = thread->stack_reserved_zone_base();
+StackOverflow* overflow_state = thread->stack_overflow_state();
+address addr = overflow_state->stack_reserved_zone_base();
assert(addr != NULL, "initialization problem?");
-assert(thread->stack_available(addr) > 0, "stack guard should not be enabled");
+assert(overflow_state->stack_available(addr) > 0, "stack guard should not be enabled");

osthread->set_expanding_stack();
os::Linux::manually_expand_stack(thread, addr);
@@ -1931,9 +1932,10 @@ void * os::Linux::dll_load_in_vmthread(const char *filename, char *ebuf,

if (!_stack_is_executable) {
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
-if (!jt->stack_guard_zone_unused() && // Stack not yet fully initialized
-    jt->stack_guards_enabled()) { // No pending stack overflow exceptions
-  if (!os::guard_memory((char *)jt->stack_end(), jt->stack_guard_zone_size())) {
+StackOverflow* overflow_state = jt->stack_overflow_state();
+if (!overflow_state->stack_guard_zone_unused() && // Stack not yet fully initialized
+    overflow_state->stack_guards_enabled()) { // No pending stack overflow exceptions
+  if (!os::guard_memory((char *)jt->stack_end(), overflow_state->stack_guard_zone_size())) {
warning("Attempt to reguard stack yellow zone failed.");
}
}
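os::guard_memory here re-applies page protection to the guard zone after the stack was temporarily made executable for the dlopen. On POSIX systems that comes down to an mprotect(2) call; a minimal sketch under that assumption, not the VM's actual implementation:

// Sketch of guarding a zone with mprotect(2); zone_start must be
// page-aligned. PROT_NONE makes any touch fault, which is how a
// guard-zone hit is detected.
#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

bool guard_zone(void* zone_start, std::size_t zone_bytes) {
  if (mprotect(zone_start, zone_bytes, PROT_NONE) != 0) {
    std::perror("mprotect");
    return false;
  }
  return true;
}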
@@ -5314,7 +5316,7 @@ bool os::start_debugging(char *buf, int buflen) {
// | |\
// | HotSpot Guard Pages | - red, yellow and reserved pages
// | |/
-// +------------------------+ JavaThread::stack_reserved_zone_base()
+// +------------------------+ StackOverflow::stack_reserved_zone_base()
// | |\
// | Normal Stack | -
// | |/
@@ -800,8 +800,8 @@ jint os::Posix::set_minimum_stack_sizes() {
size_t os_min_stack_allowed = PTHREAD_STACK_MIN;

_java_thread_min_stack_allowed = _java_thread_min_stack_allowed +
-    JavaThread::stack_guard_zone_size() +
-    JavaThread::stack_shadow_zone_size();
+    StackOverflow::stack_guard_zone_size() +
+    StackOverflow::stack_shadow_zone_size();

_java_thread_min_stack_allowed = align_up(_java_thread_min_stack_allowed, vm_page_size());
_java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, os_min_stack_allowed);
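The arithmetic above, modeled directly: the platform's documented per-thread minimum must additionally cover the guard and shadow zones, then be rounded up to a whole number of pages and clamped against the OS minimum. The align_up trick assumes a power-of-two page size, as the VM does; names are illustrative.

// Model of set_minimum_stack_sizes' arithmetic, not the VM's code.
#include <cstddef>

std::size_t align_up(std::size_t n, std::size_t page) { // page: power of two
  return (n + page - 1) & ~(page - 1);
}

std::size_t min_thread_stack(std::size_t documented_min,
                             std::size_t guard_bytes, std::size_t shadow_bytes,
                             std::size_t page, std::size_t os_min) {
  std::size_t n = align_up(documented_min + guard_bytes + shadow_bytes, page);
  return n > os_min ? n : os_min; // MAX2 in the source
}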
@@ -824,8 +824,8 @@ jint os::Posix::set_minimum_stack_sizes() {

// Reminder: a compiler thread is a Java thread.
_compiler_thread_min_stack_allowed = _compiler_thread_min_stack_allowed +
-    JavaThread::stack_guard_zone_size() +
-    JavaThread::stack_shadow_zone_size();
+    StackOverflow::stack_guard_zone_size() +
+    StackOverflow::stack_shadow_zone_size();

_compiler_thread_min_stack_allowed = align_up(_compiler_thread_min_stack_allowed, vm_page_size());
_compiler_thread_min_stack_allowed = MAX2(_compiler_thread_min_stack_allowed, os_min_stack_allowed);
@@ -2473,7 +2473,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {

// Handle potential stack overflows up front.
if (exception_code == EXCEPTION_STACK_OVERFLOW) {
-if (thread->stack_guards_enabled()) {
+StackOverflow* overflow_state = thread->stack_overflow_state();
+if (overflow_state->stack_guards_enabled()) {
if (in_java) {
frame fr;
if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
@@ -2485,14 +2486,14 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
// zone page for us. Note: must call disable_stack_yellow_zone to
// update the enabled status, even if the zone contains only one page.
assert(!in_vm, "Undersized StackShadowPages");
-thread->disable_stack_yellow_reserved_zone();
+overflow_state->disable_stack_yellow_reserved_zone();
// If not in java code, return and hope for the best.
return in_java
? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
: EXCEPTION_CONTINUE_EXECUTION;
} else {
// Fatal red zone violation.
-thread->disable_stack_red_zone();
+overflow_state->disable_stack_red_zone();
tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
report_error(t, exception_code, pc, exception_record,
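The handler's structure is the same on every platform: a hit in the yellow (or reserved) zone is recoverable, so the zone is disabled and a StackOverflowError is raised; a hit in the red zone is fatal. A schematic of that decision, not the Windows handler itself:

// Schematic of the overflow triage above; illustrative types only.
enum class GuardZoneHit { kYellowOrReserved, kRed };

// Returns true if execution may continue (an exception will be thrown),
// false if the VM must report a fatal error.
bool triage_overflow(GuardZoneHit hit) {
  if (hit == GuardZoneHit::kYellowOrReserved) {
    // overflow_state->disable_stack_yellow_reserved_zone();
    // then unwind to throw StackOverflowError
    return true;
  }
  // overflow_state->disable_stack_red_zone(); fatal error follows
  return false;
}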
@@ -4091,8 +4092,8 @@ jint os::init_2(void) {
// Add in 4*BytesPerWord 4K pages to account for VM stack during
// class initialization depending on 32 or 64 bit VM.
size_t min_stack_allowed =
-  (size_t)(JavaThread::stack_guard_zone_size() +
-           JavaThread::stack_shadow_zone_size() +
+  (size_t)(StackOverflow::stack_guard_zone_size() +
+           StackOverflow::stack_shadow_zone_size() +
(4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);

min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@ inline void os::map_stack_shadow_pages(address sp) {
// the OS may not map an intervening page into our space
// and may fault on a memory access to interior of our frame.
const int page_size = os::win32::vm_page_size();
-const size_t n_pages = JavaThread::stack_shadow_zone_size() / page_size;
+const size_t n_pages = StackOverflow::stack_shadow_zone_size() / page_size;
for (size_t pages = 1; pages <= n_pages; pages++) {
sp -= page_size;
*sp = 0;
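This loop is the Windows counterpart of the banging loops earlier in the diff: pages are touched top-down so the OS commits every intervening stack page before the VM moves SP past them. A standalone model on a heap buffer, with a placeholder page size rather than os::win32::vm_page_size():

// Model of map_stack_shadow_pages; a vector stands in for the stack.
#include <cstddef>
#include <vector>

void map_shadow(char* sp, std::size_t n_pages, std::size_t page_size) {
  for (std::size_t pages = 1; pages <= n_pages; pages++) {
    sp -= page_size;
    *sp = 0; // touching the page forces the OS to commit it
  }
}

int main() {
  const std::size_t kPage = 4096; // placeholder
  std::vector<char> fake_stack(8 * kPage);
  map_shadow(fake_stack.data() + fake_stack.size(), 8, kPage);
  return 0;
}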
