diff --git a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp index 8a89fb56a8389..8318393bcb82a 100644 --- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp @@ -38,6 +38,30 @@ #define __ _masm->
+// Describes the amount of space, in bytes, occupied by each type on the native stack.
+#ifdef __APPLE__
+ const int nativeByteSpace = sizeof(jbyte);
+ const int nativeShortSpace = sizeof(jshort);
+ const int nativeIntSpace = sizeof(jint);
+ const int nativeLongSpace = wordSize;
+ const int nativeFloatSpace = nativeIntSpace;
+ const int nativeDoubleSpace = nativeLongSpace;
+#else
+ const int nativeByteSpace = wordSize;
+ const int nativeShortSpace = wordSize;
+ const int nativeIntSpace = wordSize;
+ const int nativeLongSpace = wordSize;
+ const int nativeFloatSpace = nativeIntSpace;
+ const int nativeDoubleSpace = nativeLongSpace;
+#endif
+
+template <typename T>
+static inline void store_and_inc(char* &to, T value, int inc_size) {
+ to = align_up(to, inc_size);
+ *(T *)to = value;
+ to = to + inc_size;
+}
+ // Implementation of SignatureHandlerGenerator Register InterpreterRuntime::SignatureHandlerGenerator::from() { return rlocals; } Register InterpreterRuntime::SignatureHandlerGenerator::to() { return sp; } @@ -51,6 +75,95 @@ InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator( _stack_offset = 0; }
+// On macOS/AArch64 the native stack is packed: int/float occupy only 4 bytes
+// on the stack. Natural alignment of types is still in place;
+// for example, double/long must be 8-byte aligned.
+
+void InterpreterRuntime::SignatureHandlerGenerator::pass_byte() {
+ const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
+
+ switch (_num_int_args) {
+ case 0: + __ ldr(c_rarg1, src); + _num_int_args++; + break;
+ case 1: + __ ldr(c_rarg2, src); + _num_int_args++; + break;
+ case 2: + __ ldr(c_rarg3, src); + _num_int_args++; + break;
+ case 3: + __ ldr(c_rarg4, src); + _num_int_args++; + break;
+ case 4: + __ ldr(c_rarg5, src); + _num_int_args++; + break;
+ case 5: + __ ldr(c_rarg6, src); + _num_int_args++; + break;
+ case 6: + __ ldr(c_rarg7, src); + _num_int_args++; + break;
+ default:
+ __ ldrb(r0, src);
+ __ strb(r0, Address(to(), _stack_offset));
+ _stack_offset += nativeByteSpace;
+
+ _num_int_args++;
+ break;
+ }
+}
+
+void InterpreterRuntime::SignatureHandlerGenerator::pass_short() {
+ const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
+
+ switch (_num_int_args) {
+ case 0: + __ ldr(c_rarg1, src); + _num_int_args++; + break;
+ case 1: + __ ldr(c_rarg2, src); + _num_int_args++; + break;
+ case 2: + __ ldr(c_rarg3, src); + _num_int_args++; + break;
+ case 3: + __ ldr(c_rarg4, src); + _num_int_args++; + break;
+ case 4: + __ ldr(c_rarg5, src); + _num_int_args++; + break;
+ case 5: + __ ldr(c_rarg6, src); + _num_int_args++; + break;
+ case 6: + __ ldr(c_rarg7, src); + _num_int_args++; + break;
+ default:
+ _stack_offset = align_up(_stack_offset, nativeShortSpace);
+ __ ldrh(r0, src);
+ __ strh(r0, Address(to(), _stack_offset));
+ _stack_offset += nativeShortSpace;
+
+ _num_int_args++;
+ break;
+ }
+}
+ void InterpreterRuntime::SignatureHandlerGenerator::pass_int() { const Address src(from(), Interpreter::local_offset_in_bytes(offset())); @@ -84,9 +197,10 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_int() { _num_int_args++; break; default: + _stack_offset = align_up(_stack_offset, nativeIntSpace); __ ldr(r0, src); __ str(r0, Address(to(),
_stack_offset)); - _stack_offset += wordSize; + _stack_offset += nativeIntSpace; _num_int_args++; break; } @@ -125,9 +239,10 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_long() { _num_int_args++; break; default: + _stack_offset = align_up(_stack_offset, nativeLongSpace); __ ldr(r0, src); __ str(r0, Address(to(), _stack_offset)); - _stack_offset += wordSize; + _stack_offset += nativeLongSpace; _num_int_args++; break; } @@ -139,9 +254,10 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_float() { if (_num_fp_args < Argument::n_float_register_parameters_c) { __ ldrs(as_FloatRegister(_num_fp_args++), src); } else { + _stack_offset = align_up(_stack_offset, nativeFloatSpace); __ ldrw(r0, src); __ strw(r0, Address(to(), _stack_offset)); - _stack_offset += wordSize; + _stack_offset += nativeFloatSpace; _num_fp_args++; } } @@ -152,9 +268,10 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_double() { if (_num_fp_args < Argument::n_float_register_parameters_c) { __ ldrd(as_FloatRegister(_num_fp_args++), src); } else { + _stack_offset = align_up(_stack_offset, nativeDoubleSpace); __ ldr(r0, src); __ str(r0, Address(to(), _stack_offset)); - _stack_offset += wordSize; + _stack_offset += nativeDoubleSpace; _num_fp_args++; } } @@ -247,6 +364,7 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() { __ cbnz(temp(), L); __ mov(r0, zr); __ bind(L); + _stack_offset = align_up(_stack_offset, wordSize); __ str(r0, Address(to(), _stack_offset)); _stack_offset += wordSize; _num_int_args++; @@ -276,13 +394,45 @@ class SlowSignatureHandler : public NativeSignatureIterator { private: address _from; - intptr_t* _to; + char* _to; intptr_t* _int_args; intptr_t* _fp_args; intptr_t* _fp_identifiers; unsigned int _num_int_args; unsigned int _num_fp_args; + + virtual void pass_byte() + { + NOT_MACOS(return pass_int();) + jbyte from_obj = *(jbyte *)(_from+Interpreter::local_offset_in_bytes(0)); + _from -= Interpreter::stackElementSize; + + if (_num_int_args < Argument::n_int_register_parameters_c-1) { + *_int_args++ = from_obj; + _num_int_args++; + } else { + store_and_inc(_to, from_obj, nativeByteSpace); + + _num_int_args++; + } + } + + virtual void pass_short() + { + NOT_MACOS(return pass_int();) + jshort from_obj = *(jshort *)(_from+Interpreter::local_offset_in_bytes(0)); + _from -= Interpreter::stackElementSize; + + if (_num_int_args < Argument::n_int_register_parameters_c-1) { + *_int_args++ = from_obj; + _num_int_args++; + } else { + store_and_inc(_to, from_obj, nativeShortSpace); + + _num_int_args++; + } + } virtual void pass_int() { jint from_obj = *(jint *)(_from+Interpreter::local_offset_in_bytes(0)); @@ -292,7 +442,8 @@ class SlowSignatureHandler *_int_args++ = from_obj; _num_int_args++; } else { - *_to++ = from_obj; + store_and_inc(_to, from_obj, nativeIntSpace); + _num_int_args++; } } @@ -306,7 +457,7 @@ class SlowSignatureHandler *_int_args++ = from_obj; _num_int_args++; } else { - *_to++ = from_obj; + store_and_inc(_to, from_obj, nativeLongSpace); _num_int_args++; } } @@ -320,7 +471,7 @@ class SlowSignatureHandler *_int_args++ = (*from_addr == 0) ? NULL : (intptr_t)from_addr; _num_int_args++; } else { - *_to++ = (*from_addr == 0) ? NULL : (intptr_t) from_addr; + store_and_inc(_to, (*from_addr == 0) ? 
(intptr_t)NULL : (intptr_t) from_addr, wordSize); _num_int_args++; } } @@ -334,7 +485,8 @@ class SlowSignatureHandler *_fp_args++ = from_obj; _num_fp_args++; } else { - *_to++ = from_obj; + store_and_inc(_to, from_obj, nativeFloatSpace); + + _num_fp_args++; } } @@ -349,7 +501,7 @@ class SlowSignatureHandler *_fp_identifiers |= (1ull << _num_fp_args); // mark as double _num_fp_args++; } else { - *_to++ = from_obj; + store_and_inc(_to, from_obj, nativeDoubleSpace); _num_fp_args++; } } @@ -359,7 +511,7 @@ class SlowSignatureHandler : NativeSignatureIterator(method) { _from = from; - _to = to; + _to = (char *)to; _int_args = to - (method->is_static() ? 16 : 17); _fp_args = to - 8; diff --git a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp index ee7c2d1bf7677..2190cf269f69f 100644 --- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp @@ -38,6 +38,8 @@ class SignatureHandlerGenerator: public NativeSignatureIterator { unsigned int _num_int_args; int _stack_offset; + void pass_byte(); + void pass_short(); void pass_int(); void pass_long(); void pass_float(); diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index 7eae56955694c..61bbae2381dcc 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -5243,7 +5243,7 @@ void MacroAssembler::char_array_compress(Register src, Register dst, Register le // aarch64_get_thread_helper() clobbers only r0, r1, and flags. // void MacroAssembler::get_thread(Register dst) { - RegSet saved_regs = RegSet::range(r0, r1) + lr - dst; + RegSet saved_regs = RegSet::range(r0, r1) + BSD_ONLY(RegSet::range(r2, r17)) + lr - dst; push(saved_regs, sp); mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)); diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp index cac3e00ff9d12..6b0f11d69bb13 100644 --- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp @@ -801,6 +801,11 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt, if (int_args < Argument::n_int_register_parameters_c) { regs[i].set1(INT_ArgReg[int_args++]->as_VMReg()); } else {
+#ifdef __APPLE__
+ // Less-than-word types are stored one after another on the native stack.
+ // The code below is unable to handle this, so bail out.
+ return -1;
+#endif
 regs[i].set1(VMRegImpl::stack2reg(stk_args)); stk_args += 2; } @@ -823,6 +828,11 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt, if (fp_args < Argument::n_float_register_parameters_c) { regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg()); } else {
+#ifdef __APPLE__
+ // Less-than-word types are stored one after another on the native stack.
+ // The code below is unable to handle this, so bail out.
+ return -1;
+#endif
 regs[i].set1(VMRegImpl::stack2reg(stk_args)); stk_args += 2; } @@ -1384,6 +1394,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, int out_arg_slots; out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args); + if (out_arg_slots < 0) { + return NULL; + } + // Compute framesize for the wrapper.
We need to handlize all oops in // incoming registers diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp index 552c345563a51..33cacba2f48b5 100644 --- a/src/hotspot/os/aix/os_aix.cpp +++ b/src/hotspot/os/aix/os_aix.cpp @@ -1976,7 +1976,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size, pd_commit_memory_or_exit(addr, size, exec, mesg); } -bool os::pd_uncommit_memory(char* addr, size_t size) { +bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) { assert(is_aligned_to(addr, os::vm_page_size()), "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")", p2i(addr), os::vm_page_size()); @@ -2053,7 +2053,7 @@ char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info } // Reserves and attaches a shared memory segment. -char* os::pd_reserve_memory(size_t bytes) { +char* os::pd_reserve_memory(size_t bytes, bool executable) { // Always round to os::vm_page_size(), which may be larger than 4K. bytes = align_up(bytes, os::vm_page_size()); diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp index 6819675573204..ebe3539024226 100644 --- a/src/hotspot/os/bsd/os_bsd.cpp +++ b/src/hotspot/os/bsd/os_bsd.cpp @@ -209,6 +209,8 @@ static char cpu_arch[] = "i386"; static char cpu_arch[] = "amd64"; #elif defined(ARM) static char cpu_arch[] = "arm"; +#elif defined(AARCH64) +static char cpu_arch[] = "aarch64"; #elif defined(PPC32) static char cpu_arch[] = "ppc"; #else @@ -1692,12 +1694,16 @@ static void warn_fail_commit_memory(char* addr, size_t size, bool exec, // problem. bool os::pd_commit_memory(char* addr, size_t size, bool exec) { int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; -#ifdef __OpenBSD__ +#if defined(__OpenBSD__) // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with protection modes %x", p2i(addr), p2i(addr+size), prot); if (::mprotect(addr, size, prot) == 0) { return true; } +#elif defined(__APPLE__) + if (::mprotect(addr, size, prot) == 0) { + return true; + } #else uintptr_t res = (uintptr_t) ::mmap(addr, size, prot, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0); @@ -1780,11 +1786,22 @@ char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info } -bool os::pd_uncommit_memory(char* addr, size_t size) { -#ifdef __OpenBSD__ +bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) { +#if defined(__OpenBSD__) // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD Events::log(NULL, "Protecting memory [" INTPTR_FORMAT "," INTPTR_FORMAT "] with PROT_NONE", p2i(addr), p2i(addr+size)); return ::mprotect(addr, size, PROT_NONE) == 0; +#elif defined(__APPLE__) + if (exec) { + if (::madvise(addr, size, MADV_FREE) != 0) { + return false; + } + return ::mprotect(addr, size, PROT_NONE) == 0; + } else { + uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE, + MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0); + return res != (uintptr_t) MAP_FAILED; + } #else uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE, MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0); @@ -1799,15 +1816,21 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) { // If this is a growable mapping, remove the guard pages entirely by // munmap()ping them. If not, just call uncommit_memory(). 
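+// Note: stack guard pages are never executable mappings, so the call below
+// passes !ExecMem for the new exec parameter of os::uncommit_memory().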
bool os::remove_stack_guard_pages(char* addr, size_t size) { - return os::uncommit_memory(addr, size); + return os::uncommit_memory(addr, size, !ExecMem); } // 'requested_addr' is only treated as a hint, the return value may or // may not start from the requested address. Unlike Bsd mmap(), this // function returns NULL to indicate failure. -static char* anon_mmap(char* requested_addr, size_t bytes) { - // MAP_FIXED is intentionally left out, to leave existing mappings intact. - const int flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS; +static char* anon_mmap(char* requested_addr, size_t bytes, bool executable) { + int flags; + + flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS; +#ifdef __APPLE__ + if (executable) { + flags |= MAP_JIT; + } +#endif // Map reserved/uncommitted pages PROT_NONE so we fail early if we // touch an uncommitted page. Otherwise, the read/write might @@ -1821,8 +1844,8 @@ static int anon_munmap(char * addr, size_t size) { return ::munmap(addr, size) == 0; } -char* os::pd_reserve_memory(size_t bytes) { - return anon_mmap(NULL /* addr */, bytes); +char* os::pd_reserve_memory(size_t bytes, bool executable) { + return anon_mmap(NULL /* addr */, bytes, executable); } bool os::pd_release_memory(char* addr, size_t size) { @@ -1932,7 +1955,7 @@ char* os::pd_attempt_reserve_memory_at(char* requested_addr, size_t bytes) { // Bsd mmap allows caller to pass an address as hint; give it a try first, // if kernel honors the hint then we can return immediately. - char * addr = anon_mmap(requested_addr, bytes); + char * addr = anon_mmap(requested_addr, bytes, false/*executable*/); if (addr == requested_addr) { return requested_addr; } @@ -2229,7 +2252,7 @@ int os::active_processor_count() { return _processor_count; } -#ifdef __APPLE__ +#if defined(__APPLE__) && defined(__x86_64__) uint os::processor_id() { // Get the initial APIC id and return the associated processor id. The initial APIC // id is limited to 8-bits, which means we can have at most 256 unique APIC ids. 
If diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp index cf20034772fc3..3f5a02fc34168 100644 --- a/src/hotspot/os/linux/os_linux.cpp +++ b/src/hotspot/os/linux/os_linux.cpp @@ -3239,7 +3239,7 @@ struct bitmask* os::Linux::_numa_nodes_ptr; struct bitmask* os::Linux::_numa_interleave_bitmask; struct bitmask* os::Linux::_numa_membind_bitmask; -bool os::pd_uncommit_memory(char* addr, size_t size) { +bool os::pd_uncommit_memory(char* addr, size_t size, bool exec) { uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE, MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0); return res != (uintptr_t) MAP_FAILED; @@ -3424,7 +3424,7 @@ bool os::remove_stack_guard_pages(char* addr, size_t size) { return ::munmap(addr, size) == 0; } - return os::uncommit_memory(addr, size); + return os::uncommit_memory(addr, size, !ExecMem); } // 'requested_addr' is only treated as a hint, the return value may or @@ -3483,7 +3483,7 @@ static int anon_munmap(char * addr, size_t size) { return ::munmap(addr, size) == 0; } -char* os::pd_reserve_memory(size_t bytes) { +char* os::pd_reserve_memory(size_t bytes, bool executable) { return anon_mmap(NULL, bytes); } diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp index 0416605e30903..2131c025e0a36 100644 --- a/src/hotspot/os/windows/os_windows.cpp +++ b/src/hotspot/os/windows/os_windows.cpp @@ -3168,7 +3168,7 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) { return aligned_base; } -char* os::pd_reserve_memory(size_t bytes) { +char* os::pd_reserve_memory(size_t bytes, bool executable) { return pd_attempt_reserve_memory_at(NULL /* addr */, bytes); } @@ -3369,7 +3369,7 @@ void os::pd_commit_memory_or_exit(char* addr, size_t size, pd_commit_memory_or_exit(addr, size, exec, mesg); } -bool os::pd_uncommit_memory(char* addr, size_t bytes) { +bool os::pd_uncommit_memory(char* addr, size_t bytes, bool exec) { if (bytes == 0) { // Don't bother the OS with noops. return true; @@ -3388,7 +3388,7 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) { } bool os::remove_stack_guard_pages(char* addr, size_t size) { - return os::uncommit_memory(addr, size); + return os::uncommit_memory(addr, size, !ExecMem); } static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) { diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.hpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.hpp index a3d89699135f7..e8b16b4ad5985 100644 --- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.hpp +++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.hpp @@ -36,4 +36,10 @@ static bool platform_print_native_stack(outputStream* st, void* context, char *buf, int buf_size); +private: + + static void current_thread_enable_wx_impl(WXMode mode) { } + +public: + #endif // OS_CPU_AIX_PPC_OS_AIX_PPC_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp new file mode 100644 index 0000000000000..a64d9f3f36f11 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/atomic_bsd_aarch64.hpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
+#define OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
+
+// Implementation of class atomic
+// Note that memory_order_conservative requires a full barrier after atomic stores.
+// See https://patchwork.kernel.org/patch/3575821/
+
+template<size_t byte_size>
+struct Atomic::PlatformAdd {
+ template<typename D, typename I>
+ D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+ D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
+ FULL_MEM_BARRIER;
+ return res;
+ }
+
+ template<typename D, typename I>
+ D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+ return add_and_fetch(dest, add_value, order) - add_value;
+ }
+};
+
+template<size_t byte_size>
+template<typename T>
+inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest,
+ T exchange_value,
+ atomic_memory_order order) const {
+ STATIC_ASSERT(byte_size == sizeof(T));
+ T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
+ FULL_MEM_BARRIER;
+ return res;
+}
+
+// __attribute__((unused)) on dest is to get rid of spurious GCC warnings.
+template<size_t byte_size>
+template<typename T>
+inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest __attribute__((unused)),
+ T compare_value,
+ T exchange_value,
+ atomic_memory_order order) const {
+ STATIC_ASSERT(byte_size == sizeof(T));
+ if (order == memory_order_relaxed) {
+ T value = compare_value;
+ __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return value;
+ } else {
+ T value = compare_value;
+ FULL_MEM_BARRIER;
+ __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ FULL_MEM_BARRIER;
+ return value;
+ }
+}
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+ template <typename T>
+ T operator()(const volatile T* p) const { T data; __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE); return data; }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const { __atomic_store(const_cast<T*>(p), &v, __ATOMIC_RELEASE); }
+};
+
+template<size_t byte_size>
+struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(volatile T* p, T v) const { release_store(p, v); OrderAccess::fence(); }
+};
+
+
+#endif // OS_CPU_BSD_AARCH64_ATOMIC_BSD_AARCH64_HPP
diff --git a/src/hotspot/os_cpu/bsd_aarch64/bytes_bsd_aarch64.inline.hpp b/src/hotspot/os_cpu/bsd_aarch64/bytes_bsd_aarch64.inline.hpp new file mode 100644 index 0000000000000..1c46c14f092c6 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/bytes_bsd_aarch64.inline.hpp @@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_BSD_AARCH64_BYTES_BSD_AARCH64_INLINE_HPP
+#define OS_CPU_BSD_AARCH64_BYTES_BSD_AARCH64_INLINE_HPP
+
+#ifdef __APPLE__
+#include <libkern/OSByteOrder.h>
+#endif
+
+#if defined(__APPLE__)
+# define bswap_16(x) OSSwapInt16(x)
+# define bswap_32(x) OSSwapInt32(x)
+# define bswap_64(x) OSSwapInt64(x)
+#else
+# error "Unimplemented"
+#endif
+
+// Efficient swapping of data bytes from Java byte
+// ordering to native byte ordering and vice versa.
+inline u2 Bytes::swap_u2(u2 x) {
+ return bswap_16(x);
+}
+
+inline u4 Bytes::swap_u4(u4 x) {
+ return bswap_32(x);
+}
+
+inline u8 Bytes::swap_u8(u8 x) {
+ return bswap_64(x);
+}
+
+#endif // OS_CPU_BSD_AARCH64_BYTES_BSD_AARCH64_INLINE_HPP
diff --git a/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.inline.hpp b/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.inline.hpp new file mode 100644 index 0000000000000..e353971ad8d55 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.inline.hpp @@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ * + */ + +#ifndef OS_CPU_BSD_AARCH64_COPY_BSD_AARCH64_INLINE_HPP +#define OS_CPU_BSD_AARCH64_COPY_BSD_AARCH64_INLINE_HPP + +#define COPY_SMALL(from, to, count) \ +{ \ + long tmp0, tmp1, tmp2, tmp3; \ + long tmp4, tmp5, tmp6, tmp7; \ + __asm volatile( \ +" adr %[t0], 0f;\n" \ +" add %[t0], %[t0], %[cnt], lsl #5;\n" \ +" br %[t0];\n" \ +" .align 5;\n" \ +"0:" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldr %[t0], [%[s], #0];\n" \ +" str %[t0], [%[d], #0];\n" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldp %[t0], %[t1], [%[s], #0];\n" \ +" stp %[t0], %[t1], [%[d], #0];\n" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldp %[t0], %[t1], [%[s], #0];\n" \ +" ldr %[t2], [%[s], #16];\n" \ +" stp %[t0], %[t1], [%[d], #0];\n" \ +" str %[t2], [%[d], #16];\n" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldp %[t0], %[t1], [%[s], #0];\n" \ +" ldp %[t2], %[t3], [%[s], #16];\n" \ +" stp %[t0], %[t1], [%[d], #0];\n" \ +" stp %[t2], %[t3], [%[d], #16];\n" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldp %[t0], %[t1], [%[s], #0];\n" \ +" ldp %[t2], %[t3], [%[s], #16];\n" \ +" ldr %[t4], [%[s], #32];\n" \ +" stp %[t0], %[t1], [%[d], #0];\n" \ +" stp %[t2], %[t3], [%[d], #16];\n" \ +" str %[t4], [%[d], #32];\n" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldp %[t0], %[t1], [%[s], #0];\n" \ +" ldp %[t2], %[t3], [%[s], #16];\n" \ +" ldp %[t4], %[t5], [%[s], #32];\n" \ +"2:" \ +" stp %[t0], %[t1], [%[d], #0];\n" \ +" stp %[t2], %[t3], [%[d], #16];\n" \ +" stp %[t4], %[t5], [%[d], #32];\n" \ +" b 1f;\n" \ +" .align 5;\n" \ +" ldr %[t6], [%[s], #0];\n" \ +" ldp %[t0], %[t1], [%[s], #8];\n" \ +" ldp %[t2], %[t3], [%[s], #24];\n" \ +" ldp %[t4], %[t5], [%[s], #40];\n" \ +" str %[t6], [%[d]], #8;\n" \ +" b 2b;\n" \ +" .align 5;\n" \ +" ldp %[t0], %[t1], [%[s], #0];\n" \ +" ldp %[t2], %[t3], [%[s], #16];\n" \ +" ldp %[t4], %[t5], [%[s], #32];\n" \ +" ldp %[t6], %[t7], [%[s], #48];\n" \ +" stp %[t0], %[t1], [%[d], #0];\n" \ +" stp %[t2], %[t3], [%[d], #16];\n" \ +" stp %[t4], %[t5], [%[d], #32];\n" \ +" stp %[t6], %[t7], [%[d], #48];\n" \ +"1:" \ + \ + : [s]"+r"(from), [d]"+r"(to), [cnt]"+r"(count), \ + [t0]"=&r"(tmp0), [t1]"=&r"(tmp1), [t2]"=&r"(tmp2), [t3]"=&r"(tmp3), \ + [t4]"=&r"(tmp4), [t5]"=&r"(tmp5), [t6]"=&r"(tmp6), [t7]"=&r"(tmp7) \ + : \ + : "memory", "cc"); \ +} + +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { + __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory"); + if (__builtin_expect(count <= 8, 1)) { + COPY_SMALL(from, to, count); + return; + } + _Copy_conjoint_words(from, to, count); +} + +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { + if (__builtin_constant_p(count)) { + memcpy(to, from, count * sizeof(HeapWord)); + return; + } + __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory"); + if (__builtin_expect(count <= 8, 1)) { + COPY_SMALL(from, to, count); + return; + } + _Copy_disjoint_words(from, to, count); +} + +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { + __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory"); + if (__builtin_expect(count <= 8, 1)) { + COPY_SMALL(from, to, count); + return; + } + _Copy_disjoint_words(from, to, count); +} + +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_words(from, to, count); +} + +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { + pd_disjoint_words(from, to, count); +} + +static void pd_conjoint_bytes(const void* from, void* to, size_t 
count) { + (void)memmove(to, from, count); +} + +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { + pd_conjoint_bytes(from, to, count); +} + +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { + _Copy_conjoint_jshorts_atomic(from, to, count); +} + +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { + _Copy_conjoint_jints_atomic(from, to, count); +} + +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { + _Copy_conjoint_jlongs_atomic(from, to, count); +} + +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { + assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); + _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); +} + +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { + _Copy_arrayof_conjoint_bytes(from, to, count); +} + +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { + _Copy_arrayof_conjoint_jshorts(from, to, count); +} + +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { + _Copy_arrayof_conjoint_jints(from, to, count); +} + +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { + _Copy_arrayof_conjoint_jlongs(from, to, count); +} + +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { + assert(!UseCompressedOops, "foo!"); + assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); + _Copy_arrayof_conjoint_jlongs(from, to, count); +} + +#endif // OS_CPU_BSD_AARCH64_COPY_BSD_AARCH64_INLINE_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.s b/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.s new file mode 100644 index 0000000000000..70b0c24660e1f --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.s @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2016, Linaro Ltd. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#define CFUNC(x) _##x + + .global CFUNC(_Copy_conjoint_words) + .global CFUNC(_Copy_disjoint_words) + +s .req x0 +d .req x1 +count .req x2 +t0 .req x3 +t1 .req x4 +t2 .req x5 +t3 .req x6 +t4 .req x7 +t5 .req x8 +t6 .req x9 +t7 .req x10 + + .align 6 +CFUNC(_Copy_disjoint_words): + // Ensure 2 word aligned + tbz s, #3, fwd_copy_aligned + ldr t0, [s], #8 + str t0, [d], #8 + sub count, count, #1 + +fwd_copy_aligned: + // Bias s & d so we only pre index on the last copy + sub s, s, #16 + sub d, d, #16 + + ldp t0, t1, [s, #16] + ldp t2, t3, [s, #32] + ldp t4, t5, [s, #48] + ldp t6, t7, [s, #64]! + + subs count, count, #16 + blo fwd_copy_drain + +fwd_copy_again: + prfm pldl1keep, [s, #256] + stp t0, t1, [d, #16] + ldp t0, t1, [s, #16] + stp t2, t3, [d, #32] + ldp t2, t3, [s, #32] + stp t4, t5, [d, #48] + ldp t4, t5, [s, #48] + stp t6, t7, [d, #64]! + ldp t6, t7, [s, #64]! + subs count, count, #8 + bhs fwd_copy_again + +fwd_copy_drain: + stp t0, t1, [d, #16] + stp t2, t3, [d, #32] + stp t4, t5, [d, #48] + stp t6, t7, [d, #64]! + + // count is now -8..-1 for 0..7 words to copy + adr t0, 0f + add t0, t0, count, lsl #5 + br t0 + + .align 5 + ret // -8 == 0 words + .align 5 + ldr t0, [s, #16] // -7 == 1 word + str t0, [d, #16] + ret + .align 5 + ldp t0, t1, [s, #16] // -6 = 2 words + stp t0, t1, [d, #16] + ret + .align 5 + ldp t0, t1, [s, #16] // -5 = 3 words + ldr t2, [s, #32] + stp t0, t1, [d, #16] + str t2, [d, #32] + ret + .align 5 + ldp t0, t1, [s, #16] // -4 = 4 words + ldp t2, t3, [s, #32] + stp t0, t1, [d, #16] + stp t2, t3, [d, #32] + ret + .align 5 + ldp t0, t1, [s, #16] // -3 = 5 words + ldp t2, t3, [s, #32] + ldr t4, [s, #48] + stp t0, t1, [d, #16] + stp t2, t3, [d, #32] + str t4, [d, #48] + ret + .align 5 + ldp t0, t1, [s, #16] // -2 = 6 words + ldp t2, t3, [s, #32] + ldp t4, t5, [s, #48] + stp t0, t1, [d, #16] + stp t2, t3, [d, #32] + stp t4, t5, [d, #48] + ret + .align 5 + ldp t0, t1, [s, #16] // -1 = 7 words + ldp t2, t3, [s, #32] + ldp t4, t5, [s, #48] + ldr t6, [s, #64] + stp t0, t1, [d, #16] + stp t2, t3, [d, #32] + stp t4, t5, [d, #48] + str t6, [d, #64] + // Is always aligned here, code for 7 words is one instruction + // too large so it just falls through. + .align 5 +0: + ret + + .align 6 +CFUNC(_Copy_conjoint_words): + sub t0, d, s + cmp t0, count, lsl #3 + bhs CFUNC(_Copy_disjoint_words) + + add s, s, count, lsl #3 + add d, d, count, lsl #3 + + // Ensure 2 word aligned + tbz s, #3, bwd_copy_aligned + ldr t0, [s, #-8]! + str t0, [d, #-8]! + sub count, count, #1 + +bwd_copy_aligned: + ldp t0, t1, [s, #-16] + ldp t2, t3, [s, #-32] + ldp t4, t5, [s, #-48] + ldp t6, t7, [s, #-64]! + + subs count, count, #16 + blo bwd_copy_drain + +bwd_copy_again: + prfum pldl1keep, [s, #-256] + stp t0, t1, [d, #-16] + ldp t0, t1, [s, #-16] + stp t2, t3, [d, #-32] + ldp t2, t3, [s, #-32] + stp t4, t5, [d, #-48] + ldp t4, t5, [s, #-48] + stp t6, t7, [d, #-64]! + ldp t6, t7, [s, #-64]! + subs count, count, #8 + bhs bwd_copy_again + +bwd_copy_drain: + stp t0, t1, [d, #-16] + stp t2, t3, [d, #-32] + stp t4, t5, [d, #-48] + stp t6, t7, [d, #-64]! 
+ + // count is now -8..-1 for 0..7 words to copy + adr t0, 0f + add t0, t0, count, lsl #5 + br t0 + + .align 5 + ret // -8 == 0 words + .align 5 + ldr t0, [s, #-8] // -7 == 1 word + str t0, [d, #-8] + ret + .align 5 + ldp t0, t1, [s, #-16] // -6 = 2 words + stp t0, t1, [d, #-16] + ret + .align 5 + ldp t0, t1, [s, #-16] // -5 = 3 words + ldr t2, [s, #-24] + stp t0, t1, [d, #-16] + str t2, [d, #-24] + ret + .align 5 + ldp t0, t1, [s, #-16] // -4 = 4 words + ldp t2, t3, [s, #-32] + stp t0, t1, [d, #-16] + stp t2, t3, [d, #-32] + ret + .align 5 + ldp t0, t1, [s, #-16] // -3 = 5 words + ldp t2, t3, [s, #-32] + ldr t4, [s, #-40] + stp t0, t1, [d, #-16] + stp t2, t3, [d, #-32] + str t4, [d, #-40] + ret + .align 5 + ldp t0, t1, [s, #-16] // -2 = 6 words + ldp t2, t3, [s, #-32] + ldp t4, t5, [s, #-48] + stp t0, t1, [d, #-16] + stp t2, t3, [d, #-32] + stp t4, t5, [d, #-48] + ret + .align 5 + ldp t0, t1, [s, #-16] // -1 = 7 words + ldp t2, t3, [s, #-32] + ldp t4, t5, [s, #-48] + ldr t6, [s, #-56] + stp t0, t1, [d, #-16] + stp t2, t3, [d, #-32] + stp t4, t5, [d, #-48] + str t6, [d, #-56] + // Is always aligned here, code for 7 words is one instruction + // too large so it just falls through. + .align 5 +0: + ret diff --git a/src/hotspot/os_cpu/bsd_aarch64/globals_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/globals_bsd_aarch64.hpp new file mode 100644 index 0000000000000..dbe8ffc51b67a --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/globals_bsd_aarch64.hpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_BSD_AARCH64_GLOBALS_BSD_AARCH64_HPP +#define OS_CPU_BSD_AARCH64_GLOBALS_BSD_AARCH64_HPP + + +// Sets the default values for platform dependent flags used by the runtime system. 
+// (see globals.hpp) + +define_pd_global(bool, DontYieldALot, false); +define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default +define_pd_global(intx, VMThreadStackSize, 2048); + +define_pd_global(intx, CompilerThreadStackSize, 2048); + +define_pd_global(uintx,JVMInvokeMethodSlack, 8192); + +// Used on 64 bit platforms for UseCompressedOops base address +define_pd_global(uintx,HeapBaseMinAddress, 2*G); + +#endif // OS_CPU_BSD_AARCH64_GLOBALS_BSD_AARCH64_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/icache_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/icache_bsd_aarch64.hpp new file mode 100644 index 0000000000000..39d5ffe9bbe61 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/icache_bsd_aarch64.hpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_BSD_AARCH64_ICACHE_AARCH64_HPP +#define OS_CPU_BSD_AARCH64_ICACHE_AARCH64_HPP + +// Interface for updating the instruction cache. Whenever the VM +// modifies code, part of the processor instruction cache potentially +// has to be flushed. + +class ICache : public AbstractICache { + public: + static void initialize(); + static void invalidate_word(address addr) { + __clear_cache((char *)addr, (char *)(addr + 4)); + } + static void invalidate_range(address start, int nbytes) { + __clear_cache((char *)start, (char *)(start + nbytes)); + } +}; + +#endif // OS_CPU_BSD_AARCH64_ICACHE_AARCH64_HPP \ No newline at end of file diff --git a/src/hotspot/os_cpu/bsd_aarch64/orderAccess_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/orderAccess_bsd_aarch64.hpp new file mode 100644 index 0000000000000..0039fc89fd72f --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/orderAccess_bsd_aarch64.hpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_BSD_AARCH64_ORDERACCESS_BSD_AARCH64_HPP +#define OS_CPU_BSD_AARCH64_ORDERACCESS_BSD_AARCH64_HPP + +// Included in orderAccess.hpp header file. + +// Implementation of class OrderAccess. + +inline void OrderAccess::loadload() { acquire(); } +inline void OrderAccess::storestore() { release(); } +inline void OrderAccess::loadstore() { acquire(); } +inline void OrderAccess::storeload() { fence(); } + +#define FULL_MEM_BARRIER __sync_synchronize() +#define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE); +#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE); + +inline void OrderAccess::acquire() { + READ_MEM_BARRIER; +} + +inline void OrderAccess::release() { + WRITE_MEM_BARRIER; +} + +inline void OrderAccess::fence() { + FULL_MEM_BARRIER; +} + +inline void OrderAccess::cross_modify_fence() { } + +#endif // OS_CPU_BSD_AARCH64_ORDERACCESS_BSD_AARCH64_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp new file mode 100644 index 0000000000000..bdb6b7d84c007 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp @@ -0,0 +1,827 @@ +/* + * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +// no precompiled headers +#include "jvm.h" +#include "asm/macroAssembler.hpp" +#include "classfile/classLoader.hpp" +#include "classfile/systemDictionary.hpp" +#include "classfile/vmSymbols.hpp" +#include "code/codeCache.hpp" +#include "code/icBuffer.hpp" +#include "code/vtableStubs.hpp" +#include "interpreter/interpreter.hpp" +#include "logging/log.hpp" +#include "memory/allocation.inline.hpp" +#include "os_share_bsd.hpp" +#include "prims/jniFastGetField.hpp" +#include "prims/jvm_misc.hpp" +#include "runtime/arguments.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/interfaceSupport.inline.hpp" +#include "runtime/java.hpp" +#include "runtime/javaCalls.hpp" +#include "runtime/mutexLocker.hpp" +#include "runtime/osThread.hpp" +#include "runtime/safepointMechanism.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/stubRoutines.hpp" +#include "runtime/thread.inline.hpp" +#include "runtime/timer.hpp" +#include "signals_posix.hpp" +#include "utilities/align.hpp" +#include "utilities/events.hpp" +#include "utilities/vmError.hpp" + +// put OS-includes here +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +#ifndef __OpenBSD__ +# include +#endif + +#if !defined(__APPLE__) && !defined(__NetBSD__) +# include +#endif + +// needed by current_stack_region() workaround for Mavericks +#if defined(__APPLE__) +# include +# include +# include +# define DEFAULT_MAIN_THREAD_STACK_PAGES 2048 +# define OS_X_10_9_0_KERNEL_MAJOR_VERSION 13 +#endif + +#define SPELL_REG_SP "sp" +#define SPELL_REG_FP "fp" + +#ifdef __APPLE__ +// see darwin-xnu/osfmk/mach/arm/_structs.h + +# if __DARWIN_UNIX03 && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_5) + // 10.5 UNIX03 member name prefixes + #define DU3_PREFIX(s, m) __ ## s.__ ## m +# else + #define DU3_PREFIX(s, m) s ## . ## m +# endif +#endif + +#define context_x uc_mcontext->DU3_PREFIX(ss,x) +#define context_fp uc_mcontext->DU3_PREFIX(ss,fp) +#define context_lr uc_mcontext->DU3_PREFIX(ss,lr) +#define context_sp uc_mcontext->DU3_PREFIX(ss,sp) +#define context_pc uc_mcontext->DU3_PREFIX(ss,pc) +#define context_cpsr uc_mcontext->DU3_PREFIX(ss,cpsr) + +address os::current_stack_pointer() { +#if defined(__clang__) || defined(__llvm__) + void *sp; + __asm__("mov %0, " SPELL_REG_SP : "=r"(sp)); + return (address) sp; +#else + register void *sp __asm__ (SPELL_REG_SP); + return (address) sp; +#endif +} + +char* os::non_memory_address_word() { + // Must never look like an address returned by reserve_memory, + // even in its subfields (as defined by the CPU immediate fields, + // if the CPU splits constants across multiple instructions). 
+
+ // The return value is used in the computation of Universe::non_oop_word(), which
+ // is loaded on cpu/aarch64 by MacroAssembler::movptr(Register, uintptr_t).
+ return (char*) 0xffffffffffff;
+}
+
+address os::Bsd::ucontext_get_pc(const ucontext_t * uc) {
+ return (address)uc->context_pc;
+}
+
+void os::Bsd::ucontext_set_pc(ucontext_t * uc, address pc) {
+ uc->context_pc = (intptr_t)pc;
+}
+
+intptr_t* os::Bsd::ucontext_get_sp(const ucontext_t * uc) {
+ return (intptr_t*)uc->context_sp;
+}
+
+intptr_t* os::Bsd::ucontext_get_fp(const ucontext_t * uc) {
+ return (intptr_t*)uc->context_fp;
+}
+
+address os::fetch_frame_from_context(const void* ucVoid,
+ intptr_t** ret_sp, intptr_t** ret_fp) {
+
+ address epc;
+ const ucontext_t* uc = (const ucontext_t*)ucVoid;
+
+ if (uc != NULL) {
+ epc = os::Bsd::ucontext_get_pc(uc);
+ if (ret_sp) *ret_sp = os::Bsd::ucontext_get_sp(uc);
+ if (ret_fp) *ret_fp = os::Bsd::ucontext_get_fp(uc);
+ } else {
+ epc = NULL;
+ if (ret_sp) *ret_sp = (intptr_t *)NULL;
+ if (ret_fp) *ret_fp = (intptr_t *)NULL;
+ }
+
+ return epc;
+}
+
+frame os::fetch_frame_from_context(const void* ucVoid) {
+ intptr_t* sp;
+ intptr_t* fp;
+ address epc = fetch_frame_from_context(ucVoid, &sp, &fp);
+ return frame(sp, fp, epc);
+}
+
+bool os::Bsd::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) {
+ address pc = (address) os::Bsd::ucontext_get_pc(uc);
+ if (Interpreter::contains(pc)) {
+ // The interpreter performs stack banging after the fixed frame header has
+ // been generated, while the compilers perform it before. To maintain
+ // semantic consistency between interpreted and compiled frames, the
+ // method returns the Java sender of the current frame.
+ *fr = os::fetch_frame_from_context(uc);
+ if (!fr->is_first_java_frame()) {
+ assert(fr->safe_for_sender(thread), "Safety check");
+ *fr = fr->java_sender();
+ }
+ } else {
+ // More complex case: the banging happened in compiled code.
+ assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
+ CodeBlob* cb = CodeCache::find_blob(pc);
+ if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
+ // Not sure where the pc points to; fall back to default
+ // stack overflow handling.
+ return false;
+ } else {
+ // In compiled code, the stack banging is performed before LR
+ // has been saved in the frame. LR is live, and SP and FP
+ // belong to the caller.
+ intptr_t* fp = os::Bsd::ucontext_get_fp(uc);
+ intptr_t* sp = os::Bsd::ucontext_get_sp(uc);
+ address pc = (address)(uc->context_lr
+ - NativeInstruction::instruction_size);
+ *fr = frame(sp, fp, pc);
+ if (!fr->is_java_frame()) {
+ assert(fr->safe_for_sender(thread), "Safety check");
+ assert(!fr->is_first_frame(), "Safety check");
+ *fr = fr->java_sender();
+ }
+ }
+ }
+ assert(fr->is_java_frame(), "Safety check");
+ return true;
+}
+
+// By default, gcc always saves frame pointer rfp on this stack. This
+// may get turned off by -fomit-frame-pointer.
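+// If it is turned off, the rfp chain walked by get_sender_for_C_frame() and
+// os::current_frame() below is unavailable and native frames cannot be walked.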
+frame os::get_sender_for_C_frame(frame* fr) { + return frame(fr->link(), fr->link(), fr->sender_pc()); +} + +NOINLINE frame os::current_frame() { + intptr_t *fp = *(intptr_t **)__builtin_frame_address(0); + frame myframe((intptr_t*)os::current_stack_pointer(), + (intptr_t*)fp, + CAST_FROM_FN_PTR(address, os::current_frame)); + if (os::is_first_C_frame(&myframe)) { + // stack is not walkable + return frame(); + } else { + return os::get_sender_for_C_frame(&myframe); + } +} + +extern "C" JNIEXPORT int +JVM_handle_bsd_signal(int sig, + siginfo_t* info, + void* ucVoid, + int abort_if_unrecognized) { + ucontext_t* uc = (ucontext_t*) ucVoid; + + Thread* t = Thread::current_or_null_safe(); + + // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away + // (no destructors can be run) + os::ThreadCrashProtection::check_crash_protection(sig, t); + + SignalHandlerMark shm(t); + + // Note: it's not uncommon that JNI code uses signal/sigset to install + // then restore certain signal handler (e.g. to temporarily block SIGPIPE, + // or have a SIGILL handler when detecting CPU type). When that happens, + // JVM_handle_bsd_signal() might be invoked with junk info/ucVoid. To + // avoid unnecessary crash when libjsig is not preloaded, try handle signals + // that do not require siginfo/ucontext first. + + if (sig == SIGPIPE || sig == SIGXFSZ) { + // allow chained handler to go first + if (PosixSignals::chained_handler(sig, info, ucVoid)) { + return true; + } else { + // Ignoring SIGPIPE/SIGXFSZ - see bugs 4229104 or 6499219 + return true; + } + } + +#ifdef CAN_SHOW_REGISTERS_ON_ASSERT + if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) { + if (handle_assert_poison_fault(ucVoid, info->si_addr)) { + return 1; + } + } +#endif + + JavaThread* thread = NULL; + VMThread* vmthread = NULL; + if (PosixSignals::are_signal_handlers_installed()) { + if (t != NULL ){ + if(t->is_Java_thread()) { + thread = (JavaThread*)t; + } + else if(t->is_VM_thread()){ + vmthread = (VMThread *)t; + } + } + } +/* + NOTE: does not seem to work on bsd. + if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) { + // can't decode this kind of signal + info = NULL; + } else { + assert(sig == info->si_signo, "bad siginfo"); + } +*/ + // decide if this trap can be handled by a stub + address stub = NULL; + + address pc = NULL; + + //%note os_trap_1 + if (info != NULL && uc != NULL && thread != NULL) { + pc = (address) os::Bsd::ucontext_get_pc(uc); + + if (StubRoutines::is_safefetch_fault(pc)) { + os::Bsd::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc)); + return 1; + } + + // Handle ALL stack overflow variations here + if (sig == SIGSEGV || sig == SIGBUS) { + address addr = (address) info->si_addr; + + // Make sure the high order byte is sign extended, as it may be masked away by the hardware. 
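+ // (With AArch64 top-byte-ignore, bit 55 is the highest untagged address
+ // bit; it decides whether the tag byte is restored as all ones or zeros.)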
+ if ((uintptr_t(addr) & (uintptr_t(1) << 55)) != 0) { + addr = address(uintptr_t(addr) | (uintptr_t(0xFF) << 56)); + } + + // check if fault address is within thread stack + if (thread->is_in_full_stack(addr)) { + Thread::WXWriteFromExecSetter wx_write; + // stack overflow + StackOverflow* overflow_state = thread->stack_overflow_state(); + if (overflow_state->in_stack_yellow_reserved_zone(addr)) { + if (thread->thread_state() == _thread_in_Java) { + if (overflow_state->in_stack_reserved_zone(addr)) { + frame fr; + if (os::Bsd::get_frame_at_stack_banging_point(thread, uc, &fr)) { + assert(fr.is_java_frame(), "Must be a Java frame"); + frame activation = + SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); + if (activation.sp() != NULL) { + overflow_state->disable_stack_reserved_zone(); + if (activation.is_interpreted_frame()) { + overflow_state->set_reserved_stack_activation((address)( + activation.fp() + frame::interpreter_frame_initial_sp_offset)); + } else { + overflow_state->set_reserved_stack_activation((address)activation.unextended_sp()); + } + return 1; + } + } + } + // Throw a stack overflow exception. Guard pages will be reenabled + // while unwinding the stack. + overflow_state->disable_stack_yellow_reserved_zone(); + stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW); + } else { + // Thread was in the vm or native code. Return and try to finish. + overflow_state->disable_stack_yellow_reserved_zone(); + return 1; + } + } else if (overflow_state->in_stack_red_zone(addr)) { + // Fatal red zone violation. Disable the guard pages and fall through + // to handle_unexpected_exception way down below. + overflow_state->disable_stack_red_zone(); + tty->print_raw_cr("An irrecoverable stack overflow has occurred."); + } + } + } + + // We test if stub is already set (by the stack overflow code + // above) so it is not overwritten by the code that follows. This + // check is not required on other platforms, because on other + // platforms we check for SIGSEGV only or SIGBUS only, where here + // we have to check for both SIGSEGV and SIGBUS. + if (thread->thread_state() == _thread_in_Java && stub == NULL) { + // Java thread running in Java code => find exception handler if any + // a fault inside compiled code, the interpreter, or a stub + Thread::WXWriteFromExecSetter wx_write; + + // Handle signal from NativeJump::patch_verified_entry(). + if ((sig == SIGILL) + && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) { + if (TraceTraps) { + tty->print_cr("trap: zombie_not_entrant"); + } + stub = SharedRuntime::get_handle_wrong_method_stub(); + } else if ((sig == SIGSEGV || sig == SIGBUS) && SafepointMechanism::is_poll_address((address)info->si_addr)) { + stub = SharedRuntime::get_poll_stub(pc); +#if defined(__APPLE__) + // 32-bit Darwin reports a SIGBUS for nearly all memory access exceptions. + // 64-bit Darwin may also use a SIGBUS (seen with compressed oops). + // Catching SIGBUS here prevents the implicit SIGBUS NULL check below from + // being called, so only do so if the implicit NULL check is not necessary. + } else if (sig == SIGBUS && !MacroAssembler::uses_implicit_null_check(info->si_addr)) { +#else + } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) { +#endif + // BugId 4454115: A read from a MappedByteBuffer can fault + // here if the underlying file has been truncated. + // Do not crash the VM in such a case. 
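+ // If the faulting pc belongs to an nmethod that performs unsafe accesses,
+ // or to an UnsafeCopyMemory stub, continue at the matching error-handler
+ // pc instead.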
+ CodeBlob* cb = CodeCache::find_blob_unsafe(pc); + CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL; + bool is_unsafe_arraycopy = (thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc)); + if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) { + address next_pc = pc + NativeCall::instruction_size; + if (is_unsafe_arraycopy) { + next_pc = UnsafeCopyMemory::page_error_continue_pc(pc); + } + stub = SharedRuntime::handle_unsafe_access(thread, next_pc); + } + } else if (sig == SIGILL && nativeInstruction_at(pc)->is_stop()) { + // Pull a pointer to the error message out of the instruction + // stream. + const uint64_t *detail_msg_ptr + = (uint64_t*)(pc + NativeInstruction::instruction_size); + const char *detail_msg = (const char *)*detail_msg_ptr; + const char *msg = "stop"; + if (TraceTraps) { + tty->print_cr("trap: %s: (SIGILL)", msg); + } + +PRAGMA_DIAG_PUSH +PRAGMA_DISABLE_GCC_WARNING("-Wformat-nonliteral") +PRAGMA_DISABLE_GCC_WARNING("-Wuninitialized") + va_list detail_args; + VMError::report_and_die(INTERNAL_ERROR, msg, detail_msg, detail_args, thread, + pc, info, ucVoid, NULL, 0, 0); + va_end(detail_args); +PRAGMA_DIAG_POP + } + else + + if (sig == SIGFPE && + (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) { + stub = + SharedRuntime:: + continuation_for_implicit_exception(thread, + pc, + SharedRuntime:: + IMPLICIT_DIVIDE_BY_ZERO); +#ifdef __APPLE__ + } else if (sig == SIGFPE && info->si_code == FPE_NOOP) { + Unimplemented(); +#endif /* __APPLE__ */ + + } else if ((sig == SIGSEGV || sig == SIGBUS) && + MacroAssembler::uses_implicit_null_check(info->si_addr)) { + // Determination of interpreter/vtable stub/compiled code null exception + stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); + } + } else if ((thread->thread_state() == _thread_in_vm || + thread->thread_state() == _thread_in_native) && + sig == SIGBUS && /* info->si_code == BUS_OBJERR && */ + thread->doing_unsafe_access()) { + address next_pc = pc + NativeCall::instruction_size; + if (UnsafeCopyMemory::contains_pc(pc)) { + next_pc = UnsafeCopyMemory::page_error_continue_pc(pc); + } + stub = SharedRuntime::handle_unsafe_access(thread, next_pc); + } + + // jni_fast_GetField can trap at certain pc's if a GC kicks in + // and the heap gets shrunk before the field access. 
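+ // find_slowcase_pc() maps such a faulting pc to the accessor's slow-case
+ // entry, so the field access is redone via the safe JNI path.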
+ if ((sig == SIGSEGV) || (sig == SIGBUS)) { + address addr = JNI_FastGetField::find_slowcase_pc(pc); + if (addr != (address)-1) { + stub = addr; + } + } + } + + if (stub != NULL) { + // save all thread context in case we need to restore it + if (thread != NULL) thread->set_saved_exception_pc(pc); + + os::Bsd::ucontext_set_pc(uc, stub); + return true; + } + + // signal-chaining + if (PosixSignals::chained_handler(sig, info, ucVoid)) { + return true; + } + + if (!abort_if_unrecognized) { + // caller wants another chance, so give it one + return false; + } + + if (pc == NULL && uc != NULL) { + pc = os::Bsd::ucontext_get_pc(uc); + } + + // unmask current signal + sigset_t newset; + sigemptyset(&newset); + sigaddset(&newset, sig); + sigprocmask(SIG_UNBLOCK, &newset, NULL); + + VMError::report_and_die(t, sig, pc, info, ucVoid); + + ShouldNotReachHere(); + return true; // Mute compiler +} + +void os::Bsd::init_thread_fpu_state(void) { +} + +bool os::is_allocatable(size_t bytes) { + return true; +} + +//////////////////////////////////////////////////////////////////////////////// +// thread stack + +// Minimum usable stack sizes required to get to user code. Space for +// HotSpot guard pages is added later. +size_t os::Posix::_compiler_thread_min_stack_allowed = 72 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 72 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 72 * K; + +// return default stack size for thr_type +size_t os::Posix::default_stack_size(os::ThreadType thr_type) { + // default stack size (compiler thread needs larger stack) + size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M); + return s; +} + + +// Java thread: +// +// Low memory addresses +// +------------------------+ +// | |\ Java thread created by VM does not have glibc +// | glibc guard page | - guard, attached Java thread usually has +// | |/ 1 glibc guard page. +// P1 +------------------------+ Thread::stack_base() - Thread::stack_size() +// | |\ +// | HotSpot Guard Pages | - red, yellow and reserved pages +// | |/ +// +------------------------+ JavaThread::stack_reserved_zone_base() +// | |\ +// | Normal Stack | - +// | |/ +// P2 +------------------------+ Thread::stack_base() +// +// Non-Java thread: +// +// Low memory addresses +// +------------------------+ +// | |\ +// | glibc guard page | - usually 1 page +// | |/ +// P1 +------------------------+ Thread::stack_base() - Thread::stack_size() +// | |\ +// | Normal Stack | - +// | |/ +// P2 +------------------------+ Thread::stack_base() +// +// ** P1 (aka bottom) and size (P2 = P1 + size) are the address and stack size returned from +// pthread_attr_getstack() + +static void current_stack_region(address * bottom, size_t * size) { +#ifdef __APPLE__ + pthread_t self = pthread_self(); + void *stacktop = pthread_get_stackaddr_np(self); + *size = pthread_get_stacksize_np(self); + // workaround for OS X 10.9.0 (Mavericks) + // pthread_get_stacksize_np returns 128 pages even though the actual size is 2048 pages + if (pthread_main_np() == 1) { + // At least on Mac OS 10.12 we have observed stack sizes not aligned + // to page boundaries. This can be provoked by e.g. setrlimit() (ulimit -s xxxx in the + // shell). Apparently Mac OS actually rounds upwards to next multiple of page size, + // however, we round downwards here to be on the safe side.
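  // (Editorial illustration, not part of the patch: align_down() clips to the
  // nearest lower multiple of its second argument, so a non-page-aligned size
  // can only shrink, never grow past what is actually mapped. With the 16K
  // pages used on Apple Silicon:
  //
  //   size_t page = 16384;          // getpagesize() on macOS/AArch64
  //   size_t sz   = 8*M + 1;        // a size report that is not page-aligned
  //   sz = align_down(sz, page);    // -> exactly 8*M
  //
  // Rounding up instead could compute a stack bottom below the real end of
  // the mapping, which is why the conservative direction here is down.)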
+ *size = align_down(*size, getpagesize()); + + if ((*size) < (DEFAULT_MAIN_THREAD_STACK_PAGES * (size_t)getpagesize())) { + char kern_osrelease[256]; + size_t kern_osrelease_size = sizeof(kern_osrelease); + int ret = sysctlbyname("kern.osrelease", kern_osrelease, &kern_osrelease_size, NULL, 0); + if (ret == 0) { + // get the major number, atoi will ignore the minor and micro portions of the version string + if (atoi(kern_osrelease) >= OS_X_10_9_0_KERNEL_MAJOR_VERSION) { + *size = (DEFAULT_MAIN_THREAD_STACK_PAGES*getpagesize()); + } + } + } + } + *bottom = (address) stacktop - *size; +#elif defined(__OpenBSD__) + stack_t ss; + int rslt = pthread_stackseg_np(pthread_self(), &ss); + + if (rslt != 0) + fatal("pthread_stackseg_np failed with error = %d", rslt); + + *bottom = (address)((char *)ss.ss_sp - ss.ss_size); + *size = ss.ss_size; +#else + pthread_attr_t attr; + + int rslt = pthread_attr_init(&attr); + + // JVM needs to know exact stack location, abort if it fails + if (rslt != 0) + fatal("pthread_attr_init failed with error = %d", rslt); + + rslt = pthread_attr_get_np(pthread_self(), &attr); + + if (rslt != 0) + fatal("pthread_attr_get_np failed with error = %d", rslt); + + if (pthread_attr_getstackaddr(&attr, (void **)bottom) != 0 || + pthread_attr_getstacksize(&attr, size) != 0) { + fatal("Can not locate current stack attributes!"); + } + + pthread_attr_destroy(&attr); +#endif + assert(os::current_stack_pointer() >= *bottom && + os::current_stack_pointer() < *bottom + *size, "just checking"); +} + +address os::current_stack_base() { + address bottom; + size_t size; + current_stack_region(&bottom, &size); + return (bottom + size); +} + +size_t os::current_stack_size() { + // stack size includes normal stack and HotSpot guard pages + address bottom; + size_t size; + current_stack_region(&bottom, &size); + return size; +} + +///////////////////////////////////////////////////////////////////////////// +// helper functions for fatal error handler + +void os::print_context(outputStream *st, const void *context) { + if (context == NULL) return; + + const ucontext_t *uc = (const ucontext_t*)context; + st->print_cr("Registers:"); + st->print( " x0=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 0]); + st->print(" x1=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 1]); + st->print(" x2=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 2]); + st->print(" x3=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 3]); + st->cr(); + st->print( " x4=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 4]); + st->print(" x5=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 5]); + st->print(" x6=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 6]); + st->print(" x7=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 7]); + st->cr(); + st->print( " x8=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 8]); + st->print(" x9=" INTPTR_FORMAT, (intptr_t)uc->context_x[ 9]); + st->print(" x10=" INTPTR_FORMAT, (intptr_t)uc->context_x[10]); + st->print(" x11=" INTPTR_FORMAT, (intptr_t)uc->context_x[11]); + st->cr(); + st->print( "x12=" INTPTR_FORMAT, (intptr_t)uc->context_x[12]); + st->print(" x13=" INTPTR_FORMAT, (intptr_t)uc->context_x[13]); + st->print(" x14=" INTPTR_FORMAT, (intptr_t)uc->context_x[14]); + st->print(" x15=" INTPTR_FORMAT, (intptr_t)uc->context_x[15]); + st->cr(); + st->print( "x16=" INTPTR_FORMAT, (intptr_t)uc->context_x[16]); + st->print(" x17=" INTPTR_FORMAT, (intptr_t)uc->context_x[17]); + st->print(" x18=" INTPTR_FORMAT, (intptr_t)uc->context_x[18]); + st->print(" x19=" INTPTR_FORMAT, (intptr_t)uc->context_x[19]); + st->cr(); + st->print( "x20=" INTPTR_FORMAT,
(intptr_t)uc->context_x[20]); + st->print(" x21=" INTPTR_FORMAT, (intptr_t)uc->context_x[21]); + st->print(" x22=" INTPTR_FORMAT, (intptr_t)uc->context_x[22]); + st->print(" x23=" INTPTR_FORMAT, (intptr_t)uc->context_x[23]); + st->cr(); + st->print( "x24=" INTPTR_FORMAT, (intptr_t)uc->context_x[24]); + st->print(" x25=" INTPTR_FORMAT, (intptr_t)uc->context_x[25]); + st->print(" x26=" INTPTR_FORMAT, (intptr_t)uc->context_x[26]); + st->print(" x27=" INTPTR_FORMAT, (intptr_t)uc->context_x[27]); + st->cr(); + st->print( "x28=" INTPTR_FORMAT, (intptr_t)uc->context_x[28]); + st->print(" fp=" INTPTR_FORMAT, (intptr_t)uc->context_fp); + st->print(" lr=" INTPTR_FORMAT, (intptr_t)uc->context_lr); + st->print(" sp=" INTPTR_FORMAT, (intptr_t)uc->context_sp); + st->cr(); + st->print( "pc=" INTPTR_FORMAT, (intptr_t)uc->context_pc); + st->print(" cpsr=" INTPTR_FORMAT, (intptr_t)uc->context_cpsr); + st->cr(); + + intptr_t *sp = (intptr_t *)os::Bsd::ucontext_get_sp(uc); + st->print_cr("Top of Stack: (sp=" INTPTR_FORMAT ")", (intptr_t)sp); + print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t)); + st->cr(); + + // Note: it may be unsafe to inspect memory near pc. For example, pc may + // point to garbage if entry point in an nmethod is corrupted. Leave + // this at the end, and hope for the best. + address pc = os::Bsd::ucontext_get_pc(uc); + print_instructions(st, pc, 4/*native instruction size*/); + st->cr(); +} + +void os::print_register_info(outputStream *st, const void *context) { + if (context == NULL) return; + + const ucontext_t *uc = (const ucontext_t*)context; + + st->print_cr("Register to memory mapping:"); + st->cr(); + + // this is horrendously verbose but the layout of the registers in the + // context does not match how we defined our abstract Register set, so + // we can't just iterate through the gregs area + + // this is only for the "general purpose" registers + + st->print(" x0="); print_location(st, uc->context_x[ 0]); + st->print(" x1="); print_location(st, uc->context_x[ 1]); + st->print(" x2="); print_location(st, uc->context_x[ 2]); + st->print(" x3="); print_location(st, uc->context_x[ 3]); + st->print(" x4="); print_location(st, uc->context_x[ 4]); + st->print(" x5="); print_location(st, uc->context_x[ 5]); + st->print(" x6="); print_location(st, uc->context_x[ 6]); + st->print(" x7="); print_location(st, uc->context_x[ 7]); + st->print(" x8="); print_location(st, uc->context_x[ 8]); + st->print(" x9="); print_location(st, uc->context_x[ 9]); + st->print("x10="); print_location(st, uc->context_x[10]); + st->print("x11="); print_location(st, uc->context_x[11]); + st->print("x12="); print_location(st, uc->context_x[12]); + st->print("x13="); print_location(st, uc->context_x[13]); + st->print("x14="); print_location(st, uc->context_x[14]); + st->print("x15="); print_location(st, uc->context_x[15]); + st->print("x16="); print_location(st, uc->context_x[16]); + st->print("x17="); print_location(st, uc->context_x[17]); + st->print("x18="); print_location(st, uc->context_x[18]); + st->print("x19="); print_location(st, uc->context_x[19]); + st->print("x20="); print_location(st, uc->context_x[20]); + st->print("x21="); print_location(st, uc->context_x[21]); + st->print("x22="); print_location(st, uc->context_x[22]); + st->print("x23="); print_location(st, uc->context_x[23]); + st->print("x24="); print_location(st, uc->context_x[24]); + st->print("x25="); print_location(st, uc->context_x[25]); + st->print("x26="); print_location(st, uc->context_x[26]); + 
st->print("x27="); print_location(st, uc->context_x[27]); + st->print("x28="); print_location(st, uc->context_x[28]); + + st->cr(); +} + +void os::setup_fpu() { +} + +#ifndef PRODUCT +void os::verify_stack_alignment() { + assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment"); +} +#endif + +int os::extra_bang_size_in_bytes() { + // AArch64 does not require the additional stack bang. + return 0; +} + +extern "C" { + int SpinPause() { + return 0; + } + + void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { + if (from > to) { + const jshort *end = from + count; + while (from < end) + *(to++) = *(from++); + } + else if (from < to) { + const jshort *end = from; + from += count - 1; + to += count - 1; + while (from >= end) + *(to--) = *(from--); + } + } + void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { + if (from > to) { + const jint *end = from + count; + while (from < end) + *(to++) = *(from++); + } + else if (from < to) { + const jint *end = from; + from += count - 1; + to += count - 1; + while (from >= end) + *(to--) = *(from--); + } + } + void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { + if (from > to) { + const jlong *end = from + count; + while (from < end) + os::atomic_copy64(from++, to++); + } + else if (from < to) { + const jlong *end = from; + from += count - 1; + to += count - 1; + while (from >= end) + os::atomic_copy64(from--, to--); + } + } + + void _Copy_arrayof_conjoint_bytes(const HeapWord* from, + HeapWord* to, + size_t count) { + memmove(to, from, count); + } + void _Copy_arrayof_conjoint_jshorts(const HeapWord* from, + HeapWord* to, + size_t count) { + memmove(to, from, count * 2); + } + void _Copy_arrayof_conjoint_jints(const HeapWord* from, + HeapWord* to, + size_t count) { + memmove(to, from, count * 4); + } + void _Copy_arrayof_conjoint_jlongs(const HeapWord* from, + HeapWord* to, + size_t count) { + memmove(to, from, count * 8); + } +}; diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.hpp new file mode 100644 index 0000000000000..a17c6a2e71939 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.hpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_HPP +#define OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_HPP + + static void setup_fpu(); + + static bool is_allocatable(size_t bytes); + + // Used to register dynamic code cache area with the OS + // Note: Currently only used in 64 bit Windows implementations + static bool register_code_area(char *low, char *high) { return true; } + +private: + + static void current_thread_enable_wx_impl(WXMode mode) { } + +public: + + // Atomically copy 64 bits of data + static void atomic_copy64(const volatile void *src, volatile void *dst) { + *(jlong *) dst = *(const jlong *) src; + } + +#endif // OS_CPU_BSD_AARCH64_OS_BSD_AARCH64_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/prefetch_bsd_aarch64.inline.hpp b/src/hotspot/os_cpu/bsd_aarch64/prefetch_bsd_aarch64.inline.hpp new file mode 100644 index 0000000000000..3b5f31b35b882 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/prefetch_bsd_aarch64.inline.hpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_BSD_AARCH64_PREFETCH_BSD_AARCH64_INLINE_HPP +#define OS_CPU_BSD_AARCH64_PREFETCH_BSD_AARCH64_INLINE_HPP + +#include "runtime/prefetch.hpp" + + +inline void Prefetch::read (void *loc, intx interval) { + if (interval >= 0) + asm("prfm PLDL1KEEP, [%0, %1]" : : "r"(loc), "r"(interval)); +} + +inline void Prefetch::write(void *loc, intx interval) { + if (interval >= 0) + asm("prfm PSTL1KEEP, [%0, %1]" : : "r"(loc), "r"(interval)); +} + +#endif // OS_CPU_BSD_AARCH64_PREFETCH_BSD_AARCH64_INLINE_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.cpp new file mode 100644 index 0000000000000..efdeed96cfd54 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "memory/metaspaceShared.hpp" +#include "runtime/frame.inline.hpp" +#include "runtime/thread.inline.hpp" + +frame JavaThread::pd_last_frame() { + assert(has_last_Java_frame(), "must have last_Java_sp() when suspended"); + vmassert(_anchor.last_Java_pc() != NULL, "not walkable"); + return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc()); +} + +// For Forte Analyzer AsyncGetCallTrace profiling support - thread is +// currently interrupted by SIGPROF +bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, + void* ucontext, bool isInJava) { + + assert(Thread::current() == this, "caller must be current thread"); + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) { + return pd_get_top_frame(fr_addr, ucontext, isInJava); +} + +bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) { + assert(this->is_Java_thread(), "must be JavaThread"); + JavaThread* jt = (JavaThread *)this; + + // If we have a last_Java_frame, then we should use it even if + // isInJava == true. It should be more reliable than ucontext info. + if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) { + *fr_addr = jt->pd_last_frame(); + return true; + } + + // At this point, we don't have a last_Java_frame, so + // we try to glean some information out of the ucontext + // if we were running Java code when SIGPROF came in. + if (isInJava) { + ucontext_t* uc = (ucontext_t*) ucontext; + + intptr_t* ret_fp; + intptr_t* ret_sp; + address addr = os::fetch_frame_from_context(uc, &ret_sp, &ret_fp); + if (addr == NULL || ret_sp == NULL ) { + // ucontext wasn't useful + return false; + } + + if (MetaspaceShared::is_in_trampoline_frame(addr)) { + // In the middle of a trampoline call. Bail out for safety. + // This happens rarely so shouldn't affect profiling. + return false; + } + + frame ret_frame(ret_sp, ret_fp, addr); + if (!ret_frame.safe_for_sender(jt)) { +#ifdef COMPILER2 + frame ret_frame2(ret_sp, NULL, addr); + if (!ret_frame2.safe_for_sender(jt)) { + // nothing else to try if the frame isn't good + return false; + } + ret_frame = ret_frame2; +#else + // nothing else to try if the frame isn't good + return false; +#endif /* COMPILER2 */ + } + *fr_addr = ret_frame; + return true; + } + + // nothing else to try + return false; +} + +void JavaThread::cache_global_variables() { } + diff --git a/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.hpp new file mode 100644 index 0000000000000..83d9a00628ad0 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/thread_bsd_aarch64.hpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_BSD_AARCH64_THREAD_BSD_AARCH64_HPP +#define OS_CPU_BSD_AARCH64_THREAD_BSD_AARCH64_HPP + + private: + void pd_initialize() { + _anchor.clear(); + } + + frame pd_last_frame(); + + public: + void set_base_of_stack_pointer(intptr_t* base_sp) { + } + + static ByteSize last_Java_fp_offset() { + return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_fp_offset(); + } + + intptr_t* base_of_stack_pointer() { + return NULL; + } + void record_base_of_stack_pointer() { + } + + bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, + bool isInJava); + + bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava); +private: + bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava); +public: + + static Thread *aarch64_get_thread_helper() { + return Thread::current(); + } + + // These routines are only used on cpu architectures that + // have separate register stacks (Itanium). + static bool register_stack_overflow() { return false; } + static void enable_register_stack_guard() {} + static void disable_register_stack_guard() {} + +#endif // OS_CPU_BSD_AARCH64_THREAD_BSD_AARCH64_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/vmStructs_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/vmStructs_bsd_aarch64.hpp new file mode 100644 index 0000000000000..1abe32cf16590 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/vmStructs_bsd_aarch64.hpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef OS_CPU_BSD_AARCH64_VMSTRUCTS_BSD_AARCH64_HPP +#define OS_CPU_BSD_AARCH64_VMSTRUCTS_BSD_AARCH64_HPP + +// These are the OS and CPU-specific fields, types and integer +// constants required by the Serviceability Agent. This file is +// referenced by vmStructs.cpp. + +#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \ + \ + /******************************/ \ + /* Threads (NOTE: incomplete) */ \ + /******************************/ \ + nonstatic_field(OSThread, _thread_id, OSThread::thread_id_t) \ + nonstatic_field(OSThread, _unique_thread_id, uint64_t) + + +#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \ + \ + /**********************/ \ + /* Thread IDs */ \ + /**********************/ \ + \ + declare_unsigned_integer_type(OSThread::thread_id_t) + +#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) + +#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant) + +#endif // OS_CPU_BSD_AARCH64_VMSTRUCTS_BSD_AARCH64_HPP diff --git a/src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp new file mode 100644 index 0000000000000..259dbcda96bd8 --- /dev/null +++ b/src/hotspot/os_cpu/bsd_aarch64/vm_version_bsd_aarch64.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" +#include "runtime/os.hpp" +#include "runtime/vm_version.hpp" + + +int VM_Version::get_current_sve_vector_length() { + ShouldNotReachHere(); + return -1; +} + +int VM_Version::set_and_get_current_sve_vector_lenght(int length) { + ShouldNotReachHere(); + return -1; +} + +void VM_Version::get_os_cpu_info() { +} diff --git a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.hpp b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.hpp index bd7ad6344f947..19a62da558536 100644 --- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.hpp +++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.hpp @@ -37,4 +37,10 @@ // Note: Currently only used in 64 bit Windows implementations static bool register_code_area(char *low, char *high) { return true; } +private: + + static void current_thread_enable_wx_impl(WXMode mode) { } + +public: + #endif // OS_CPU_BSD_X86_OS_BSD_X86_HPP diff --git a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.hpp b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.hpp index 511400ae0d2ec..9914eb031a06d 100644 --- a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.hpp +++ b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.hpp @@ -53,4 +53,10 @@ #endif } +private: + + static void current_thread_enable_wx_impl(WXMode mode) { } + +public: + #endif // OS_CPU_BSD_ZERO_OS_BSD_ZERO_HPP diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.hpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.hpp index c341be211850f..a3e935da38692 100644 --- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.hpp +++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.hpp @@ -39,4 +39,10 @@ *(jlong *) dst = *(const jlong *) src; } +private: + + static void current_thread_enable_wx_impl(WXMode mode) { } + +public: + #endif // OS_CPU_LINUX_AARCH64_OS_LINUX_AARCH64_HPP diff --git a/src/hotspot/os_cpu/linux_arm/os_linux_arm.hpp b/src/hotspot/os_cpu/linux_arm/os_linux_arm.hpp index 9c5d629b09511..95087836868a0 100644 --- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.hpp +++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.hpp @@ -70,4 +70,10 @@ int32_t exchange_value, volatile int32_t *dest); +private: + + static void current_thread_enable_wx_impl(WXMode mode) { } + +public: + #endif // OS_CPU_LINUX_ARM_OS_LINUX_ARM_HPP diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.hpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.hpp index 1c108de12a038..6644e727a0b30 100644 --- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.hpp +++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.hpp @@ -32,4 +32,10 @@ // Note: Currently only used in 64 bit Windows implementations static bool register_code_area(char *low, char *high) { return true; } +private: + + static void current_thread_enable_wx_impl(WXMode mode) { } + +public: + #endif // OS_CPU_LINUX_PPC_OS_LINUX_PPC_HPP diff --git a/src/hotspot/os_cpu/linux_s390/os_linux_s390.hpp b/src/hotspot/os_cpu/linux_s390/os_linux_s390.hpp index 35618f4e8f47b..2417f7b17ffac 100644 --- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.hpp +++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.hpp @@ -31,4 +31,10 @@ // Used to register dynamic code cache area with the OS. 
static bool register_code_area(char *low, char *high) { return true; } +private: + + static void current_thread_enable_wx_impl(WXMode mode) { } + +public: + #endif // OS_CPU_LINUX_S390_OS_LINUX_S390_HPP diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.hpp index a60394f9a7398..6a8525c73012b 100644 --- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.hpp +++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.hpp @@ -50,4 +50,10 @@ */ static void workaround_expand_exec_shield_cs_limit(); +private: + + static void current_thread_enable_wx_impl(WXMode mode) { } + +public: + #endif // OS_CPU_LINUX_X86_OS_LINUX_X86_HPP diff --git a/src/hotspot/os_cpu/linux_zero/os_linux_zero.hpp b/src/hotspot/os_cpu/linux_zero/os_linux_zero.hpp index 9d4e080855124..3d170f55cdc9d 100644 --- a/src/hotspot/os_cpu/linux_zero/os_linux_zero.hpp +++ b/src/hotspot/os_cpu/linux_zero/os_linux_zero.hpp @@ -90,4 +90,10 @@ #endif } +private: + + static void current_thread_enable_wx_impl(WXMode mode) { } + +public: + #endif // OS_CPU_LINUX_ZERO_OS_LINUX_ZERO_HPP diff --git a/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp b/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp index 7fdd068219ce8..867ddb230998b 100644 --- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp +++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp @@ -43,4 +43,10 @@ static bool platform_print_native_stack(outputStream* st, const void* context, char *buf, int buf_size); #endif +private: + + static void current_thread_enable_wx_impl(WXMode mode) { } + +public: + #endif // OS_CPU_WINDOWS_X86_OS_WINDOWS_X86_HPP diff --git a/src/hotspot/share/ci/ciUtilities.inline.hpp b/src/hotspot/share/ci/ciUtilities.inline.hpp index 8b752cd812a66..652bf2e400caf 100644 --- a/src/hotspot/share/ci/ciUtilities.inline.hpp +++ b/src/hotspot/share/ci/ciUtilities.inline.hpp @@ -34,6 +34,7 @@ #define VM_ENTRY_MARK \ CompilerThread* thread=CompilerThread::current(); \ ThreadInVMfromNative __tiv(thread); \ + Thread::WXWriteVerifier __wx_write; \ ResetNoHandleMark rnhm; \ HandleMarkCleaner __hm(thread); \ Thread* THREAD = thread; \ @@ -45,6 +46,7 @@ #define VM_QUICK_ENTRY_MARK \ CompilerThread* thread=CompilerThread::current(); \ ThreadInVMfromNative __tiv(thread); \ + Thread::WXWriteVerifier __wx_write; \ /* \ * [TODO] The NoHandleMark line does nothing but declare a function prototype \ * The NoHandleMark constructor is NOT executed.
If the ()'s are \ diff --git a/src/hotspot/share/classfile/classLoader.cpp b/src/hotspot/share/classfile/classLoader.cpp index 7b34ef939c5d4..72b0b727269f8 100644 --- a/src/hotspot/share/classfile/classLoader.cpp +++ b/src/hotspot/share/classfile/classLoader.cpp @@ -290,6 +290,7 @@ u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_ter // enable call to C land JavaThread* thread = JavaThread::current(); ThreadToNativeFromVM ttn(thread); + Thread::WXExecFromWriteSetter wx_exec; // check whether zip archive contains name jint name_len; jzentry* entry = (*FindEntry)(_zip, name, filesize, &name_len); @@ -336,6 +337,7 @@ void ClassPathZipEntry::contents_do(void f(const char* name, void* context), voi JavaThread* thread = JavaThread::current(); HandleMark handle_mark(thread); ThreadToNativeFromVM ttn(thread); + Thread::WXExecFromWriteSetter wx_exec; for (int n = 0; ; n++) { jzentry * ze = ((*GetNextEntry)(_zip, n)); if (ze == NULL) break; @@ -748,6 +750,7 @@ ClassPathEntry* ClassLoader::create_class_path_entry(const char *path, const str { // enable call to C land ThreadToNativeFromVM ttn(thread); + Thread::WXExecFromWriteSetter wx_exec; HandleMark hm(thread); load_zip_library_if_needed(); zip = (*ZipOpen)(canonical_path, &error_msg); @@ -798,6 +801,7 @@ ClassPathZipEntry* ClassLoader::create_class_path_zip_entry(const char *path, bo // enable call to C land JavaThread* thread = JavaThread::current(); ThreadToNativeFromVM ttn(thread); + Thread::WXExecFromWriteSetter wx_exec; HandleMark hm(thread); load_zip_library_if_needed(); zip = (*ZipOpen)(canonical_path, &error_msg); diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp index 15e42914b23cb..142920957c17f 100644 --- a/src/hotspot/share/classfile/javaClasses.cpp +++ b/src/hotspot/share/classfile/javaClasses.cpp @@ -411,6 +411,7 @@ Handle java_lang_String::create_from_platform_dependent_str(const char* str, TRA JavaThread* thread = THREAD->as_Java_thread(); HandleMark hm(thread); ThreadToNativeFromVM ttn(thread); + Thread::WXExecFromWriteSetter wx_exec; js = (_to_java_string_fn)(thread->jni_environment(), str); } @@ -439,6 +440,7 @@ char* java_lang_String::as_platform_dependent_str(Handle java_string, TRAPS) { bool is_copy; HandleMark hm(thread); ThreadToNativeFromVM ttn(thread); + Thread::WXExecFromWriteSetter wx_exec; JNIEnv *env = thread->jni_environment(); native_platform_string = (_to_platform_string_fn)(env, js, &is_copy); assert(is_copy == JNI_TRUE, "is_copy value changed"); diff --git a/src/hotspot/share/classfile/verifier.cpp b/src/hotspot/share/classfile/verifier.cpp index 25511485a6c48..f57f1424a7443 100644 --- a/src/hotspot/share/classfile/verifier.cpp +++ b/src/hotspot/share/classfile/verifier.cpp @@ -314,6 +314,7 @@ Symbol* Verifier::inference_verify( { HandleMark hm(thread); ThreadToNativeFromVM ttn(thread); + Thread::WXExecFromWriteSetter wx_exec; // ThreadToNativeFromVM takes care of changing thread_state, so safepoint // code knows that we have left the VM JNIEnv *env = thread->jni_environment(); diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp index d204b9dc408f6..ba658fabcc0bb 100644 --- a/src/hotspot/share/compiler/compileBroker.cpp +++ b/src/hotspot/share/compiler/compileBroker.cpp @@ -1701,6 +1701,7 @@ bool CompileBroker::init_compiler_runtime() { { // Must switch to native to allocate ci_env ThreadToNativeFromVM ttn(thread); + Thread::WXWriteVerifier wx_write; ciEnv 
ci_env((CompileTask*)NULL); // Cache Jvmti state ci_env.cache_jvmti_state(); @@ -1997,6 +1998,7 @@ void CompileBroker::maybe_block() { tty->print_cr("compiler thread " INTPTR_FORMAT " poll detects block request", p2i(Thread::current())); #endif ThreadInVMfromNative tivfn(JavaThread::current()); + Thread::WXWriteVerifier wx_write; } } @@ -2187,6 +2189,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) { { NoHandleMark nhm; ThreadToNativeFromVM ttn(thread); + Thread::WXWriteVerifier wx_write; ciEnv ci_env(task); if (should_break) { diff --git a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp index be7fa937ed2bb..ca86478a5e2cc 100644 --- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp +++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp @@ -216,7 +216,7 @@ void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_pa "Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page); char* start_addr = page_start(start_page); - os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char))); + os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)), !ExecMem); } void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) { diff --git a/src/hotspot/share/gc/parallel/psCardTable.cpp b/src/hotspot/share/gc/parallel/psCardTable.cpp index 9ad29779ed72b..322802a92c8ad 100644 --- a/src/hotspot/share/gc/parallel/psCardTable.cpp +++ b/src/hotspot/share/gc/parallel/psCardTable.cpp @@ -587,7 +587,8 @@ bool PSCardTable::resize_commit_uncommit(int changed_region, MemRegion(cur_committed.start(), new_start_aligned)); if (!uncommit_region.is_empty()) { if (!os::uncommit_memory((char*)uncommit_region.start(), - uncommit_region.byte_size())) { + uncommit_region.byte_size(), + !ExecMem)) { // If the uncommit fails, ignore it. Let the // committed table resizing go even though the committed // table will overstate the committed space.
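A note on the pattern running through these GC hunks: os::uncommit_memory() now takes an explicit executable flag, and every data-memory caller spells out !ExecMem. The flag matters on macOS/AArch64, where executable mappings must carry the MAP_JIT property, so the os layer has to know which kind of mapping it is giving back. A hedged sketch of the idea, not the actual os_bsd.cpp implementation:

// Illustrative core of an exec-aware uncommit; the real code also has NMT
// bookkeeping and error handling around this.
static bool uncommit_core(char* addr, size_t size, bool exec) {
  int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED;
#ifdef __APPLE__
  if (exec) {
    flags |= MAP_JIT;  // keep the JIT (W^X-managed) property of the region
  }
#endif
  return ::mmap(addr, size, PROT_NONE, flags, -1, 0) != MAP_FAILED;
}

Code-cache paths would pass exec = true; the heap, card-table, and bitmap callers above all pass !ExecMem because they manage plain data pages.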
diff --git a/src/hotspot/share/gc/parallel/psVirtualspace.cpp b/src/hotspot/share/gc/parallel/psVirtualspace.cpp index 473a1f2685451..f25d295093a7c 100644 --- a/src/hotspot/share/gc/parallel/psVirtualspace.cpp +++ b/src/hotspot/share/gc/parallel/psVirtualspace.cpp @@ -113,7 +113,7 @@ bool PSVirtualSpace::shrink_by(size_t bytes) { } char* const base_addr = committed_high_addr() - bytes; - bool result = special() || os::uncommit_memory(base_addr, bytes); + bool result = special() || os::uncommit_memory(base_addr, bytes, !ExecMem); if (result) { _committed_high_addr -= bytes; } diff --git a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp index 9c6ba3867f174..425bcdb26eaa8 100644 --- a/src/hotspot/share/gc/shared/barrierSetNMethod.cpp +++ b/src/hotspot/share/gc/shared/barrierSetNMethod.cpp @@ -48,6 +48,8 @@ bool BarrierSetNMethod::supports_entry_barrier(nmethod* nm) { } int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) { + Thread::WXWriteFromExecSetter wx_write; + address return_address = *return_address_ptr; CodeBlob* cb = CodeCache::find_blob(return_address); assert(cb != NULL, "invariant"); diff --git a/src/hotspot/share/gc/shared/cardTable.cpp b/src/hotspot/share/gc/shared/cardTable.cpp index b2a7118e8aa77..a74070c8d4795 100644 --- a/src/hotspot/share/gc/shared/cardTable.cpp +++ b/src/hotspot/share/gc/shared/cardTable.cpp @@ -254,7 +254,8 @@ void CardTable::resize_covered_region(MemRegion new_region) { cur_committed.end())); if (!uncommit_region.is_empty()) { if (!os::uncommit_memory((char*)uncommit_region.start(), - uncommit_region.byte_size())) { + uncommit_region.byte_size(), + !ExecMem)) { assert(false, "Card table contraction failed"); // The call failed so don't change the end of the // committed region. 
This is better than taking the diff --git a/src/hotspot/share/gc/shared/oopStorage.cpp b/src/hotspot/share/gc/shared/oopStorage.cpp index 4248c9d91b83e..221b870a4ce42 100644 --- a/src/hotspot/share/gc/shared/oopStorage.cpp +++ b/src/hotspot/share/gc/shared/oopStorage.cpp @@ -37,7 +37,7 @@ #include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "runtime/safepoint.hpp" -#include "runtime/stubRoutines.hpp" +#include "runtime/stubRoutines.inline.hpp" #include "runtime/thread.hpp" #include "services/memTracker.hpp" #include "utilities/align.hpp" diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index e1449b45199c4..7af54952243cb 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1350,7 +1350,7 @@ void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_sta } void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() { - if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) { + if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), !ExecMem)) { log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration"); } } @@ -2867,7 +2867,7 @@ bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) { size_t slice = r->index() / _bitmap_regions_per_slice; size_t off = _bitmap_bytes_per_slice * slice; size_t len = _bitmap_bytes_per_slice; - if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) { + if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len, !ExecMem)) { return false; } return true; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index 2dd5f161d1df4..eba696a98ad78 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -628,7 +628,7 @@ void ShenandoahHeapRegion::do_commit() { void ShenandoahHeapRegion::do_uncommit() { ShenandoahHeap* heap = ShenandoahHeap::heap(); - if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) { + if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes, !ExecMem)) { report_java_out_of_memory("Unable to uncommit region"); } if (!heap->uncommit_bitmap_slice(this)) { diff --git a/src/hotspot/share/interpreter/oopMapCache.cpp b/src/hotspot/share/interpreter/oopMapCache.cpp index 5ba1500257f83..fcadc4457ae89 100644 --- a/src/hotspot/share/interpreter/oopMapCache.cpp +++ b/src/hotspot/share/interpreter/oopMapCache.cpp @@ -242,6 +242,8 @@ class MaskFillerForNative: public NativeSignatureIterator { } public: + void pass_byte() { /* ignore */ } + void pass_short() { /* ignore */ } void pass_int() { /* ignore */ } void pass_long() { /* ignore */ } void pass_float() { /* ignore */ } diff --git a/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp b/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp index 4dc9fba7ae5b9..2bca6f1ace850 100644 --- a/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp +++ b/src/hotspot/share/jfr/instrumentation/jfrJvmtiAgent.cpp @@ -84,6 +84,7 @@ extern "C" void JNICALL jfr_on_class_file_load_hook(jvmtiEnv *jvmti_env, } JavaThread* jt = JavaThread::thread_from_jni_environment(jni_env); 
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(jt)); + Thread::WXWriteFromExecSetter wx_write; ThreadInVMfromNative tvmfn(jt); JfrUpcalls::on_retransform(JfrTraceId::load_raw(class_being_redefined), class_being_redefined, @@ -231,6 +232,7 @@ JfrJvmtiAgent::~JfrJvmtiAgent() { DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt)); if (jfr_jvmti_env != NULL) { ThreadToNativeFromVM transition(jt); + Thread::WXExecFromWriteSetter wx_exec; update_class_file_load_hook_event(JVMTI_DISABLE); unregister_callbacks(jt); jfr_jvmti_env->DisposeEnvironment(); @@ -242,6 +244,7 @@ static bool initialize(JavaThread* jt) { assert(jt != NULL, "invariant"); DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt)); ThreadToNativeFromVM transition(jt); + Thread::WXExecFromWriteSetter wx_exec; if (create_jvmti_env(jt) != JNI_OK) { assert(jfr_jvmti_env == NULL, "invariant"); return false; diff --git a/src/hotspot/share/jvmci/jniAccessMark.inline.hpp b/src/hotspot/share/jvmci/jniAccessMark.inline.hpp index 9e691313c998e..f9b8e8960e0bd 100644 --- a/src/hotspot/share/jvmci/jniAccessMark.inline.hpp +++ b/src/hotspot/share/jvmci/jniAccessMark.inline.hpp @@ -36,11 +36,12 @@ class JNIAccessMark : public StackObj { private: ThreadToNativeFromVM _ttnfv; + Thread::WXExecFromWriteSetter _wx_exec; HandleMark _hm; JNIEnv* _env; public: inline JNIAccessMark(JVMCIEnv* jvmci_env, JavaThread* thread=JavaThread::current()) : - _ttnfv(thread), _hm(thread) { + _ttnfv(thread), _wx_exec(), _hm(thread) { _env = jvmci_env->_env; } JNIEnv* env() const { return _env; } diff --git a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp index cd12abb9226c7..b061d9fb341c5 100644 --- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp +++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp @@ -928,6 +928,7 @@ JVMCI::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer, bo if (SafepointMechanism::should_process(thread)) { // this is a hacky way to force a safepoint check but nothing else was jumping out at me. ThreadToNativeFromVM ttnfv(thread); + Thread::WXWriteVerifier wx_write; } } diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp index 2306e63fa3370..1d1d42ef791cb 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp @@ -122,6 +122,7 @@ Handle JavaArgumentUnboxer::next_arg(BasicType expectedType) { // Bring the JVMCI compiler thread into the VM state.
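// (Editorial note, not part of the patch: the invariant these hunks maintain
// is that a thread's JIT-allocated code memory is executable (WXExec) while
// the thread runs Java or native code, and writable (WXWrite) while VM code
// may patch it. The RAII setters flip the per-thread mode -- on macOS/AArch64
// presumably via pthread_jit_write_protect_np() -- and restore the previous
// mode on scope exit, so they pair naturally with the state transitions:
//
//   {
//     ThreadToNativeFromVM ttnfv(thread);     // VM -> native
//     Thread::WXExecFromWriteSetter wx_exec;  // WXWrite -> WXExec for scope
//     ...call out through JNI...
//   }                                         // both undone here
//
// while the WX*Verifier variants only assert that the expected mode is
// already in effect.)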
#define JVMCI_VM_ENTRY_MARK \ + Thread::WXWriteFromExecSetter __wx_write; \ ThreadInVMfromNative __tiv(thread); \ ResetNoHandleMark rnhm; \ HandleMarkCleaner __hm(thread); \ diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp index eadd0e60a7da7..d9fafb9ea8cf6 100644 --- a/src/hotspot/share/jvmci/jvmciRuntime.cpp +++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp @@ -861,6 +861,7 @@ void JVMCIRuntime::init_JavaVM_info(jlongArray info, JVMCI_TRAPS) { #define JAVAVM_CALL_BLOCK \ guarantee(thread != NULL && _shared_library_javavm != NULL, "npe"); \ ThreadToNativeFromVM ttnfv(thread); \ + Thread::WXExecFromWriteSetter wx_exec; \ JavaVM* javavm = (JavaVM*) _shared_library_javavm; jint JVMCIRuntime::AttachCurrentThread(JavaThread* thread, void **penv, void *args) { @@ -1021,6 +1022,7 @@ JVM_ENTRY_NO_ENV(void, JVM_RegisterJVMCINatives(JNIEnv *env, jclass c2vmClass)) ResourceMark rm(thread); HandleMark hm(thread); ThreadToNativeFromVM trans(thread); + Thread::WXExecFromWriteSetter wx_exec; // Ensure _non_oop_bits is initialized Universe::non_oop_word(); diff --git a/src/hotspot/share/memory/virtualspace.cpp b/src/hotspot/share/memory/virtualspace.cpp index c67e9c1c78f45..881d97f118598 100644 --- a/src/hotspot/share/memory/virtualspace.cpp +++ b/src/hotspot/share/memory/virtualspace.cpp @@ -194,7 +194,7 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large, base = NULL; } } else { - base = os::reserve_memory_with_fd(size, _fd_for_heap); + base = os::reserve_memory_with_fd(size, _fd_for_heap, _executable); } if (base == NULL) return; @@ -982,7 +982,7 @@ void VirtualSpace::shrink_by(size_t size) { assert(middle_high_boundary() <= aligned_upper_new_high && aligned_upper_new_high + upper_needs <= upper_high_boundary(), "must not shrink beyond region"); - if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) { + if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) { debug_only(warning("os::uncommit_memory failed")); return; } else { @@ -993,7 +993,7 @@ void VirtualSpace::shrink_by(size_t size) { assert(lower_high_boundary() <= aligned_middle_new_high && aligned_middle_new_high + middle_needs <= middle_high_boundary(), "must not shrink beyond region"); - if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) { + if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) { debug_only(warning("os::uncommit_memory failed")); return; } else { @@ -1004,7 +1004,7 @@ void VirtualSpace::shrink_by(size_t size) { assert(low_boundary() <= aligned_lower_new_high && aligned_lower_new_high + lower_needs <= lower_high_boundary(), "must not shrink beyond region"); - if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) { + if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) { debug_only(warning("os::uncommit_memory failed")); return; } else { diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp index 288e80ca8e1e9..5c30211ad247d 100644 --- a/src/hotspot/share/opto/runtime.cpp +++ b/src/hotspot/share/opto/runtime.cpp @@ -1480,6 +1480,8 @@ address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address r } #endif + Thread::WXWriteFromExecSetter wx_write; + thread->set_vm_result(exception); // Frame not compiled (handles deoptimization blob) return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc); diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp index 
bea5f6d54721e..445fd0c30dbe4 100644 --- a/src/hotspot/share/prims/jni.cpp +++ b/src/hotspot/share/prims/jni.cpp @@ -3803,6 +3803,7 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) { // Since this is not a JVM_ENTRY we have to set the thread state manually before leaving. ThreadStateTransition::transition(thread, _thread_in_vm, _thread_in_native); + Thread::enable_wx_from_write(WXExec); } else { // If create_vm exits because of a pending exception, exit with that // exception. In the future when we figure out how to reclaim memory, @@ -3898,6 +3899,7 @@ static jint JNICALL jni_DestroyJavaVM_inner(JavaVM *vm) { // Since this is not a JVM_ENTRY we have to set the thread state manually before entering. JavaThread* thread = JavaThread::current(); ThreadStateTransition::transition_from_native(thread, _thread_in_vm); + Thread::enable_wx_from_exec(WXWrite); if (Threads::destroy_vm()) { // Should not change thread state, VM is gone vm_created = 0; @@ -3958,6 +3960,7 @@ static jint attach_current_thread(JavaVM *vm, void **penv, void *_args, bool dae thread->record_stack_base_and_size(); thread->register_thread_stack_with_NMT(); thread->initialize_thread_current(); + thread->init_wx(); if (!os::create_attached_thread(thread)) { thread->smr_delete(); @@ -4031,6 +4034,7 @@ static jint attach_current_thread(JavaVM *vm, void **penv, void *_args, bool dae // needed. ThreadStateTransition::transition(thread, _thread_in_vm, _thread_in_native); + Thread::enable_wx_from_write(WXExec); // Perform any platform dependent FPU setup os::setup_fpu(); @@ -4088,6 +4092,7 @@ jint JNICALL jni_DetachCurrentThread(JavaVM *vm) { // Safepoint support. Have to do call-back to safepoint code, if in the // middle of a safepoint operation ThreadStateTransition::transition_from_native(thread, _thread_in_vm); + Thread::enable_wx_from_exec(WXWrite); // XXX: Note that JavaThread::exit() call below removes the guards on the // stack pages set up via enable_stack_{red,yellow}_zone() calls diff --git a/src/hotspot/share/prims/jniCheck.cpp b/src/hotspot/share/prims/jniCheck.cpp index fd9e2e3bab734..10a222954e296 100644 --- a/src/hotspot/share/prims/jniCheck.cpp +++ b/src/hotspot/share/prims/jniCheck.cpp @@ -64,6 +64,7 @@ #define IN_VM(source_code) { \ { \ + Thread::WXWriteFromExecSetter __wx_write; \ ThreadInVMfromNative __tiv(thr); \ source_code \ } \ diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp index c9dd3c34674d2..356eaec21e9da 100644 --- a/src/hotspot/share/prims/jvm.cpp +++ b/src/hotspot/share/prims/jvm.cpp @@ -3561,6 +3561,7 @@ JVM_ENTRY_NO_ENV(void*, JVM_LoadLibrary(const char* name)) void *load_result; { ThreadToNativeFromVM ttnfvm(thread); + Thread::WXWriteVerifier wx_write; load_result = os::dll_load(name, ebuf, sizeof ebuf); } if (load_result == NULL) { diff --git a/src/hotspot/share/prims/jvmtiEnter.xsl b/src/hotspot/share/prims/jvmtiEnter.xsl index 2ed29d5cb6fd5..714dbad295e6b 100644 --- a/src/hotspot/share/prims/jvmtiEnter.xsl +++ b/src/hotspot/share/prims/jvmtiEnter.xsl @@ -433,6 +433,8 @@ struct jvmtiInterface_1_ jvmti JavaThread* current_thread = this_thread->as_Java_thread(); + Thread::WXWriteFromExecSetter __wx_write; + ThreadInVMfromNative __tiv(current_thread); VM_ENTRY_BASE(jvmtiError, diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp index 139788f902f26..f07004f2f782f 100644 --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -176,6 +176,7 @@ 
JvmtiEnv::GetThreadLocalStorage(jthread thread, void** data_ptr) { // from native so as to resolve the jthread. ThreadInVMfromNative __tiv(current_thread); + Thread::WXExecVerifier __wx_exec; VM_ENTRY_BASE(jvmtiError, JvmtiEnv::GetThreadLocalStorage , current_thread) debug_only(VMNativeEntryWrapper __vew;) diff --git a/src/hotspot/share/prims/jvmtiExport.cpp b/src/hotspot/share/prims/jvmtiExport.cpp index bfe5d574a7cc8..178dee350643b 100644 --- a/src/hotspot/share/prims/jvmtiExport.cpp +++ b/src/hotspot/share/prims/jvmtiExport.cpp @@ -87,12 +87,14 @@ class JvmtiJavaThreadEventTransition : StackObj { private: ResourceMark _rm; ThreadToNativeFromVM _transition; + Thread::WXExecFromWriteSetter _wx_exec; HandleMark _hm; public: JvmtiJavaThreadEventTransition(JavaThread *thread) : _rm(), _transition(thread), + _wx_exec(), _hm(thread) {}; }; @@ -102,11 +104,12 @@ class JvmtiThreadEventTransition : StackObj { private: ResourceMark _rm; HandleMark _hm; + Thread::WXExecFromWriteSetter _wx_exec; JavaThreadState _saved_state; JavaThread *_jthread; public: - JvmtiThreadEventTransition(Thread *thread) : _rm(), _hm(thread) { + JvmtiThreadEventTransition(Thread *thread) : _rm(), _hm(thread), _wx_exec() { if (thread->is_Java_thread()) { _jthread = thread->as_Java_thread(); _saved_state = _jthread->thread_state(); @@ -396,6 +399,7 @@ JvmtiExport::get_jvmti_interface(JavaVM *jvm, void **penv, jint version) { JavaThread* current_thread = JavaThread::current(); // transition code: native to VM ThreadInVMfromNative __tiv(current_thread); + Thread::WXExecVerifier __wx_exec; VM_ENTRY_BASE(jvmtiEnv*, JvmtiExport::get_jvmti_interface, current_thread) debug_only(VMNativeEntryWrapper __vew;) diff --git a/src/hotspot/share/prims/jvmtiImpl.cpp b/src/hotspot/share/prims/jvmtiImpl.cpp index 68f589ac3d27f..70973f6412db5 100644 --- a/src/hotspot/share/prims/jvmtiImpl.cpp +++ b/src/hotspot/share/prims/jvmtiImpl.cpp @@ -82,6 +82,7 @@ JvmtiAgentThread::start_function_wrapper(JavaThread *thread, TRAPS) { void JvmtiAgentThread::call_start_function() { ThreadToNativeFromVM transition(this); + Thread::WXExecFromWriteSetter wx_exec; _start_fn(_env->jvmti_external(), jni_environment(), (void*)_start_arg); } diff --git a/src/hotspot/share/prims/jvmtiRawMonitor.cpp b/src/hotspot/share/prims/jvmtiRawMonitor.cpp index 06efea26c5fc5..f5992080a50a1 100644 --- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp +++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp @@ -237,6 +237,7 @@ int JvmtiRawMonitor::simple_wait(Thread* self, jlong millis) { JavaThread* jt = self->as_Java_thread(); // Transition to VM so we can check interrupt state ThreadInVMfromNative tivm(jt); + Thread::WXExecVerifier wx_exec; if (jt->is_interrupted(true)) { ret = M_INTERRUPTED; } else { @@ -424,6 +425,7 @@ int JvmtiRawMonitor::raw_wait(jlong millis, Thread* self) { simple_exit(jt); { ThreadInVMfromNative tivm(jt); + Thread::WXExecVerifier wx_exec; { ThreadBlockInVM tbivm(jt); jt->java_suspend_self(); diff --git a/src/hotspot/share/prims/methodHandles.cpp b/src/hotspot/share/prims/methodHandles.cpp index 0fe64b027af26..a847779b31acc 100644 --- a/src/hotspot/share/prims/methodHandles.cpp +++ b/src/hotspot/share/prims/methodHandles.cpp @@ -1573,6 +1573,7 @@ JVM_ENTRY(void, JVM_RegisterMethodHandleMethods(JNIEnv *env, jclass MHN_class)) { ThreadToNativeFromVM ttnfv(thread); + Thread::WXExecFromWriteSetter wx_exec; int status = env->RegisterNatives(MHN_class, MHN_methods, sizeof(MHN_methods)/sizeof(JNINativeMethod)); guarantee(status == JNI_OK && !env->ExceptionOccurred(), diff 
diff --git a/src/hotspot/share/prims/nativeLookup.cpp b/src/hotspot/share/prims/nativeLookup.cpp
index 9e3e96f295319..f48601593da57 100644
--- a/src/hotspot/share/prims/nativeLookup.cpp
+++ b/src/hotspot/share/prims/nativeLookup.cpp
@@ -289,6 +289,7 @@ address NativeLookup::lookup_critical_entry(const methodHandle& method) {
   // dll handling requires I/O. Don't do that while in _thread_in_vm (safepoint may get requested).
   ThreadToNativeFromVM thread_in_native(JavaThread::current());
+  Thread::WXWriteVerifier wx_write;

   void* dll = dll_load(method);
   address entry = NULL;
diff --git a/src/hotspot/share/prims/perf.cpp b/src/hotspot/share/prims/perf.cpp
index 895c45200cdf7..84517a8a06cab 100644
--- a/src/hotspot/share/prims/perf.cpp
+++ b/src/hotspot/share/prims/perf.cpp
@@ -54,6 +54,8 @@ static char* jstr_to_utf(JNIEnv *env, jstring str, TRAPS) {
     //throw_new(env,"NullPointerException");
   }

+  Thread::WXExecFromWriteSetter wx_exec;
+
   int len = env->GetStringUTFLength(str);
   int unicode_len = env->GetStringLength(str);

@@ -91,6 +93,7 @@ PERF_ENTRY(jobject, Perf_Attach(JNIEnv *env, jobject unused, jstring user, int v
   {
     ThreadToNativeFromVM ttnfv(thread);
+    Thread::WXExecFromWriteSetter wx_exec;
     return env->NewDirectByteBuffer(address, (jlong)capacity);
   }

@@ -111,6 +114,7 @@ PERF_ENTRY(void, Perf_Detach(JNIEnv *env, jobject unused, jobject buffer))
   // get buffer address and capacity
   {
     ThreadToNativeFromVM ttnfv(thread);
+    Thread::WXExecFromWriteSetter wx_exec;
     address = env->GetDirectBufferAddress(buffer);
     capacity = env->GetDirectBufferCapacity(buffer);
   }
@@ -175,6 +179,7 @@ PERF_ENTRY(jobject, Perf_CreateLong(JNIEnv *env, jobject perf, jstring name,
   {
     ThreadToNativeFromVM ttnfv(thread);
+    Thread::WXExecFromWriteSetter wx_exec;
     return env->NewDirectByteBuffer(lp, sizeof(jlong));
   }

@@ -217,6 +222,8 @@ PERF_ENTRY(jobject, Perf_CreateByteArray(JNIEnv *env, jobject perf,
   name_utf = jstr_to_utf(env, name, CHECK_NULL);

+  Thread::WXExecFromWriteSetter wx_exec;
+
   value_length = env->GetArrayLength(value);

   value_local = NEW_RESOURCE_ARRAY(jbyte, value_length + 1);
@@ -258,6 +265,7 @@ PERF_ENTRY(jobject, Perf_CreateByteArray(JNIEnv *env, jobject perf,
   {
     ThreadToNativeFromVM ttnfv(thread);
+    Thread::WXExecFromWriteSetter wx_exec;
     return env->NewDirectByteBuffer(cp, maxlength+1);
   }

@@ -320,6 +328,7 @@ JVM_ENTRY(void, JVM_RegisterPerfMethods(JNIEnv *env, jclass perfclass))
   PerfWrapper("JVM_RegisterPerfMethods");
   {
     ThreadToNativeFromVM ttnfv(thread);
+    Thread::WXExecFromWriteSetter wx_exec;
     int ok = env->RegisterNatives(perfclass, perfmethods, sizeof(perfmethods)/sizeof(JNINativeMethod));
     guarantee(ok == 0, "register perf natives");
   }
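Note the difference between the two helper kinds used so far: the ...Setter classes actually flip the thread's W^X mode and restore it on scope exit, while the ...Verifier classes only assert, in debug builds, that the thread is already in the expected mode. nativeLookup.cpp above uses Thread::WXWriteVerifier because dll_load is plain data-touching I/O: the thread goes to native state but deliberately stays writable. A compressed view of the two shapes (sketch; the real definitions appear in the thread.hpp hunk further down in this patch):

// Shape of the two helper kinds -- illustration only.
class WXExecFromWriteSetter {        // transition + restore
public:
  WXExecFromWriteSetter()  { /* assert mode == WXWrite, then switch to WXExec */ }
  ~WXExecFromWriteSetter() { /* assert mode == WXExec, then switch to WXWrite */ }
};

class WXWriteVerifier {              // assert-only, no mode change
public:
  WXWriteVerifier() { /* debug builds: assert current mode == WXWrite */ }
};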
diff --git a/src/hotspot/share/prims/unsafe.cpp b/src/hotspot/share/prims/unsafe.cpp
index fc8ea88313061..60e7a706d6bd1 100644
--- a/src/hotspot/share/prims/unsafe.cpp
+++ b/src/hotspot/share/prims/unsafe.cpp
@@ -402,6 +402,7 @@ UNSAFE_ENTRY(void, Unsafe_CopyMemory0(JNIEnv *env, jobject unsafe, jobject srcOb
   void* dst = index_oop_from_field_offset_long(dstp, dstOffset);
   {
     GuardUnsafeAccess guard(thread);
+    Thread::WXExecFromWriteSetter wx_exec;
     if (StubRoutines::unsafe_arraycopy() != NULL) {
       StubRoutines::UnsafeArrayCopy_stub()(src, dst, sz);
     } else {
@@ -715,6 +716,7 @@ static jclass Unsafe_DefineClass_impl(JNIEnv *env, jstring name, jbyteArray data

 UNSAFE_ENTRY(jclass, Unsafe_DefineClass0(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length, jobject loader, jobject pd)) {
   ThreadToNativeFromVM ttnfv(thread);
+  Thread::WXExecFromWriteSetter wx_exec;

   return Unsafe_DefineClass_impl(env, name, data, offset, length, loader, pd);
 } UNSAFE_END

@@ -900,6 +902,7 @@ UNSAFE_ENTRY(jclass, Unsafe_DefineAnonymousClass0(JNIEnv *env, jobject unsafe, j

 UNSAFE_ENTRY(void, Unsafe_ThrowException(JNIEnv *env, jobject unsafe, jthrowable thr)) {
   ThreadToNativeFromVM ttnfv(thread);
+  Thread::WXExecFromWriteSetter wx_exec;

   env->Throw(thr);
 } UNSAFE_END

@@ -1155,6 +1158,7 @@ static JNINativeMethod jdk_internal_misc_Unsafe_methods[] = {

 JVM_ENTRY(void, JVM_RegisterJDKInternalMiscUnsafeMethods(JNIEnv *env, jclass unsafeclass)) {
   ThreadToNativeFromVM ttnfv(thread);
+  Thread::WXExecFromWriteSetter wx_exec;

   int ok = env->RegisterNatives(unsafeclass, jdk_internal_misc_Unsafe_methods, sizeof(jdk_internal_misc_Unsafe_methods)/sizeof(JNINativeMethod));
   guarantee(ok == 0, "register jdk.internal.misc.Unsafe natives");
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
index 034a60aa675a4..2a6cae1deeb81 100644
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -774,7 +774,7 @@ WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size
 WB_END

 WB_ENTRY(void, WB_NMTUncommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
-  os::uncommit_memory((char *)(uintptr_t)addr, size);
+  os::uncommit_memory((char *)(uintptr_t)addr, size, !ExecMem);
 WB_END

 WB_ENTRY(void, WB_NMTReleaseMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
@@ -837,6 +837,7 @@ WB_END
 static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
   assert(method != NULL, "method should not be null");
   ThreadToNativeFromVM ttn(thread);
+  Thread::WXExecFromWriteSetter wx_exec;
   return env->FromReflectedMethod(method);
 }

@@ -1212,6 +1213,7 @@ static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value) {
     return false;
   }
   ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+  Thread::WXExecFromWriteSetter wx_exec;
   const char* flag_name = env->GetStringUTFChars(name, NULL);
   CHECK_JNI_EXCEPTION_(env, false);
   const JVMFlag* flag = JVMFlag::find_declared_flag(flag_name);
@@ -1226,6 +1228,7 @@ static bool SetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value) {
     return false;
   }
   ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+  Thread::WXExecFromWriteSetter wx_exec;
   const char* flag_name = env->GetStringUTFChars(name, NULL);
   CHECK_JNI_EXCEPTION_(env, false);
   JVMFlag* flag = JVMFlag::find_flag(flag_name);
@@ -1266,6 +1269,7 @@ static jobject doubleBox(JavaThread* thread, JNIEnv* env, jdouble value) {

 static const JVMFlag* getVMFlag(JavaThread* thread, JNIEnv* env, jstring name) {
   ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+  Thread::WXExecFromWriteSetter wx_exec;
   const char* flag_name = env->GetStringUTFChars(name, NULL);
   CHECK_JNI_EXCEPTION_(env, NULL);
   const JVMFlag* result = JVMFlag::find_declared_flag(flag_name);
@@ -1287,6 +1291,7 @@ WB_ENTRY(jobject, WB_GetBooleanVMFlag(JNIEnv* env, jobject o, jstring name))
   bool result;
   if (GetVMFlag (thread, env, name, &result)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    Thread::WXExecFromWriteSetter wx_exec;
     return booleanBox(thread, env, result);
   }
   return NULL;
@@ -1296,6 +1301,7 @@ WB_ENTRY(jobject, WB_GetIntVMFlag(JNIEnv* env, jobject o, jstring name))
   int result;
   if (GetVMFlag (thread, env, name, &result)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    Thread::WXExecFromWriteSetter wx_exec;
     return longBox(thread, env, result);
   }
   return NULL;
@@ -1305,6 +1311,7 @@ WB_ENTRY(jobject, WB_GetUintVMFlag(JNIEnv* env, jobject o, jstring name))
   uint result;
   if (GetVMFlag (thread, env, name, &result)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    Thread::WXExecFromWriteSetter wx_exec;
     return longBox(thread, env, result);
   }
   return NULL;
@@ -1314,6 +1321,7 @@ WB_ENTRY(jobject, WB_GetIntxVMFlag(JNIEnv* env, jobject o, jstring name))
   intx result;
   if (GetVMFlag (thread, env, name, &result)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    Thread::WXExecFromWriteSetter wx_exec;
     return longBox(thread, env, result);
   }
   return NULL;
@@ -1323,6 +1331,7 @@ WB_ENTRY(jobject, WB_GetUintxVMFlag(JNIEnv* env, jobject o, jstring name))
   uintx result;
   if (GetVMFlag (thread, env, name, &result)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    Thread::WXExecFromWriteSetter wx_exec;
     return longBox(thread, env, result);
   }
   return NULL;
@@ -1332,6 +1341,7 @@ WB_ENTRY(jobject, WB_GetUint64VMFlag(JNIEnv* env, jobject o, jstring name))
   uint64_t result;
   if (GetVMFlag (thread, env, name, &result)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    Thread::WXExecFromWriteSetter wx_exec;
     return longBox(thread, env, result);
   }
   return NULL;
@@ -1341,6 +1351,7 @@ WB_ENTRY(jobject, WB_GetSizeTVMFlag(JNIEnv* env, jobject o, jstring name))
   size_t result;
   if (GetVMFlag (thread, env, name, &result)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    Thread::WXExecFromWriteSetter wx_exec;
     return longBox(thread, env, result);
   }
   return NULL;
@@ -1350,6 +1361,7 @@ WB_ENTRY(jobject, WB_GetDoubleVMFlag(JNIEnv* env, jobject o, jstring name))
   double result;
   if (GetVMFlag (thread, env, name, &result)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    Thread::WXExecFromWriteSetter wx_exec;
     return doubleBox(thread, env, result);
   }
   return NULL;
@@ -1359,6 +1371,7 @@ WB_ENTRY(jstring, WB_GetStringVMFlag(JNIEnv* env, jobject o, jstring name))
   ccstr ccstrResult;
   if (GetVMFlag (thread, env, name, &ccstrResult)) {
     ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+    Thread::WXExecFromWriteSetter wx_exec;
     jstring result = env->NewStringUTF(ccstrResult);
     CHECK_JNI_EXCEPTION_(env, NULL);
     return result;
@@ -1408,6 +1421,7 @@ WB_END

 WB_ENTRY(void, WB_SetStringVMFlag(JNIEnv* env, jobject o, jstring name, jstring value))
   ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
+  Thread::WXExecFromWriteSetter wx_exec;
   const char* ccstrValue;
   if (value == NULL) {
     ccstrValue = NULL;
@@ -1419,6 +1433,7 @@ WB_ENTRY(void, WB_SetStringVMFlag(JNIEnv* env, jobject o, jstring name, jstring
   ccstr ccstrResult = ccstrValue;
   bool needFree;
   {
+    Thread::WXWriteFromExecSetter wx_write;
     ThreadInVMfromNative ttvfn(thread); // back to VM
     needFree = SetVMFlag (thread, env, name, &ccstrResult);
   }
@@ -1484,6 +1499,7 @@ WB_END

 WB_ENTRY(jstring, WB_GetCPUFeatures(JNIEnv* env, jobject o))
   const char* features = VM_Version::features_string();
   ThreadToNativeFromVM ttn(thread);
+  Thread::WXExecFromWriteSetter wx_exec;
   jstring features_string = env->NewStringUTF(features);
   CHECK_JNI_EXCEPTION_(env, NULL);
@@ -1556,6 +1572,7 @@ WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jbo
   int insts_size = comp_level == CompLevel_aot ? code->code_end() - code->code_begin() : code->insts_size();
   ThreadToNativeFromVM ttn(thread);
+  Thread::WXExecFromWriteSetter wx_exec;
   jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
   CHECK_JNI_EXCEPTION_(env, NULL);
   result = env->NewObjectArray(5, clazz, NULL);
@@ -1639,6 +1656,7 @@ WB_ENTRY(jobjectArray, WB_GetCodeHeapEntries(JNIEnv* env, jobject o, jint blob_t
     }
   }
   ThreadToNativeFromVM ttn(thread);
+  Thread::WXExecFromWriteSetter wx_exec;
   jobjectArray result = NULL;
   jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
   CHECK_JNI_EXCEPTION_(env, NULL);
@@ -1669,6 +1687,7 @@ WB_ENTRY(jobjectArray, WB_GetCodeBlob(JNIEnv* env, jobject o, jlong addr))
             "WB_GetCodeBlob: addr is null");
   }
   ThreadToNativeFromVM ttn(thread);
+  Thread::WXExecFromWriteSetter wx_exec;
   CodeBlobStub stub((CodeBlob*) addr);
   return codeBlob2objectArray(thread, env, &stub);
 WB_END
@@ -1839,6 +1858,7 @@ static bool GetMethodOption(JavaThread* thread, JNIEnv* env, jobject method, jst
   methodHandle mh(thread, Method::checked_resolve_jmethod_id(jmid));
   // can't be in VM when we call JNI
   ThreadToNativeFromVM ttnfv(thread);
+  Thread::WXExecFromWriteSetter wx_exec;
   const char* flag_name = env->GetStringUTFChars(name, NULL);
   CHECK_JNI_EXCEPTION_(env, false);
   bool result = CompilerOracle::has_option_value(mh, flag_name, *value);
@@ -1851,6 +1871,7 @@ WB_ENTRY(jobject, WB_GetMethodBooleaneOption(JNIEnv* env, jobject wb, jobject me
   bool result;
   if (GetMethodOption (thread, env, method, name, &result)) {
     // can't be in VM when we call JNI
     ThreadToNativeFromVM ttnfv(thread);
+    Thread::WXExecFromWriteSetter wx_exec;
     return booleanBox(thread, env, result);
   }
   return NULL;
@@ -1861,6 +1882,7 @@ WB_ENTRY(jobject, WB_GetMethodIntxOption(JNIEnv* env, jobject wb, jobject method
   jlong result;
   if (GetMethodOption (thread, env, method, name, &result)) {
     // can't be in VM when we call JNI
     ThreadToNativeFromVM ttnfv(thread);
+    Thread::WXExecFromWriteSetter wx_exec;
     return longBox(thread, env, result);
   }
   return NULL;
@@ -1871,6 +1893,7 @@ WB_ENTRY(jobject, WB_GetMethodUintxOption(JNIEnv* env, jobject wb, jobject metho
   jlong result;
   if (GetMethodOption (thread, env, method, name, &result)) {
     // can't be in VM when we call JNI
     ThreadToNativeFromVM ttnfv(thread);
+    Thread::WXExecFromWriteSetter wx_exec;
     return longBox(thread, env, result);
   }
   return NULL;
@@ -1881,6 +1904,7 @@ WB_ENTRY(jobject, WB_GetMethodDoubleOption(JNIEnv* env, jobject wb, jobject meth
   jdouble result;
   if (GetMethodOption (thread, env, method, name, &result)) {
     // can't be in VM when we call JNI
     ThreadToNativeFromVM ttnfv(thread);
+    Thread::WXExecFromWriteSetter wx_exec;
     return doubleBox(thread, env, result);
   }
   return NULL;
@@ -1891,6 +1915,7 @@ WB_ENTRY(jobject, WB_GetMethodStringOption(JNIEnv* env, jobject wb, jobject meth
   ccstr ccstrResult;
   if (GetMethodOption (thread, env, method, name, &ccstrResult)) {
     // can't be in VM when we call JNI
     ThreadToNativeFromVM ttnfv(thread);
+    Thread::WXExecFromWriteSetter wx_exec;
     jstring result = env->NewStringUTF(ccstrResult);
     CHECK_JNI_EXCEPTION_(env, NULL);
     return result;
@@ -1901,6 +1926,7 @@ WB_END

 WB_ENTRY(jobject, WB_GetDefaultArchivePath(JNIEnv* env, jobject wb))
   const char* p = Arguments::get_default_shared_archive_path();
   ThreadToNativeFromVM ttn(thread);
+  Thread::WXExecFromWriteSetter wx_exec;
   jstring path_string = env->NewStringUTF(p);
   CHECK_JNI_EXCEPTION_(env, NULL);
@@ -2105,6 +2131,7 @@ bool WhiteBox::lookup_bool(const char* field_name, oop object) {

 void WhiteBox::register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread,
                                 JNINativeMethod* method_array, int method_count) {
   ResourceMark rm;
   ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
+  Thread::WXExecFromWriteSetter wx_exec;

   //  one by one registration natives for exception catching
   jclass no_such_method_error_klass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string());
@@ -2136,11 +2163,13 @@ void WhiteBox::register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread,
 WB_ENTRY(jint, WB_AddCompilerDirective(JNIEnv* env, jobject o, jstring compDirect))
   // can't be in VM when we call JNI
   ThreadToNativeFromVM ttnfv(thread);
+  Thread::WXExecFromWriteSetter wx_exec;
   const char* dir = env->GetStringUTFChars(compDirect, NULL);
   CHECK_JNI_EXCEPTION_(env, 0);
   int ret;
   {
     ThreadInVMfromNative ttvfn(thread); // back to VM
+    Thread::WXExecVerifier wx_exec;
     ret = DirectivesParser::parse_string(dir, tty);
   }
   env->ReleaseStringUTFChars(compDirect, dir);
@@ -2161,6 +2190,7 @@ WB_ENTRY(jboolean, WB_CheckLibSpecifiesNoexecstack(JNIEnv* env, jobject o, jstri
 #ifdef LINUX
   // Can't be in VM when we call JNI.
   ThreadToNativeFromVM ttnfv(thread);
+  Thread::WXExecFromWriteSetter wx_exec;
   const char* lf = env->GetStringUTFChars(libfile, NULL);
   CHECK_JNI_EXCEPTION_(env, 0);
   ret = (jboolean) ElfFile::specifies_noexecstack(lf);
@@ -2182,6 +2212,7 @@ WB_ENTRY(jint, WB_ValidateCgroup(JNIEnv* env,
   jint ret = 0;
 #ifdef LINUX
   ThreadToNativeFromVM ttnfv(thread);
+  Thread::WXExecFromWriteSetter wx_exec;
   const char* p_cgroups = env->GetStringUTFChars(proc_cgroups, NULL);
   CHECK_JNI_EXCEPTION_(env, 0);
   const char* p_s_cgroup = env->GetStringUTFChars(proc_self_cgroup, NULL);
diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp
index eeeb1ae2808be..771deb75c0ce3 100644
--- a/src/hotspot/share/runtime/deoptimization.cpp
+++ b/src/hotspot/share/runtime/deoptimization.cpp
@@ -2333,6 +2333,7 @@ Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, j
     // This enters VM and may safepoint
     uncommon_trap_inner(thread, trap_request);
   }
+  Thread::WXWriteFromExecSetter wx_write;
   return fetch_unroll_info_helper(thread, exec_mode);
 }
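The deoptimization hunk shows the opposite direction of the same discipline: a thread arriving here from compiled code has been executing JIT pages, i.e. it is in WXExec mode, so before fetch_unroll_info_helper touches VM data structures it is flipped to WXWrite with Thread::WXWriteFromExecSetter. The interfaceSupport changes below bake this into the JRT_* and JNI-entry macros so that every runtime entry point gets it automatically. Roughly what a JRT_ENTRY then expands to (sketch; the stub name is hypothetical and wrapper details such as VM_ENTRY_BASE are elided):

// JRT_ENTRY(void, SomeRuntime::stub(JavaThread* thread)) expands to, in essence:
void SomeRuntime::stub(JavaThread* thread) {
  Thread::WXWriteFromExecSetter __wx_write;  // caller was executing JIT code
  ThreadInVMfromJava __tiv(thread);          // _thread_in_Java -> _thread_in_vm
  // ... body runs in WXWrite mode; destructors restore WXExec on return ...
}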
diff --git a/src/hotspot/share/runtime/interfaceSupport.inline.hpp b/src/hotspot/share/runtime/interfaceSupport.inline.hpp
index cda09b119a663..8664a767c9038 100644
--- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp
+++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp
@@ -304,8 +304,9 @@ class ThreadBlockInVMWithDeadlockCheck : public ThreadStateTransition {
 // from being installed on vm exit in situations where we can't tolerate them.
 // See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
 class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
+  Thread::WXWriteVerifier _wx_write;
  public:
-  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
+  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread), _wx_write() {
     trans_from_java(_thread_in_vm);
   }
   ~ThreadInVMfromJavaNoAsyncException() {
@@ -391,6 +392,7 @@ class RuntimeHistogramElement : public HistogramElement {

 #define JRT_ENTRY(result_type, header) \
   result_type header { \
+    Thread::WXWriteFromExecSetter __wx_write; \
     ThreadInVMfromJava __tiv(thread); \
     VM_ENTRY_BASE(result_type, header, thread) \
     debug_only(VMEntryWrapper __vew;)

@@ -417,6 +419,7 @@ class RuntimeHistogramElement : public HistogramElement {

 #define JRT_ENTRY_NO_ASYNC(result_type, header) \
   result_type header { \
+    Thread::WXWriteFromExecSetter __wx_write; \
     ThreadInVMfromJavaNoAsyncException __tiv(thread); \
     VM_ENTRY_BASE(result_type, header, thread) \
     debug_only(VMEntryWrapper __vew;)

@@ -426,6 +429,7 @@ class RuntimeHistogramElement : public HistogramElement {
 #define JRT_BLOCK_ENTRY(result_type, header) \
   result_type header { \
     TRACE_CALL(result_type, header) \
+    Thread::WXWriteFromExecSetter __wx_write; \
     HandleMarkCleaner __hm(thread);

 #define JRT_BLOCK \
@@ -455,6 +459,7 @@ extern "C" { \
   result_type JNICALL header { \
     JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
     assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
+    Thread::WXWriteFromExecSetter __wx_write; \
     ThreadInVMfromNative __tiv(thread); \
     debug_only(VMNativeEntryWrapper __vew;) \
     VM_ENTRY_BASE(result_type, header, thread)

@@ -465,6 +470,7 @@ extern "C" { \
   result_type JNICALL header { \
     JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
     assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
+    Thread::WXWriteFromExecSetter __wx_write; \
     VM_LEAF_BASE(result_type, header)

@@ -479,6 +485,7 @@ extern "C" { \
 extern "C" { \
   result_type JNICALL header { \
     JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+    Thread::WXWriteFromExecSetter __wx_write; \
     ThreadInVMfromNative __tiv(thread); \
     debug_only(VMNativeEntryWrapper __vew;) \
     VM_ENTRY_BASE(result_type, header, thread)

@@ -488,6 +495,7 @@ extern "C" { \
 extern "C" { \
   result_type JNICALL header { \
     JavaThread* thread = JavaThread::current(); \
+    Thread::WXWriteFromExecSetter __wx_write; \
     ThreadInVMfromNative __tiv(thread); \
     debug_only(VMNativeEntryWrapper __vew;) \
     VM_ENTRY_BASE(result_type, header, thread)

@@ -497,6 +505,7 @@ extern "C" { \
 extern "C" { \
   result_type JNICALL header { \
     VM_Exit::block_if_vm_exited(); \
+    Thread::WXWriteFromExecSetter __wx_write; \
     VM_LEAF_BASE(result_type, header)
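javaCalls.cpp below is the mirror image: JavaCallWrapper is constructed just before the VM jumps into the generated call stub, so its constructor ends with Thread::enable_wx_from_write(WXExec) and its destructor re-enters write mode first thing. In effect, every VM-to-Java call is bracketed like this (sketch only; the wrapper's other work and constructor arguments are elided):

// Sketch of the bracketing JavaCallWrapper now performs -- not the full class.
void call_java_example(JavaThread* thread /* , method, args, ... */) {
  {
    JavaCallWrapper wrapper(/* callee, receiver, result, thread */);
    // The constructor finished with enable_wx_from_write(WXExec): the thread
    // may now execute the call stub, interpreter, and JIT-compiled code.
    // StubRoutines::call_stub()(...);   // generated code runs here
  } // ~JavaCallWrapper: enable_wx_from_exec(WXWrite) before touching VM state
}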
diff --git a/src/hotspot/share/runtime/javaCalls.cpp b/src/hotspot/share/runtime/javaCalls.cpp
index d3a1c8bfb5076..240b77c14c65a 100644
--- a/src/hotspot/share/runtime/javaCalls.cpp
+++ b/src/hotspot/share/runtime/javaCalls.cpp
@@ -78,7 +78,6 @@ JavaCallWrapper::JavaCallWrapper(const methodHandle& callee_method, Handle recei
     }
   }

-
   // Make sure to set the oop's after the thread transition - since we can block there. No one is GC'ing
   // the JavaCallWrapper before the entry frame is on the stack.
   _callee_method = callee_method();
@@ -108,12 +107,16 @@ JavaCallWrapper::JavaCallWrapper(const methodHandle& callee_method, Handle recei
   if(clear_pending_exception) {
     _thread->clear_pending_exception();
   }
+
+  Thread::enable_wx_from_write(WXExec);
 }

 JavaCallWrapper::~JavaCallWrapper() {
   assert(_thread == JavaThread::current(), "must still be the same thread");

+  Thread::enable_wx_from_exec(WXWrite);
+
   // restore previous handle block & Java frame linkage
   JNIHandleBlock *_old_handles = _thread->active_handles();
   _thread->set_active_handles(_handles);
diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp
index 74671fbbf6c2e..91677e04a18e0 100644
--- a/src/hotspot/share/runtime/objectMonitor.cpp
+++ b/src/hotspot/share/runtime/objectMonitor.cpp
@@ -44,7 +44,7 @@
 #include "runtime/osThread.hpp"
 #include "runtime/safepointMechanism.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
+#include "runtime/stubRoutines.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "services/threadService.hpp"
 #include "utilities/dtrace.hpp"
diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp
index 9a7e50f294680..fc47a76d82859 100644
--- a/src/hotspot/share/runtime/os.cpp
+++ b/src/hotspot/share/runtime/os.cpp
@@ -54,7 +54,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
+#include "runtime/stubRoutines.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
 #include "runtime/vm_version.hpp"
@@ -1658,7 +1658,7 @@ char* os::reserve_memory(size_t bytes, MEMFLAGS flags) {
   return result;
 }

-char* os::reserve_memory_with_fd(size_t bytes, int file_desc) {
+char* os::reserve_memory_with_fd(size_t bytes, int file_desc, bool executable) {
   char* result;

   if (file_desc != -1) {
@@ -1669,7 +1669,7 @@ char* os::reserve_memory_with_fd(size_t bytes, int file_desc) {
       MemTracker::record_virtual_memory_reserve_and_commit(result, bytes, CALLER_PC);
     }
   } else {
-    result = pd_reserve_memory(bytes);
+    result = pd_reserve_memory(bytes, executable);
     if (result != NULL) {
       MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC);
     }
@@ -1723,16 +1723,16 @@ void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
   MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
 }

-bool os::uncommit_memory(char* addr, size_t bytes) {
+bool os::uncommit_memory(char* addr, size_t bytes, bool exec) {
   bool res;
   if (MemTracker::tracking_level() > NMT_minimal) {
     Tracker tkr(Tracker::uncommit);
-    res = pd_uncommit_memory(addr, bytes);
+    res = pd_uncommit_memory(addr, bytes, exec);
     if (res) {
       tkr.record((address)addr, bytes);
     }
   } else {
-    res = pd_uncommit_memory(addr, bytes);
+    res = pd_uncommit_memory(addr, bytes, exec);
   }
   return res;
 }
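Only the shared os::current_thread_enable_wx(WXMode) entry point (declared in the os.hpp hunk below) appears in this excerpt; the per-platform current_thread_enable_wx_impl is not shown. On macOS/AArch64 the natural implementation is Apple's per-thread write-protection toggle for MAP_JIT mappings; a hedged sketch of what it presumably looks like, under that assumption:

// Assumption: the BSD/aarch64 side uses Apple's per-thread toggle for
// MAP_JIT pages. pthread_jit_write_protect_np() is a real macOS 11+ API;
// the surrounding function follows the naming pattern in os.hpp.
#include <pthread.h>

static inline void current_thread_enable_wx_impl(WXMode mode) {
  // Nonzero => pages write-protected (executable); zero => pages writable.
  pthread_jit_write_protect_np(mode == WXExec);
}

Correspondingly, code-cache pages would be mmap'ed with MAP_JIT, which is presumably what the new `executable` flag threaded through pd_reserve_memory / reserve_memory_with_fd above is for.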
diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp
index 08f3c1780ea53..731ee4050c720 100644
--- a/src/hotspot/share/runtime/os.hpp
+++ b/src/hotspot/share/runtime/os.hpp
@@ -76,6 +76,11 @@ enum ThreadPriority { // JLS 20.20.1-3
   CriticalPriority = 11      // Critical thread priority
 };

+enum WXMode {
+  WXWrite,
+  WXExec
+};
+
 // Executable parameter flag for os::commit_memory() and
 // os::commit_memory_or_exit().
 const bool ExecMem = true;
@@ -113,7 +118,7 @@ class os: AllStatic {
     _page_sizes[1] = 0; // sentinel
   }

-  static char* pd_reserve_memory(size_t bytes);
+  static char* pd_reserve_memory(size_t bytes, bool executable = false);

   static char* pd_attempt_reserve_memory_at(char* addr, size_t bytes);
   static char* pd_attempt_reserve_memory_at(char* addr, size_t bytes, int file_desc);
@@ -128,7 +133,7 @@ class os: AllStatic {
   static void pd_commit_memory_or_exit(char* addr, size_t size,
                                        size_t alignment_hint,
                                        bool executable, const char* mesg);
-  static bool pd_uncommit_memory(char* addr, size_t bytes);
+  static bool pd_uncommit_memory(char* addr, size_t bytes, bool exec);
   static bool pd_release_memory(char* addr, size_t bytes);

   static char* pd_map_memory(int fd, const char* file_name, size_t file_offset,
@@ -318,7 +323,7 @@ class os: AllStatic {
   // Reserves virtual memory.
   // if file_desc != -1, also attaches the memory to the file.
-  static char* reserve_memory_with_fd(size_t bytes, int file_desc);
+  static char* reserve_memory_with_fd(size_t bytes, int file_desc, bool executable = false);

   // Reserves virtual memory that starts at an address that is aligned to 'alignment'.
   static char* reserve_memory_aligned(size_t size, size_t alignment, int file_desc = -1);
@@ -347,7 +352,7 @@ class os: AllStatic {
   static void commit_memory_or_exit(char* addr, size_t size,
                                     size_t alignment_hint,
                                     bool executable, const char* mesg);
-  static bool uncommit_memory(char* addr, size_t bytes);
+  static bool uncommit_memory(char* addr, size_t bytes, bool exec);
   static bool release_memory(char* addr, size_t bytes);

   // Touch memory pages that cover the memory range from start to end (exclusive)
@@ -901,6 +906,12 @@ class os: AllStatic {
     bool _done;
   };

+  // If the JVM is running in W^X mode, enable write or execute access to
+  // writeable and executable pages. No-op otherwise.
+  static inline void current_thread_enable_wx(WXMode mode) {
+    current_thread_enable_wx_impl(mode);
+  }
+
 #ifndef _WINDOWS
   // Suspend/resume support
   // Protocol:
@@ -970,7 +981,6 @@ class os: AllStatic {
   };
 #endif // !WINDOWS

-
  protected:
   static volatile unsigned int _rand_seed;    // seed for random number generator
   static int _processor_count;                // number of processors
diff --git a/src/hotspot/share/runtime/safepoint.cpp b/src/hotspot/share/runtime/safepoint.cpp
index 365102fee5ea7..d425c578dc23f 100644
--- a/src/hotspot/share/runtime/safepoint.cpp
+++ b/src/hotspot/share/runtime/safepoint.cpp
@@ -803,6 +803,8 @@ void SafepointSynchronize::block(JavaThread *thread) {
 void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) {
   assert(thread->thread_state() == _thread_in_Java, "should come from Java code");
+  Thread::WXWriteFromExecSetter wx_write;
+
   if (log_is_enabled(Info, safepoint, stats)) {
     Atomic::inc(&_nof_threads_hit_polling_page);
   }
diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp
index 0ed5896fc8475..160ce9ebd3363 100644
--- a/src/hotspot/share/runtime/sharedRuntime.cpp
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp
@@ -534,6 +534,7 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thre

 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
+  Thread::WXWriteFromExecSetter wx_write;
   return raw_exception_handler_for_return_address(thread, return_address);
 JRT_END

@@ -1923,6 +1924,8 @@ bool SharedRuntime::should_fixup_call_destination(address destination, address e
 JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
   Method* moop(method);
+  Thread::WXWriteFromExecSetter wx_write;
+
   address entry_point = moop->from_compiled_entry_no_trampoline();

   // It's possible that deoptimization can occur at a call site which hasn't
diff --git a/src/hotspot/share/runtime/signature.hpp b/src/hotspot/share/runtime/signature.hpp
index c4b6167989a2f..0ed57e424a6a7 100644
--- a/src/hotspot/share/runtime/signature.hpp
+++ b/src/hotspot/share/runtime/signature.hpp
@@ -381,10 +381,14 @@ class NativeSignatureIterator: public SignatureIterator {
   void do_type(BasicType type) {
     switch (type) {
     case T_BYTE:
-    case T_SHORT:
-    case T_INT:
     case T_BOOLEAN:
+      pass_byte();  _jni_offset++; _offset++;
+      break;
     case T_CHAR:
+    case T_SHORT:
+      pass_short(); _jni_offset++; _offset++;
+      break;
+    case T_INT:
       pass_int();   _jni_offset++; _offset++;
       break;
     case T_FLOAT:
@@ -418,6 +422,8 @@ class NativeSignatureIterator: public SignatureIterator {
   virtual void pass_long()   = 0;
   virtual void pass_object() = 0;  // objects, arrays, inlines
   virtual void pass_float()  = 0;
+  virtual void pass_byte()   { pass_int(); };
+  virtual void pass_short()  { pass_int(); };
 #ifdef _LP64
   virtual void pass_double() = 0;
 #else
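The NativeSignatureIterator change exists because, on macOS/AArch64, stack-passed native arguments occupy only their natural size instead of a full 8-byte slot, so T_BYTE/T_BOOLEAN and T_CHAR/T_SHORT can no longer be funneled through pass_int(); platforms that still use word-sized slots keep the old behaviour via the default pass_byte()/pass_short() bodies. A small standalone model of the packed rule (natural size plus natural alignment) for a hypothetical (jbyte, jlong, jshort) stack tail:

// Standalone sketch of the Apple arm64 stack-packing rule assumed here:
// each stack argument is aligned to, and occupies, its natural size.
#include <cstdio>

static int place(int offset, int size) {   // align offset up to 'size' (power of two)
  return (offset + size - 1) & ~(size - 1);
}

int main() {
  int off = 0;
  int b = place(off, 1); off = b + 1;       // jbyte  at 0
  int l = place(off, 8); off = l + 8;       // jlong  at 8 (aligned up from 1)
  int s = place(off, 2); off = s + 2;       // jshort at 16
  printf("byte@%d long@%d short@%d, %d bytes used\n", b, l, s, off);
  // A word-slot ABI would place them at 0, 8 and 16 but consume 24 bytes.
  return 0;
}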
diff --git a/src/hotspot/share/runtime/stackOverflow.cpp b/src/hotspot/share/runtime/stackOverflow.cpp
index 01aba3ea2de47..0b706e717038f 100644
--- a/src/hotspot/share/runtime/stackOverflow.cpp
+++ b/src/hotspot/share/runtime/stackOverflow.cpp
@@ -103,7 +103,7 @@ void StackOverflow::create_stack_guard_pages() {
   } else {
     log_warning(os, thread)("Attempt to protect stack guard pages failed ("
       PTR_FORMAT "-" PTR_FORMAT ").", p2i(low_addr), p2i(low_addr + len));
-    if (os::uncommit_memory((char *) low_addr, len)) {
+    if (os::uncommit_memory((char *) low_addr, len, !ExecMem)) {
       log_warning(os, thread)("Attempt to deallocate stack guard pages failed.");
     }
     return;
diff --git a/src/hotspot/share/runtime/stubRoutines.cpp b/src/hotspot/share/runtime/stubRoutines.cpp
index 5fd47ff57ac5b..f382ed2c54cbe 100644
--- a/src/hotspot/share/runtime/stubRoutines.cpp
+++ b/src/hotspot/share/runtime/stubRoutines.cpp
@@ -31,7 +31,7 @@
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/timerTrace.hpp"
 #include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
+#include "runtime/stubRoutines.inline.hpp"
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/vmError.hpp"
@@ -319,6 +319,8 @@ void StubRoutines::initialize2() {

 #ifdef ASSERT

+  os::current_thread_enable_wx(WXExec);
+
 #define TEST_ARRAYCOPY(type) \
   test_arraycopy_func(          type##_arraycopy(),          sizeof(type)); \
   test_arraycopy_func(          type##_disjoint_arraycopy(), sizeof(type)); \
@@ -399,6 +401,8 @@ void StubRoutines::initialize2() {
   test_safefetchN();
 #endif

+  os::current_thread_enable_wx(WXWrite);
+
 #endif
 }
diff --git a/src/hotspot/share/runtime/stubRoutines.hpp b/src/hotspot/share/runtime/stubRoutines.hpp
index 6af4caaaa144f..5e84e9aafbe08 100644
--- a/src/hotspot/share/runtime/stubRoutines.hpp
+++ b/src/hotspot/share/runtime/stubRoutines.hpp
@@ -487,24 +487,4 @@ class StubRoutines: AllStatic {
   static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count);
 };

-// Safefetch allows to load a value from a location that's not known
-// to be valid. If the load causes a fault, the error value is returned.
-inline int SafeFetch32(int* adr, int errValue) {
-  assert(StubRoutines::SafeFetch32_stub(), "stub not yet generated");
-  return StubRoutines::SafeFetch32_stub()(adr, errValue);
-}
-inline intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) {
-  assert(StubRoutines::SafeFetchN_stub(), "stub not yet generated");
-  return StubRoutines::SafeFetchN_stub()(adr, errValue);
-}
-
-
-// returns true if SafeFetch32 and SafeFetchN can be used safely (stubroutines are already generated)
-inline bool CanUseSafeFetch32() {
-  return StubRoutines::SafeFetch32_stub() ? true : false;
-}
-
-inline bool CanUseSafeFetchN() {
-  return StubRoutines::SafeFetchN_stub() ? true : false;
-}
 #endif // SHARE_RUNTIME_STUBROUTINES_HPP
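SafeFetch32/SafeFetchN execute a generated stub, so under W^X a caller sitting in WXWrite mode must be flipped to WXExec around the call. The inline therefore now needs Thread, which stubRoutines.hpp presumably cannot include without a cycle, hence the move into the new stubRoutines.inline.hpp shown next. Call sites keep the same guarded pattern; a usage sketch:

// Typical call-site pattern for the relocated inlines; the
// WXExecFromWriteSetter inside SafeFetch32/SafeFetchN now handles the
// mode switch. probe_word() is a hypothetical caller.
intptr_t probe_word(intptr_t* maybe_bad_addr) {
  if (CanUseSafeFetchN()) {                 // stub already generated?
    return SafeFetchN(maybe_bad_addr, -1);  // a faulting load returns -1 instead of crashing
  }
  return -1;                                // too early in startup: give up safely
}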
diff --git a/src/hotspot/share/runtime/stubRoutines.inline.hpp b/src/hotspot/share/runtime/stubRoutines.inline.hpp
new file mode 100644
index 0000000000000..4ce59480300fb
--- /dev/null
+++ b/src/hotspot/share/runtime/stubRoutines.inline.hpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_RUNTIME_STUBROUTINES_INLINE_HPP
+#define SHARE_RUNTIME_STUBROUTINES_INLINE_HPP
+
+#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.hpp"
+
+// Safefetch allows to load a value from a location that's not known
+// to be valid. If the load causes a fault, the error value is returned.
+inline int SafeFetch32(int* adr, int errValue) {
+  assert(StubRoutines::SafeFetch32_stub(), "stub not yet generated");
+  Thread::WXExecFromWriteSetter wx_exec;
+  return StubRoutines::SafeFetch32_stub()(adr, errValue);
+}
+inline intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) {
+  assert(StubRoutines::SafeFetchN_stub(), "stub not yet generated");
+  Thread::WXExecFromWriteSetter wx_exec;
+  return StubRoutines::SafeFetchN_stub()(adr, errValue);
+}
+
+
+// returns true if SafeFetch32 and SafeFetchN can be used safely (stubroutines are already generated)
+inline bool CanUseSafeFetch32() {
+  return StubRoutines::SafeFetch32_stub() ? true : false;
+}
+
+inline bool CanUseSafeFetchN() {
+  return StubRoutines::SafeFetchN_stub() ? true : false;
+}
+
+#endif // SHARE_RUNTIME_STUBROUTINES_INLINE_HPP
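The thread.cpp/thread.hpp changes that follow add the bookkeeping behind all of the helpers used so far: every thread starts in WXWrite (init_wx() at thread start, and at os::init() time for the main thread), and in debug builds _wx_init/_wx_state track the current mode so that verify_wx_transition catches unbalanced or mis-ordered setters. A standalone model of the checked state machine (names mirror the patch; this is an illustration, not HotSpot code):

#include <cassert>

enum WXMode { WXWrite, WXExec };

struct ThreadModel {
  bool   wx_init = false;
  WXMode wx_state = WXWrite;

  void init_wx() {
    assert(!wx_init && "second init");
    wx_init = true;
    wx_state = WXWrite;                       // threads start writable
  }
  void transition(WXMode from, WXMode to) {
    assert(wx_init && "no init");
    assert(wx_state == from && "wrong state");
    wx_state = to;
  }
};

int main() {
  ThreadModel t;
  t.init_wx();
  t.transition(WXWrite, WXExec);   // WXExecFromWriteSetter constructor
  t.transition(WXExec, WXWrite);   // ... and its destructor
  // t.transition(WXExec, WXWrite); // would assert: already back in WXWrite
  return 0;
}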
diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp
index e7771575c3dfe..c0d12abacc401 100644
--- a/src/hotspot/share/runtime/thread.cpp
+++ b/src/hotspot/share/runtime/thread.cpp
@@ -323,6 +323,8 @@ Thread::Thread() {
     // If the main thread creates other threads before the barrier set that is an error.
     assert(Thread::current_or_null() == NULL, "creating thread before barrier set");
   }
+
+  DEBUG_ONLY(_wx_init = false);
 }

 void Thread::initialize_thread_current() {
@@ -380,6 +382,8 @@ void Thread::call_run() {

   register_thread_stack_with_NMT();

+  this->init_wx();
+
   JFR_ONLY(Jfr::on_thread_start(this);)

   log_debug(os, thread)("Thread " UINTX_FORMAT " stack dimensions: "
@@ -2654,6 +2658,8 @@ void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread

 // Note only the native==>VM/Java barriers can call this function and when
 // thread state is _thread_in_native_trans.
 void JavaThread::check_special_condition_for_native_trans(JavaThread *thread) {
+  Thread::WXWriteFromExecSetter wx_write;
+
   check_safepoint_and_suspend_for_native_trans(thread);

   // After returning from native, it could be that the stack frames are not
@@ -2678,6 +2684,8 @@ void JavaThread::check_special_condition_for_native_trans(JavaThread *thread) {
 void JavaThread::check_special_condition_for_native_trans_and_transition(JavaThread *thread) {
   check_special_condition_for_native_trans(thread);

+  Thread::WXWriteFromExecSetter wx_write;
+
   // Finish the transition
   thread->set_thread_state(_thread_in_Java);
@@ -3609,6 +3617,8 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
   // Initialize the os module
   os::init();

+  os::current_thread_enable_wx(WXWrite);
+
   // Record VM creation timing statistics
   TraceVmCreationTime create_vm_timer;
   create_vm_timer.start();
@@ -3712,6 +3722,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
   main_thread->record_stack_base_and_size();
   main_thread->register_thread_stack_with_NMT();
   main_thread->set_active_handles(JNIHandleBlock::allocate_block());
+  main_thread->init_wx();

   if (!main_thread->set_as_starting_thread()) {
     vm_shutdown_during_initialization(
@@ -4144,6 +4155,7 @@ void Threads::shutdown_vm_agents() {
     if (unload_entry != NULL) {
       JavaThread* thread = JavaThread::current();
       ThreadToNativeFromVM ttn(thread);
+      Thread::WXExecFromWriteSetter wx_exec;
       HandleMark hm(thread);
       (*unload_entry)(&main_vm);
     }
@@ -4163,6 +4175,7 @@ void Threads::create_vm_init_libraries() {
     // Invoke the JVM_OnLoad function
     JavaThread* thread = JavaThread::current();
     ThreadToNativeFromVM ttn(thread);
+    Thread::WXExecFromWriteSetter wx_exec;
     HandleMark hm(thread);
     jint err = (*on_load_entry)(&main_vm, agent->options(), NULL);
     if (err != JNI_OK) {
diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp
index cb6eaa024bcb1..1b6e2c1fffd96 100644
--- a/src/hotspot/share/runtime/thread.hpp
+++ b/src/hotspot/share/runtime/thread.hpp
@@ -856,6 +856,81 @@ class Thread: public ThreadShadow {
   static void SpinRelease(volatile int * Lock);
   static void muxAcquire(volatile intptr_t * Lock, const char * Name);
   static void muxRelease(volatile intptr_t * Lock);
+
+private:
+#ifdef ASSERT
+  bool _wx_init;
+  WXMode _wx_state;
+
+  static inline void verify_wx_init(WXMode state) {
+    Thread* current = Thread::current();
+    assert(!current->_wx_init, "second init");
+    current->_wx_init = true;
+    current->_wx_state = state;
+  }
+  static inline void verify_wx_transition(WXMode from, WXMode to) {
+    Thread* current = Thread::current();
+    assert(current->_wx_init, "no init");
+    assert(current->_wx_state == from, "wrong state");
+    current->_wx_init = true;
+    current->_wx_state = to;
+  }
+  static inline void verify_wx_state(WXMode now) {
+    Thread* current = Thread::current();
+    assert(current->_wx_init, "no init");
+    assert(current->_wx_state == now, "wrong state");
+  }
+#else
+  static inline void verify_wx_init(WXMode state) { }
+  static inline void verify_wx_transition(WXMode from, WXMode to) { }
+  static inline void verify_wx_state(WXMode now) { }
+#endif // ASSERT
+
+public:
+  void init_wx() {
+    WXMode init_mode = WXWrite;
+    verify_wx_init(init_mode);
+    os::current_thread_enable_wx(init_mode);
+  }
+  static inline void enable_wx_from_write(WXMode to) {
+    verify_wx_transition(WXWrite, to);
+    os::current_thread_enable_wx(to);
+  }
+  static inline void enable_wx_from_exec(WXMode to) {
+    verify_wx_transition(WXExec, to);
+    os::current_thread_enable_wx(to);
+  }
+
+  class WXWriteFromExecSetter {
+  public:
+    WXWriteFromExecSetter() {
+      enable_wx_from_exec(WXWrite);
+    }
+    ~WXWriteFromExecSetter() {
+      enable_wx_from_write(WXExec);
+    }
+  };
+
+  class WXExecFromWriteSetter {
+  public:
+    WXExecFromWriteSetter() {
+      enable_wx_from_write(WXExec);
+    }
+    ~WXExecFromWriteSetter() {
+      enable_wx_from_exec(WXWrite);
+    }
+  };
+
+  class WXWriteVerifier {
+  public:
+    WXWriteVerifier() {
+      verify_wx_state(WXWrite);
+    }
+  };
+  class WXExecVerifier {
+  public:
+    WXExecVerifier() {
+      verify_wx_state(WXExec);
+    }
+  };
 };

 // Inline implementation of Thread::current()
diff --git a/src/hotspot/share/services/diagnosticCommand.cpp b/src/hotspot/share/services/diagnosticCommand.cpp
index e884a3f3580cd..e0032886e0286 100644
--- a/src/hotspot/share/services/diagnosticCommand.cpp
+++ b/src/hotspot/share/services/diagnosticCommand.cpp
@@ -1076,6 +1076,7 @@ void DebugOnCmdStartDCmd::execute(DCmdSource source, TRAPS) {
   JavaThread* thread = THREAD->as_Java_thread();
   jthread jt = JNIHandles::make_local(thread->threadObj());
   ThreadToNativeFromVM ttn(thread);
+  Thread::WXExecFromWriteSetter wx_exec;
   const char *error = "Could not find jdwp agent.";

   if (!dvc_start_ptr) {
diff --git a/src/hotspot/share/utilities/vmError.cpp b/src/hotspot/share/utilities/vmError.cpp
index 4a9ecebbec946..19d0bfb1d85af 100644
--- a/src/hotspot/share/utilities/vmError.cpp
+++ b/src/hotspot/share/utilities/vmError.cpp
@@ -42,6 +42,7 @@
 #include "runtime/init.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointMechanism.hpp"
+#include "runtime/stubRoutines.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
 #include "runtime/vmThread.hpp"
diff --git a/src/java.base/macosx/native/libjli/java_md_macosx.m b/src/java.base/macosx/native/libjli/java_md_macosx.m
index cf55e05f35d35..d6b13c4afdbaf 100644
--- a/src/java.base/macosx/native/libjli/java_md_macosx.m
+++ b/src/java.base/macosx/native/libjli/java_md_macosx.m
@@ -210,6 +210,8 @@
     preferredJVM = "client";
 #elif defined(__x86_64__)
     preferredJVM = "server";
+#elif defined(__aarch64__)
+    preferredJVM = "server";
 #else
 #error "Unknown architecture - needs definition"
 #endif
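With the RAII classes above in place, the pairing rules used throughout this patch fall out mechanically: setters nest strictly (each scope returns the thread to the mode it found), and verifiers document a mode an enclosing scope has already established, as in the WB_AddCompilerDirective hunk earlier. A condensed example of legal nesting (sketch; do_vm_work() and run_generated_stub() are hypothetical):

void nested_example(JavaThread* thread) {
  // state on entry: WXWrite (VM code)
  {
    Thread::WXExecFromWriteSetter wx_exec;     // WXWrite -> WXExec
    // run_generated_stub();
    {
      Thread::WXWriteFromExecSetter wx_write;  // WXExec -> WXWrite
      Thread::WXWriteVerifier check;           // asserts WXWrite, changes nothing
      // do_vm_work();
    }                                          // back to WXExec
  }                                            // back to WXWrite
}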
"com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + {"mac-x64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, + {"mac-x64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + + {"macosx-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, + {"macosx-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, + + {"mac-aarch64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, + {"mac-aarch64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, {"aix-ppc64", "com.sun.jdi.CommandLineLaunch", "dt_shmem"}, {"aix-ppc64", "com.sun.jdi.RawCommandLineLaunch", "dt_shmem"}, @@ -629,6 +639,8 @@ class CheckedFeatures { {"linux-s390x", "dt_shmem"}, {"macosx-amd64", "dt_shmem"}, {"mac-x64", "dt_shmem"}, + {"macosx-aarch64", "dt_shmem"}, + {"mac-aarch64", "dt_shmem"}, {"aix-ppc64", "dt_shmem"}, }; }