Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

8310644: Make panama memory segment close use async handshakes #16792

Closed
wants to merge 10 commits into from
144 changes: 72 additions & 72 deletions src/hotspot/share/prims/scopedMemoryAccess.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,54 +35,72 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframe.inline.hpp"

class CloseScopedMemoryFindOopClosure : public OopClosure {
oop _deopt;
bool _found;

public:
CloseScopedMemoryFindOopClosure(jobject deopt) :
_deopt(JNIHandles::resolve(deopt)),
_found(false) {}

template <typename T>
void do_oop_work(T* p) {
if (_found) {
return;
static bool is_in_scoped_access(JavaThread* jt, oop session) {
const int max_critical_stack_depth = 10;
int depth = 0;
for (vframeStream stream(jt); !stream.at_end(); stream.next()) {
Method* m = stream.method();
if (m->is_scoped()) {
StackValueCollection* locals = stream.asJavaVFrame()->locals();
for (int i = 0; i < locals->size(); i++) {
StackValue* var = locals->at(i);
if (var->type() == T_OBJECT) {
if (var->get_obj() == session) {
assert(depth < max_critical_stack_depth, "can't have more than %d critical frames", max_critical_stack_depth);
return true;
}
}
}
break;
}
if (RawAccess<>::oop_load(p) == _deopt) {
_found = true;
depth++;
#ifndef ASSERT
if (depth >= max_critical_stack_depth) {
break;
}
#endif
}

virtual void do_oop(oop* p) {
do_oop_work(p);
}
return false;
}

class ScopedAsyncExceptionHandshake : public AsyncExceptionHandshake {
OopHandle _session;

virtual void do_oop(narrowOop* p) {
do_oop_work(p);
public:
ScopedAsyncExceptionHandshake(OopHandle& session, OopHandle& error)
: AsyncExceptionHandshake(error),
_session(session) {}

~ScopedAsyncExceptionHandshake() {
_session.release(Universe::vm_global());
}

bool found() {
return _found;
virtual void do_thread(Thread* thread) {
JavaThread* jt = JavaThread::cast(thread);
ResourceMark rm;
if (is_in_scoped_access(jt, _session.resolve())) {
// Throw exception to unwind out from the scoped access
AsyncExceptionHandshake::do_thread(thread);
}
}
};

class CloseScopedMemoryClosure : public HandshakeClosure {
jobject _deopt;
jobject _session;
jobject _error;

public:
jboolean _found;

CloseScopedMemoryClosure(jobject deopt, jobject exception)
CloseScopedMemoryClosure(jobject session, jobject error)
: HandshakeClosure("CloseScopedMemory")
, _deopt(deopt)
, _found(false) {}
, _session(session)
, _error(error) {}

void do_thread(Thread* thread) {

JavaThread* jt = JavaThread::cast(thread);

if (!jt->has_last_Java_frame()) {
// No frames; not in a scoped memory access
return;
}

Expand All @@ -97,44 +115,27 @@ class CloseScopedMemoryClosure : public HandshakeClosure {
}

ResourceMark rm;
if (_deopt != nullptr && last_frame.is_compiled_frame() && last_frame.can_be_deoptimized()) {
CloseScopedMemoryFindOopClosure cl(_deopt);
CompiledMethod* cm = last_frame.cb()->as_compiled_method();

/* FIXME: this doesn't work if reachability fences are violated by C2
last_frame.oops_do(&cl, nullptr, &register_map);
if (cl.found()) {
//Found the deopt oop in a compiled method; deoptimize.
Deoptimization::deoptimize(jt, last_frame);
}
so... we unconditionally deoptimize, for now: */
if (last_frame.is_compiled_frame() && last_frame.can_be_deoptimized()) {
// FIXME: we would like to conditionally deoptimize only if the corresponding
// _session is reachable from the frame, but reachabilityFence doesn't currently
// work the way it should. Therefore we deopt unconditionally for now.
Deoptimization::deoptimize(jt, last_frame);
}

const int max_critical_stack_depth = 10;
int depth = 0;
for (vframeStream stream(jt); !stream.at_end(); stream.next()) {
Method* m = stream.method();
if (m->is_scoped()) {
StackValueCollection* locals = stream.asJavaVFrame()->locals();
for (int i = 0; i < locals->size(); i++) {
StackValue* var = locals->at(i);
if (var->type() == T_OBJECT) {
if (var->get_obj() == JNIHandles::resolve(_deopt)) {
assert(depth < max_critical_stack_depth, "can't have more than %d critical frames", max_critical_stack_depth);
_found = true;
return;
}
}
}
break;
}
depth++;
#ifndef ASSERT
if (depth >= max_critical_stack_depth) {
break;
}
#endif
if (jt->has_async_exception_condition()) {
// Target thread just about to throw an async exception using async handshakes,
// we will then unwind out from the scoped memory access.
return;
}

if (is_in_scoped_access(jt, JNIHandles::resolve(_session))) {
// We have found that the target thread is inside of a scoped access.
// An asynchronous handshake is sent to the target thread, telling it
// to throw an exception, which will unwind the target thread out from
// the scoped access.
OopHandle session(Universe::vm_global(), JNIHandles::resolve(_session));
OopHandle error(Universe::vm_global(), JNIHandles::resolve(_error));
jt->install_async_exception(new ScopedAsyncExceptionHandshake(session, error));
}
}
};
Expand All @@ -146,34 +147,33 @@ class CloseScopedMemoryClosure : public HandshakeClosure {
class annotated with the '@Scoped' annotation), and whose local variables mention the session being
closed (session), this method installs an asynchronous exception (error) on that thread via an async
handshake, unwinding it out of the scoped access before the session's backing memory is freed.
JVM_ENTRY(jboolean, ScopedMemoryAccess_closeScope(JNIEnv *env, jobject receiver, jobject deopt, jobject exception))
CloseScopedMemoryClosure cl(deopt, exception);
JVM_ENTRY(void, ScopedMemoryAccess_closeScope(JNIEnv *env, jobject receiver, jobject session, jobject error))
CloseScopedMemoryClosure cl(session, error);
Handshake::execute(&cl);
return !cl._found;
JVM_END

/// JVM_RegisterUnsafeMethods

#define PKG_MISC "Ljdk/internal/misc/"
#define PKG_FOREIGN "Ljdk/internal/foreign/"

#define MEMACCESS "ScopedMemoryAccess"
#define SCOPE PKG_FOREIGN "MemorySessionImpl;"
#define SCOPED_SESSION PKG_FOREIGN "MemorySessionImpl;"
#define SCOPED_ERROR PKG_MISC "ScopedMemoryAccess$ScopedAccessError;"

#define CC (char*) /*cast a literal from (const char*)*/
#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)

static JNINativeMethod jdk_internal_misc_ScopedMemoryAccess_methods[] = {
{CC "closeScope0", CC "(" SCOPE ")Z", FN_PTR(ScopedMemoryAccess_closeScope)},
{CC "closeScope0", CC "(" SCOPED_SESSION SCOPED_ERROR ")V", FN_PTR(ScopedMemoryAccess_closeScope)},
};

#undef CC
#undef FN_PTR

#undef PKG_MISC
#undef PKG_FOREIGN
#undef MEMACCESS
#undef SCOPE
#undef SCOPED_SESSION
#undef SCOPED_ERROR

// This function is exported, used by NativeLookup.

Expand Down
81 changes: 42 additions & 39 deletions src/hotspot/share/prims/unsafe.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -70,13 +70,36 @@
( arrayOopDesc::header_size(T_DOUBLE) * HeapWordSize \
+ ((julong)max_jint * sizeof(double)) )


#define UNSAFE_ENTRY(result_type, header) \
JVM_ENTRY(static result_type, header)

#define UNSAFE_LEAF(result_type, header) \
JVM_LEAF(static result_type, header)

// Note that scoped accesses (cf. scopedMemoryAccess.cpp) can install
// an async handshake on the entry to an Unsafe method. When that happens,
// it is expected that we are not allowed to touch the underlying memory
// that might have gotten unmapped. Therefore, we check at the entry
// to unsafe functions, if we have such async exception conditions,
// and return immediately if that is the case.
//
// We also use NoSafepointVerifier to block potential safepoints.
// It would be problematic if an async exception handshake were installed later on
// during another safepoint in the function, but before the memory access happens,
// as the memory will be freed after the handshake is installed. We must notice
// the installed handshake and return early before doing the memory access to prevent
// accesses to freed memory.
//
// Note also that we MUST do a scoped memory access in the VM (or Java) thread
// state. Since we rely on a handshake to check for threads that are accessing
// scoped memory, and we need the handshaking thread to wait until we get to a
// safepoint, in order to make sure we are not in the middle of accessing memory
// that is about to be freed. (i.e. there can be no UNSAFE_LEAF_SCOPED)
#define UNSAFE_ENTRY_SCOPED(result_type, header) \
JVM_ENTRY(static result_type, header) \
if (thread->has_async_exception_condition()) {return (result_type)0;} \
NoSafepointVerifier nsv;

#define UNSAFE_END JVM_END


Expand Down Expand Up @@ -279,11 +302,11 @@ UNSAFE_ENTRY(jobject, Unsafe_GetUncompressedObject(JNIEnv *env, jobject unsafe,

#define DEFINE_GETSETOOP(java_type, Type) \
\
UNSAFE_ENTRY(java_type, Unsafe_Get##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
UNSAFE_ENTRY_SCOPED(java_type, Unsafe_Get##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
return MemoryAccess<java_type>(thread, obj, offset).get(); \
} UNSAFE_END \
\
UNSAFE_ENTRY(void, Unsafe_Put##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
UNSAFE_ENTRY_SCOPED(void, Unsafe_Put##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
MemoryAccess<java_type>(thread, obj, offset).put(x); \
} UNSAFE_END \
\
Expand All @@ -302,11 +325,11 @@ DEFINE_GETSETOOP(jdouble, Double);

#define DEFINE_GETSETOOP_VOLATILE(java_type, Type) \
\
UNSAFE_ENTRY(java_type, Unsafe_Get##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
UNSAFE_ENTRY_SCOPED(java_type, Unsafe_Get##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
return MemoryAccess<java_type>(thread, obj, offset).get_volatile(); \
} UNSAFE_END \
\
UNSAFE_ENTRY(void, Unsafe_Put##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
UNSAFE_ENTRY_SCOPED(void, Unsafe_Put##Type##Volatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, java_type x)) { \
MemoryAccess<java_type>(thread, obj, offset).put_volatile(x); \
} UNSAFE_END \
\
Expand Down Expand Up @@ -362,7 +385,7 @@ UNSAFE_LEAF(void, Unsafe_FreeMemory0(JNIEnv *env, jobject unsafe, jlong addr)) {
os::free(p);
} UNSAFE_END

UNSAFE_ENTRY(void, Unsafe_SetMemory0(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong size, jbyte value)) {
UNSAFE_ENTRY_SCOPED(void, Unsafe_SetMemory0(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong size, jbyte value)) {
size_t sz = (size_t)size;

oop base = JNIHandles::resolve(obj);
Expand All @@ -371,7 +394,7 @@ UNSAFE_ENTRY(void, Unsafe_SetMemory0(JNIEnv *env, jobject unsafe, jobject obj, j
Copy::fill_to_memory_atomic(p, sz, value);
} UNSAFE_END

UNSAFE_ENTRY(void, Unsafe_CopyMemory0(JNIEnv *env, jobject unsafe, jobject srcObj, jlong srcOffset, jobject dstObj, jlong dstOffset, jlong size)) {
UNSAFE_ENTRY_SCOPED(void, Unsafe_CopyMemory0(JNIEnv *env, jobject unsafe, jobject srcObj, jlong srcOffset, jobject dstObj, jlong dstOffset, jlong size)) {
size_t sz = (size_t)size;

oop srcp = JNIHandles::resolve(srcObj);
Expand All @@ -390,39 +413,19 @@ UNSAFE_ENTRY(void, Unsafe_CopyMemory0(JNIEnv *env, jobject unsafe, jobject srcOb
}
} UNSAFE_END

// This function is a leaf since if the source and destination are both in native memory
// the copy may potentially be very large, and we don't want to disable GC if we can avoid it.
// If either source or destination (or both) are on the heap, the function will enter VM using
// JVM_ENTRY_FROM_LEAF
UNSAFE_LEAF(void, Unsafe_CopySwapMemory0(JNIEnv *env, jobject unsafe, jobject srcObj, jlong srcOffset, jobject dstObj, jlong dstOffset, jlong size, jlong elemSize)) {
UNSAFE_ENTRY_SCOPED(void, Unsafe_CopySwapMemory0(JNIEnv *env, jobject unsafe, jobject srcObj, jlong srcOffset, jobject dstObj, jlong dstOffset, jlong size, jlong elemSize)) {
size_t sz = (size_t)size;
size_t esz = (size_t)elemSize;

if (srcObj == nullptr && dstObj == nullptr) {
// Both src & dst are in native memory
address src = (address)srcOffset;
address dst = (address)dstOffset;

{
JavaThread* thread = JavaThread::thread_from_jni_environment(env);
GuardUnsafeAccess guard(thread);
Copy::conjoint_swap(src, dst, sz, esz);
}
} else {
// At least one of src/dst are on heap, transition to VM to access raw pointers

JVM_ENTRY_FROM_LEAF(env, void, Unsafe_CopySwapMemory0) {
oop srcp = JNIHandles::resolve(srcObj);
oop dstp = JNIHandles::resolve(dstObj);
oop srcp = JNIHandles::resolve(srcObj);
oop dstp = JNIHandles::resolve(dstObj);

address src = (address)index_oop_from_field_offset_long(srcp, srcOffset);
address dst = (address)index_oop_from_field_offset_long(dstp, dstOffset);
address src = (address)index_oop_from_field_offset_long(srcp, srcOffset);
address dst = (address)index_oop_from_field_offset_long(dstp, dstOffset);

{
GuardUnsafeAccess guard(thread);
Copy::conjoint_swap(src, dst, sz, esz);
}
} JVM_END
{
GuardUnsafeAccess guard(thread);
Copy::conjoint_swap(src, dst, sz, esz);
}
} UNSAFE_END

Expand Down Expand Up @@ -718,13 +721,13 @@ UNSAFE_ENTRY(jobject, Unsafe_CompareAndExchangeReference(JNIEnv *env, jobject un
return JNIHandles::make_local(THREAD, res);
} UNSAFE_END

UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
UNSAFE_ENTRY_SCOPED(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
oop p = JNIHandles::resolve(obj);
volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
return Atomic::cmpxchg(addr, e, x);
} UNSAFE_END

UNSAFE_ENTRY(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
UNSAFE_ENTRY_SCOPED(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
oop p = JNIHandles::resolve(obj);
volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
return Atomic::cmpxchg(addr, e, x);
Expand All @@ -739,13 +742,13 @@ UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetReference(JNIEnv *env, jobject unsafe
return ret == e;
} UNSAFE_END

UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
UNSAFE_ENTRY_SCOPED(jboolean, Unsafe_CompareAndSetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
oop p = JNIHandles::resolve(obj);
volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
return Atomic::cmpxchg(addr, e, x) == e;
} UNSAFE_END

UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
UNSAFE_ENTRY_SCOPED(jboolean, Unsafe_CompareAndSetLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
oop p = JNIHandles::resolve(obj);
volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
return Atomic::cmpxchg(addr, e, x) == e;
Expand Down
8 changes: 0 additions & 8 deletions src/hotspot/share/runtime/interfaceSupport.inline.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -406,14 +406,6 @@ extern "C" { \
VM_LEAF_BASE(result_type, header)


#define JVM_ENTRY_FROM_LEAF(env, result_type, header) \
{ { \
JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
ThreadInVMfromNative __tiv(thread); \
debug_only(VMNativeEntryWrapper __vew;) \
VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)


#define JVM_END } }

#endif // SHARE_RUNTIME_INTERFACESUPPORT_INLINE_HPP
2 changes: 1 addition & 1 deletion src/hotspot/share/runtime/javaThread.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -225,9 +225,9 @@ class JavaThread: public Thread {
friend class AsyncExceptionHandshake;
friend class HandshakeState;

void install_async_exception(AsyncExceptionHandshake* aec = nullptr);
void handle_async_exception(oop java_throwable);
public:
void install_async_exception(AsyncExceptionHandshake* aec = nullptr);
bool has_async_exception_condition();
inline void set_pending_unsafe_access_error();
static void send_async_exception(JavaThread* jt, oop java_throwable);
Expand Down
Loading